diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..f63309d9a8b23d024723eea7a864ed044f7cd8c0 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,39 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+documentation/docs/get-started/llm-studio-home-screen.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/datasets/configure-dataset.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/datasets/import-h2o-drive-dataset.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/datasets/import-kaggle-dataset.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/datasets/import-s3-bucket.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/datasets/merge-datasets.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/datasets/upload-dataset.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/datasets/upload-local-file.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/datasets/view-imported-dataset.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/experiments/best-validation-sample.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/experiments/charts-tab.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/experiments/chat-tab.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/experiments/compare-experiments.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/experiments/delete-experiment.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/experiments/export-model-to-huggingface.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/experiments/local-llm-judge-logs.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/experiments/run-experiment.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/experiments/set-endpoint.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/experiments/set-metric-model.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/experiments/stop-experiment.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/experiments/view-experiments.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/guide/experiments/worst-validation-sample.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/tutorials/tutorial-1a/configure-dataset.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/tutorials/tutorial-1a/evaluate-experiment.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/tutorials/tutorial-1a/import-dataset.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/tutorials/tutorial-2a/configure-dataset.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/tutorials/tutorial-2a/evaluate-experiment.png filter=lfs diff=lfs merge=lfs -text
+documentation/docs/tutorials/tutorial-2a/import-dataset.png filter=lfs diff=lfs merge=lfs -text
+static/screenshot-1.png filter=lfs diff=lfs merge=lfs -text
+static/screenshot-2.png filter=lfs diff=lfs merge=lfs -text
+static/screenshot-3.png filter=lfs diff=lfs merge=lfs -text
+static/screenshot-4.png filter=lfs diff=lfs merge=lfs -text
+static/screenshot-5.png filter=lfs diff=lfs merge=lfs -text
+static/screenshot-6.png filter=lfs diff=lfs merge=lfs -text
+static/screenshot-7.png filter=lfs diff=lfs merge=lfs -text
+static/screenshot-9.png filter=lfs diff=lfs merge=lfs -text
diff --git a/documentation/.gitignore b/documentation/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..a0023f54f27e067e063e5ab23dc6f160b5a7a4d5
--- /dev/null
+++ b/documentation/.gitignore
@@ -0,0 +1,17 @@
+node_modules
+tmp
+
+# Generated files
+.docusaurus
+.cache-loader
+
+# Misc
+.DS_Store
+.env.local
+.env.development.local
+.env.test.local
+.env.production.local
+
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
diff --git a/documentation/README.md b/documentation/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..cd23f993847d2620a1f901654dc54a3ca31809ce
--- /dev/null
+++ b/documentation/README.md
@@ -0,0 +1,98 @@
+# H2O LLM Studio Documentation
+
+- The LLM Studio documentation is built using [Makersaurus](https://github.com/h2oai/makersaurus/pkgs/npm/makersaurus), a thin wrapper around Facebook's Docusaurus.
+- The documentation is displayed at [https://docs.h2o.ai/h2o-llm-studio/](https://docs.h2o.ai/h2o-llm-studio/).
+
+To view, edit, and cut a version of the documentation, the following is required:
+
+- Node.js version 16.14+ (you can check your version by running `node -v`). Use nvm to manage multiple Node versions installed on a single machine.
+
+- To install Node.js and npm with nvm on Mac or Ubuntu, run `curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.0/install.sh | bash` and then `nvm install node`.
+
+- Makersaurus (the H2O-themed documentation site) is hosted on H2O's GitHub npm registry. npm must authenticate to the registry before you can download Makersaurus. Follow the three steps below to authenticate.
+
+ If you have already installed `@h2oai/ui-kit` or any other private `@h2oai`-prefixed npm package you can skip this step.
+
+  **Step 1:** Create a "classic" [personal access token](https://github.com/settings/tokens) (PAT) on GitHub. Note that you only need to enable the `read:packages` scope for this token.
+
+ **Step 2:** Add the PAT to your `~/.npmrc` file. Create this file if it doesn't exist yet.
+ ```
+ @h2oai:registry=https://npm.pkg.github.com/
+ //npm.pkg.github.com/:_authToken=YOUR-GENERATED-TOKEN
+ ```
+ **Step 3:** Verify that it worked by running the following command:
+ ```
+ npm whoami --registry=https://npm.pkg.github.com
+ ```
+  If this command returns your username, you can proceed to the next step. If you get an error, you are not yet authenticated. You might find the [GitHub registry docs](https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-npm-registry#authenticating-with-a-personal-access-token) helpful for debugging.
+
+### Documentation structure
+
+
+```
+├── documentation
+│ ├── docs
+│ ├── tmp
+│ ├── makersaurus.config.js
+│ ├── sidebars.js
+│ ├── package.json
+│ ├── package-lock.json
+```
+
+- `documentation/docs`: Contains the Markdown documentation files for the next documentation version.
+Customize the order of the docs sidebar in `sidebars.js`.
+- `documentation/tmp`: Temporary files generated by Makersaurus. Do not edit these files.
+- `documentation/makersaurus.config.js`: Makersaurus [config file](https://h2oai.github.io/makersaurus/api/config)
+- `documentation/sidebars.js`: Sidebar configuration file
+- `documentation/package.json`: npm configuration file
+- `documentation/package-lock.json`: Generated by npm. Do not edit this file.
+
+
+### Edit locally
+
+To set up the local environment to view and edit the next or past documentation versions ([first, ensure you install
+Node.js](#requirements)):
+
+1. Enter the documentation folder
+
+`cd documentation`
+
+2. Install dependencies
+
+`npm install`
+
+3. Start Makersaurus
+
+`npm start`
+
+- **Next documentation version**: To view your edits for the next documentation version, navigate to the provided URL.
+Then, select **Next** on the **Versions** dropdown menu.
+- **Debug**
+  - If you don't see anything after clicking **Next**, run `make setup-doc` and try again.
+  - Ensure that the `includeCurrentVersion` variable in the `makersaurus.config.js` file (located at `documentation`) is set to `true`.
+- **Past documentation versions**: To view your edits for past documentation versions (located at
+`docs/versioned_docs/`), navigate to the provided URL (for example, `http://localhost:3000/h2o-llm-studio/`).
+Then, select a *version* (for example, v0.2.0) on the **Versions** dropdown menu.
+
+### Cut a version
+
+To cut a new version after making changes at `documentation/docs` to align with the next version of the application, follow these steps:
+
+1. Right before cutting a version (`make version-doc`), set the `includeCurrentVersion` variable in the `makersaurus.config.js` file to `false`.
+2. Run: `make version-doc` (for example, `make version-doc DOC_VERSION=v0.3.0`)
+3. After the previous steps are executed and all generated files are pushed to the main branch, trigger the following
+workflow in GitHub Actions: `deploy-to-github-pages.yml`
+4. After publishing the new documentation version, set the `includeCurrentVersion` variable in the
+`makersaurus.config.js` file back to `true`.
+   - This ensures the next doc version to edit will be visible while editing locally.
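+
+Put together, a typical version cut looks roughly like this (the version number is illustrative):
+
+```sh
+# 1. In makersaurus.config.js, set includeCurrentVersion to false
+# 2. Cut the new version
+make version-doc DOC_VERSION=v0.3.0
+# 3. Push the generated files to main, then trigger deploy-to-github-pages.yml in GitHub Actions
+# 4. Set includeCurrentVersion back to true in makersaurus.config.js
+```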
+
+
+## More information
+
+Use the [Makersaurus docs](https://h2oai.github.io/makersaurus/) to learn more about how to edit docs, deploy the site, set up versioning and more.
+
+
+
diff --git a/documentation/app_banner.png b/documentation/app_banner.png
new file mode 100644
index 0000000000000000000000000000000000000000..e3190594401999cb1ced9ede4a093033b94e48f3
Binary files /dev/null and b/documentation/app_banner.png differ
diff --git a/documentation/docs/concepts.md b/documentation/docs/concepts.md
new file mode 100644
index 0000000000000000000000000000000000000000..0cade856020954be6db90f99e01785de3df051b4
--- /dev/null
+++ b/documentation/docs/concepts.md
@@ -0,0 +1,69 @@
+---
+description: Learn about concepts around H2O LLM Studio.
+---
+# Concepts
+
+H2O LLM Studio is based on a few key concepts and uses several key terms across its documentation. Each is explained in the sections below.
+
+## LLM
+
+A Large Language Model (LLM) is a type of AI model that uses deep learning techniques and massive datasets to analyze and generate human-like language. For example, many AI chatbots or AI search engines are powered by LLMs.
+
+Generally speaking, LLMs can be characterized by the following parameters:
+- size of the training dataset
+- cost of training (computational power)
+- size of the model (parameters)
+- performance after training (or how well the model is able to respond to a particular question)
+
+## Parameters and hyperparameters
+
+In the context of an LLM, parameters and hyperparameters are a crucial part of determining the model's performance and overall behavior.
+
+- **Parameters:** The internal variables of the model that are learned during the training process. In the case of an LLM, parameters typically include the weights and biases associated with the neural network layers. The values of parameters directly influence the model's predictions and the quality of generated text.
+
+- **Hyperparameters:** The configuration choices that are set before training the model and are not learned directly from the data (e.g., number of epochs, batch size, etc.). These choices impact the learning process and influence the model's overall behavior. Hyperparameters need to be tuned and optimized to achieve the best performance. The H2O LLM Studio GUI shows tooltips next to each hyperparameter to explain what it is for. You can also see the following references for more details about hyperparameters in H2O LLM Studio.
+ - Dataset settings
+ - [Experiment settings](./guide/experiments/experiment-settings)
+
+
+## LLM Backbone
+
+The LLM Backbone is a key hyperparameter that determines the model's architecture. This option is the most important setting when it comes to experiment creation, as it sets the pretrained model weights. For more information about the LLM Backbone, see [Experiment settings](guide/experiments/experiment-settings.md#llm-backbone).
+
+
+## Generative AI
+
+Generative AI refers to AI models that can generate new content, such as images, videos, or text, that did not exist before. These models learn from large datasets and use this knowledge to create new content that is similar in style or content to the original dataset.
+
+
+## Foundation model
+
+A foundation model is an adaptive model that has been trained on a large amount of data and starts to derive relationships between words and concepts. Foundation models are fine-tuned to become more specific and adapt to the related domain more efficiently.
+
+## Fine-tuning
+
+Fine-tuning refers to the process of taking a pre-trained language model and further training it on a specific task or domain to improve its performance on that task. It is an important technique used to adapt LLMs to specific tasks and domains.
+
+## LoRA (Low-Rank Adaptation)
+
+Low-Rank Adaptation (LoRA) involves modifying the pre-trained model by adjusting its weights and biases to better fit the new task. This adaptation is done in a way that preserves the pre-trained weights from the original dataset while also adjusting for the new task's specific requirements. This method of training or fine-tuning models consumes less memory. By using low-rank adaptation, the pre-trained model can be quickly adapted to new tasks without requiring a large amount of new training data.
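+
+In the standard formulation, instead of updating a full weight matrix $W$, LoRA learns a low-rank update, so a layer's forward pass becomes
+
+$$
+h = Wx + BAx, \qquad B \in \mathbb{R}^{d \times r},\; A \in \mathbb{R}^{r \times k},\; r \ll \min(d, k)
+$$
+
+where only $A$ and $B$ are trained while $W$ stays frozen, which is why the memory footprint is much smaller.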
+
+## Quantization
+
+Quantization is a technique used to reduce the size and memory requirements of a large language model without sacrificing its accuracy. This is done by converting the floating-point numbers used to represent the model's parameters to lower-precision numbers, such as half-floats or bfloat16. Quantization can be used to make language models more accessible to users with limited computing resources.
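+
+As a sketch, a common affine quantization scheme maps a floating-point value $x$ to a low-precision integer $q$ via a scale $s$ and zero-point $z$:
+
+$$
+q = \operatorname{round}\!\left(\frac{x}{s}\right) + z, \qquad \hat{x} = s\,(q - z)
+$$
+
+where $\hat{x}$ is the dequantized approximation of the original value.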
+
+## 8-bit model training with a low memory footprint
+
+8-bit model training with a low memory footprint refers to a fine-tuning technique that reduces the memory requirements for training neural networks by using 8-bit integers instead of 32-bit floating-point numbers. This approach can significantly reduce the amount of memory needed to store the model's parameters and can make it possible to train larger models on hardware with limited memory capacity.
+
+### BLEU
+
+Bilingual Evaluation Understudy (BLEU) is a model evaluation metric that measures the quality of machine-generated text by comparing it to reference text, producing a score between 0 and 1, where a higher score indicates a better match with the reference. BLEU is based on the concept of n-grams, which are contiguous sequences of words. The variations of BLEU, such as BLEU-1, BLEU-2, BLEU-3, and BLEU-4, differ in the size of the n-grams considered for evaluation. BLEU-n measures the precision of n-grams (n consecutive words) in the generated text compared to the reference text: it counts the number of overlapping n-grams and divides by the total number of n-grams in the generated text.
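+
+In its standard form, BLEU combines the n-gram precisions $p_n$ with a brevity penalty $BP$ that penalizes generations shorter than the reference:
+
+$$
+\mathrm{BLEU} = BP \cdot \exp\left(\sum_{n=1}^{N} w_n \log p_n\right), \qquad BP = \min\!\left(1,\; e^{\,1 - \frac{\text{reference length}}{\text{generated length}}}\right)
+$$
+
+where the weights $w_n$ are typically uniform ($w_n = 1/N$ for BLEU-$N$).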
+
+### Perplexity
+
+Perplexity (PPL) is a commonly used evaluation metric. It measures the confidence a model has in its predictions, or in simpler words how 'perplexed' or surprised it is by seeing new data. Perplexity is defined as the exponentiated cross-entropy of a sequence of tokens. Lower perplexity means the model is highly confident and accurate in the sequence of tokens it is responding with.
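+
+For a tokenized sequence $x_1, \ldots, x_N$, perplexity is the exponentiated average negative log-likelihood of each next token:
+
+$$
+\mathrm{PPL} = \exp\left(-\frac{1}{N}\sum_{i=1}^{N} \log p_\theta(x_i \mid x_{<i})\right)
+$$
+
+A model that always predicted the next token with probability 1 would reach the minimum perplexity of 1.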
+
+
diff --git a/documentation/docs/faqs.md b/documentation/docs/faqs.md
new file mode 100644
index 0000000000000000000000000000000000000000..6da205ef4a6a00a1147930ec7beda9797fa9b805
--- /dev/null
+++ b/documentation/docs/faqs.md
@@ -0,0 +1,181 @@
+---
+description: Learn about frequently asked questions.
+---
+import Icon from "@material-ui/core/Icon";
+
+# FAQs
+
+The sections below provide answers to frequently asked questions. If you have additional questions, please send them to [cloud-feedback@h2o.ai](mailto:cloud-feedback@h2o.ai).
+
+---
+
+### What are the general recommendations for using H2O LLM Studio?
+
+The recommendation is to always start with the default settings. From there, the parameters that tend to have the largest impact are:
+- the LLM backbone
+- the number of epochs
+- the learning rate
+- the LoRA settings
+
+:::info
+For more information on experiment settings, see [Experiment Settings](guide/experiments/experiment-settings).
+:::
+
+The parameters that have the largest impact on the amount of GPU memory being used are the [backbone dtype](guide/experiments/experiment-settings#backbone-dtype) and the [max length](guide/experiments/experiment-settings#max-length) (the length of the input sequence being used during model training).
+
+:::info
+For more information, see [this FAQ about GPU out-of-memory issues](#i-encounter-gpu-out-of-memory-issues-what-can-i-change-to-be-able-to-train-large-models).
+:::
+
+While these parameters will change the behavior of the fine-tuned model, the change that will be most impactful is the actual data used for fine-tuning. Having clean data and enough samples (i.e., at least 1000 records) is imperative.
+
+---
+
+### Is the tool multi-user or single user?
+
+While it is possible for multiple users to use the same instance, the tool was created for a single user at a time.
+
+----
+
+### How can human feedback be applied in LLM Studio?
+
+To apply human feedback in H2O LLM Studio, use the DPO (Direct Preference Optimization) problem type, which is specifically designed for learning from human feedback. For these use cases, the dataset contains a chosen answer column and a rejected answer column. This is a more stable alternative to traditional RLHF. For more information, see [this paper about DPO](https://arxiv.org/abs/2305.18290) by Stanford University.
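+
+For reference, the objective from the DPO paper optimizes the policy directly on preference pairs, where $y_w$ is the chosen answer, $y_l$ the rejected one, $\pi_{\mathrm{ref}}$ a frozen reference model, and $\beta$ a temperature-like coefficient:
+
+$$
+\mathcal{L}_{\mathrm{DPO}}(\theta) = -\,\mathbb{E}_{(x,\,y_w,\,y_l)}\left[\log \sigma\!\left(\beta \log \frac{\pi_\theta(y_w \mid x)}{\pi_{\mathrm{ref}}(y_w \mid x)} - \beta \log \frac{\pi_\theta(y_l \mid x)}{\pi_{\mathrm{ref}}(y_l \mid x)}\right)\right]
+$$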
+
+----
+
+### How does H2O LLM Studio evaluate the fine-tuned model?
+
+The evaluation options are [BLEU](concepts#bleu), [Perplexity](concepts#perplexity), and an AI judge. For more information about the traditional NLP similarity metrics, see [BLEU](concepts#bleu) and [Perplexity](concepts#perplexity) explained on the concepts page. You can also opt to use an AI judge by having an LLM (ChatGPT or a local LLM) judge the performance of the response. This [sample prompt](https://github.com/h2oai/h2o-llmstudio/blob/main/prompts/general.txt) is an example of a prompt used to have the LLM evaluate the response.
+
+----
+
+### Can I use a different AI Judge than ChatGPT?
+
+Yes. For instructions on how to use a local LLM to evaluate the fine-tuned model, see [Evaluate model using an AI judge](guide/experiments/evaluate-model-using-llm).
+
+---
+
+### How much data is generally required to fine-tune a model?
+
+There is no clear answer. As a rule of thumb, 1000 to 50000 samples of conversational data should be enough. Quality and diversity are very important. Make sure to try training on a subsample of data using the "sample" parameter to see how big the impact of the dataset size is. Recent studies suggest that less data is needed for larger foundation models.
+
+---
+
+### Are there any recommendations for which backbone to use? Are some backbones better for certain types of tasks?
+
+The majority of the LLM backbones are trained on a very similar corpus of data. The main difference is the size of the model and the number of parameters. Usually, the larger the model, the better it performs, but larger models also take longer to train. It is recommended to start with the smallest model and then increase the size if the performance is not satisfactory. If you are looking to train for tasks that are not directly question answering in English, it is also a good idea to look for specialized LLM backbones.
+
+---
+
+### What if my data is not in question-and-answer form and I just have documents? How can I fine-tune the LLM model?
+
+To train a chatbot style model, you need to convert your data into a question and answer format.
+
+If you really want to continue pretraining on your own data without teaching a question-answering style, prepare a dataset with all your data in a single-column dataframe. Make sure that the length of the text in each row is not too long. In the experiment setup, remove all additional tokens (e.g. `<|prompt|>`, `<|answer|>`, for Text Prompt Start and Text Answer Start respectively) and disable **Add Eos Token To Prompt** and **Add Eos Token To Answer**. Deselect everything in the Prompt Column.
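+
+A minimal sketch of this preparation with pandas (the column name, chunk size, and file paths are illustrative, not required by H2O LLM Studio):
+
+```python
+import pandas as pd
+
+# Raw documents, one long string per document.
+docs = ["first document text ...", "second document text ..."]
+
+# Split long documents into shorter chunks so each row stays within the model's max length.
+chunk_chars = 2000  # rough character budget per row; tune to your tokenizer and max length
+rows = [doc[i : i + chunk_chars] for doc in docs for i in range(0, len(doc), chunk_chars)]
+
+# Single-column dataframe; select this column as the answer column in the experiment setup.
+pd.DataFrame({"text": rows}).to_csv("pretraining_data.csv", index=False)
+```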
+
+There are also other enterprise solutions from H2O.ai that may help you convert your data into a Q&A format. For more information, see [H2O.ai's Generative AI page](https://h2o.ai/) and this blogpost about [H2O LLM DataStudio: Streamlining Data Curation and Data Preparation for LLMs related tasks](https://h2o.ai/blog/2023/streamlining-data-preparation-for-fine-tuning-of-large-language-models/).
+
+---
+
+
+### Can the adapter be downloaded after fine-tuning so that the adapter can be combined with the backbone LLM for deployment?
+
+H2O LLM Studio provides the option to download only the LoRA adapter when a model was trained with LoRA. Once the experiment has finished running, click the **Download adapter** button to download the LoRA adapter weights separately from the fine-tuned model.
+
+---
+
+### I encounter GPU out-of-memory issues. What can I change to be able to train large models?
+
+There are various parameters that can be tuned while keeping a specific LLM backbone fixed. It is advised to choose 4-bit/8-bit precision as the backbone dtype to be able to train models >=7B on a consumer-grade GPU. [LoRA](concepts#lora-low-rank-adaptation) should be enabled. Besides that, the usual parameters such as batch size and maximum sequence length can be decreased to save GPU memory (please ensure that your prompt+answer text is not truncated too much by checking the train data insights).
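+
+As a rough sketch, these knobs correspond to the following experiment configuration fields (values are illustrative, not recommendations):
+
+```yaml
+architecture:
+  backbone_dtype: int4   # 4-bit precision for the backbone
+tokenizer:
+  max_length: 512        # shorter sequences need less memory
+training:
+  batch_size: 1          # smaller batches need less memory
+  grad_accumulation: 4   # recover the effective batch size without extra memory
+  lora: true             # train low-rank adapters instead of all weights
+```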
+
+---
+
+### When does the model stop the fine-tuning process?
+
+The fine-tuning process stops after the number of epochs set by the user.
+
+---
+
+### What is the maximum dataset size that an LLM Studio instance can handle?
+
+The total dataset size is effectively unlimited, bounded only by disk space, as all training is done in batches. There is no specific rule of thumb for the maximum batch size; this depends strongly on the backbone, context size, use of Flash Attention 2.0, use of gradient checkpointing, etc.
+We suggest using a batch size that just fills the RAM for maximum efficiency. While testing for maximum memory consumption, set the padding quantile to `0`. Make sure to set it back to `1` when you have found a good setting for the batch size to save on runtime.
+
+----
+
+### Where does H2O LLM Studio store its data?
+
+By default, H2O LLM Studio stores its data in two folders located in the root directory of the app. The folders are named `data` and `output`. Here is the breakdown of the data storage structure:
+- `data/dbs`: This folder contains the user database used within the app.
+- `data/user`: This folder is where uploaded datasets from the user are stored.
+- `output/user`: All experiments conducted in H2O LLM Studio are stored in this folder. For each experiment, a separate folder is created within the `output/user` directory, which contains all the relevant data associated with that particular experiment.
+- `output/download`: Utility folder that is used to store data the user downloads within the app.
+
+It is possible to change the default working directory of H2O LLM Studio by setting the `H2O_LLM_STUDIO_WORKDIR` environment variable. By default, the working directory is set to the root directory of the app.
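+
+For example, to point H2O LLM Studio at a different working directory (the path is illustrative):
+
+```sh
+H2O_LLM_STUDIO_WORKDIR=/data/llm-studio make llmstudio
+```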
+
+----
+
+### How can I update H2O LLM Studio?
+
+To update H2O LLM Studio, you have two options:
+
+1. Using the latest main branch: Execute the commands `git checkout main` and `git pull` to obtain the latest updates from the main branch.
+2. Using the latest release tag: Execute the commands `git pull` and `git checkout v0.0.3` (replace 'v0.0.3' with the desired version number) to switch to the latest release.
+
+The update process does not remove or erase any existing data folders or experiment records. This means that all your old data, including the user database, uploaded datasets, and experiment results, will still be available to you within the updated version of H2O LLM Studio.
+
+Before updating, it is recommended to run the `git rev-parse --short HEAD` command and save the commit hash.
+This will allow you to revert to your existing version if needed.
+
+---
+
+### Once I have the [LoRA](guide/experiments/experiment-settings.md#lora), what is the recommended way of utilizing it with the base model?
+
+You can export the LoRA weights separately. You may add them to the files to be exported [here](https://github.com/h2oai/h2o-llmstudio/blob/main/llm_studio/app_utils/sections/experiment.py#L1552). Before exporting, the LoRA weights are merged back into the original LLM backbone weights to make downstream tasks easier, so you do not need PEFT or anything else for your deployment.
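+
+If you instead keep the adapter separate, one way to combine it with the base model yourself is PEFT's merge utilities (a sketch; the model and adapter paths are placeholders):
+
+```python
+from peft import PeftModel
+from transformers import AutoModelForCausalLM
+
+base = AutoModelForCausalLM.from_pretrained("h2oai/h2ogpt-4096-llama2-7b")
+model = PeftModel.from_pretrained(base, "path/to/lora_adapter")
+model = model.merge_and_unload()  # fold the LoRA weights into the backbone
+model.save_pretrained("merged_model")
+```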
+
+---
+
+### How do I use H2O LLM Studio on Windows?
+
+Use WSL 2 on Windows.
+
+---
+
+### How can I easily fine-tune a large language model (LLM) using the command-line interface (CLI) of H2O LLM Studio when I have limited GPU memory?
+
+If you have limited GPU memory but still want to fine-tune a large language model using H2O LLM Studio's CLI, there are alternative methods you can use to get started quickly.
+
+- [Using Kaggle kernels](https://www.kaggle.com/code/ilu000/h2o-llm-studio-cli/)
+- [Using Google Colab](https://colab.research.google.com/drive/1soqfJjwDJwjjH-VzZYO_pUeLx5xY4N1K?usp=sharing)
+
+---
+
+### Can I run a validation metric on a model post-training, optionally on a different validation dataset?
+
+Yes.
+
+1. After you have finished creating an experiment, click the Kebab menu of the relevant experiment and select **New Experiment**.
+
+2. Enable the **Use previous experiments weight** setting found at the top of the screen.
+   This loads the previous weights, and you can then change the evaluation dataset, metric, and anything else as you see fit. To only do evaluation without any retraining, set **Epochs** to 0.
+
+----
+
+### What are the hardware/infrastructure sizing recommendations for H2O LLM Studio?
+
+When it comes to hardware requirements, it is important to note that the primary demand centers around the GPU and its associated VRAM. In terms of CPUs, most modern choices should suffice as NLP tasks typically do not heavily stress CPU performance. As for RAM, it's advisable to have a minimum of 128GB, with a stronger recommendation of 256GB or more, particularly when dealing with substantial model weights that must be accommodated in the CPU RAM.
+
+----
+
+### I am seeing an OS error during the H2O LLM Studio training session. What should I do?
+
+If you receive the following error, it is most likely because of network issues, either with your own connection or on the Hugging Face Hub side.
+
+```title="Error"
+OSError: Consistency check failed: file should be of size 4999819336 but has size
+14099570832 ((…)ve/main/ model-00002-of-00003.safetensors).
+```
+
+In most cases, rerunning the experiment will solve it as the download of the model weights will be re-initiated.
+
+---
\ No newline at end of file
diff --git a/documentation/docs/get-started/core-features.md b/documentation/docs/get-started/core-features.md
new file mode 100644
index 0000000000000000000000000000000000000000..847000574efababf88b816decbcf6fe1e57bc785
--- /dev/null
+++ b/documentation/docs/get-started/core-features.md
@@ -0,0 +1,34 @@
+---
+description: Learn about the core features of LLM Studio.
+---
+# Core features
+
+## No-code fine-tuning
+
+NLP practitioners can easily fine-tune models without the need for code expertise. The user interface, which is specifically designed for LLMs, allows users to upload large datasets easily and configure [hyperparameters](../concepts#parameters-and-hyperparameters) to fine-tune the model.
+
+## Highly customizable (wide range of hyperparameters)
+
+H2O LLM Studio supports a wide variety of hyperparameters that can be used to fine-tune the model and supports the following fine-tuning techniques to enable advanced customization:
+
+- [Low-Rank Adaptation (LoRA)](../concepts#lora-low-rank-adaptation)
+- [8-bit model training with a low memory footprint](../concepts#8-bit-model-training-with-a-low-memory-footprint)
+
+## Advanced evaluation metrics and experiment comparison
+
+Advanced evaluation metrics in H2O LLM Studio can be used to validate the answers generated by the LLM. This helps to make data-driven decisions about the model. It also offers visual tracking and comparison of experiment performance, making it easy to analyze and compare different fine-tuned models. You can also visualize how different parameters affect the model performance, and optionally use the [Neptune](https://neptune.ai/) or [W&B](https://wandb.ai/) integration to track and log your experiments.
+
+## Instant publishing models
+
+H2O LLM Studio enables easy model sharing with the community by allowing you to export the model to the [Hugging Face Hub](https://huggingface.co/h2oai) with a single click.
+
+## Instant feedback on model performance
+
+Additionally, H2O LLM Studio lets you chat with the fine-tuned model and receive instant feedback about model performance.
+
+
+
+
+
+
+
diff --git a/documentation/docs/get-started/llm-studio-flow.md b/documentation/docs/get-started/llm-studio-flow.md
new file mode 100644
index 0000000000000000000000000000000000000000..0269337865e2275e84141d05afc959c73ba74ea5
--- /dev/null
+++ b/documentation/docs/get-started/llm-studio-flow.md
@@ -0,0 +1,46 @@
+---
+description: The flow of creating and fine-tuning large language models using H2O LLM Studio.
+---
+# Model flow
+
+The flow of creating and fine-tuning large language models using H2O LLM Studio can be summarized in the following sequential steps:
+
+- [Step 1: Import a dataset](#step-1-import-a-dataset)
+- [Step 2: Create an experiment](#step-2-create-an-experiment)
+- [Step 3: Monitor an experiment](#step-3-monitor-an-experiment)
+- [Step 4: Compare experiments](#step-4-compare-experiments)
+- [Step 5: Export a model to Hugging Face Hub](#step-5-export-a-model-to-hugging-face-hub)
+
+## Step 1: Import a dataset
+
+As the first step in the experiment flow, prep your data and import your dataset to H2O LLM Studio.
+
+- To learn about supported data connectors and data format, see [Supported data connectors and format](../guide/datasets/data-connectors-format).
+- To learn about how to import a dataset to H2O LLM Studio, see [Import a dataset](../guide/datasets/import-dataset).
+- To learn about reviewing and editing a dataset, see [View and manage dataset](../guide/datasets/view-dataset.md).
+
+## Step 2: Create an experiment
+
+As the second step in the experiment flow, create an experiment using the imported dataset. H2O LLM Studio offers several hyperparameter settings that you can adjust for your experiment model. To ensure that your training process is effective, you may need to specify the [hyperparameters](../concepts#parameters-and-hyperparameters) like learning rate, batch size, and the number of epochs. H2O LLM Studio provides an overview of all the parameters you’ll need to specify for your experiment.
+
+- To learn about creating a new experiment, see [Create an experiment](../guide/experiments/create-an-experiment.md).
+- To learn about the settings available for creating an experiment, see [Experiment settings](../guide/experiments/experiment-settings.md).
+
+## Step 3: Monitor an experiment
+
+As the third step in the experiment flow, monitor the launched experiment. H2O LLM Studio allows you to inspect your experiment (model) during and after model training. Simple interactive graphs in H2O LLM Studio allow you to understand the impact of selected hyperparameter values during and after model training. You can then adjust the [hyperparameters](../concepts#parameters-and-hyperparameters) to further optimize model performance.
+
+To learn about viewing and monitoring an experiment, see [View and manage experiments](../guide/experiments/view-an-experiment.md).
+
+## Step 4: Compare experiments
+
+H2O LLM Studio provides a useful feature that allows you to compare various experiments and analyze how different model parameters affect model performance. This feature is a powerful tool for fine-tuning your machine-learning models and ensuring they meet your desired performance metrics.
+
+To learn about comparing multiple experiments, see [Compare experiments](../guide/experiments/compare-experiments.md).
+
+## Step 5: Export a model to Hugging Face Hub
+
+As the final step in the experiment flow, you can export the fine-tuned model to Hugging Face with a single click.
+
+To learn about exporting a trained model to Hugging Face Hub, see [Export trained model to Hugging Face](../guide/experiments/export-trained-model.md).
+
diff --git a/documentation/docs/get-started/llm-studio-home-screen.png b/documentation/docs/get-started/llm-studio-home-screen.png
new file mode 100644
index 0000000000000000000000000000000000000000..c90762d188ea74f590619c5c5459cf5858d82715
--- /dev/null
+++ b/documentation/docs/get-started/llm-studio-home-screen.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:84a00e1bc49c538613ed0ba2a55084e25c3e833686c4bbf949ef8df17efb0bd4
+size 384959
diff --git a/documentation/docs/get-started/llm-studio-performance.md b/documentation/docs/get-started/llm-studio-performance.md
new file mode 100644
index 0000000000000000000000000000000000000000..ca52ece645dafe8dc2580be5b7dda740f41cb0f4
--- /dev/null
+++ b/documentation/docs/get-started/llm-studio-performance.md
@@ -0,0 +1,166 @@
+---
+description: Setting up and running H2O LLM Studio requires the following minimal prerequisites. This page lists out the speed and performance metrics of H2O LLM Studio based on different hardware setups.
+---
+# H2O LLM Studio performance
+
+Setting up and running H2O LLM Studio requires the following minimal [prerequisites](set-up-llm-studio.md#prerequisites). This page lists out the speed and performance metrics of H2O LLM Studio based on different hardware setups.
+
+The following metrics were measured.
+
+- **Hardware setup:** The type and number of computing devices used to train the model.
+- **LLM backbone:** The underlying architecture of the language model. For more information, see [LLM backbone](concepts.md#llm-backbone).
+- **Quantization:** A technique used to reduce the size and memory requirements of the model. For more information, see [Quantization](concepts.md#quantization).
+- **Train:** The amount of time it took to train the model (hh:mm:ss).
+- **Validation:** The amount of time it took to validate the model (hh:mm:ss).
+
+| Hardware setup | LLM backbone | Quantization | Train (hh:mm:ss) | Validation (hh:mm:ss) |
+|---|---|---|---|---|
+| 8xA10G | h2oai/h2ogpt-4096-llama2-7b | bfloat16 | 11:35 | 3:32 |
+| 4xA10G | h2oai/h2ogpt-4096-llama2-7b | bfloat16 | 21:13 | 06:35 |
+| 2xA10G | h2oai/h2ogpt-4096-llama2-7b | bfloat16 | 37:04 | 12:21 |
+| 1xA10G | h2oai/h2ogpt-4096-llama2-7b | bfloat16 | 1:25:29 | 15:50 |
+| 8xA10G | h2oai/h2ogpt-4096-llama2-7b | nf4 | 14:26 | 06:13 |
+| 4xA10G | h2oai/h2ogpt-4096-llama2-7b | nf4 | 26:55 | 11:59 |
+| 2xA10G | h2oai/h2ogpt-4096-llama2-7b | nf4 | 48:24 | 23:37 |
+| 1xA10G | h2oai/h2ogpt-4096-llama2-7b | nf4 | 1:26:59 | 42:17 |
+| 8xA10G | h2oai/h2ogpt-4096-llama2-13b | bfloat16 | OOM | OOM |
+| 4xA10G | h2oai/h2ogpt-4096-llama2-13b | bfloat16 | OOM | OOM |
+| 2xA10G | h2oai/h2ogpt-4096-llama2-13b | bfloat16 | OOM | OOM |
+| 1xA10G | h2oai/h2ogpt-4096-llama2-13b | bfloat16 | OOM | OOM |
+| 8xA10G | h2oai/h2ogpt-4096-llama2-13b | nf4 | 25:07 | 10:58 |
+| 4xA10G | h2oai/h2ogpt-4096-llama2-13b | nf4 | 48:43 | 21:25 |
+| 2xA10G | h2oai/h2ogpt-4096-llama2-13b | nf4 | 1:30:45 | 42:06 |
+| 1xA10G | h2oai/h2ogpt-4096-llama2-13b | nf4 | 2:44:36 | 1:14:20 |
+| 8xA10G | h2oai/h2ogpt-4096-llama2-70b | nf4 | OOM | OOM |
+| 4xA10G | h2oai/h2ogpt-4096-llama2-70b | nf4 | OOM | OOM |
+| 2xA10G | h2oai/h2ogpt-4096-llama2-70b | nf4 | OOM | OOM |
+| 1xA10G | h2oai/h2ogpt-4096-llama2-70b | nf4 | OOM | OOM |
+
+| Hardware setup | LLM backbone | Quantization | Train (hh:mm:ss) | Validation (hh:mm:ss) |
+|---|---|---|---|---|
+| 4xA100 80GB | h2oai/h2ogpt-4096-llama2-7b | bfloat16 | 7:04 | 3:55 |
+| 2xA100 80GB | h2oai/h2ogpt-4096-llama2-7b | bfloat16 | 13:14 | 7:23 |
+| 1xA100 80GB | h2oai/h2ogpt-4096-llama2-7b | bfloat16 | 23:36 | 13:25 |
+| 4xA100 80GB | h2oai/h2ogpt-4096-llama2-7b | nf4 | 9:44 | 6:30 |
+| 2xA100 80GB | h2oai/h2ogpt-4096-llama2-7b | nf4 | 18:34 | 12:16 |
+| 1xA100 80GB | h2oai/h2ogpt-4096-llama2-7b | nf4 | 34:06 | 21:51 |
+| 4xA100 80GB | h2oai/h2ogpt-4096-llama2-13b | bfloat16 | 11:46 | 5:56 |
+| 2xA100 80GB | h2oai/h2ogpt-4096-llama2-13b | bfloat16 | 21:54 | 11:17 |
+| 1xA100 80GB | h2oai/h2ogpt-4096-llama2-13b | bfloat16 | 39:10 | 18:55 |
+| 4xA100 80GB | h2oai/h2ogpt-4096-llama2-13b | nf4 | 16:51 | 10:35 |
+| 2xA100 80GB | h2oai/h2ogpt-4096-llama2-13b | nf4 | 32:05 | 21:00 |
+| 1xA100 80GB | h2oai/h2ogpt-4096-llama2-13b | nf4 | 59:11 | 36:53 |
+| 4xA100 80GB | h2oai/h2ogpt-4096-llama2-70b | nf4 | 1:13:33 | 46:02 |
+| 2xA100 80GB | h2oai/h2ogpt-4096-llama2-70b | nf4 | 2:20:44 | 1:33:42 |
+| 1xA100 80GB | h2oai/h2ogpt-4096-llama2-70b | nf4 | 4:23:57 | 2:44:51 |
+
+:::info
+The runtimes were gathered using the default parameters.
+
+
+<details>
+<summary>Expand to see the default parameters</summary>
+
+```
+architecture:
+ backbone_dtype: int4
+ gradient_checkpointing: true
+ intermediate_dropout: 0.0
+ pretrained: true
+ pretrained_weights: ''
+augmentation:
+ random_parent_probability: 0.0
+ skip_parent_probability: 0.0
+ token_mask_probability: 0.0
+dataset:
+ add_eos_token_to_answer: true
+ add_eos_token_to_prompt: true
+ add_eos_token_to_system: true
+ answer_column: output
+ chatbot_author: H2O.ai
+ chatbot_name: h2oGPT
+ data_sample: 1.0
+ data_sample_choice:
+ - Train
+ - Validation
+ limit_chained_samples: false
+ mask_prompt_labels: true
+ parent_id_column: None
+ personalize: false
+ prompt_column:
+ - instruction
+ system_column: None
+ text_answer_separator: <|answer|>
+ text_prompt_start: <|prompt|>
+ text_system_start: <|system|>
+ train_dataframe: /data/user/oasst/train_full.pq
+ validation_dataframe: None
+ validation_size: 0.01
+ validation_strategy: automatic
+environment:
+ compile_model: false
+ find_unused_parameters: false
+ gpus:
+ - '0'
+ - '1'
+ - '2'
+ - '3'
+ - '4'
+ - '5'
+ - '6'
+ - '7'
+ huggingface_branch: main
+ mixed_precision: true
+ number_of_workers: 8
+ seed: -1
+ trust_remote_code: true
+experiment_name: default-8-a10g
+llm_backbone: h2oai/h2ogpt-4096-llama2-7b
+logging:
+ logger: None
+ neptune_project: ''
+output_directory: /output/...
+prediction:
+ batch_size_inference: 0
+ do_sample: false
+ max_length_inference: 256
+ metric: BLEU
+ metric_gpt_model: gpt-3.5-turbo-0301
+ metric_gpt_template: general
+ min_length_inference: 2
+ num_beams: 1
+ num_history: 4
+ repetition_penalty: 1.2
+ stop_tokens: ''
+ temperature: 0.0
+ top_k: 0
+ top_p: 1.0
+problem_type: text_causal_language_modeling
+tokenizer:
+ add_prompt_answer_tokens: false
+ max_length: 512
+ padding_quantile: 1.0
+training:
+ batch_size: 2
+ differential_learning_rate: 1.0e-05
+ differential_learning_rate_layers: []
+ drop_last_batch: true
+ epochs: 1
+ evaluate_before_training: false
+ evaluation_epochs: 1.0
+ grad_accumulation: 1
+ gradient_clip: 0.0
+ learning_rate: 0.0001
+ lora: true
+ use_dora: false
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_r: 4
+ lora_target_modules: ''
+ loss_function: TokenAveragedCrossEntropy
+ optimizer: AdamW
+ save_checkpoint: "last"
+ schedule: Cosine
+ train_validation_data: false
+ warmup_epochs: 0.0
+ weight_decay: 0.0
+```
+
+</details>
+
+:::
\ No newline at end of file
diff --git a/documentation/docs/get-started/set-up-llm-studio.md b/documentation/docs/get-started/set-up-llm-studio.md
new file mode 100644
index 0000000000000000000000000000000000000000..89dc15766b2b94c4207e5100033c8651809fad93
--- /dev/null
+++ b/documentation/docs/get-started/set-up-llm-studio.md
@@ -0,0 +1,328 @@
+---
+description: Learn how to set up LLM Studio.
+---
+import Tabs from "@theme/Tabs";
+import TabItem from "@theme/TabItem";
+
+# Set up H2O LLM Studio
+
+This page guides you through setting up and installing H2O LLM Studio on your local system.
+
+First, download the H2O LLM Studio package from the [H2O LLM Studio Github repository](https://github.com/h2oai/h2o-llmstudio). You can use `git clone` or navigate to the [releases page](https://github.com/h2oai/h2o-llmstudio/releases) and download the `.zip` file found within the **Assets** of the relevant release.
+
+## Prerequisites
+
+H2O LLM Studio has the following minimum requirements:
+
+- A machine with Ubuntu 16.04+ and at least one recent Nvidia GPU
+- At least 128GB of system RAM; larger models and complex tasks may require 256GB or more
+- Nvidia drivers v470.57.02 or a later version
+- Access to the following URLs:
+ - developer.download.nvidia.com
+ - pypi.org
+ - huggingface.co
+ - download.pytorch.org
+ - cdn-lfs.huggingface.co
+
+:::info Notes
+- At least 24GB of GPU memory is recommended for larger models.
+- For more information on performance benchmarks based on the hardware setup, see [H2O LLM Studio performance](llm-studio-performance.md).
+- The required URLs are accessible by default when you start a GCP instance; however, if you have network rules or custom firewalls in place, it is recommended to confirm that the URLs are accessible before running `make setup`.
+:::
+
+## Installation
+
+:::note Installation methods
+
+**Linux/Ubuntu**
+
+The recommended way to install H2O LLM Studio is using pipenv with Python 3.10. To install Python 3.10 on Ubuntu 16.04+, execute the following commands.
+
+**System installs (Python 3.10)**
+
+```bash
+sudo add-apt-repository ppa:deadsnakes/ppa
+sudo apt install python3.10
+sudo apt-get install python3.10-distutils
+curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
+```
+
+**Install NVIDIA drivers (if required)**
+
+If you are deploying on a 'bare metal' machine running Ubuntu, you may need to install the required Nvidia drivers and CUDA. The following commands show how to retrieve the latest drivers for a machine running Ubuntu 20.04 as an example. You can update the commands based on your respective operating system.
+
+```bash
+wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-ubuntu2004.pin
+sudo mv cuda-ubuntu2004.pin /etc/apt/preferences.d/cuda-repository-pin-600
+wget https://developer.download.nvidia.com/compute/cuda/11.4.3/local_installers/cuda-repo-ubuntu2004-11-4-local_11.4.3-470.82.01-1_amd64.deb
+sudo dpkg -i cuda-repo-ubuntu2004-11-4-local_11.4.3-470.82.01-1_amd64.deb
+sudo apt-key add /var/cuda-repo-ubuntu2004-11-4-local/7fa2af80.pub
+sudo apt-get -y update
+sudo apt-get -y install cuda
+```
+
+**Create virtual environment (pipenv)**
+
+The following command creates a virtual environment using pipenv and installs the dependencies.
+
+```bash
+make setup
+```
+
+If you wish to use conda or another virtual environment, you can also install the dependencies using the `requirements.txt` file.
+
+```bash
+pip install -r requirements.txt
+```
+
+
+**Windows (WSL2)**
+
+Follow the steps below to install H2O LLM Studio on a Windows machine using Windows Subsystem for Linux (WSL2).
+
+1. Download the latest Nvidia driver for Windows.
+
+2. Open PowerShell or a Windows Command Prompt window in administrator mode.
+
+3. Run the following command to confirm that the driver is installed properly and see the driver version.
+
+   ```bash
+   nvidia-smi
+   ```
+
+4. Run the following command to install WSL2.
+
+   ```bash
+   wsl --install
+   ```
+
+5. Launch the WSL2 Ubuntu installation.
+
+6. Install the WSL2 Nvidia CUDA drivers.
+
+   ```bash
+   wget https://developer.download.nvidia.com/compute/cuda/repos/wsl-ubuntu/x86_64/cuda-wsl-ubuntu.pin
+   sudo mv cuda-wsl-ubuntu.pin /etc/apt/preferences.d/cuda-repository-pin-600
+   wget https://developer.download.nvidia.com/compute/cuda/12.2.0/local_installers/cuda-repo-wsl-ubuntu-12-2-local_12.2.0-1_amd64.deb
+   sudo dpkg -i cuda-repo-wsl-ubuntu-12-2-local_12.2.0-1_amd64.deb
+   sudo cp /var/cuda-repo-wsl-ubuntu-12-2-local/cuda-*-keyring.gpg /usr/share/keyrings/
+   sudo apt-get update
+   sudo apt-get -y install cuda
+   ```
+
+7. Set up the required Python system installs (Python 3.10).
+
+   ```bash
+   sudo add-apt-repository ppa:deadsnakes/ppa
+   sudo apt install python3.10
+   sudo apt-get install python3.10-distutils
+   curl -sS https://bootstrap.pypa.io/get-pip.py | python3.10
+   ```
+
+8. Create the virtual environment.
+
+   ```bash
+   sudo apt install -y python3.10-venv
+   python3 -m venv llmstudio
+   source llmstudio/bin/activate
+   ```
+
+9. Clone the H2O LLM Studio repository locally.
+
+   ```bash
+   git clone https://github.com/h2oai/h2o-llmstudio.git
+   cd h2o-llmstudio
+   ```
+
+10. Install H2O LLM Studio using the `requirements.txt`.
+
+    ```bash
+    pip install -r requirements.txt
+    ```
+
+11. Run the H2O LLM Studio application.
+
+    ```bash
+    H2O_WAVE_MAX_REQUEST_SIZE=25MB \
+    H2O_WAVE_NO_LOG=True \
+    H2O_WAVE_PRIVATE_DIR="/download/@output/download" \
+    wave run llm_studio.app
+    ```
+
+This will start the H2O Wave server and the H2O LLM Studio app. Navigate to http://localhost:10101/ (we recommend using Chrome) to access H2O LLM Studio and start fine-tuning your models.
+
+:::
+
+## Install custom package
+
+If required, you can install additional Python packages into your environment. This can be done using pip after activating your virtual environment via `make shell`. For example, to install flash-attention, you would use the following commands:
+
+```bash
+make shell
+pip install flash-attn --no-build-isolation
+pip install git+https://github.com/HazyResearch/flash-attention.git#subdirectory=csrc/rotary
+```
+
+Alternatively, you can also directly install the custom package by running the following command.
+
+```bash
+pipenv install package_name
+```
+
+## Run H2O LLM Studio
+
+There are several ways to run H2O LLM Studio depending on your requirements.
+
+1. [Run H2O LLM Studio GUI](#run-h2o-llm-studio-gui)
+2. [Run using Docker](#run-using-docker)
+3. [Run by building your own Docker image](#run-by-building-your-own-docker-image)
+4. [Run with the CLI (command-line interface)](#run-with-command-line-interface-cli)
+
+### Run H2O LLM Studio GUI
+
+Run the following command to start H2O LLM Studio.
+
+```sh
+make llmstudio
+```
+
+This will start the H2O Wave server and the H2O LLM Studio app. Navigate to [http://localhost:10101/](http://localhost:10101/) (we recommend using Chrome) to access H2O LLM Studio and start fine-tuning your models.
+
+
+
+If you are running H2O LLM Studio with a custom environment other than Pipenv, start the app as follows:
+
+```sh
+H2O_WAVE_MAX_REQUEST_SIZE=25MB \
+H2O_WAVE_NO_LOG=True \
+H2O_WAVE_PRIVATE_DIR="/download/@output/download" \
+wave run llm_studio.app
+```
+
+### Run using Docker
+
+First, install Docker by following the instructions from the [NVIDIA Container Installation Guide](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker). H2O LLM Studio images are stored in the h2oai Docker Hub container repository.
+
+```sh
+mkdir -p `pwd`/llmstudio_mnt
+docker run \
+ --runtime=nvidia \
+ --shm-size=64g \
+ --init \
+ --rm \
+ -it \
+ -p 10101:10101 \
+ -v `pwd`/llmstudio_mnt:/home/llmstudio/mount \
+ -v ~/.cache:/home/llmstudio/.cache \
+ h2oairelease/h2oai-llmstudio-app:latest
+```
+
+Navigate to [http://localhost:10101/](http://localhost:10101/) (we recommend using Chrome) to access H2O LLM Studio and start fine-tuning your models.
+
+:::info
+Other helpful docker commands are `docker ps` and `docker kill`.
+:::
+
+### Run by building your own Docker image
+
+```sh
+docker build -t h2o-llmstudio .
+mkdir -p `pwd`/llmstudio_mnt
+docker run \
+ --runtime=nvidia \
+ --shm-size=64g \
+ --init \
+ --rm \
+ -it \
+ -p 10101:10101 \
+ -v `pwd`/llmstudio_mnt:/home/llmstudio/mount \
+ -v ~/.cache:/home/llmstudio/.cache \
+ h2o-llmstudio
+```
+
+### Run with command line interface (CLI)
+
+You can also use H2O LLM Studio with the command line interface (CLI) and specify the configuration .yaml file that contains all the experiment parameters. To fine-tune using the H2O LLM Studio CLI, activate the pipenv environment by running `make shell`.
+
+To specify the path to the configuration file that contains the experiment parameters, run:
+
+```sh
+python llm_studio/train.py -Y {path_to_config_yaml_file}
+```
+
+To run on multiple GPUs in DDP mode, run:
+
+```sh
+bash distributed_train.sh {NR_OF_GPUS} -Y {path_to_config_yaml_file}
+```
+
+:::info
+By default, the framework will run on the first `k` GPUs. If you want to run on specific GPUs, set the `CUDA_VISIBLE_DEVICES` environment variable before the command, as shown in the example below.
+:::
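+
+For example, to run the distributed training script on only the first two GPUs (illustrative):
+
+```sh
+CUDA_VISIBLE_DEVICES=0,1 bash distributed_train.sh 2 -Y {path_to_config_yaml_file}
+```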
+
+To start an interactive chat with your trained model, run:
+
+```sh
+python llm_studio/prompt.py -e {experiment_name}
+```
+
+`experiment_name` is the output folder of the experiment you want to chat with. The interactive chat will also work with models that were fine-tuned using the GUI.
diff --git a/documentation/docs/get-started/videos.md b/documentation/docs/get-started/videos.md
new file mode 100644
index 0000000000000000000000000000000000000000..7a97e74d3864c37d5bf1f2dc5e22db252c8743ab
--- /dev/null
+++ b/documentation/docs/get-started/videos.md
@@ -0,0 +1,49 @@
+---
+description: Learn from a collection of videos about LLM Studio.
+---
+import ReactPlayer from 'react-player'
+
+
+# Videos
+
+## Discovering the Potential of LLMs
+
+VIDEO
+
+
+:::info Note
+ In this video, Andreea Turcu delves in-depth into the world of language models, showcasing how users can use H2O.ai's LLM Studio to its full advantage.
+:::
+
+---
+
+## The Fine Art of Fine-Tuning Large Language Models
+
+VIDEO
+
+
+:::info Note
+ In this video, Pascal Pfeiffer, Principal Data Scientist at H2O.ai and Kaggle Grandmaster, announces the release of H2O LLM Studio and talks about fine-tuning LLMs using H2O LLM Studio at H2O World India 2023.
+:::
+
+---
+
+## Basic introduction to H2O LLM Studio
+
+VIDEO
+
+
+:::info Note
+ In this video, Avkash Chauhan, founder of Prodramp Inc, gives a basic introduction about H2O LLM Studio.
+:::
+
+----
+
+## LLM Fine-Tuning, Falcon 40b, and the State of Open-Source
+
+VIDEO
+
+
+:::info Note
+ In this video, Pascal Pfeiffer, the Principal Data Scientist at H2O.ai, is interviewed about LLM fine-tuning, being a Kaggle Grandmaster, H2O.ai, Falcon 40b, the state of open-source, and more.
+:::
\ No newline at end of file
diff --git a/documentation/docs/get-started/what-is-h2o-llm-studio.md b/documentation/docs/get-started/what-is-h2o-llm-studio.md
new file mode 100644
index 0000000000000000000000000000000000000000..df8157e108ff237e060196f809355db0b6b92c6d
--- /dev/null
+++ b/documentation/docs/get-started/what-is-h2o-llm-studio.md
@@ -0,0 +1,16 @@
+---
+description: H2O LLM Studio is an open-source, no-code LLM graphical user interface (GUI) designed for fine-tuning state-of-the-art large language models.
+---
+# What is H2O LLM Studio?
+
+H2O LLM Studio is an open-source, no-code [LLM](../concepts#llm) graphical user interface (GUI) designed for fine-tuning state-of-the-art large language models.
+
+[Fine-tuning](../concepts#fine-tuning) a pretrained language model requires coding expertise and extensive knowledge about the model and its [hyperparameters](../concepts#parameters-and-hyperparameters). H2O LLM Studio, however, enables NLP practitioners to fine-tune their LLMs easily, with no need for coding and with better flexibility over customization.
+
+H2O LLM Studio also lets you chat with the fine-tuned model and receive instant feedback about model performance.
+
+## Who is H2O LLM Studio for?
+
+H2O LLM Studio is a free and open-source tool that is designed for anyone who wants to fine-tune their own language models. It is designed to be easy to use and accessible to everyone regardless of their technical expertise.
+
+NLP practitioners and data scientists in particular may find it useful to easily and effectively create and fine-tune large language models.
\ No newline at end of file
diff --git a/documentation/docs/guide/datasets/configure-dataset.png b/documentation/docs/guide/datasets/configure-dataset.png
new file mode 100644
index 0000000000000000000000000000000000000000..f58b3f835a442c565d5131801a03355d1a9bd77d
--- /dev/null
+++ b/documentation/docs/guide/datasets/configure-dataset.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:880e2f565656ab223605dcc8fe44e6397681951670977b11951293ae7931921b
+size 304806
diff --git a/documentation/docs/guide/datasets/data-connectors-format.md b/documentation/docs/guide/datasets/data-connectors-format.md
new file mode 100644
index 0000000000000000000000000000000000000000..f4a06454bf532a572466894eb9cbe6d62824bc1d
--- /dev/null
+++ b/documentation/docs/guide/datasets/data-connectors-format.md
@@ -0,0 +1,35 @@
+# Supported data connectors and format
+
+## Data connectors
+
+H2O LLM Studio supports the following data connectors to access or upload external data sources.
+
+- **Upload**: Upload a local dataset from your machine.
+- **Local**: Specify the file location of the dataset on your machine.
+- **AWS S3**: Connect to an Amazon S3 data bucket.
+- **Azure Datalake**: Connect to a dataset in Azure Datalake.
+- **H2O Drive**: Upload a dataset from H2O Drive.
+- **Kaggle**: Connect to a dataset hosted on Kaggle.
+- **Hugging Face**: Connect to a dataset on Hugging Face.
+
+## Data format
+
+- Each data connector requires either a single `.csv` or `.pq` file, or the data to be in a `.zip` file for a successful import.
+
+- H2O LLM Studio requires a `.csv` file with a minimum of two columns, where one contains the instructions and the other has the model’s expected output. You can also include an additional validation dataframe in the same format or allow for an automatic train/validation split to assess the model’s performance.
+
+- Optionally, a **Parent Id** can be used for training nested data prompts that are linked to a parent question.
+
+- During an experiment you can adapt the data representation with the following settings:
+ - **Prompt Column:** The column in the dataset containing the user prompt.
+ - **Answer Column:** The column in the dataset containing the expected output.
+  - **Parent Id Column:** An optional column specifying the parent id to be used for chained conversations. The value of this column needs to match an additional column with the name `id`. If provided, the prompt will be concatenated after the preceding parent rows.
+  - **Id Column:** This column is only required when the `Parent Id` is set. It identifies the record ID used for linked conversations; the value in the `Parent Id` column must match this column's value (see the example after this list).
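+
+For example, a minimal chained-conversation dataframe could be built like this (the `instruction` and `output` column names follow the defaults used elsewhere in these docs; the id columns and values are illustrative):
+
+```python
+import pandas as pd
+
+# The second row's parent_id points at the first row's id, so its prompt is
+# concatenated after the parent turn during training.
+df = pd.DataFrame(
+    {
+        "id": ["1", "2"],
+        "parent_id": [None, "1"],
+        "instruction": ["What is H2O LLM Studio?", "How do I install it?"],
+        "output": ["An open-source GUI for fine-tuning LLMs.", "Clone the repo and run make setup."],
+    }
+)
+df.to_csv("chained_conversations.csv", index=False)
+```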
+
+:::info
+To train a chatbot style model, you need to convert your data into a question and answer format. There are other enterprise solutions by H2O.ai that may help you prep your data. For more information, see [H2O.ai's Generative AI page](https://h2o.ai/) and this blogpost about [H2O LLM DataStudio: Streamlining Data Curation and Data Preparation for LLMs related tasks](https://h2o.ai/blog/2023/streamlining-data-preparation-for-fine-tuning-of-large-language-models/).
+:::
+
+## Example data
+
+H2O LLM Studio provides a sample dataset (converted dataset from [OpenAssistant/oasst2](https://huggingface.co/datasets/OpenAssistant/oasst2))
+that can be downloaded [here](https://www.kaggle.com/code/philippsinger/openassistant-conversations-dataset-oasst2?scriptVersionId=160485459). It is recommended to use `train_full.csv` for training. This dataset is also downloaded and prepared by default when first starting the GUI. Multiple dataframes can be uploaded into a single dataset by uploading a `.zip` archive.
\ No newline at end of file
diff --git a/documentation/docs/guide/datasets/import-dataset.md b/documentation/docs/guide/datasets/import-dataset.md
new file mode 100644
index 0000000000000000000000000000000000000000..d7b7b2aca046487ad83e85cf9d855d3a9b25229d
--- /dev/null
+++ b/documentation/docs/guide/datasets/import-dataset.md
@@ -0,0 +1,222 @@
+---
+description: H2O LLM Studio provides a number of data connectors to support importing data from local or external sources and requires your data to be in a certain format for a successful import.
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+import Admonition from '@theme/Admonition';
+import upload_dataset from './upload-dataset.png';
+import upload_local_file from './upload-local-file.png';
+import import_s3_bucket from './import-s3-bucket.png';
+import import_kaggle_dataset from './import-kaggle-dataset.png';
+import import_h2odrive_dataset from './import-h2o-drive-dataset.png';
+import DatasetNameTooltip from '../../tooltips/experiments/_dataset-name.mdx';
+import ProblemTypeTooltip from '../../tooltips/experiments/_problem-type.mdx';
+import TrainDataframeTooltip from '../../tooltips/experiments/_train-dataframe.mdx';
+import ValidationDataframeTooltip from '../../tooltips/experiments/_validation-dataframe.mdx';
+import SystemColumnTooltip from '../../tooltips/experiments/_system-column.mdx';
+import PromptColumnTooltip from '../../tooltips/experiments/_prompt-column.mdx';
+import RejectedPromptColumnTooltip from '../../tooltips/experiments/_rejected-prompt-column.mdx';
+import AnswerColumnTooltip from '../../tooltips/experiments/_answer-column.mdx';
+import RejectedAnswerColumnTooltip from '../../tooltips/experiments/_rejected-answer-column.mdx';
+import ParentIdColumnTooltip from '../../tooltips/experiments/_parent-id-column.mdx';
+
+# Import a dataset
+
+H2O LLM Studio provides a number of data connectors to support importing data from local or external sources and requires your data to be in a certain format for successful importing of data.
+
+For more information, see [Supported data connectors and format](data-connectors-format).
+
+## Import data
+
+Follow the relevant steps below to import a dataset to H2O LLM Studio.
+
+1. On the H2O LLM Studio left-navigation pane, click **Import dataset**.
+2. Select the relevant **Source** (data connector) that you want to use from the dropdown list.
+   :::note Data sources
+
+   <Tabs>
+   <TabItem value="upload" label="Upload" default>
+
+   1. Drag and drop the file, or click **Browse** and select the file you want to upload.
+   2. Click **Upload**.
+
+   <img src={upload_dataset} alt="Upload dataset" />
+
+   </TabItem>
+   <TabItem value="local" label="Local">
+
+   1. Enter the file path as the **File Location** or select the relevant local directory that the dataset is located in.
+   2. Click **Continue**.
+
+   <img src={upload_local_file} alt="Upload local file" />
+
+   </TabItem>
+   <TabItem value="aws-s3" label="AWS S3">
+
+   1. Enter values for the following fields:
+      - **S3 bucket name:** The name of the S3 bucket, including the relative file paths.
+      - **AWS access key:** The access key associated with your S3 bucket. This field is optional; if the S3 bucket is public, you can leave it empty for anonymous access.
+      - **AWS access secret:** The access secret associated with your S3 bucket. This field is optional; if the S3 bucket is public, you can leave it empty for anonymous access.
+      - **File name:** The file name of the dataset that you want to import.
+   2. Click **Continue**.
+
+   <img src={import_s3_bucket} alt="Import S3 bucket" />
+
+   </TabItem>
+   <TabItem value="azure-datalake" label="Azure Datalake">
+
+   1. Enter values for the following fields:
+      - **Datalake connection string:** Your Azure connection string to connect to Datalake storage.
+      - **Datalake container name:** The name of the Azure Data Lake container where your dataset is stored, including the relative path to the file within the container.
+      - **File name:** The exact name of the file you want to import.
+   2. Click **Continue**.
+
+   </TabItem>
+   <TabItem value="h2o-drive" label="H2O Drive">
+
+   1. Select the dataset you want to upload from the list of datasets in H2O Drive.
+   2. Click **Continue**.
+
+   <img src={import_h2odrive_dataset} alt="Import H2O Drive dataset" />
+
+   </TabItem>
+   <TabItem value="kaggle" label="Kaggle">
+
+   1. Enter values for the following fields:
+      - **Kaggle API command:** The Kaggle API command that you want to execute.
+      - **Kaggle username:** Your Kaggle username for API authentication.
+      - **Kaggle secret key:** Your Kaggle secret key for API authentication.
+   2. Click **Continue**.
+
+   <img src={import_kaggle_dataset} alt="Import Kaggle dataset" />
+
+   </TabItem>
+   <TabItem value="hugging-face" label="Hugging Face">
+
+   1. Enter values for the following fields:
+      - **Hugging Face dataset:** The name of the Hugging Face dataset.
+      - **Split:** The specific data split you want to import (e.g., "train", "test").
+      - **Hugging Face API token (optional):** Your Hugging Face API token, used to authenticate access to private datasets or datasets with gated access.
+   2. Click **Continue**.
+
+   </TabItem>
+   </Tabs>
+
+   :::
+
+## Configure dataset
+
+Once you have successfully uploaded or imported your dataset, you can configure the dataset settings. Depending on the problem type, you may need to specify the following:
+
+:::info Tip
+You can upload a `.zip` file with both training and validation sets to avoid having to separately upload files.
+:::
+
+- **Dataset name:** <DatasetNameTooltip/>
+
+- **Problem Type:** <ProblemTypeTooltip/>
+
+- **Train Dataframe:** <TrainDataframeTooltip/>
+
+- **Validation Dataframe:** <ValidationDataframeTooltip/>
+
+- **System Column:** <SystemColumnTooltip/>
+
+- **Prompt Column:** <PromptColumnTooltip/>
+
+- **Rejected Prompt Column:** <RejectedPromptColumnTooltip/>
+
+- #### Answer Column
+
+  <AnswerColumnTooltip/>
+
+- **Rejected Answer Column:** <RejectedAnswerColumnTooltip/>
+
+- **Parent Id Column:** <ParentIdColumnTooltip/>
+
+
+
+## Data validity check
+
+H2O LLM Studio will provide a preview of the dataset input (sample questions) and output (sample answers) according to the content of the imported dataset. Review the text to ensure that the input and output are as intended, and then click **Continue**.
+
+## View dataset
+
+You will now be redirected to the **View datasets** screen. You should be able to see the dataset you just imported listed on the screen.
+
+
+
+For more information about viewing dataset summary and statistics, see [View and manage datasets](view-dataset).
\ No newline at end of file
diff --git a/documentation/docs/guide/datasets/import-h2o-drive-dataset.png b/documentation/docs/guide/datasets/import-h2o-drive-dataset.png
new file mode 100644
index 0000000000000000000000000000000000000000..8aa0b79925de2816124cde72e07926d82807b554
--- /dev/null
+++ b/documentation/docs/guide/datasets/import-h2o-drive-dataset.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eb68dae92d6765662f07ffc5147a818a9f4498ad5fad17302c115db9bdae6602
+size 186513
diff --git a/documentation/docs/guide/datasets/import-kaggle-dataset.png b/documentation/docs/guide/datasets/import-kaggle-dataset.png
new file mode 100644
index 0000000000000000000000000000000000000000..ad6712b74ea7a318db05bca61a42b2c0aae40ffa
--- /dev/null
+++ b/documentation/docs/guide/datasets/import-kaggle-dataset.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ac0f882fb5b6349167d192e536557a397f9781c3fd0bfb7c8f0b42eddfb9372
+size 265330
diff --git a/documentation/docs/guide/datasets/import-s3-bucket.png b/documentation/docs/guide/datasets/import-s3-bucket.png
new file mode 100644
index 0000000000000000000000000000000000000000..e0bb8c07e36bdf8cc1a844c868412b953ec40149
--- /dev/null
+++ b/documentation/docs/guide/datasets/import-s3-bucket.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ba8a02c48a197ca7fb77a6ec6d27cb392b52e47ae15e52e37b7844df5bf1cc6
+size 290319
diff --git a/documentation/docs/guide/datasets/merge-datasets.md b/documentation/docs/guide/datasets/merge-datasets.md
new file mode 100644
index 0000000000000000000000000000000000000000..39b99ea7edb528e1ece17820e60407f7eadabc07
--- /dev/null
+++ b/documentation/docs/guide/datasets/merge-datasets.md
@@ -0,0 +1,34 @@
+---
+description: H2O LLM Studio enables you to merge imported datasets into one main dataset. This functionality can be used to merge training and validation data together into one dataset or extend your existing dataset with more data and increase your dataset size.
+---
+import Icon from "@material-ui/core/Icon";
+
+# Merge datasets
+
+H2O LLM Studio enables you to merge imported datasets into one main dataset. This functionality can be used to merge training and validation data together into one dataset or extend your existing dataset with more data and increase your dataset size.
+
+:::info
+H2O LLM Studio does not merge dataset files in the sense of combining rows or removing duplicates. "Merge", in this case, refers to moving the files of one dataset into another dataset, alongside the files that dataset already contains.
+:::
+
+Generally, you might want to merge datasets in H2O LLM Studio to have both the training data `.csv` and validation data `.csv` in one final dataset.
+
+1. On the H2O LLM Studio left-navigation pane, click **View datasets**.
+2. Click the <Icon>more_vert</Icon> Kebab menu of the dataset you want to merge with another dataset.
+3. Click **Edit dataset**.
+4. Click **Merge with existing dataset**.
+5. Select the dataset that you want to merge with.
+ 
+6. Click **Merge**.
+7. Adjust the dataset configuration if needed. For more information about the configurations, see [Configure dataset](./import-dataset#configure-dataset).
+8. Click **Continue**.
+9. Review the text to ensure that the input and output are as intended, and then click **Continue**.
+
+Your datasets are now merged.
+
+:::info
+Alternatively, you can also merge datasets at the point of [importing a dataset](./import-dataset) or combine both datasets (.csv files) into a `.zip` file before uploading it as a whole dataset.
+:::
+
+
+
diff --git a/documentation/docs/guide/datasets/merge-datasets.png b/documentation/docs/guide/datasets/merge-datasets.png
new file mode 100644
index 0000000000000000000000000000000000000000..03f7a9db5277bffe5d00f8e9b5e80070417f613b
--- /dev/null
+++ b/documentation/docs/guide/datasets/merge-datasets.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:582e3ab3bba086c5a687900a27f63494d77ced33b2149137a7bf027ce71311ea
+size 250653
diff --git a/documentation/docs/guide/datasets/upload-dataset.png b/documentation/docs/guide/datasets/upload-dataset.png
new file mode 100644
index 0000000000000000000000000000000000000000..5e18909d57bf2ae400e1ab62b77a537cb760536e
--- /dev/null
+++ b/documentation/docs/guide/datasets/upload-dataset.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8bb0a28d5187770c0cb463fb54af8a48e0c9d3234b9e967412687695cf95ca1b
+size 261792
diff --git a/documentation/docs/guide/datasets/upload-local-file.png b/documentation/docs/guide/datasets/upload-local-file.png
new file mode 100644
index 0000000000000000000000000000000000000000..fe000d9474d704d893f7ec737a5163483871c6de
--- /dev/null
+++ b/documentation/docs/guide/datasets/upload-local-file.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2801c27c876346d14b2ddd1c1bc7efb965221b29d211ab987cc92238993e3acc
+size 245249
diff --git a/documentation/docs/guide/datasets/view-dataset.md b/documentation/docs/guide/datasets/view-dataset.md
new file mode 100644
index 0000000000000000000000000000000000000000..e0814ebae5510e252cb028098796892c74602b08
--- /dev/null
+++ b/documentation/docs/guide/datasets/view-dataset.md
@@ -0,0 +1,74 @@
+---
+description: You can view, review, edit, or delete your datasets once you have imported them. You can also start a new experiment using a dataset you have imported.
+---
+import Icon from "@material-ui/core/Icon";
+
+# View and manage datasets
+
+You can view, review, edit, or delete your datasets once you have imported them. You can also start a new experiment using a dataset you have imported.
+
+## View a dataset
+
+To view an imported dataset:
+
+1. On the H2O LLM Studio left-navigation pane, click **View datasets**.
+
+2. You will see the datasets table with a list of all the datasets you have imported so far. Click the name of the dataset that you want to view.
+
+ 
+
+ :::info
+   For more information about the dataset details you see on the table above, see [dataset configurations](import-dataset.md#configure-dataset).
+ :::
+
+## Dataset tabs
+
+You will see the following tabs that provide details and different aspects of your dataset.
+
+- **Sample train data:** This tab contains sample training data from the imported dataset.
+
+- **Sample train visualization:** This tab visualizes a few training samples from the imported dataset in a question-answer format, simulating the way the chatbot would answer questions based on the training data.
+
+- **Train data statistics:** This tab contains metrics about the training data (e.g., unique values) from the imported dataset.
+
+- **Summary:** This tab contains the following details about the dataset.
+
+ | Name | Description |
+ | ----------- | ------------------------------------ |
+ | **Name** | Name of the dataset. |
+ | **Problem type** | Problem type of the dataset. |
+ | **Train dataframe** | Name of the training dataframe in the imported dataset. An imported dataset can contain train, test, and validation dataframes. |
+ | **Train rows** | The number of rows the train dataframe contains. |
+ | **Validation dataframe** | Name of the validation dataframe in the imported dataset. An imported dataset can contain train, test, and validation dataframes. |
+ | **Validation rows** | The number of rows the validation dataframe contains. |
+ | **Labels** | The labels the imported dataset contains. |
+
+
+## Edit a dataset
+
+To edit an imported dataset:
+
+1. On the H2O LLM Studio left-navigation pane, click **View datasets**. You will see the datasets table with a list of all the datasets you have imported so far.
+2. Locate the row of the dataset you want to edit and click the <Icon>more_vert</Icon> Kebab menu.
+3. Select **Edit dataset**.
+4. Make the desired changes to the dataset configuration. You can also [merge the dataset with an existing dataset](merge-datasets) at this point.
+5. Click **Continue** and review the dataset with your changes.
+
+
+
+## Delete a dataset
+
+When a dataset is no longer needed, you can delete it. Deleted datasets are permanently removed from the H2O LLM Studio instance.
+
+:::caution
+You can only delete datasets that are not linked to any experiments. If you wish to delete a dataset that is linked to an experiment, first [delete the experiment](../experiments/view-an-experiment#delete-an-experiment), and then delete the dataset.
+:::
+
+1. On the H2O LLM Studio left-navigation pane, click **View datasets**.
+2. Click **Delete datasets**.
+3. Select the dataset(s) that you want to delete.
+4. Click **Delete** to confirm deletion.
\ No newline at end of file
diff --git a/documentation/docs/guide/datasets/view-imported-dataset.png b/documentation/docs/guide/datasets/view-imported-dataset.png
new file mode 100644
index 0000000000000000000000000000000000000000..587d3ed89305d808f0d21845ea47560617dc24bf
--- /dev/null
+++ b/documentation/docs/guide/datasets/view-imported-dataset.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6325e4fddcb76ad47773b4e2d87c98fc8ea89ac0c98241f07edd74f1a812ac25
+size 284768
diff --git a/documentation/docs/guide/experiments/best-validation-sample.png b/documentation/docs/guide/experiments/best-validation-sample.png
new file mode 100644
index 0000000000000000000000000000000000000000..427fc2fbfb81d2b87ac45ca29b8290cae2ece4bb
--- /dev/null
+++ b/documentation/docs/guide/experiments/best-validation-sample.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8633b12aa84077dee61548aa23c0b6dfc7f2ee447452acf78b4919254c6b0eef
+size 200806
diff --git a/documentation/docs/guide/experiments/charts-tab.png b/documentation/docs/guide/experiments/charts-tab.png
new file mode 100644
index 0000000000000000000000000000000000000000..7e3bbb77cc1a451d80e233d1b1a68d6920cdf67a
--- /dev/null
+++ b/documentation/docs/guide/experiments/charts-tab.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3b5e59e50aba8960ed0f304067e6eacc753c9985402d7d2d788cc068f26ce9f5
+size 233767
diff --git a/documentation/docs/guide/experiments/chat-tab.png b/documentation/docs/guide/experiments/chat-tab.png
new file mode 100644
index 0000000000000000000000000000000000000000..11da8345406198fcb43481ad6b0f1fbeed790f03
--- /dev/null
+++ b/documentation/docs/guide/experiments/chat-tab.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcd8a9acec8596d9208a7252c116a33f61f6fd5e4b2dec6b88d1d387df136fc3
+size 177861
diff --git a/documentation/docs/guide/experiments/compare-experiments.md b/documentation/docs/guide/experiments/compare-experiments.md
new file mode 100644
index 0000000000000000000000000000000000000000..47a9512c300c037cf578e6a7860c4c35c5ee4ca2
--- /dev/null
+++ b/documentation/docs/guide/experiments/compare-experiments.md
@@ -0,0 +1,21 @@
+---
+description: Using H2O LLM Studio, you can compare experiments and analyze how different model parameters affect model performance.
+---
+# Compare experiments
+
+Using H2O LLM Studio, you can compare experiments and analyze how different model parameters affect model performance.
+
+Follow the relevant steps below to compare experiments in H2O LLM Studio.
+
+1. On the H2O LLM Studio left-navigation pane, click **View experiments**.
+2. Click **Compare experiments**.
+3. Select the experiments you want to compare.
+4. Click **Compare experiments**.
+
+ 
+
+ The **Charts** tab visually represents the comparison of train/validation loss, metrics, and learning rate of selected experiments. The **Config** tab compares the configuration settings of selected experiments.
+
+:::info note
+In addition, H2O LLM Studio also integrates with [Neptune](https://neptune.ai/) and [W&B](https://wandb.ai/), two powerful experiment tracking platforms. By enabling Neptune or W&B logging when starting an experiment, you can easily track and visualize all aspects of your experiment in real time. This includes model performance, hyperparameter tuning, and other relevant metrics.
+:::
\ No newline at end of file
diff --git a/documentation/docs/guide/experiments/compare-experiments.png b/documentation/docs/guide/experiments/compare-experiments.png
new file mode 100644
index 0000000000000000000000000000000000000000..35fd20c3e8342d4092b0fb1c4aafc482f678cf4a
--- /dev/null
+++ b/documentation/docs/guide/experiments/compare-experiments.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43b80e769ca48b1a71dc9a5c4d7a17c0050bacb98bbb745b182dfff3078a7681
+size 257127
diff --git a/documentation/docs/guide/experiments/create-an-experiment.md b/documentation/docs/guide/experiments/create-an-experiment.md
new file mode 100644
index 0000000000000000000000000000000000000000..87bae9a6d0f0e55cd01ed5e6632f730b58c08c38
--- /dev/null
+++ b/documentation/docs/guide/experiments/create-an-experiment.md
@@ -0,0 +1,68 @@
+---
+description: This page highlights the steps needed to create an experiment in H2O LLM Studio.
+---
+import Icon from "@material-ui/core/Icon";
+
+# Create an experiment
+
+Follow the relevant steps below to create an experiment in H2O LLM Studio.
+
+1. On the H2O LLM Studio left-navigation pane, click **Create experiment**. Alternatively, you can click **New experiment** on the <Icon>more_vert</Icon> Kebab menu of the [View datasets](../datasets/view-dataset.md) page.
+
+2. Select the **Dataset** you want to use to fine-tune an LLM model.
+
+3. Select the **Problem type**.
+
+4. Provide a meaningful **Experiment name**.
+
+5. Define the parameters. The most important parameters are:
+   - **LLM Backbone**: This parameter determines the LLM architecture to use. It is the foundation model that you continue training. H2O LLM Studio has a predefined list of recommended foundation models available in the dropdown list. You can also type in the name of a [Hugging Face model](https://huggingface.co/models) that is not in the list, for example, `h2oai/h2o-danube2-1.8b-sft`, or the path of a local folder that contains the model you would like to fine-tune.
+ - **Mask Prompt Labels**: This option controls whether to mask the prompt labels during training and only train on the loss of the answer.
+ - Hyperparameters such as **Learning rate**, **Batch size**, and number of epochs determine the training process. You can refer to the tooltips that are shown next to each hyperparameter in the GUI to learn more about them.
+ - **Evaluate Before Training**: This option lets you evaluate the model before training, which can help you judge the quality of the LLM backbone before fine-tuning.
+
+ H2O LLM Studio provides several metric options for evaluating the performance of your model. In addition to the BLEU score, H2O LLM Studio also offers the GPT3.5 and GPT4 metrics that utilize the OpenAI API to determine whether the predicted answer is more favorable than the ground truth answer. To use these metrics, you can either export your OpenAI API key as an environment variable before starting LLM Studio, or you can specify it in the **Settings** menu within the UI.
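+
+   For example, assuming the standard OpenAI environment variable name, you could export the key before launching H2O LLM Studio:
+
+   ```bash
+   export OPENAI_API_KEY="<your-openai-api-key>"
+   ```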
+
+ :::info note
+ H2O LLM Studio provides an overview of all the parameters you need to specify for your experiment. The default settings are suitable when you first start an experiment. To learn more about the parameters, see [Experiment settings](experiment-settings.md).
+ :::
+
+6. Click **Run experiment**.
+
+ 
+
+## Run an experiment on the OASST data via CLI
+
+The steps below provide an example of how to run an experiment on [OASST](https://huggingface.co/OpenAssistant) data via the command line interface (CLI).
+
+1. Get the training dataset (`train_full.csv`) from the [OpenAssistant Conversations Dataset OASST2](https://www.kaggle.com/code/philippsinger/openassistant-conversations-dataset-oasst2?scriptVersionId=160485459) and place it in the `examples/data_oasst2` folder, or download it directly using the [Kaggle API](https://www.kaggle.com/docs/api) command below.
+
+ ```bash
+ kaggle kernels output philippsinger/openassistant-conversations-dataset-oasst2 -p examples/data_oasst2/
+ ```
+
+2. Go into the interactive shell or open a new terminal window. Install the dependencies first, if you have not installed them already.
+
+ ```bash
+ make setup # installs all dependencies
+ make shell
+ ```
+
+3. Run the following command to run the experiment.
+
+ ```bash
+ python llm_studio/train.py -Y examples/example_oasst2.yaml
+ ```
+
+   After the experiment is completed, you can find all output artifacts in the `examples/output_oasst2` folder.
+   You can then use the `prompt.py` script to chat with your model:
+
+   ```bash
+   python llm_studio/prompt.py -e examples/output_oasst2
+   ```
+
+4. To publish the model to Hugging Face, use the following command:
+
+ ```bash
+ python llm_studio/publish_to_hugging_face.py -p {path_to_experiment} -d {device} -a {api_key} -u {user_id} -m {model_name} -s {safe_serialization}
+ ```
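+
+   For example, for the experiment trained above (every value below is an illustrative placeholder; substitute your own):
+
+   ```bash
+   python llm_studio/publish_to_hugging_face.py \
+       -p examples/output_oasst2 \
+       -d cuda:0 \
+       -a <your-write-access-api-key> \
+       -u <your-huggingface-username> \
+       -m my-fine-tuned-model \
+       -s True
+   ```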
diff --git a/documentation/docs/guide/experiments/delete-experiment.png b/documentation/docs/guide/experiments/delete-experiment.png
new file mode 100644
index 0000000000000000000000000000000000000000..841cf2414f98cac7e0afab46c00c9a9dd218683c
--- /dev/null
+++ b/documentation/docs/guide/experiments/delete-experiment.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86e59f8f17d396c994f72984e613be56a1a287684aa160e29b5199594e9b1c6d
+size 252894
diff --git a/documentation/docs/guide/experiments/evaluate-model-using-llm.md b/documentation/docs/guide/experiments/evaluate-model-using-llm.md
new file mode 100644
index 0000000000000000000000000000000000000000..946a13878ed0f0831d20900dd3b3a4db0f78b19c
--- /dev/null
+++ b/documentation/docs/guide/experiments/evaluate-model-using-llm.md
@@ -0,0 +1,29 @@
+# Evaluate model using an AI judge
+
+H2O LLM Studio provides the option to use an AI Judge like ChatGPT or a local LLM deployment to evaluate a fine-tuned model.
+
+Follow the instructions below to specify a local LLM to evaluate the responses of the fine-tuned model.
+
+1. Have an endpoint running for the local LLM deployment that supports the OpenAI API format, specifically the [Chat Completions API](https://platform.openai.com/docs/guides/text-generation/chat-completions-api).
+
+2. Start the H2O LLM Studio server with the following environment variable that points to the endpoint.
+   ```bash
+   export OPENAI_API_BASE="http://111.111.111.111:8000/v1"
+   ```
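+
+   Optionally, you can sanity-check that the endpoint speaks the Chat Completions API before starting H2O LLM Studio. The model name below is an assumption; use one actually served by your deployment:
+
+   ```bash
+   curl "$OPENAI_API_BASE/chat/completions" \
+     -H "Content-Type: application/json" \
+     -d '{"model": "my-local-model", "messages": [{"role": "user", "content": "Hello"}]}'
+   ```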
+
+3. Once H2O LLM Studio is up and running, click **Settings** on the left navigation panel to validate that the endpoint is being used correctly. The **Use OpenAI API on Azure** setting must be set to Off, and the environment variable that was set above should be the **OpenAI API Endpoint** value as shown below.
+ 
+
+ :::info
+ Note that changing the value of this field here on the GUI has no effect. This is only for testing the correct setting of the environment variable.
+ :::
+
+4. Run an experiment using `GPT` as the **Metric** and the relevant model name available at your endpoint as the **Metric Gpt Model**.
+ 
+
+5. Validate that it is working as intended by checking the logs. Calls to the LLM judge should now be directed to your own LLM endpoint.
+ 
+
+
+
+
diff --git a/documentation/docs/guide/experiments/experiment-settings.md b/documentation/docs/guide/experiments/experiment-settings.md
new file mode 100644
index 0000000000000000000000000000000000000000..fea7364908910d38ea92a062ac5209e78716e9b6
--- /dev/null
+++ b/documentation/docs/guide/experiments/experiment-settings.md
@@ -0,0 +1,468 @@
+---
+description: All the settings needed for creating an experiment are explored in this page.
+---
+import GeneralSettingsDataset from '../../tooltips/experiments/_dataset.mdx';
+import GeneralSettingsProblemType from '../../tooltips/experiments/_problem-type.mdx';
+import GSImportConfigFromYaml from '../../tooltips/experiments/_import-config-from-yaml.mdx';
+import GSExperimentName from '../../tooltips/experiments/_experiment-name.mdx';
+import GSLLMBackbone from '../../tooltips/experiments/_llm-backbone.mdx';
+import DSTrainDataframe from '../../tooltips/experiments/_train-dataframe.mdx';
+import DSvalidationStrategy from '../../tooltips/experiments/_validation-strategy.mdx';
+import DSvalidationSize from '../../tooltips/experiments/_validation-size.mdx';
+import DSdataSample from '../../tooltips/experiments/_data-sample.mdx';
+import DSpromptColumn from '../../tooltips/experiments/_prompt-column.mdx';
+import DSPromptColumnSeparator from '../../tooltips/experiments/_prompt-column-separator.mdx';
+import DSsystemColumn from '../../tooltips/experiments/_system-column.mdx';
+import DSanswerColumn from '../../tooltips/experiments/_answer-column.mdx';
+import DSparentIdColumn from '../../tooltips/experiments/_parent-id-column.mdx';
+import DStextPromptStart from '../../tooltips/experiments/_text-prompt-start.mdx';
+import DStextAnswerSeparator from '../../tooltips/experiments/_text-answer-separator.mdx';
+import DSaddEosTokentoprompt from '../../tooltips/experiments/_add-eos-token-to-prompt.mdx';
+import DSaddEosTokentoanswer from '../../tooltips/experiments/_add-eos-token-to-answer.mdx';
+import DSmaskPromptlabels from '../../tooltips/experiments/_mask-prompt-labels.mdx';
+import TSmaxLength from '../../tooltips/experiments/_max-length.mdx';
+import TSaddpromptanswertokens from '../../tooltips/experiments/_add-prompt-answer-tokens.mdx';
+import TSpaddingQuantile from '../../tooltips/experiments/_padding-quantile.mdx';
+import ASBackboneDtype from '../../tooltips/experiments/_backbone-dtype.mdx';
+import ASGradientcheckpointing from '../../tooltips/experiments/_gradient-checkpointing.mdx';
+import ASintermediateDropout from '../../tooltips/experiments/_intermediate-dropout.mdx';
+import ASpretrainedWeights from '../../tooltips/experiments/_pretrained-weights.mdx';
+import TSoptimizer from '../../tooltips/experiments/_optimizer.mdx';
+import TSlossfunction from '../../tooltips/experiments/_loss-function.mdx';
+import TSlearningRate from '../../tooltips/experiments/_learning-rate.mdx';
+import TSdifferentialLearningRateLayers from '../../tooltips/experiments/_differential-learning-rate-layers.mdx';
+import TSfreezeLayers from '../../tooltips/experiments/_freeze-layers.mdx';
+import TSattentionImplementation from '../../tooltips/experiments/_attention-implementation.mdx';
+import TSbatchSize from '../../tooltips/experiments/_batch-size.mdx';
+import TSepochs from '../../tooltips/experiments/_epochs.mdx';
+import TSschedule from '../../tooltips/experiments/_schedule.mdx';
+import TSminLearningRateRatio from '../../tooltips/experiments/_min-learning-rate-ratio.mdx';
+import TSwarmupEpochs from '../../tooltips/experiments/_warmup-epochs.mdx';
+import TSweightDecay from '../../tooltips/experiments/_weight-decay.mdx';
+import TSGradientclip from '../../tooltips/experiments/_gradient-clip.mdx';
+import TSgradAccumulation from '../../tooltips/experiments/_grad-accumulation.mdx';
+import TSlora from '../../tooltips/experiments/_lora.mdx';
+import TSuseDora from '../../tooltips/experiments/_use-dora.mdx';
+import TSloraR from '../../tooltips/experiments/_lora-r.mdx';
+import TSloraAlpha from '../../tooltips/experiments/_lora-alpha.mdx';
+import TSloraDropout from '../../tooltips/experiments/_lora-dropout.mdx';
+import TSuseRSlora from '../../tooltips/experiments/_use-rslora.mdx';
+import TSloraTargetModules from '../../tooltips/experiments/_lora-target-modules.mdx';
+import TSloraUnfreezeLayers from '../../tooltips/experiments/_lora-unfreeze-layers.mdx';
+import TSsavecheckpoint from '../../tooltips/experiments/_save-checkpoint.mdx';
+import TSevaluationepochs from '../../tooltips/experiments/_evaluation-epochs.mdx';
+import TSevaluationbeforetraining from '../../tooltips/experiments/_evaluate-before-training.mdx';
+import TStrainvalidationdata from '../../tooltips/experiments/_train-validation-data.mdx';
+import AStokenmaskprobability from '../../tooltips/experiments/_token-mask-probability.mdx';
+import ASskipParentprobability from '../../tooltips/experiments/_skip-parent-probability.mdx';
+import ASrandomparentprobability from '../../tooltips/experiments/_random-parent-probability.mdx';
+import ASneftunenoisealpha from '../../tooltips/experiments/_neftune_noise_alpha.mdx';
+import PSmetric from '../../tooltips/experiments/_metric.mdx';
+import PSmetricgptmodel from '../../tooltips/experiments/_metric-gpt-model.mdx';
+import PSmetricgpttemplate from '../../tooltips/experiments/_metric-gpt-template.mdx';
+import PSminlengthinference from '../../tooltips/experiments/_min-length-inference.mdx';
+import PSmaxlengthinference from '../../tooltips/experiments/_max-length-inference.mdx';
+import PSbatchsizeinference from '../../tooltips/experiments/_batch-size-inference.mdx';
+import PSdosample from '../../tooltips/experiments/_do-sample.mdx';
+import PSnumbeams from '../../tooltips/experiments/_num-beams.mdx';
+import PStemperature from '../../tooltips/experiments/_temperature.mdx';
+import PSrepetitionpenalty from '../../tooltips/experiments/_repetition-penalty.mdx';
+import PSstoptokens from '../../tooltips/experiments/_stop-tokens.mdx';
+import PStopk from '../../tooltips/experiments/_top-k.mdx';
+import PStopp from '../../tooltips/experiments/_top-p.mdx';
+import ESgpus from '../../tooltips/experiments/_gpus.mdx';
+import ESmixedprecision from '../../tooltips/experiments/_mixed-precision.mdx';
+import EScompilemodel from '../../tooltips/experiments/_compile-model.mdx';
+import ESfindunusedparameters from '../../tooltips/experiments/_find-unused-parameters.mdx';
+import EStrustremotecode from '../../tooltips/experiments/_trust-remote-code.mdx';
+import EShuggingfacebranch from '../../tooltips/experiments/_huggingface-branch.mdx';
+import ESnumofworkers from '../../tooltips/experiments/_number-of-workers.mdx';
+import ESseed from '../../tooltips/experiments/_seed.mdx';
+import LSlogstepsize from '../../tooltips/experiments/_log-step-size.mdx';
+import LSlogallranks from '../../tooltips/experiments/_log-all-ranks.mdx';
+import LSlogger from '../../tooltips/experiments/_logger.mdx';
+import LSneptuneproject from '../../tooltips/experiments/_neptune-project.mdx';
+import LSwandbproject from '../../tooltips/experiments/_wandb-project.mdx';
+import LSwandbentity from '../../tooltips/experiments/_wandb-entity.mdx';
+import NumClasses from '../../tooltips/experiments/_num-classes.mdx';
+
+# Experiment settings
+
+The settings for creating an experiment are grouped into the following sections:
+ - [General settings](#general-settings)
+ - [Dataset settings](#dataset-settings)
+ - [Tokenizer settings](#tokenizer-settings)
+ - [Architecture settings](#architecture-settings)
+ - [Training settings](#training-settings)
+ - [Augmentation settings](#augmentation-settings)
+ - [Prediction settings](#prediction-settings)
+ - [Environment settings](#environment-settings)
+ - [Logging settings](#logging-settings)
+
+The settings under each category are listed and described below.
+
+## General settings
+
+### Dataset
+
+<GeneralSettingsDataset/>
+
+### Problem type
+
+<GeneralSettingsProblemType/>
+
+### Import config from YAML
+
+<GSImportConfigFromYaml/>
+
+### Experiment name
+
+<GSExperimentName/>
+
+### LLM backbone
+
+<GSLLMBackbone/>
+
+## Dataset settings
+
+### Train dataframe
+
+<DSTrainDataframe/>
+
+### Validation strategy
+
+<DSvalidationStrategy/>
+
+### Validation size
+
+<DSvalidationSize/>
+
+### Data sample
+
+<DSdataSample/>
+
+### System column
+
+<DSsystemColumn/>
+
+### Prompt column
+
+<DSpromptColumn/>
+
+### Prompt column separator
+
+<DSPromptColumnSeparator/>
+
+### Answer column
+
+<DSanswerColumn/>
+
+### Parent ID column
+
+<DSparentIdColumn/>
+
+### ID column
+
+### Text prompt start
+
+<DStextPromptStart/>
+
+### Text answer separator
+
+<DStextAnswerSeparator/>
+
+### Add EOS token to prompt
+
+<DSaddEosTokentoprompt/>
+
+### Add EOS token to answer
+
+<DSaddEosTokentoanswer/>
+
+### Mask prompt labels
+
+<DSmaskPromptlabels/>
+
+### Num classes
+
+<NumClasses/>
+
+The **Num classes** field should be set to the total number of classes in the [answer column](../datasets/import-dataset.md#answer-column) of the dataset.
+
+## Tokenizer settings
+
+### Max length
+
+<TSmaxLength/>
+
+### Add prompt answer tokens
+
+<TSaddpromptanswertokens/>
+
+### Padding quantile
+
+<TSpaddingQuantile/>
+
+## Architecture settings
+
+### Backbone Dtype
+
+<ASBackboneDtype/>
+
+### Gradient Checkpointing
+
+<ASGradientcheckpointing/>
+
+### Intermediate dropout
+
+<ASintermediateDropout/>
+
+### Pretrained weights
+
+<ASpretrainedWeights/>
+
+## Training settings
+
+### Loss function
+
+<TSlossfunction/>
+
+For multiclass classification problems, set the loss function to **Cross-entropy**.
+
+### Optimizer
+
+<TSoptimizer/>
+
+### Learning rate
+
+<TSlearningRate/>
+
+### Differential learning rate layers
+
+<TSdifferentialLearningRateLayers/>
+
+By default, H2O LLM Studio applies **Differential learning rate Layers**, with the learning rate for the `classification_head` being 10 times smaller than the learning rate for the rest of the model.
+
+### Freeze layers
+
+<TSfreezeLayers/>
+
+### Attention Implementation
+
+<TSattentionImplementation/>
+
+### Batch size
+
+<TSbatchSize/>
+
+### Epochs
+
+<TSepochs/>
+
+### Schedule
+
+<TSschedule/>
+
+### Min Learning Rate Ratio
+
+<TSminLearningRateRatio/>
+
+### Warmup epochs
+
+<TSwarmupEpochs/>
+
+### Weight decay
+
+<TSweightDecay/>
+
+### Gradient clip
+
+<TSGradientclip/>
+
+### Grad accumulation
+
+<TSgradAccumulation/>
+
+### Lora
+
+<TSlora/>
+
+### Use Dora
+
+<TSuseDora/>
+
+### Lora R
+
+<TSloraR/>
+
+### Lora Alpha
+
+<TSloraAlpha/>
+
+### Lora dropout
+
+<TSloraDropout/>
+
+### Use RS Lora
+
+<TSuseRSlora/>
+
+### Lora target modules
+
+<TSloraTargetModules/>
+
+### Lora unfreeze layers
+
+<TSloraUnfreezeLayers/>
+
+### Save checkpoint
+
+<TSsavecheckpoint/>
+
+### Evaluation epochs
+
+<TSevaluationepochs/>
+
+### Evaluate before training
+
+<TSevaluationbeforetraining/>
+
+### Train validation data
+
+<TStrainvalidationdata/>
+
+## Augmentation settings
+
+### Token mask probability
+
+<AStokenmaskprobability/>
+
+### Skip parent probability
+
+<ASskipParentprobability/>
+
+### Random parent probability
+
+<ASrandomparentprobability/>
+
+### Neftune noise alpha
+
+<ASneftunenoisealpha/>
+
+## Prediction settings
+
+### Metric
+
+<PSmetric/>
+
+### Metric GPT model
+
+<PSmetricgptmodel/>
+
+### Metric GPT template
+
+<PSmetricgpttemplate/>
+
+### Min length inference
+
+<PSminlengthinference/>
+
+### Max length inference
+
+<PSmaxlengthinference/>
+
+### Batch size inference
+
+<PSbatchsizeinference/>
+
+### Do sample
+
+<PSdosample/>
+
+### Num beams
+
+<PSnumbeams/>
+
+### Temperature
+
+<PStemperature/>
+
+### Repetition penalty
+
+<PSrepetitionpenalty/>
+
+### Stop tokens
+
+<PSstoptokens/>
+
+### Top K
+
+<PStopk/>
+
+### Top P
+
+<PStopp/>
+
+## Environment settings
+
+### GPUs
+
+<ESgpus/>
+
+### Mixed precision
+
+<ESmixedprecision/>
+
+### Compile model
+
+<EScompilemodel/>
+
+### Find unused parameters
+
+<ESfindunusedparameters/>
+
+### Trust remote code
+
+<EStrustremotecode/>
+
+### Hugging Face branch
+
+<EShuggingfacebranch/>
+
+### Number of workers
+
+<ESnumofworkers/>
+
+### Seed
+
+<ESseed/>
+
+## Logging settings
+
+### Log step size
+
+<LSlogstepsize/>
+
+### Log all ranks
+
+<LSlogallranks/>
+
+### Logger
+
+<LSlogger/>
+
+### Neptune project
+
+<LSneptuneproject/>
+
+### W&B project
+
+<LSwandbproject/>
+
+### W&B entity
+
+<LSwandbentity/>
+
diff --git a/documentation/docs/guide/experiments/export-model-to-huggingface.png b/documentation/docs/guide/experiments/export-model-to-huggingface.png
new file mode 100644
index 0000000000000000000000000000000000000000..8448b78c55dba66b657b88558ec11057a0f95944
--- /dev/null
+++ b/documentation/docs/guide/experiments/export-model-to-huggingface.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c3d1bbaf69ae3d70eb064a57e65a7141731326104d3e9e1c1a22f1849d880b7
+size 305567
diff --git a/documentation/docs/guide/experiments/export-trained-model.md b/documentation/docs/guide/experiments/export-trained-model.md
new file mode 100644
index 0000000000000000000000000000000000000000..210b3fe02d818faf88c7bbc332b0a339cf4fbd3f
--- /dev/null
+++ b/documentation/docs/guide/experiments/export-trained-model.md
@@ -0,0 +1,61 @@
+---
+description: If you are ready to share your trained model with a broader community, H2O LLM Studio allows you to export the fine-tuned model to Hugging Face Hub with a single click.
+---
+# Publish model to Hugging Face
+
+If you are ready to share your trained model with a broader community, H2O LLM Studio allows you to export the fine-tuned model to [Hugging Face](https://huggingface.co/) with a single click.
+
+:::info note
+Before publishing your model to the Hugging Face Hub, you need to have an API key with write access. To obtain an API token with write access, follow the [instructions provided by Hugging Face](https://huggingface.co/docs/hub/security-tokens), which involve creating an account, logging in, and generating an access token with the appropriate permission.
+:::
+
+To publish a trained model to Hugging Face Hub:
+
+1. On the H2O LLM Studio left-navigation pane, click **View experiments**. You will see the experiments table with a list of all the experiments you have launched so far.
+
+2. Click the name of the experiment that you want to export as a model.
+
+3. Click **Push checkpoint to huggingface**.
+
+4. Enter the **Account name** on Hugging Face to push the model to a particular account. Leaving it empty will push it to the default user account.
+
+5. Enter the **Hugging Face API Key** with write access.
+
+6. Click **Export**.
+
+ 
+
+## Download a model
+
+Click **Download model** on the **View experiments** page to download the model locally.
+
+Use the following code snippet to utilize the converted model in Jupyter Notebook or Google Colab.
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model_name = "path_to_downloaded_model" # either local folder or Hugging Face model name
+
+# Important: The prompt needs to be in the same format the model was trained with.
+# You can find an example prompt in the experiment logs, and you can replace the
+# question below with any question you want to ask the model.
+prompt = "<|prompt|>How are you?<|endoftext|><|answer|>"
+
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name)
+model.cuda().eval()
+
+inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to("cuda")
+# The generate configuration can be modified to suit your needs.
+tokens = model.generate(
+    **inputs,
+ max_new_tokens=256,
+ temperature=0.3,
+ repetition_penalty=1.2,
+ num_beams=1
+)[0]
+tokens = tokens[inputs["input_ids"].shape[1]:]
+answer = tokenizer.decode(tokens, skip_special_tokens=True)
+print(answer)
+```
+
+You can enter any question for the model and change the parameters to get different outputs.
\ No newline at end of file
diff --git a/documentation/docs/guide/experiments/import-to-h2ogpt.md b/documentation/docs/guide/experiments/import-to-h2ogpt.md
new file mode 100644
index 0000000000000000000000000000000000000000..d2b92249d36e17905dc37d4abedd31a5f260d29c
--- /dev/null
+++ b/documentation/docs/guide/experiments/import-to-h2ogpt.md
@@ -0,0 +1,48 @@
+---
+description: Once the model has been fine-tuned using H2O LLM Studio, you can then use h2oGPT to query, summarize, and chat with your model.
+---
+import Tabs from '@theme/Tabs';
+import TabItem from '@theme/TabItem';
+
+# Import a model to h2oGPT
+
+Once the model has been fine-tuned using H2O LLM Studio, you can then use [h2oGPT](https://github.com/h2oai/h2ogpt/blob/main/README.md) to query, summarize, and chat with your model.
+
+The most common method to get the model from H2O LLM Studio over to h2oGPT is to import it into h2oGPT via Hugging Face. However, if your data is sensitive, you can also choose to download the model locally to your machine, and then import it directly into h2oGPT.
+
+You can use any of the following methods:
+
+- Publish the model to Hugging Face and import it from Hugging Face
+- Download the model and import it to h2oGPT by specifying the local folder path
+- Download the model and upload it to h2oGPT using the file upload option on the UI
+- Pull a model from a GitHub repository or a resolved web link
+
+## Steps
+
+1. [Publish the model to Hugging Face](export-trained-model.md) or [download the model locally](export-trained-model.md#download-a-model).
+
+2. If you opt to download the model, make sure you extract the downloaded .zip file.
+
+3. Use the following command to import it into h2oGPT.
+   ```bash
+   python generate.py --base_model=[link_or_path_to_folder]
+   ```
+
+   :::note Examples
+
+   <Tabs>
+   <TabItem value="hf" label="Hugging Face model">
+
+   ```bash
+   python generate.py --base_model=HuggingFaceH4/zephyr-7b-beta
+   ```
+
+   </TabItem>
+   <TabItem value="gguf" label="Local GGUF file">
+
+   ```bash
+   python generate.py --base_model=zephyr-7b-beta.Q5_K_M.gguf
+   ```
+
+   </TabItem>
+   <TabItem value="awq" label="AWQ quantized model">
+
+   ```bash
+   python generate.py --base_model=TheBloke/zephyr-7B-beta-AWQ
+   ```
+
+   </TabItem>
+   </Tabs>
+
+   :::
+
+:::info
+For more information, see the [h2oGPT documentation](https://github.com/h2oai/h2ogpt/blob/main/docs/FAQ.md#adding-models).
+:::
+
diff --git a/documentation/docs/guide/experiments/local-llm-judge-logs.png b/documentation/docs/guide/experiments/local-llm-judge-logs.png
new file mode 100644
index 0000000000000000000000000000000000000000..3d4591c2194283e3799ba6e0c1e0a97564c50864
--- /dev/null
+++ b/documentation/docs/guide/experiments/local-llm-judge-logs.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9dc7efc22ec88fa93aa7800a18a12cb1aa54dd1458bc4a2a61a1ed318d241987
+size 293633
diff --git a/documentation/docs/guide/experiments/run-experiment.png b/documentation/docs/guide/experiments/run-experiment.png
new file mode 100644
index 0000000000000000000000000000000000000000..c98a774871594fdd8d8939f4b46a06b04da9993c
--- /dev/null
+++ b/documentation/docs/guide/experiments/run-experiment.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7573a8ab68bcd52c198c2a61f65cb79c46da9e7bc3707e46488a94675917572
+size 242564
diff --git a/documentation/docs/guide/experiments/set-endpoint.png b/documentation/docs/guide/experiments/set-endpoint.png
new file mode 100644
index 0000000000000000000000000000000000000000..c1f721e49341b4cad00afee60677fbe49fe9912a
--- /dev/null
+++ b/documentation/docs/guide/experiments/set-endpoint.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f10e80a78557921b5120f2b5364ee3e618442c7a199f25d07adba076a0f4c48
+size 355226
diff --git a/documentation/docs/guide/experiments/set-metric-model.png b/documentation/docs/guide/experiments/set-metric-model.png
new file mode 100644
index 0000000000000000000000000000000000000000..da9294abf4feb81a6e077ac06698e75987dc953d
--- /dev/null
+++ b/documentation/docs/guide/experiments/set-metric-model.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b834328a0a3823f99ebfcccadb9d1118109d178dfdcab78f8dae7db4174a722a
+size 314770
diff --git a/documentation/docs/guide/experiments/stop-experiment.png b/documentation/docs/guide/experiments/stop-experiment.png
new file mode 100644
index 0000000000000000000000000000000000000000..2af497eb9555ee924befa2f409e76b9e09196477
--- /dev/null
+++ b/documentation/docs/guide/experiments/stop-experiment.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:965388c636077a656ba435ad3a6df7c54cc4fee5bc3b09dbfd90275645cf6cdf
+size 252683
diff --git a/documentation/docs/guide/experiments/supported-problem-types.md b/documentation/docs/guide/experiments/supported-problem-types.md
new file mode 100644
index 0000000000000000000000000000000000000000..b6a856044606314edba676fd478edbadb35803a8
--- /dev/null
+++ b/documentation/docs/guide/experiments/supported-problem-types.md
@@ -0,0 +1,30 @@
+# Supported problem types
+
+## Overview
+
+H2O LLM Studio supports various problem types that allow users to fine-tune models for different tasks. The five supported problem types are explained below.
+
+## Causal language modeling
+
+- **Description:** Causal language modeling involves predicting the next token in a sequence, based only on the preceding tokens (i.e., the left side of the sequence). It is commonly used for tasks such as text generation. It is used to fine-tune large language models.
+
+## Causal classification modeling
+
+- **Description:** Causal classification modeling involves assigning one or more categorical target labels to an input text. It is used for fine-tuning models to perform text classification tasks.
+
+- **Supported classification tasks:** Binary, multi-class, and multi-label classification.
+
+## Causal regression modeling
+
+- **Description:** Causal regression modeling assigns one or more continuous target labels to an input text. It is used to fine-tune models for text regression tasks.
+
+- **Supported regression tasks:** Multi-label regression.
+
+## Sequence to sequence modeling
+
+- **Description:** A type of machine learning architecture designed to transform one sequence into another. It is commonly used for tasks like machine translation, text summarization, and speech recognition.
+
+## DPO modeling
+
+- **Description:** DPO modeling fine-tunes large language models using Direct Preference Optimization (DPO), a method that helps large, unsupervised language models better match human preferences using a simple classification approach.
+
diff --git a/documentation/docs/guide/experiments/view-an-experiment.md b/documentation/docs/guide/experiments/view-an-experiment.md
new file mode 100644
index 0000000000000000000000000000000000000000..aec0b4fd937419eb6404596c135876dc8e7516db
--- /dev/null
+++ b/documentation/docs/guide/experiments/view-an-experiment.md
@@ -0,0 +1,85 @@
+---
+description: You can view, rename, stop, or delete your experiments once you launch them. You can also create a new experiment based on an experiment you have already launched.
+---
+import Icon from "@material-ui/core/Icon";
+
+# View and manage experiments
+
+You can view, rename, stop, or delete your experiments once you launch them. You can also create a new experiment based on an experiment you have already launched.
+
+## View an experiment
+
+To view an experiment:
+
+1. On the H2O LLM Studio left-navigation pane, click **View experiments**.
+
+2. You will see the experiments table with a list of all the experiments you have launched so far. Click the name of the experiment that you want to view.
+
+ 
+
+## Experiment tabs
+
+Once you click the name of the experiment, you will see the following tabs that provide details and different aspects of your experiment.
+
+- **Charts**: This tab visually represents the train/validation loss, metrics, and learning rate. These charts allow you to easily track your model’s performance as it trains.
+
+ 
+
+- **Summary**: This tab contains the following details about an experiment.
+
+ | Name | Description |
+ | ----------- | ------------------------------------ |
+ | **Name** | Name of the experiment. |
+ | **Dataset** | Name of the dataset. |
+ | **Problem type** | The problem type of the experiment. |
+ | **Seed** | The random seed value that H2O LLM Studio uses during model training. |
+ | **GPU list** | The list of GPUs H2O LLM Studio can use for the experiment. |
+ | **Loss** | The loss function. |
+ | **Metric** | The metric to evaluate the model’s performance. |
+ | **Val metric** | The measure of how well the experiment was performed. |
+
+- **Train data insights**: This tab displays the model’s first batch, so you can verify that the input data representation is correct. It also provides insight into how your data is being processed and can help identify potential issues early on in the experiment.
+
+- **Validation prediction insights**: This tab displays model predictions for random, best, and worst validation samples. This tab becomes available after the first validation run and allows you to evaluate how well your model generalizes to new data.
+
+ 
+
+ 
+
+   The **Worst validation samples** give you an idea of where the model is struggling and where further fine-tuning could help.
+
+- **Logs and Config tabs**: These two tabs show you the logs and configuration of the experiment. You can keep track of any changes made and quickly troubleshoot the issues that arise.
+
+- **Chat**: This tab provides a unique opportunity to interact with your trained model and get instant feedback on its performance. The **Chat** tab becomes available after the training is completed and can be used to evaluate how well your model performs in a conversational setting.
+
+ :::info note
+ You can use the **Chat** feature only when there are no other experiments running. The chatbot is unavailable if the GPU is occupied by another experiment.
+ :::
+
+ 
+
+## Stop an experiment
+
+You can stop a running experiment if you no longer need it to be completed.
+
+1. On the H2O LLM Studio left-navigation pane, click **View experiments**.
+2. Click **Stop experiments**.
+3. Select the experiment(s) that you want to stop.
+4. Click **Stop experiments**.
+
+You can also click **Stop experiment** on the <Icon>more_vert</Icon> Kebab menu of the relevant experiment row to stop an experiment from running.
+
+ 
+
+## Delete an experiment
+
+When an experiment is no longer needed, you can delete it. Deleted experiments are permanently removed from the H2O LLM Studio instance.
+
+1. On the H2O LLM Studio left-navigation pane, click **View experiments**.
+2. Click **Delete experiments**.
+3. Select the experiment(s) that you want to delete and click **Delete experiments**.
+4. Click **Delete** to confirm deletion.
+
+You can also click **Delete experiment** on the <Icon>more_vert</Icon> Kebab menu of the relevant experiment row to delete an experiment.
+
+ 
\ No newline at end of file
diff --git a/documentation/docs/guide/experiments/view-experiments.png b/documentation/docs/guide/experiments/view-experiments.png
new file mode 100644
index 0000000000000000000000000000000000000000..9f1633bad742b24b111d767bf14a7f686b2c6c85
--- /dev/null
+++ b/documentation/docs/guide/experiments/view-experiments.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b2682c347b3cc34869f0718a608f6aeb0fa18903f1c38050ebca76c9544061a
+size 226328
diff --git a/documentation/docs/guide/experiments/worst-validation-sample.png b/documentation/docs/guide/experiments/worst-validation-sample.png
new file mode 100644
index 0000000000000000000000000000000000000000..4e00c097ed603c0cdad7e3c9c42a69ab1f0bb97e
--- /dev/null
+++ b/documentation/docs/guide/experiments/worst-validation-sample.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:437642132711703f3b4564deedef74c99ebfe0e436e1d632906849cf2d62e796
+size 256703
diff --git a/documentation/docs/index.md b/documentation/docs/index.md
new file mode 100644
index 0000000000000000000000000000000000000000..7823eb6f7e0b0156f99bed7b963503a7312ff000
--- /dev/null
+++ b/documentation/docs/index.md
@@ -0,0 +1,110 @@
+---
+slug: /
+displayed_sidebar: defaultSidebar
+title: H2O LLM Studio | Docs
+hide_table_of_contents: true
+hide_title: true
+description: 'A framework and no-code GUI designed for fine-tuning state-of-the-art large language models (LLMs)'
+---
+
+import H2OHome from '@site/src/components/H2OHome';
+
+
diff --git a/documentation/docs/key-terms.md b/documentation/docs/key-terms.md
new file mode 100644
index 0000000000000000000000000000000000000000..e934e3113b6152c13fff8b5c53126a383561d03c
--- /dev/null
+++ b/documentation/docs/key-terms.md
@@ -0,0 +1,78 @@
+---
+description: H2O LLM Studio uses several key terms across its documentation.
+---
+# Key terms
+
+H2O LLM Studio uses several key terms across its documentation, and each, in turn, is explained in the sections below.
+
+## Prompt Engineering
+
+Prompt engineering involves crafting precise and effective input queries to guide language models in generating desired outputs or responses.
+
+## Agents
+
+Software entities or components that interact with data or perform tasks within a system.
+
+## ELO
+
+An algorithm or method used to assess and rank the performance or accuracy of language models based on their proficiency in understanding and processing textual data.
+
+## Vector Store
+
+A Vector Store stores numerical representations of text for fast access in language models.
+
+## Pre-training
+
+The initial phase of training a machine learning model on a large dataset to learn general features before fine-tuning on a specific task.
+
+## Attention
+
+A mechanism that enables models to focus on specific parts of input data relevant to the task at hand, enhancing their understanding and performance.
+
+## Embedding
+
+Embedding refers to a mathematical representation of words or tokens in a numerical vector space, enabling machine learning models to understand and process language based on their context and relationships.
+
+## Language Model
+
+A language model is an AI system that understands and generates human language, predicting and generating text based on patterns and context within a given sequence of words.
+
+## Transformer
+
+A Transformer refers to a neural network architecture specifically designed for processing sequential data like text, using attention mechanisms to learn contextual relationships between words or tokens.
+
+## Encoders and Decoders
+
+Encoders and decoders are vital parts of sequence-to-sequence models used in natural language processing. Encoders process input data into a fixed-size representation, while decoders generate an output sequence based on that representation.
+
+## Text generation
+
+Text generation is the process of creating written content, such as sentences or paragraphs, using machine learning or AI algorithms based on patterns learned from existing text data.
+
+## In-context learning
+
+In-context learning refers to the process where a machine learning model continuously improves and adapts by considering the context of new information within its existing knowledge, enhancing its accuracy and understanding over time.
+
+## Few-shot learning
+
+Few-shot learning refers to a machine learning technique where a model can learn from a very small amount of labeled data to generalize and make predictions accurately on new, unseen data.
+
+## Summarization
+
+Summarization is the process of condensing a larger piece of text into a shorter, coherent version while retaining its essential information.
+
+## Fine-tuning
+
+Fine-tuning refers to adjusting and optimizing a pre-trained machine learning model using specific data to enhance its performance for a particular task.
+
+## GPT
+
+GPT stands for "Generative Pre-trained Transformer," a type of language model that uses transformers to understand and generate human-like text based on vast amounts of training data.
+
+## GPU deployment
+
+GPU deployment is the utilization of graphics processing units (GPUs) to execute and accelerate the computations involved in deploying machine learning models, improving speed and efficiency in model inference or training.
+
+## Tokenization
+
+Tokenization is the process of breaking text into smaller units, typically words or phrases, to analyze or process them individually within a natural language processing system.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_add-eos-token-to-answer.mdx b/documentation/docs/tooltips/experiments/_add-eos-token-to-answer.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..d884c3564e7f1fc58bf6afe56fc64b0244f8b87f
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_add-eos-token-to-answer.mdx
@@ -0,0 +1 @@
+Adds EOS token at end of answer.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_add-eos-token-to-prompt.mdx b/documentation/docs/tooltips/experiments/_add-eos-token-to-prompt.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..2b78f90d04f38c07dcf54bb185e7047e28d97384
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_add-eos-token-to-prompt.mdx
@@ -0,0 +1 @@
+Adds EOS token at end of prompt.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_add-eos-token-to-system.mdx b/documentation/docs/tooltips/experiments/_add-eos-token-to-system.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..a1904cd07e18666c89c759f637e9401fd3505ba0
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_add-eos-token-to-system.mdx
@@ -0,0 +1 @@
+Adds EOS token at end of system input.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_add-prompt-answer-tokens.mdx b/documentation/docs/tooltips/experiments/_add-prompt-answer-tokens.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..b136c51bf39789a688899a8699f2042ab11652c5
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_add-prompt-answer-tokens.mdx
@@ -0,0 +1 @@
+Adds system, prompt and answer tokens as new tokens to the tokenizer. It is recommended to also set `Force Embedding Gradients` in this case.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_answer-column.mdx b/documentation/docs/tooltips/experiments/_answer-column.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..68741d133450f7e0caf8a41cb01cee6dcc81a226
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_answer-column.mdx
@@ -0,0 +1,10 @@
+The column in the dataset containing the expected output.
+
+For classification, this needs to be an integer column starting from zero containing the class label, while for regression, it needs to be a float column.
+
+Multiple target columns can be selected for classification and regression, supporting multilabel problems.
+In detail, we support the following cases:
+- Multi-class classification requires a single column containing the class label
+- Binary classification requires a single column containing a binary integer label
+- Multilabel classification requires each column to refer to one label encoded with a binary integer label
+- For regression, each target column requires a float value
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_attention-implementation.mdx b/documentation/docs/tooltips/experiments/_attention-implementation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..3428d812a3f1c91f9f7488b4eea41400b20f551a
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_attention-implementation.mdx
@@ -0,0 +1,6 @@
+Allows changing the attention implementation that is used.
+
+- **Auto** automatically chooses the implementation based on system availability.
+- **Eager** relies on the vanilla attention implementation in Python.
+- **SDPA** uses scaled dot-product attention in PyTorch (see the sketch below).
+- **Flash Attention 2** explicitly uses FA2, which requires the `flash_attn` package.
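+
+As a rough illustration of the SDPA option, PyTorch exposes scaled dot-product attention directly (shapes are illustrative):
+
+```python
+import torch
+import torch.nn.functional as F
+
+# Illustrative shapes: (batch, num_heads, seq_len, head_dim)
+q = torch.randn(1, 8, 16, 64)
+k = torch.randn(1, 8, 16, 64)
+v = torch.randn(1, 8, 16, 64)
+
+# PyTorch dispatches to an efficient fused kernel when one is available.
+out = F.scaled_dot_product_attention(q, k, v, is_causal=True)
+```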
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_backbone-dtype.mdx b/documentation/docs/tooltips/experiments/_backbone-dtype.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..1114950ef7be496b1e57eed0ad7ce273dc064939
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_backbone-dtype.mdx
@@ -0,0 +1 @@
+The datatype of the weights in the LLM backbone.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_batch-size-inference.mdx b/documentation/docs/tooltips/experiments/_batch-size-inference.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..e425403b073336be02f1ec978dee8d987b7bd4c5
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_batch-size-inference.mdx
@@ -0,0 +1 @@
+Defines the size of a mini-batch used during an iteration of inference. **Batch size** defines the batch size used per GPU.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_batch-size.mdx b/documentation/docs/tooltips/experiments/_batch-size.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..e321bd718f95363b93a795475d688020da295a37
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_batch-size.mdx
@@ -0,0 +1,3 @@
+Defines the number of training examples in a mini-batch that is used during a training iteration to estimate the error gradient before updating the model weights. **Batch size** defines the batch size used per single GPU.
+
+During model training, the training data is packed into mini-batches of a fixed size.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_beta.mdx b/documentation/docs/tooltips/experiments/_beta.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..575762b09f5fd76b2d76f386eb9bca984c474f4d
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_beta.mdx
@@ -0,0 +1,3 @@
+Beta is a temperature parameter used in the DPO loss, typically in the range of 0.1 to 0.5.
+This parameter controls the deviation from the reference model; the reference model is disregarded as beta approaches zero.
+For more details, see Section 3 of the paper: [https://arxiv.org/pdf/2305.18290.pdf](https://arxiv.org/pdf/2305.18290.pdf).
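+
+A minimal sketch of how beta enters the DPO loss (simplified; the per-sample log probabilities are assumed to be precomputed):
+
+```python
+import torch.nn.functional as F
+
+def dpo_loss(policy_chosen_logps, policy_rejected_logps,
+             ref_chosen_logps, ref_rejected_logps, beta=0.1):
+    # Log-ratios of the policy vs. the reference model.
+    chosen_ratio = policy_chosen_logps - ref_chosen_logps
+    rejected_ratio = policy_rejected_logps - ref_rejected_logps
+    # Beta scales how strongly deviations from the reference model are penalized.
+    return -F.logsigmoid(beta * (chosen_ratio - rejected_ratio)).mean()
+```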
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_chatbot-name.mdx b/documentation/docs/tooltips/experiments/_chatbot-name.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..1e4ac7271d28ff2f2517fa092a2ea7897704d2da
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_chatbot-name.mdx
@@ -0,0 +1 @@
+Set the name for your chatbot. This is the name that will be used by the chatbot to identify itself in the chat.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_chatbot_author.mdx b/documentation/docs/tooltips/experiments/_chatbot_author.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..a17c7125f3debd89855b294a07497d574811e0d7
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_chatbot_author.mdx
@@ -0,0 +1 @@
+Set the author for your chatbot. This is the name that will be taught to the model as the author of the chatbot.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_compile-model.mdx b/documentation/docs/tooltips/experiments/_compile-model.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..19ce6dc5ed3e22d68dd5d0a1ee9e387514927bf3
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_compile-model.mdx
@@ -0,0 +1 @@
+Compiles the model with Torch. Experimental!
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_data-sample-choice.mdx b/documentation/docs/tooltips/experiments/_data-sample-choice.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..c24d44a79160c0c175089774c22f116be1a6c26c
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_data-sample-choice.mdx
@@ -0,0 +1,3 @@
+Specifies which part of the data H2O LLM Studio samples, according to the percentage set in the **Data sample** setting. H2O LLM Studio does not sample the unselected data.
+
+The **Data sample choice** setting is only available if the value in the **Data sample** setting is less than **1.0**.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_data-sample.mdx b/documentation/docs/tooltips/experiments/_data-sample.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..3f746d7d439e233ce9efc7f238e67d13a42e004b
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_data-sample.mdx
@@ -0,0 +1,3 @@
+Defines the percentage of the data to use for the experiment. The default percentage is 100% (1).
+
+Changing the default value can significantly increase the training speed, but it might lead to substantially lower accuracy. Using 100% (1) of the data for final models is highly recommended.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_dataset-name.mdx b/documentation/docs/tooltips/experiments/_dataset-name.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..7a90746c2b75da85d434a720d59bb591fe560b76
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_dataset-name.mdx
@@ -0,0 +1 @@
+A suitable name for the whole dataset, which includes both the train dataframe and the validation dataframe.
diff --git a/documentation/docs/tooltips/experiments/_dataset.mdx b/documentation/docs/tooltips/experiments/_dataset.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..0e9959a7dce3a7d813819a84d27578a2a1632e6b
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_dataset.mdx
@@ -0,0 +1 @@
+Select the dataset for the experiment.
diff --git a/documentation/docs/tooltips/experiments/_deepspeed-allgather-bucket-size.mdx b/documentation/docs/tooltips/experiments/_deepspeed-allgather-bucket-size.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..e482551e8718796b9797451800b50616c31dd993
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_deepspeed-allgather-bucket-size.mdx
@@ -0,0 +1 @@
+Number of elements to allgather at a time. Limits the memory required for allgather with large model sizes. Smaller values use less GPU memory, but slow down training and validating.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_deepspeed-method.mdx b/documentation/docs/tooltips/experiments/_deepspeed-method.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..fa2e7c97293de30229ed72314193d4d2a343e30c
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_deepspeed-method.mdx
@@ -0,0 +1,2 @@
+- ZeRO-2: gradient partitioning across GPUs.
+- ZeRO-3: parameter partitioning across GPUs.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_deepspeed-offload-optimizer.mdx b/documentation/docs/tooltips/experiments/_deepspeed-offload-optimizer.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..d83fa269a019ca29942adbd9c06078ffd3b6d83d
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_deepspeed-offload-optimizer.mdx
@@ -0,0 +1 @@
+Whether to offload the optimizer to CPU to save more GPU RAM during training. Note that turning on `offload_optimizer` will further slow down training.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_deepspeed-reduce-bucket-size.mdx b/documentation/docs/tooltips/experiments/_deepspeed-reduce-bucket-size.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..afe5ef0cb1de40639f4d5e372b55ae46c0df8d9d
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_deepspeed-reduce-bucket-size.mdx
@@ -0,0 +1 @@
+Number of elements reduced/allreduced at a time. Limits the memory required for allreduce with large model sizes. Smaller values use less memory, but slow down training and validating.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_deepspeed-stage3-max-live-parameters.mdx b/documentation/docs/tooltips/experiments/_deepspeed-stage3-max-live-parameters.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..70814112197fd57d3a147a27e7580a6e4986c42a
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_deepspeed-stage3-max-live-parameters.mdx
@@ -0,0 +1 @@
+The maximum number of parameters resident per GPU before releasing. Smaller values use less memory, but slow down training.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_deepspeed-stage3-max-reuse-distance.mdx b/documentation/docs/tooltips/experiments/_deepspeed-stage3-max-reuse-distance.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..0e5beb51a6fc8861d29a49d40d12ce6bfe8cca51
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_deepspeed-stage3-max-reuse-distance.mdx
@@ -0,0 +1 @@
+Do not release a parameter if it will be reused within this threshold of parameters. Smaller values use less memory, but slow down training.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_deepspeed-stage3-param-persistence-threshold.mdx b/documentation/docs/tooltips/experiments/_deepspeed-stage3-param-persistence-threshold.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..7d6181b98d7c5f5c34b947765f3369b5bb828b37
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_deepspeed-stage3-param-persistence-threshold.mdx
@@ -0,0 +1 @@
+Do not partition parameters smaller than this threshold. Smaller values use less memory, but can greatly increase communication and slow down training and validating (especially latency-bound messages).
diff --git a/documentation/docs/tooltips/experiments/_deepspeed-stage3-prefetch-bucket-size.mdx b/documentation/docs/tooltips/experiments/_deepspeed-stage3-prefetch-bucket-size.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..7d2ee2c6256c7362392fc76869fcfd140e5e7084
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_deepspeed-stage3-prefetch-bucket-size.mdx
@@ -0,0 +1 @@
+Maximum number of parameter elements to fetch ahead of use. Smaller values use less memory, but slow down training and validating.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_differential-learning-rate-layers.mdx b/documentation/docs/tooltips/experiments/_differential-learning-rate-layers.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..8fa1c083a700b617d252ee851e1514a1e9d99a07
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_differential-learning-rate-layers.mdx
@@ -0,0 +1,8 @@
+Defines the learning rate to apply to certain layers of a model. H2O LLM Studio applies the regular learning rate to layers without a specified learning rate.
+
+- **Backbone**
+  - H2O LLM Studio applies a different learning rate to the body of the neural network architecture.
+- **Value Head**
+  - H2O LLM Studio applies a different learning rate to the value head of the neural network architecture.
+
+A common strategy is to apply a lower learning rate to the backbone of a model for better convergence and training stability.
diff --git a/documentation/docs/tooltips/experiments/_differential_learning_rate.mdx b/documentation/docs/tooltips/experiments/_differential_learning_rate.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..2c81230745462154ce78b287bb701300b788a903
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_differential_learning_rate.mdx
@@ -0,0 +1,3 @@
+Defines the differential learning rate that is applied to all selected differential learning rate layers.
+
+The **Differential learning rate** setting is available only if at least one option is selected in the **Differential learning rate layers** setting.
diff --git a/documentation/docs/tooltips/experiments/_do-sample.mdx b/documentation/docs/tooltips/experiments/_do-sample.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..31c7c3dbbdf84453bbb09ea5c2dd32ffae5c6a2a
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_do-sample.mdx
@@ -0,0 +1 @@
+Determines whether to sample from the next token distribution instead of always choosing the token with the highest probability. If turned **On**, the next token in a predicted sequence is sampled based on the probabilities. If turned **Off**, the token with the highest probability is always chosen.
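+
+A minimal sketch of the difference on next-token logits (illustrative, plain PyTorch):
+
+```python
+import torch
+
+logits = torch.randn(1, 32000)  # next-token logits (illustrative vocab size)
+probs = torch.softmax(logits, dim=-1)
+
+greedy_token = torch.argmax(probs, dim=-1)               # Do Sample = Off
+sampled_token = torch.multinomial(probs, num_samples=1)  # Do Sample = On
+```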
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_epochs.mdx b/documentation/docs/tooltips/experiments/_epochs.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..1101bfd8040482b9fb84738215b02689aed0d9d6
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_epochs.mdx
@@ -0,0 +1,5 @@
+Defines the number of epochs to train the model. In other words, it specifies the number of times the learning algorithm goes through the entire training dataset.
+
+- The **Epochs** setting is an important setting to tune because it balances under- and overfitting.
+- The learning rate highly impacts the optimal value of the epochs.
+- H2O LLM Studio enables you to utilize a pre-trained model trained on zero epochs, where H2O LLM Studio does not train the model and the pretrained model (experiment) can be evaluated as-is.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_evaluate-before-training.mdx b/documentation/docs/tooltips/experiments/_evaluate-before-training.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..8682612658ffb30cc08e739e124e7e5aa2f8044c
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_evaluate-before-training.mdx
@@ -0,0 +1 @@
+This option lets you evaluate the model before training, which can help you judge the quality of the LLM backbone before fine-tuning.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_evaluation-epochs.mdx b/documentation/docs/tooltips/experiments/_evaluation-epochs.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..7f173b947a2c13ac497f9d5dcfa1d09b06df0b00
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_evaluation-epochs.mdx
@@ -0,0 +1,5 @@
+Defines how frequently H2O LLM Studio runs the validation loop during model training. In other words, it determines the frequency (in number of epochs) at which the model evaluation is run on the validation data.
+
+- Increasing the number of *Evaluation Epochs* can speed up an experiment.
+- The **Evaluation epochs** setting is available only if the following setting is turned **Off**: **Save Best Checkpoint**.
+- The value can be a fraction of an epoch.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_experiment-name.mdx b/documentation/docs/tooltips/experiments/_experiment-name.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..eabccc41d36e368a1cd15fd1a83996ffa8473542
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_experiment-name.mdx
@@ -0,0 +1 @@
+Defines the name of the experiment.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_find-unused-parameters.mdx b/documentation/docs/tooltips/experiments/_find-unused-parameters.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..2d57bc8c0daf562c27b3665edc7105a93a433733
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_find-unused-parameters.mdx
@@ -0,0 +1,3 @@
+In Distributed Data Parallel (DDP) mode, `prepare_for_backward()` is called at the end of DDP forward pass. It traverses the autograd graph to find unused parameters when `find_unused_parameters` is set to True in DDP constructor.
+
+Note that traversing the autograd graph introduces extra overheads, so applications should only set this to True when necessary.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_freeze-layers.mdx b/documentation/docs/tooltips/experiments/_freeze-layers.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..67721a595fdad09062328f930f5a7b3ead0d3e75
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_freeze-layers.mdx
@@ -0,0 +1 @@
+An optional list of layers to freeze during training. Full layer names will be matched against selected substrings. Only available without LoRA training.
diff --git a/documentation/docs/tooltips/experiments/_gpus.mdx b/documentation/docs/tooltips/experiments/_gpus.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..888bfb7b14dfd9462102004f95484b4091d50a9d
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_gpus.mdx
@@ -0,0 +1 @@
+Determines the list of GPUs H2O LLM Studio can use for the experiment. GPUs are listed by name, referring to their system ID (starting from 1).
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_grad-accumulation.mdx b/documentation/docs/tooltips/experiments/_grad-accumulation.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..f5a140f5a060e715e517d9ab0a76e8b5f648dfed
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_grad-accumulation.mdx
@@ -0,0 +1,4 @@
+Defines the number of gradient accumulations before H2O LLM Studio updates the neural network weights during model training.
+
+- Grad accumulation can be beneficial if only small batches are selected for training. With gradient accumulation, the loss and gradients are calculated after each batch, but the model weights are updated only after the selected number of accumulations (see the sketch after this list). You can control the batch size through the **Batch size** setting.
+- Changing the default value of *Grad Accumulation* might require adjusting the learning rate and batch size.
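+
+A minimal sketch of the mechanism (hypothetical `model`, `loader`, and `optimizer`; not LLM Studio's actual training loop). The effective batch size is roughly the per-GPU batch size times the number of accumulations (times the number of GPUs):
+
+```python
+accumulation_steps = 4  # illustrative value
+
+optimizer.zero_grad()
+for step, (inputs, labels) in enumerate(loader):
+    loss = model(inputs, labels) / accumulation_steps  # scale so gradients stay comparable
+    loss.backward()                                    # gradients accumulate across mini-batches
+    if (step + 1) % accumulation_steps == 0:
+        optimizer.step()                               # update weights only every N mini-batches
+        optimizer.zero_grad()
+```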
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_gradient-checkpointing.mdx b/documentation/docs/tooltips/experiments/_gradient-checkpointing.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..49ea0c587693b4264c2a9bffb84d55b0806fe325
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_gradient-checkpointing.mdx
@@ -0,0 +1,6 @@
+Determines whether H2O LLM Studio activates gradient checkpointing (GC) when training the model. Turning **On** GC reduces the video random access memory (VRAM) footprint at the cost of a longer runtime (an additional forward pass).
+
+**Caution**
+Gradient checkpointing is an experimental setting that is not compatible with all backbones or all other settings.
+
+Activating *GC* comes at the cost of a longer training time; for that reason, try training without *GC* first and only activate when experiencing *GPU out-of-memory (OOM)* errors.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_gradient-clip.mdx b/documentation/docs/tooltips/experiments/_gradient-clip.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..3ef2cc4e54b02840562836e56f220906c05c8c35
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_gradient-clip.mdx
@@ -0,0 +1,3 @@
+Defines the maximum norm of the gradients H2O LLM Studio applies during model training. Defaults to **0**, which means no clipping. When a value greater than 0 is specified, H2O LLM Studio clips the gradients during model training: it uses the specified value as an upper limit for the norm of the gradients, calculated using the Euclidean norm over all gradients per batch.
+
+This setting can help model convergence when extreme gradient values cause high volatility of weight updates.
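+
+For reference, this corresponds to clipping the global gradient norm as PyTorch's `clip_grad_norm_` does (sketch with a hypothetical `model`):
+
+```python
+import torch
+
+# After loss.backward(): rescale gradients so their global L2 norm is at most max_norm.
+torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
+```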
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_huggingface-branch.mdx b/documentation/docs/tooltips/experiments/_huggingface-branch.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..439483d285ba5e11994e2ea535b6f6d6db408fb6
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_huggingface-branch.mdx
@@ -0,0 +1 @@
+The **Hugging Face Branch** defines which branch to use in a Hugging Face repository. The default value is "main".
diff --git a/documentation/docs/tooltips/experiments/_id-column.mdx b/documentation/docs/tooltips/experiments/_id-column.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..45e388aae3376f99bd1650f551215b059ecdc9ea
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_id-column.mdx
@@ -0,0 +1 @@
+This column is only required when the `Parent Id` is set. It identifies the record ID used for linked conversations. The value in the `Parent Id` column must match this column's value.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_import-config-from-yaml.mdx b/documentation/docs/tooltips/experiments/_import-config-from-yaml.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..86b78d4a4bfc60eae5ce09b88cdf6477a1259ed1
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_import-config-from-yaml.mdx
@@ -0,0 +1,3 @@
+Specifies the `.yml` file that defines the experiment settings.
+
+- H2O LLM Studio supports a `.yml` file import and export functionality. You can download the config settings of finished experiments, make changes, and re-upload them when starting a new experiment in any instance of H2O LLM Studio.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_intermediate-dropout.mdx b/documentation/docs/tooltips/experiments/_intermediate-dropout.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..b0647ceda763a3424265a20ee23a7284bcf4764a
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_intermediate-dropout.mdx
@@ -0,0 +1 @@
+Defines the custom dropout rate H2O LLM Studio uses for intermediate layers in the transformer model.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_learning-rate.mdx b/documentation/docs/tooltips/experiments/_learning-rate.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..6cee2f23c9c07a98fabe335ac05d36ae5fedccc4
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_learning-rate.mdx
@@ -0,0 +1,4 @@
+Defines the learning rate H2O LLM Studio uses when training the model, specifically when updating the neural network's weights. The learning rate is the speed at which the model updates its weights after processing each mini-batch of data.
+
+- Learning rate is an important setting to tune as it balances under- and overfitting.
+- The number of epochs highly impacts the optimal value of the learning rate.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_limit_chained_samples.mdx b/documentation/docs/tooltips/experiments/_limit_chained_samples.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..0f2f31437f23f86abb2298b336f8e00d8d49a71e
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_limit_chained_samples.mdx
@@ -0,0 +1 @@
+If set to True, the model will only be trained on the full chained sample in the case of nested conversations. If set to False, each separate sample is used for training.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_llm-backbone.mdx b/documentation/docs/tooltips/experiments/_llm-backbone.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..d5a9f2c6e43d20bbaf6013de92be1e234fd68150
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_llm-backbone.mdx
@@ -0,0 +1,5 @@
+The **LLM Backbone** option is the most important setting as it sets the pretrained model weights.
+
+- Use smaller models for quicker experiments and larger models for higher accuracy
+- Aim to leverage models pre-trained on tasks similar to your use case when possible
+- Select a model from the dropdown list or type in the name of a Hugging Face model of your preference
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_log-all-ranks.mdx b/documentation/docs/tooltips/experiments/_log-all-ranks.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..a19b7d4d784eb565db73090b1e7cc4d5d7b744b2
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_log-all-ranks.mdx
@@ -0,0 +1 @@
+If used, the local logging will include the output of all ranks (DDP mode).
diff --git a/documentation/docs/tooltips/experiments/_log-step-size.mdx b/documentation/docs/tooltips/experiments/_log-step-size.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..c47ec86f1954918e5c49206782fe398027794d1f
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_log-step-size.mdx
@@ -0,0 +1,4 @@
+Specifies the interval for logging during training. Two options are available:
+
+- **Absolute**: The default setting. Uses the total number of training samples processed as the x-axis for logging.
+- **Relative**: Uses the proportion of training data seen so far as the x-axis for logging.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_logger.mdx b/documentation/docs/tooltips/experiments/_logger.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..6c3af88a2f6be42d35b3c32c290a0a319fa85e58
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_logger.mdx
@@ -0,0 +1,9 @@
+Defines the logger type that H2O LLM Studio uses for model training.
+
+Options:
+- **None**
+ - H2O LLM Studio does not use any logger.
+- **Neptune**
+ - H2O LLM Studio uses Neptune as a logger to track the experiment. To use Neptune, you must specify a **Neptune API token** in the settings or as a `NEPTUNE_API_TOKEN` environment variable and a **Neptune project**.
+- **W&B**
+ - H2O LLM Studio uses W&B as a logger to track the experiment. To use W&B, you must specify a **W&B API key** in the settings or as a `WANDB_API_KEY` environment variable and a **W&B project** and **W&B entity**.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_lora-alpha.mdx b/documentation/docs/tooltips/experiments/_lora-alpha.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..933e2ce1d6fc11176b7ba0ed74591e8c7ee7eb1a
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_lora-alpha.mdx
@@ -0,0 +1 @@
+The scaling factor for the LoRA weights.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_lora-dropout.mdx b/documentation/docs/tooltips/experiments/_lora-dropout.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..980a7e3ad0ec19cc47e42f3118daaf640a05c3c2
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_lora-dropout.mdx
@@ -0,0 +1 @@
+The probability of applying dropout to the LoRA weights during training.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_lora-r.mdx b/documentation/docs/tooltips/experiments/_lora-r.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..48ea770a4ce912dca52f171a3ea43a45b76df1bd
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_lora-r.mdx
@@ -0,0 +1 @@
+The dimension of the matrix decomposition used in LoRA.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_lora-target-modules.mdx b/documentation/docs/tooltips/experiments/_lora-target-modules.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..4d1f900c1128ad7827cf44d2946d5c200eea0676
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_lora-target-modules.mdx
@@ -0,0 +1 @@
+The modules in the model to apply the LoRA approximation to. Defaults to all linear layers.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_lora-unfreeze-layers.mdx b/documentation/docs/tooltips/experiments/_lora-unfreeze-layers.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..23f7641547b1dc8e568bfd61618294387e3ace3b
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_lora-unfreeze-layers.mdx
@@ -0,0 +1,3 @@
+An optional list of backbone layers to unfreeze during training.
+By default, all backbone layers are frozen when training with LoRA; here, certain layers, such as the embedding or head layer, can additionally be trained.
+Full layer names will be matched against the selected substrings. Only available with LoRA training.
diff --git a/documentation/docs/tooltips/experiments/_lora.mdx b/documentation/docs/tooltips/experiments/_lora.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..db92a793b6260787f0e7de87f786cb7583e1e8a8
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_lora.mdx
@@ -0,0 +1 @@
+Whether to use Low-Rank Adaptation (LoRA) during training.
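+
+A minimal sketch of the idea (not LLM Studio's implementation): the frozen base weight is augmented with a trainable low-rank update scaled by `alpha / r`:
+
+```python
+import torch
+import torch.nn as nn
+
+class LoRALinear(nn.Module):
+    """Frozen linear layer plus a trainable low-rank update (illustrative)."""
+    def __init__(self, base: nn.Linear, r: int = 8, alpha: int = 16):
+        super().__init__()
+        self.base = base
+        for p in self.base.parameters():
+            p.requires_grad = False  # backbone weights stay frozen
+        self.lora_a = nn.Parameter(torch.randn(r, base.in_features) * 0.01)
+        self.lora_b = nn.Parameter(torch.zeros(base.out_features, r))
+        self.scaling = alpha / r     # LoRA alpha acts as the scaling factor
+
+    def forward(self, x):
+        return self.base(x) + (x @ self.lora_a.T @ self.lora_b.T) * self.scaling
+```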
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_loss-function.mdx b/documentation/docs/tooltips/experiments/_loss-function.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..ebf6ca195f6ad02001068c25542a9088be6b47a4
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_loss-function.mdx
@@ -0,0 +1 @@
+Defines the loss function H2O LLM Studio utilizes during model training. The loss function is a differentiable function measuring the prediction error. The model utilizes gradients of the loss function to update the model weights during training. The options depend on the selected Problem Type.
diff --git a/documentation/docs/tooltips/experiments/_mask-prompt-labels.mdx b/documentation/docs/tooltips/experiments/_mask-prompt-labels.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..14bf2a85884f6539b4121609127e94a5c267e73f
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_mask-prompt-labels.mdx
@@ -0,0 +1 @@
+Whether to mask the prompt labels during training and only train on the loss of the answer.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_max-length-inference.mdx b/documentation/docs/tooltips/experiments/_max-length-inference.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..2dbdc65fae8ba252ae3bac0c1eb45b6f99d5b87c
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_max-length-inference.mdx
@@ -0,0 +1,4 @@
+Defines the max length value H2O LLM Studio uses for the generated text.
+
+- Similar to the **Max Length** setting in the *tokenizer settings* section, this setting specifies the maximum number of tokens to predict for a given prediction sample.
+- This setting impacts the evaluation metrics and should depend on the dataset and average output sequence length that is expected to be predicted.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_max-length.mdx b/documentation/docs/tooltips/experiments/_max-length.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..2d675c2134c80a843e4d6361d8917018556d0f2c
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_max-length.mdx
@@ -0,0 +1,7 @@
+Defines the maximum length of the input sequence H2O LLM Studio uses during model training. In other words, this setting specifies the maximum number of tokens an input text is transformed into for model training.
+
+A higher token count leads to higher memory usage that slows down training while increasing the probability of obtaining a higher accuracy value.
+
+In case of Causal Language Modeling, this includes both prompt and answer, or all prompts and answers in case of chained samples.
+
+In Sequence to Sequence Modeling, this refers to the length of the prompt, or the length of a full chained sample.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_max-time.mdx b/documentation/docs/tooltips/experiments/_max-time.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..259d1365b88ddad65fa2dbad20f75c160cfb80ee
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_max-time.mdx
@@ -0,0 +1 @@
+The maximum amount of time in seconds that you allow the computation to run; zero disables this setting. Generation will still finish the current pass after the allocated time has passed.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_metric-gpt-model.mdx b/documentation/docs/tooltips/experiments/_metric-gpt-model.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..c17cab68dd0a89a66de2629f028583c3a772d5b1
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_metric-gpt-model.mdx
@@ -0,0 +1 @@
+Defines the OpenAI model endpoint for the GPT metric.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_metric-gpt-template.mdx b/documentation/docs/tooltips/experiments/_metric-gpt-template.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..c741f5369ee3041f378f3549ebf3d85c0c24e4de
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_metric-gpt-template.mdx
@@ -0,0 +1 @@
+The template to use for GPT-based evaluation. Note that for mt-bench, the validation dataset will be replaced accordingly; to approximate the original implementation as closely as possible, we suggest using gpt-4-0613 as the GPT judge model and 1024 for the max length inference.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_metric.mdx b/documentation/docs/tooltips/experiments/_metric.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..590d16ac734b18afe86369e13e34692522006d64
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_metric.mdx
@@ -0,0 +1,18 @@
+Defines the metric to evaluate the model's performance.
+
+We provide several metric options for evaluating the performance of your model. The options depend on the selected Problem Type:
+
+Causal Language Modeling, DPO Modeling, Sequence to Sequence Modeling
+- In addition to the BLEU and the Perplexity score, we offer GPT metrics that utilize the OpenAI API to determine whether
+the predicted answer is more favorable than the ground truth answer.
+- To use these metrics, you can either export your OpenAI API key as an environment variable before starting LLM Studio,
+or you can specify it in the Settings Menu within the UI.
+
+Causal Classification Modeling
+- AUC: Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC).
+- Accuracy: Compute the accuracy of the model.
+- LogLoss: Compute the log loss of the model.
+
+Causal Regression Modeling
+- MSE: Compute Mean Squared Error of the model.
+- MAE: Compute Mean Absolute Error of the model.
diff --git a/documentation/docs/tooltips/experiments/_min-learning-rate-ratio.mdx b/documentation/docs/tooltips/experiments/_min-learning-rate-ratio.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..927b736ae798052dd08bb7938824fa9482fa8365
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_min-learning-rate-ratio.mdx
@@ -0,0 +1,10 @@
+The minimum learning rate ratio determines the lowest learning rate that will be used during training as a fraction of the initial learning rate. This is particularly useful when using learning rate schedules like "Cosine" or "Linear" that decrease the learning rate over time.
+
+For example, if the initial learning rate is 0.001 and the min_learning_rate_ratio is set to 0.1, the learning rate will never drop below 0.0001 (0.001 * 0.1) during training.
+
+Setting this to a value greater than 0 can help prevent the learning rate from becoming too small, which might slow down training or cause the model to get stuck in local optima.
+
+- A value of 0.0 allows the learning rate to potentially reach zero by the end of training.
+- Typical values range from 0.01 to 0.1, depending on the specific task and model.
+
+This parameter cannot be set when using the **Constant** learning rate schedule.
diff --git a/documentation/docs/tooltips/experiments/_min-length-inference.mdx b/documentation/docs/tooltips/experiments/_min-length-inference.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..57ad37b6e96ef78fa7fd79dee6289fdfc24aad89
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_min-length-inference.mdx
@@ -0,0 +1,3 @@
+Defines the min length value H2O LLM Studio uses for the generated text.
+
+- This setting impacts the evaluation metrics and should depend on the dataset and average output sequence length that is expected to be predicted.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_mixed-precision-dtype.mdx b/documentation/docs/tooltips/experiments/_mixed-precision-dtype.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..a9c986a5e6187867643caffe467391f23b6b079c
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_mixed-precision-dtype.mdx
@@ -0,0 +1 @@
+The datatype used for mixed precision.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_mixed-precision.mdx b/documentation/docs/tooltips/experiments/_mixed-precision.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..8bc4a002f7ca251733587230b82f7824c551d0e1
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_mixed-precision.mdx
@@ -0,0 +1,3 @@
+Determines whether to use mixed-precision. When turned **Off**, H2O LLM Studio does not use mixed-precision.
+
+Mixed-precision is a technique that helps decrease memory consumption and increase training speed.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_neftune_noise_alpha.mdx b/documentation/docs/tooltips/experiments/_neftune_noise_alpha.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..9f9923cf0d7144c015f43381a8acef60eff53af2
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_neftune_noise_alpha.mdx
@@ -0,0 +1,2 @@
+Adds noise to the input embeddings, as proposed in [NEFTune: Noisy Embeddings Improve Instruction Finetuning](https://arxiv.org/abs/2310.05914).
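+
+A minimal sketch of the noise injection described in the paper (illustrative; `alpha` is this setting):
+
+```python
+import torch
+
+def neftune_noise(embeddings: torch.Tensor, alpha: float) -> torch.Tensor:
+    # embeddings: (batch, seq_len, dim); noise is uniform, scaled by alpha / sqrt(seq_len * dim).
+    _, seq_len, dim = embeddings.shape
+    scale = alpha / (seq_len * dim) ** 0.5
+    return embeddings + torch.empty_like(embeddings).uniform_(-scale, scale)
+```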
+
diff --git a/documentation/docs/tooltips/experiments/_neptune-project.mdx b/documentation/docs/tooltips/experiments/_neptune-project.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..aefbdac0e130745a0181105adb3fa1ad2e9bf50b
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_neptune-project.mdx
@@ -0,0 +1 @@
+Defines the Neptune project to access if you selected Neptune in the **Logger** setting.
diff --git a/documentation/docs/tooltips/experiments/_num-beams.mdx b/documentation/docs/tooltips/experiments/_num-beams.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..d396204998ae6ff45d01992183873765ca877614
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_num-beams.mdx
@@ -0,0 +1,3 @@
+Defines the number of beams to use for beam search. The default value of *Num Beams* is 1 (a single beam), which means no beam search.
+
+A higher *Num Beams* value can increase prediction runtime while potentially improving accuracy.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_num-classes.mdx b/documentation/docs/tooltips/experiments/_num-classes.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..544720dcc1129e7204d465cb8d39e19a1eefe025
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_num-classes.mdx
@@ -0,0 +1 @@
+The number of possible classes for the classification task. For binary classification, a single class should be selected.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_num-history.mdx b/documentation/docs/tooltips/experiments/_num-history.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..2d449124daf13c51e114f406f3511e7d47376336
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_num-history.mdx
@@ -0,0 +1,2 @@
+Number of previous prompts and answers the model should use as history and prepend to the current prompt.
+As an example, if Num History is 2, the model will use the previous prompt-answer pair.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_number-of-workers.mdx b/documentation/docs/tooltips/experiments/_number-of-workers.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..03da84d522933d0becd89d97c663796e22195705
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_number-of-workers.mdx
@@ -0,0 +1 @@
+Defines the number of workers H2O LLM Studio uses for the *DataLoader*. In other words, it defines the number of CPU processes to use when reading and loading data to GPUs during model training.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_only-last-answer.mdx b/documentation/docs/tooltips/experiments/_only-last-answer.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..3dd2d10fc7a99af1738bac21ddaf31e40238d484
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_only-last-answer.mdx
@@ -0,0 +1,3 @@
+Enable this flag to only train on the loss of the last answer in a conversation chain.
+
+The setting is only available if *Parent Id Column* is set and *Mask Prompt Labels* is enabled.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_optimizer.mdx b/documentation/docs/tooltips/experiments/_optimizer.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..e25cd5549613d36ae4e5754e4a7c60423e1814c8
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_optimizer.mdx
@@ -0,0 +1,17 @@
+Defines the algorithm or method (optimizer) to use for model training. The selected algorithm or method defines how the model should change the attributes of the neural network, such as weights and learning rate. Optimizers solve optimization problems and make more accurate updates to attributes to reduce learning losses.
+
+Options:
+
+- **Adadelta**
+  - To learn about Adadelta, see *ADADELTA: An Adaptive Learning Rate Method*.
+- **Adam**
+  - To learn about Adam, see *Adam: A Method for Stochastic Optimization*.
+- **AdamW**
+  - To learn about AdamW, see *Decoupled Weight Decay Regularization*.
+- **AdamW8bit**
+  - To learn about AdamW8bit, see *Decoupled Weight Decay Regularization*.
+- **RMSprop**
+  - To learn about RMSprop, see *Neural Networks for Machine Learning*.
+- **SGD**
+  - H2O LLM Studio uses a stochastic gradient descent optimizer.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_padding-quantile.mdx b/documentation/docs/tooltips/experiments/_padding-quantile.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..1c37076a118042bb07d7e9a919e79d3f189ff2af
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_padding-quantile.mdx
@@ -0,0 +1,7 @@
+Defines the padding quantile H2O LLM Studio uses to select the maximum token length per batch. H2O LLM Studio performs padding of shorter sequences up to the specified padding quantile instead of the selected **Max length**. H2O LLM Studio truncates longer sequences.
+
+- Lowering the quantile can significantly reduce training runtime and memory usage for unevenly distributed sequence lengths, but can hurt performance
+- The setting depends on the batch size and should be adjusted accordingly
+- No padding is done in inference, and the selected **Max Length** is guaranteed
+- Setting to 0 disables padding
+- In case of distributed training, the quantile will be calculated across all GPUs
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_parent-id-column.mdx b/documentation/docs/tooltips/experiments/_parent-id-column.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..91965a236d44179e4f51ccd14fe9a43b8f4ca160
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_parent-id-column.mdx
@@ -0,0 +1 @@
+An optional column specifying the parent id to be used for chained conversations. The value of this column needs to match an additional column with the name `id`. If provided, the prompt will be concatenated after preceding parent rows.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_personalize.mdx b/documentation/docs/tooltips/experiments/_personalize.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..3e73ff4b880bee452675978583333dbec6f06891
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_personalize.mdx
@@ -0,0 +1 @@
+When active, this will replace certain words from the dataset to personalize the chatbot. Allows you to set a "Chatbot Name" and a "Chatbot Author". This setting is only useful in conjunction with the default dataset.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_pretrained-weights.mdx b/documentation/docs/tooltips/experiments/_pretrained-weights.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..06eb508069bc822e9decbeb6a256641c79f0f755
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_pretrained-weights.mdx
@@ -0,0 +1 @@
+Allows you to specify a local path to the pretrained weights.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_problem-type.mdx b/documentation/docs/tooltips/experiments/_problem-type.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..a34cd1e20203401b0bd1a6e449c82e0aee0307b3
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_problem-type.mdx
@@ -0,0 +1,12 @@
+Defines the problem type of the experiment, which also defines the settings H2O LLM Studio displays for the experiment.
+
+- Causal Language Modeling: Used to fine-tune large language models
+
+- Causal Classification Modeling: Used to fine-tune causal classification models
+
+- Causal Regression Modeling: Used to fine-tune causal regression models
+
+- Sequence To Sequence Modeling: Used to fine-tune large sequence to sequence models
+
+- DPO Modeling: Used to fine-tune large language models using Direct Preference Optimization
+
diff --git a/documentation/docs/tooltips/experiments/_prompt-column-separator.mdx b/documentation/docs/tooltips/experiments/_prompt-column-separator.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..d96f3d85d1812827db9ca3833f1c7664a1920147
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_prompt-column-separator.mdx
@@ -0,0 +1 @@
+If multiple prompt columns are selected, the columns are concatenated with the separator defined here. If only a single prompt column is selected, this setting is ignored.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_prompt-column.mdx b/documentation/docs/tooltips/experiments/_prompt-column.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..a7377020e9237185eed1e5654ddfc45e87da0a96
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_prompt-column.mdx
@@ -0,0 +1 @@
+One column or multiple columns in the dataset containing the user prompt. If multiple columns are selected, the columns are concatenated with a separator defined in **Prompt Column Separator**.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_random-parent-probability.mdx b/documentation/docs/tooltips/experiments/_random-parent-probability.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..6b182feb90f8a077768afaebc9ac6057aa9cf6ca
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_random-parent-probability.mdx
@@ -0,0 +1 @@
+While training, each sample will, with the specified probability, be concatenated to a random other sample, simulating unrelated chained conversations. Can be specified without using a `Parent Column`.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_rejected-answer-column.mdx b/documentation/docs/tooltips/experiments/_rejected-answer-column.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..608d5225950e96ee7d5fcc0e03b3f5350cab5922
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_rejected-answer-column.mdx
@@ -0,0 +1,3 @@
+The column in the dataset containing the rejected response, i.e., an answer that is not preferred by the user.
+
+See https://arxiv.org/abs/2305.18290 for more details.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_rejected-prompt-column.mdx b/documentation/docs/tooltips/experiments/_rejected-prompt-column.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..74a211d0e24056d1fc9da540b1b0bb2beaf5ba12
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_rejected-prompt-column.mdx
@@ -0,0 +1 @@
+The column in the dataset containing the user prompt for the rejected answer. By default, this can be set to None to use the same prompt as for the accepted answer; it should only be changed if the accepted and rejected answers exhibit different prompts, such as when using KTOPairLoss.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_repetition-penalty.mdx b/documentation/docs/tooltips/experiments/_repetition-penalty.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..e552202e88437b15584a8c287791cd11787f141f
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_repetition-penalty.mdx
@@ -0,0 +1 @@
+The parameter for repetition penalty. 1.0 means no penalty. See https://arxiv.org/pdf/1909.05858.pdf for more details.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_save-checkpoint.mdx b/documentation/docs/tooltips/experiments/_save-checkpoint.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..2a6fd971e7a805b953eb4dd558ea0e1c903a2788
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_save-checkpoint.mdx
@@ -0,0 +1,15 @@
+Specifies how H2O LLM Studio should save the model checkpoints.
+
+When set to **Last**, it will always save the last checkpoint; this is the recommended setting.
+
+When set to **Best**, it saves the model weights for the epoch exhibiting the best validation metric.
+- This option should be used with care as it has the potential to lead to overfitting of the validation data.
+- The default goal should be to attempt to tune models so that the last epoch is the best epoch.
+- If an evident decline is observed for later epochs in the logs, it is usually better to adjust hyperparameters, such as reducing the number of epochs or increasing regularization, instead of selecting this option.
+
+When set to **Each evaluation epoch**, it will save the model weights for each evaluation epoch.
+- This can be useful for debugging and experimenting, but will consume more disk space.
+- Models uploaded to the Hugging Face Hub will only contain the last checkpoint.
+- Local downloads will contain all checkpoints.
+
+When set to **Disable**, it will not save the checkpoint at all. This can be useful for debugging and experimenting in order to save disk space, but it will disable certain functionalities, like chatting or pushing to HF.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_schedule.mdx b/documentation/docs/tooltips/experiments/_schedule.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..fac04d46c358b8a14bb8d082b0d5290718135970
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_schedule.mdx
@@ -0,0 +1,10 @@
+Defines the learning rate schedule H2O LLM Studio utilizes during model training. Specifying a learning rate schedule prevents the learning rate from staying the same. Instead, a learning rate schedule causes the learning rate to change over iterations, typically decreasing the learning rate to achieve a better model performance and training convergence.
+
+Options:
+- **Constant**
+ - H2O LLM Studio applies a constant learning rate during the training process.
+- **Cosine**
+ - H2O LLM Studio applies a cosine learning rate that follows the values of the cosine function.
+- **Linear**
+ - H2O LLM Studio applies a linear learning rate that decreases the learning rate linearly.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_seed.mdx b/documentation/docs/tooltips/experiments/_seed.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..a4bbf1ac81f24abc670753796e4998b4b8cd8483
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_seed.mdx
@@ -0,0 +1 @@
+Defines the random seed value that H2O LLM Studio uses during model training. It defaults to -1, an arbitrary value. When the value is modified (not -1), the random seed allows results to be reproducible: defining a seed aids in obtaining predictable and repeatable results every time. Otherwise, not modifying the default seed value (-1) leads to random numbers at every invocation.
diff --git a/documentation/docs/tooltips/experiments/_skip-parent-probability.mdx b/documentation/docs/tooltips/experiments/_skip-parent-probability.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..d74818aa8300ee8dfdf1a5563ac617825738cccb
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_skip-parent-probability.mdx
@@ -0,0 +1 @@
+If `Parent Column` is set, this random augmentation will skip parent concatenation during training at each parent with this specified probability.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_stop-tokens.mdx b/documentation/docs/tooltips/experiments/_stop-tokens.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..0dec2c397ed19770c3996394cd236525644368f4
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_stop-tokens.mdx
@@ -0,0 +1 @@
+Will stop generation at the occurrence of these additional tokens; multiple tokens should be separated by a comma `,`.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_system-column.mdx b/documentation/docs/tooltips/experiments/_system-column.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..0d9cec6d5303aee003524944c0c8ef1184287fa6
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_system-column.mdx
@@ -0,0 +1 @@
+The column in the dataset containing the system input which is always prepended for a full sample.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_temperature.mdx b/documentation/docs/tooltips/experiments/_temperature.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..2d3ef7b81fc36d4bc7950af8cd0a83584f16858e
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_temperature.mdx
@@ -0,0 +1,4 @@
+Defines the temperature to use for sampling from the next token distribution during validation and inference. In other words, the defined temperature controls the randomness of predictions by scaling the logits before applying softmax. A higher temperature makes the distribution more random (see the sketch below).
+
+- Modify the temperature value if you have the **Do Sample** setting enabled (**On**).
+- To learn more about this setting, refer to the following article: *How to generate text: using different decoding methods for language generation with Transformers*.
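+
+A minimal sketch of temperature scaling (illustrative, plain PyTorch):
+
+```python
+import torch
+
+logits = torch.randn(32000)  # illustrative next-token logits
+temperature = 0.7
+probs = torch.softmax(logits / temperature, dim=-1)  # T < 1 sharpens, T > 1 flattens
+```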
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_text-answer-separator.mdx b/documentation/docs/tooltips/experiments/_text-answer-separator.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..d02df5c704554e26c7d406e86aeea104f731c987
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_text-answer-separator.mdx
@@ -0,0 +1 @@
+Optional text to append to each prompt / prepend to each answer.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_text-prompt-start.mdx b/documentation/docs/tooltips/experiments/_text-prompt-start.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..72a489d85664fc6bfb4974deaf630c563ae388b1
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_text-prompt-start.mdx
@@ -0,0 +1 @@
+Optional text to prepend to each prompt.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_text-system-start.mdx b/documentation/docs/tooltips/experiments/_text-system-start.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..8fd08b2931e4662d260476a36f40cf5604f8d99f
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_text-system-start.mdx
@@ -0,0 +1 @@
+Optional text to prepend to each system input.
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_token-mask-probability.mdx b/documentation/docs/tooltips/experiments/_token-mask-probability.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..155b44051e452ee10e7874d88555ef6119bd4534
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_token-mask-probability.mdx
@@ -0,0 +1,4 @@
+Defines the probability of input text tokens being randomly masked during training.
+
+- Increasing this setting can help avoid overfitting, as it acts as a form of regularization.
+- Each token is independently replaced by a masking token with the specified probability.
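+
+A minimal sketch of the mechanism (`mask_id` is a placeholder here; the real id comes from the tokenizer):
+
+```python
+import random
+
+def mask_tokens(token_ids: list[int], mask_id: int, p: float) -> list[int]:
+    # Each input token is independently replaced with probability p.
+    return [mask_id if random.random() < p else t for t in token_ids]
+```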
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_tokenizer-kwargs.mdx b/documentation/docs/tooltips/experiments/_tokenizer-kwargs.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..49cbbacac2b4939e42f875f6423ac739e44922af
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_tokenizer-kwargs.mdx
@@ -0,0 +1 @@
+Custom tokenizer arguments specified as a JSON string. Change this only if you know what you are doing, for example to adapt to specific use cases or models.
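+
+For example, a hypothetical value such as `{"use_fast": true, "padding_side": "left"}` (the accepted argument names depend on the tokenizer in use).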
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_top-k.mdx b/documentation/docs/tooltips/experiments/_top-k.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..f0d09dcd3c2ad3ebea33de6e1e92744f28436ec6
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_top-k.mdx
@@ -0,0 +1 @@
+If > 0, only keep the top k tokens with the highest probability (top-k filtering).
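+
+A sketch of the filtering on a 1-D logits vector (illustrative only):
+
+```python
+import torch
+
+def top_k_filter(logits: torch.Tensor, k: int) -> torch.Tensor:
+    if k <= 0:
+        return logits  # 0 disables top-k filtering
+    kth_value = torch.topk(logits, k).values[-1]
+    return logits.masked_fill(logits < kth_value, float("-inf"))
+```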
\ No newline at end of file
diff --git a/documentation/docs/tooltips/experiments/_top-p.mdx b/documentation/docs/tooltips/experiments/_top-p.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..799d48c20998ba5eec331cff238ae64dfdc1ed29
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_top-p.mdx
@@ -0,0 +1 @@
+If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering).
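+
+A sketch of nucleus filtering on a 1-D logits vector (illustrative only):
+
+```python
+import torch
+
+def top_p_filter(logits: torch.Tensor, top_p: float) -> torch.Tensor:
+    sorted_logits, sorted_idx = torch.sort(logits, descending=True)
+    cum_probs = torch.cumsum(torch.softmax(sorted_logits, dim=-1), dim=-1)
+    remove = cum_probs > top_p
+    remove[1:] = remove[:-1].clone()  # shift right so the token crossing top_p is kept
+    remove[0] = False
+    logits = logits.clone()
+    logits[sorted_idx[remove]] = float("-inf")
+    return logits
+```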
diff --git a/documentation/docs/tooltips/experiments/_train-dataframe.mdx b/documentation/docs/tooltips/experiments/_train-dataframe.mdx
new file mode 100644
index 0000000000000000000000000000000000000000..a0a03cf76aa940be75370a105a922d8c18b34d7f
--- /dev/null
+++ b/documentation/docs/tooltips/experiments/_train-dataframe.mdx
@@ -0,0 +1,3 @@
+Defines a `.csv` or `.pq` file containing a dataframe with training records that H2O LLM Studio uses to *train* the model.
+
+- The records are combined into mini-batches when training the model.
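+
+For instance, a compatible file can be produced with pandas (the column names below are placeholders; they must match the columns you select when configuring the dataset):
+
+```python
+import pandas as pd
+
+df = pd.DataFrame({"prompt": ["What is MLOps?"], "answer": ["MLOps is ..."]})
+df.to_parquet("train.pq")  # or: df.to_csv("train.csv", index=False)
+```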
\ No newline at end of file
diff --git a/documentation/docs/tutorials/tutorial-1a/configure-dataset.png b/documentation/docs/tutorials/tutorial-1a/configure-dataset.png
new file mode 100644
index 0000000000000000000000000000000000000000..34f0cc6991dfc1423321018ac75cd57274f6f330
--- /dev/null
+++ b/documentation/docs/tutorials/tutorial-1a/configure-dataset.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1efdcfc87492eda89533759ef8004037a355ac4a580231fefcd4c01bda40213
+size 195864
diff --git a/documentation/docs/tutorials/tutorial-1a/dataset-settings.png b/documentation/docs/tutorials/tutorial-1a/dataset-settings.png
new file mode 100644
index 0000000000000000000000000000000000000000..55c38d4732b23e0f5f441b02624aeb805bcfa489
Binary files /dev/null and b/documentation/docs/tutorials/tutorial-1a/dataset-settings.png differ
diff --git a/documentation/docs/tutorials/tutorial-1a/evaluate-experiment.png b/documentation/docs/tutorials/tutorial-1a/evaluate-experiment.png
new file mode 100644
index 0000000000000000000000000000000000000000..6c9039ba34b0c40330553ed0a7d66635310ad9d1
--- /dev/null
+++ b/documentation/docs/tutorials/tutorial-1a/evaluate-experiment.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f47b4bd6a2f5736a2677b98ca1577359d744fcb156c669b2aa4371d0826e638
+size 318237
diff --git a/documentation/docs/tutorials/tutorial-1a/experiment-name.png b/documentation/docs/tutorials/tutorial-1a/experiment-name.png
new file mode 100644
index 0000000000000000000000000000000000000000..907340e54afa87fd48acdc6e29e4927461c7a434
Binary files /dev/null and b/documentation/docs/tutorials/tutorial-1a/experiment-name.png differ
diff --git a/documentation/docs/tutorials/tutorial-1a/import-dataset.png b/documentation/docs/tutorials/tutorial-1a/import-dataset.png
new file mode 100644
index 0000000000000000000000000000000000000000..89c9f31476ea4638b336ac85daa294da6ab029a1
--- /dev/null
+++ b/documentation/docs/tutorials/tutorial-1a/import-dataset.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c27c5b50ce56a6d456ebfcbb56bdc6556424c5b9f636ec2a7f22340fd2c93912
+size 179150
diff --git a/documentation/docs/tutorials/tutorial-1a/prediction-settings.png b/documentation/docs/tutorials/tutorial-1a/prediction-settings.png
new file mode 100644
index 0000000000000000000000000000000000000000..341a31246a4be08f8fd45a9e9e87d8f256bc0611
Binary files /dev/null and b/documentation/docs/tutorials/tutorial-1a/prediction-settings.png differ
diff --git a/documentation/docs/tutorials/tutorial-1a/training-settings.png b/documentation/docs/tutorials/tutorial-1a/training-settings.png
new file mode 100644
index 0000000000000000000000000000000000000000..b7dec2e95bdf9b2412a2238cd3ae4399cd0a6262
Binary files /dev/null and b/documentation/docs/tutorials/tutorial-1a/training-settings.png differ
diff --git a/documentation/docs/tutorials/tutorial-1a/tutorial-1a.md b/documentation/docs/tutorials/tutorial-1a/tutorial-1a.md
new file mode 100644
index 0000000000000000000000000000000000000000..b73ef4078d57ab0404a782b39b2787313ddfd795
--- /dev/null
+++ b/documentation/docs/tutorials/tutorial-1a/tutorial-1a.md
@@ -0,0 +1,78 @@
+---
+description: This tutorial will guide you through the process of setting up and conducting an experiment for causal classification modeling using H2O LLM Studio. It covers how to import datasets, configure key experiment settings, and create a new experiment.
+---
+
+import Icon from "@material-ui/core/Icon";
+
+# Create an experiment for causal classification modeling
+
+## Overview
+
+This tutorial will guide you through the process of setting up and conducting an experiment for the causal classification modeling [problem type](../../guide/experiments/supported-problem-types.md) using H2O LLM Studio. It covers how to import datasets, configure key experiment settings, and create a new experiment. By following these steps, you will learn how to design experiments that can identify causal relationships in classification tasks.
+
+## Objectives
+
+1. Learn how to import datasets from Hugging Face into H2O LLM Studio.
+2. Set up an experiment for causal classification modeling with appropriate parameters.
+
+## Prerequisites
+
+1. Access to the latest version of H2O LLM Studio.
+2. Basic understanding of classification and causal models.
+
+## Step 1: Import dataset
+
+For this tutorial, we'll use the IMDb movie review dataset on Hugging Face. The dataset contains 25,000 movie reviews for training, each labeled as either positive or negative. Let's import the dataset.
+
+1. Click on **Import dataset**.
+2. Select **Hugging Face** as the data source from the **Source** dropdown.
+3. In the **Hugging Face dataset** field, enter `stanfordnlp/imdb`.
+4. In the **Split** field, enter `train`.
+5. Click **Continue**.
+
+![Import dataset](import-dataset.png)
+
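+Optionally, you can sanity-check the imported split locally with the Hugging Face `datasets` library (an illustrative snippet, not part of the H2O LLM Studio workflow):
+
+```python
+from datasets import load_dataset
+
+ds = load_dataset("stanfordnlp/imdb", split="train")
+print(ds)                                   # 25,000 rows with "text" and "label" columns
+print(ds[0]["text"][:100], ds[0]["label"])  # label is 0 (negative) or 1 (positive)
+```
+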
+## Step 2: Configure dataset
+
+In this step, we'll review and adjust the dataset settings for our experiment.
+
+1. In the **Dataset name** field, enter `classification`.
+2. In the **Problem type** dropdown, select **Causal classification modeling**.
+ :::info note
+ If the dataset is configured correctly, the **Causal classification modeling** problem type will be pre-selected automatically.
+ :::
+3. In the **Train dataframe** dropdown, leave the default train dataframe as `imdb_train.pq`.
+4. In the **Validation dataframe** dropdown, leave the default value as `None`.
+5. In the **Prompt column** dropdown, select **Text**.
+6. In the **Answer column** dropdown, select **Label**.
+7. Click **Continue**.
+
+8. On the **Sample data visualization** page, click **Continue** if the input data and labels appear correctly.
+
+## Step 3: Create a new experiment
+
+Now that the dataset is imported, it's time to start a new experiment for causal classification modeling.
+
+1. From the **View datasets** page, click on the <Icon>more_vert</Icon> Kebab menu next to the `classification` dataset, then select **New experiment**.
+2. In **General settings**, enter `tutorial-1a` in the **Experiment name** text box.
+   ![Experiment name](experiment-name.png)
+3. In **Dataset settings**, set the **Data sample** to 0.1.
+4. In **Dataset settings**, set the **Num classes** to 1.
+   ![Dataset settings](dataset-settings.png)
+5. In **Training settings**, select **BinaryCrossEntropyLoss** from the **Loss function** dropdown.
+   ![Training settings](training-settings.png)
+6. In **Prediction settings**, select **LogLoss** from the **Metric** dropdown.
+   ![Prediction settings](prediction-settings.png)
+7. Leave the other configurations at their default values.
+8. Click **Run experiment**.
+
+## Step 4: Evaluate experiment
+
+After successfully creating the new experiment, click on the experiment name to access the experiment tabs. These tabs provide detailed information and insights into various aspects of your experiment. For more information about the experiment tabs, see [Experiment tabs](../../guide/experiments/view-an-experiment.md#experiment-tabs).
+
+![Evaluate experiment](evaluate-experiment.png)
+
+## Summary
+
+In this tutorial, we walked through the process of setting up a causal classification experiment using H2O LLM Studio. You learned how to import the IMDb dataset from Hugging Face, configure the dataset and experiment settings, and create a new experiment. With these steps, you're now ready to explore different datasets and experiment with various configurations for the causal classification [problem type](../../guide/experiments/supported-problem-types.md) in H2O LLM Studio.
+
diff --git a/documentation/docs/tutorials/tutorial-2a/configure-dataset.png b/documentation/docs/tutorials/tutorial-2a/configure-dataset.png
new file mode 100644
index 0000000000000000000000000000000000000000..5565bc1784ae2b592f459f836954bafd183a12d1
--- /dev/null
+++ b/documentation/docs/tutorials/tutorial-2a/configure-dataset.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6912ffbe298a44b1d2beab610d1ba0ff65532a361fe0a0da570837b43604bf7c
+size 201165
diff --git a/documentation/docs/tutorials/tutorial-2a/dataset-settings.png b/documentation/docs/tutorials/tutorial-2a/dataset-settings.png
new file mode 100644
index 0000000000000000000000000000000000000000..1c03fb1441a8c589a2c3b37d14b2475b22c3cab7
Binary files /dev/null and b/documentation/docs/tutorials/tutorial-2a/dataset-settings.png differ
diff --git a/documentation/docs/tutorials/tutorial-2a/evaluate-experiment.png b/documentation/docs/tutorials/tutorial-2a/evaluate-experiment.png
new file mode 100644
index 0000000000000000000000000000000000000000..8b734f28f2a2cd4adcadd6c16f3b51977592838f
--- /dev/null
+++ b/documentation/docs/tutorials/tutorial-2a/evaluate-experiment.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10d8293c310958605796801b982c539d7cc9a98a6e078ec0fc02201f19e67da8
+size 341298
diff --git a/documentation/docs/tutorials/tutorial-2a/general-settings.png b/documentation/docs/tutorials/tutorial-2a/general-settings.png
new file mode 100644
index 0000000000000000000000000000000000000000..804cded140d45f64e4f8f0a39e251da8b7424821
Binary files /dev/null and b/documentation/docs/tutorials/tutorial-2a/general-settings.png differ
diff --git a/documentation/docs/tutorials/tutorial-2a/import-dataset.png b/documentation/docs/tutorials/tutorial-2a/import-dataset.png
new file mode 100644
index 0000000000000000000000000000000000000000..80e62472e539ad8b411a4cb0a341aebc25ccce3f
--- /dev/null
+++ b/documentation/docs/tutorials/tutorial-2a/import-dataset.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f5b9914c5ba63b587f20ed76136f3c6c3ada9d4f7c344973415cd1af428aead
+size 179093
diff --git a/documentation/docs/tutorials/tutorial-2a/prediction-settings.png b/documentation/docs/tutorials/tutorial-2a/prediction-settings.png
new file mode 100644
index 0000000000000000000000000000000000000000..42d388d94d7684f170e7c608bd4ec4467a98c36a
Binary files /dev/null and b/documentation/docs/tutorials/tutorial-2a/prediction-settings.png differ
diff --git a/documentation/docs/tutorials/tutorial-2a/training-settings.png b/documentation/docs/tutorials/tutorial-2a/training-settings.png
new file mode 100644
index 0000000000000000000000000000000000000000..a50bbf93ba036e50f92a4ef4c71a4882a3cb5202
Binary files /dev/null and b/documentation/docs/tutorials/tutorial-2a/training-settings.png differ
diff --git a/documentation/docs/tutorials/tutorial-2a/tutorial-2a.md b/documentation/docs/tutorials/tutorial-2a/tutorial-2a.md
new file mode 100644
index 0000000000000000000000000000000000000000..ce942094fc8491016396bbb28966b5fb4b925938
--- /dev/null
+++ b/documentation/docs/tutorials/tutorial-2a/tutorial-2a.md
@@ -0,0 +1,73 @@
+---
+description: This tutorial will guide you through the process of setting up and conducting an experiment for causal regression modeling using H2O LLM Studio. It covers how to import datasets from Hugging Face, configure key experiment settings, and create a new experiment.
+---
+
+import Icon from "@material-ui/core/Icon";
+
+# Create an experiment for causal regression modeling
+
+## Overview
+
+This tutorial will guide you through the process of setting up and conducting an experiment for the causal regression modeling [problem type](../../guide/experiments/supported-problem-types.md) using H2O LLM Studio. It covers how to import datasets from Hugging Face, configure key experiment settings, and create a new experiment. By following these steps, you will learn how to design experiments that can identify causal relationships in regression tasks.
+
+## Objectives
+
+1. Learn how to import datasets from Hugging Face into H2O LLM Studio.
+2. Set up an experiment for causal regression modeling with appropriate parameters.
+
+## Prerequisites
+
+1. Access to the latest version of H2O LLM Studio.
+2. Basic understanding of regression and causal models.
+
+## Step 1: Import dataset
+
+For this tutorial, we'll use the open-source **Helpfulness Dataset (CC-BY-4.0)** from Hugging Face. The dataset contains 21,362 samples, each consisting of a prompt, a response, and five human-annotated attributes of the response, each rated on a scale of 0 to 4, where higher is better.
+
+1. Click on **Import dataset**.
+2. Select **Hugging Face** as the data source from the **Source** dropdown.
+3. In the **Hugging Face dataset** field, enter `nvidia/HelpSteer2`.
+4. In the **Split** field, enter `train`.
+5. Click **Continue**.
+
+![Import dataset](import-dataset.png)
+
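+Optionally, you can preview the split locally with the Hugging Face `datasets` library (an illustrative snippet, outside the H2O LLM Studio workflow):
+
+```python
+from datasets import load_dataset
+
+ds = load_dataset("nvidia/HelpSteer2", split="train")
+print(ds.column_names)                                 # "prompt", "response", "helpfulness", ...
+print(min(ds["helpfulness"]), max(ds["helpfulness"]))  # attribute scores range from 0 to 4
+```
+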
+## Step 2: Configure dataset
+
+In this step, we'll review and adjust the dataset settings for our experiment.
+
+1. In the **Dataset name** field, enter `regression`.
+2. In the **Problem type** dropdown, select **Causal regression modeling**.
+3. In the **Train dataframe** dropdown, leave the default train dataframe.
+4. In the **Validation dataframe** dropdown, leave the default validation dataframe.
+5. In the **Prompt column** dropdown, select **Prompt**.
+6. In the **Answer column** dropdown, select **helpfulness**.
+7. Click **Continue**.
+   ![Configure dataset](configure-dataset.png)
+8. On the **Sample data visualization** page, click **Continue** if the input data and labels appear correctly.
+
+## Step 3: Create a new experiment
+
+Now that the dataset is imported, let's create a new experiment for causal regression modeling.
+
+1. From the **View datasets** page, click on the <Icon>more_vert</Icon> Kebab menu next to the `regression` dataset, then select **New experiment**.
+2. In **General settings**, enter `tutorial-2a` in the **Experiment name** text box.
+   ![General settings](general-settings.png)
+3. In **Dataset settings**, set the **Data sample** to 0.1.
+   ![Dataset settings](dataset-settings.png)
+4. In **Training settings**, select **MSELoss** from the **Loss function** dropdown.
+   ![Training settings](training-settings.png)
+5. In **Prediction settings**, select **MSE** from the **Metric** dropdown.
+   ![Prediction settings](prediction-settings.png)
+6. Leave the other configurations at their default values.
+7. Click **Run experiment**.
+
+## Step 4: Evaluate experiment
+
+After successfully creating the new experiment, click on the experiment name to access the experiment tabs. These tabs provide detailed information and insights into various aspects of your experiment. For more information about the experiment tabs, see [Experiment tabs](../../guide/experiments/view-an-experiment.md#experiment-tabs).
+
+![Evaluate experiment](evaluate-experiment.png)
+
+## Summary
+
+In this tutorial, we covered the process of setting up a causal regression experiment using H2O LLM Studio. You learned how to import a dataset from Hugging Face, configure both dataset and experiment settings, and create a new experiment. With these steps, you're now ready to explore other datasets and experiment with various configurations for the causal regression [problem type](../../guide/experiments/supported-problem-types.md) in H2O LLM Studio.
\ No newline at end of file
diff --git a/documentation/makersaurus.config.js b/documentation/makersaurus.config.js
new file mode 100644
index 0000000000000000000000000000000000000000..b7525b34d7d02e51ef7e98d5cef184ab378040ec
--- /dev/null
+++ b/documentation/makersaurus.config.js
@@ -0,0 +1,27 @@
+module.exports = {
+ title: "H2O LLM Studio | Docs",
+ tagline: "The code for this site was generated by Makersaurus",
+ url: "https://docs.h2o.ai",
+ baseUrl: "/h2o-llmstudio/",
+ projectName: "h2o-llmstudio", // Usually your repo name
+ feedbackAssignee: "sherenem", // Should be a github username
+ feedbackLabels: ["llmstudio"],
+ searchFilter: "h2o-llmstudio",
+ showLastUpdateAuthor: false,
+ includeCurrentVersion: true,
+ versions: {
+ current: {
+ label: " ",
+ path: "/",
+ },
+ },
+ dependencies: {
+ "@emotion/react": "^11.10.5",
+ "@emotion/styled": "^11.10.5",
+ "@material-ui/core": "^4.12.4",
+ "@material/card": "^14.0.0",
+ "@mui/icons-material": "^5.10.16",
+ "@mui/material": "^5.10.16",
+ "react-player": "^2.11.0",
+ },
+};
diff --git a/documentation/package-lock.json b/documentation/package-lock.json
new file mode 100644
index 0000000000000000000000000000000000000000..8d1bacbfee5a29960bf42801ab54fdec03d82468
--- /dev/null
+++ b/documentation/package-lock.json
@@ -0,0 +1,20683 @@
+{
+ "name": "h2o-llmstudio-docs",
+ "version": "0.0.0",
+ "lockfileVersion": 2,
+ "requires": true,
+ "packages": {
+ "": {
+ "name": "h2o-llmstudio-docs",
+ "version": "0.0.0",
+ "dependencies": {
+ "@h2oai/makersaurus": "^0.8.4",
+ "docusaurus-plugin-includes": "^1.1.4"
+ }
+ },
+ "node_modules/@ampproject/remapping": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz",
+ "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==",
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.3.0",
+ "@jridgewell/trace-mapping": "^0.3.9"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/code-frame": {
+ "version": "7.24.7",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.24.7.tgz",
+ "integrity": "sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/highlight": "^7.24.7",
+ "picocolors": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/compat-data": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.5.tgz",
+ "integrity": "sha512-4Jc/YuIaYqKnDDz892kPIledykKg12Aw1PYX5i/TY28anJtacvM1Rrr8wbieB9GfEJwlzqT0hUEao0CxEebiDA==",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/core": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.22.5.tgz",
+ "integrity": "sha512-SBuTAjg91A3eKOvD+bPEz3LlhHZRNu1nFOVts9lzDJTXshHTjII0BAtDS3Y2DAkdZdDKWVZGVwkDfc4Clxn1dg==",
+ "dependencies": {
+ "@ampproject/remapping": "^2.2.0",
+ "@babel/code-frame": "^7.22.5",
+ "@babel/generator": "^7.22.5",
+ "@babel/helper-compilation-targets": "^7.22.5",
+ "@babel/helper-module-transforms": "^7.22.5",
+ "@babel/helpers": "^7.22.5",
+ "@babel/parser": "^7.22.5",
+ "@babel/template": "^7.22.5",
+ "@babel/traverse": "^7.22.5",
+ "@babel/types": "^7.22.5",
+ "convert-source-map": "^1.7.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.2.2",
+ "semver": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/babel"
+ }
+ },
+ "node_modules/@babel/core/node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/@babel/generator": {
+ "version": "7.25.6",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.25.6.tgz",
+ "integrity": "sha512-VPC82gr1seXOpkjAAKoLhP50vx4vGNlF4msF64dSFq1P8RfB+QAuJWGHPXXPc8QyfVWwwB/TNNU4+ayZmHNbZw==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.25.6",
+ "@jridgewell/gen-mapping": "^0.3.5",
+ "@jridgewell/trace-mapping": "^0.3.25",
+ "jsesc": "^2.5.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-annotate-as-pure": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz",
+ "integrity": "sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==",
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-builder-binary-assignment-operator-visitor": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.22.5.tgz",
+ "integrity": "sha512-m1EP3lVOPptR+2DwD125gziZNcmoNSHGmJROKoy87loWUQyJaVXDgpmruWqDARZSmtYQ+Dl25okU8+qhVzuykw==",
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-compilation-targets": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.5.tgz",
+ "integrity": "sha512-Ji+ywpHeuqxB8WDxraCiqR0xfhYjiDE/e6k7FuIaANnoOFxAHskHChz4vA1mJC9Lbm01s1PVAGhQY4FUKSkGZw==",
+ "dependencies": {
+ "@babel/compat-data": "^7.22.5",
+ "@babel/helper-validator-option": "^7.22.5",
+ "browserslist": "^4.21.3",
+ "lru-cache": "^5.1.1",
+ "semver": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-compilation-targets/node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/@babel/helper-create-class-features-plugin": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.22.5.tgz",
+ "integrity": "sha512-xkb58MyOYIslxu3gKmVXmjTtUPvBU4odYzbiIQbWwLKIHCsx6UGZGX6F1IznMFVnDdirseUZopzN+ZRt8Xb33Q==",
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-environment-visitor": "^7.22.5",
+ "@babel/helper-function-name": "^7.22.5",
+ "@babel/helper-member-expression-to-functions": "^7.22.5",
+ "@babel/helper-optimise-call-expression": "^7.22.5",
+ "@babel/helper-replace-supers": "^7.22.5",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5",
+ "@babel/helper-split-export-declaration": "^7.22.5",
+ "semver": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-create-class-features-plugin/node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/@babel/helper-create-regexp-features-plugin": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.22.5.tgz",
+ "integrity": "sha512-1VpEFOIbMRaXyDeUwUfmTIxExLwQ+zkW+Bh5zXpApA3oQedBx9v/updixWxnx/bZpKw7u8VxWjb/qWpIcmPq8A==",
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "regexpu-core": "^5.3.1",
+ "semver": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-create-regexp-features-plugin/node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/@babel/helper-define-polyfill-provider": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.4.0.tgz",
+ "integrity": "sha512-RnanLx5ETe6aybRi1cO/edaRH+bNYWaryCEmjDDYyNr4wnSzyOp8T0dWipmqVHKEY3AbVKUom50AKSlj1zmKbg==",
+ "dependencies": {
+ "@babel/helper-compilation-targets": "^7.17.7",
+ "@babel/helper-plugin-utils": "^7.16.7",
+ "debug": "^4.1.1",
+ "lodash.debounce": "^4.0.8",
+ "resolve": "^1.14.2",
+ "semver": "^6.1.2"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.4.0-0"
+ }
+ },
+ "node_modules/@babel/helper-define-polyfill-provider/node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/@babel/helper-environment-visitor": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.5.tgz",
+ "integrity": "sha512-XGmhECfVA/5sAt+H+xpSg0mfrHq6FzNr9Oxh7PSEBBRUb/mL7Kz3NICXb194rCqAEdxkhPT1a88teizAFyvk8Q==",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-function-name": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.22.5.tgz",
+ "integrity": "sha512-wtHSq6jMRE3uF2otvfuD3DIvVhOsSNshQl0Qrd7qC9oQJzHvOL4qQXlQn2916+CXGywIjpGuIkoyZRRxHPiNQQ==",
+ "dependencies": {
+ "@babel/template": "^7.22.5",
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-hoist-variables": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz",
+ "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==",
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-member-expression-to-functions": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.22.5.tgz",
+ "integrity": "sha512-aBiH1NKMG0H2cGZqspNvsaBe6wNGjbJjuLy29aU+eDZjSbbN53BaxlpB02xm9v34pLTZ1nIQPFYn2qMZoa5BQQ==",
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-imports": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.5.tgz",
+ "integrity": "sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg==",
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-module-transforms": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.22.5.tgz",
+ "integrity": "sha512-+hGKDt/Ze8GFExiVHno/2dvG5IdstpzCq0y4Qc9OJ25D4q3pKfiIP/4Vp3/JvhDkLKsDK2api3q3fpIgiIF5bw==",
+ "dependencies": {
+ "@babel/helper-environment-visitor": "^7.22.5",
+ "@babel/helper-module-imports": "^7.22.5",
+ "@babel/helper-simple-access": "^7.22.5",
+ "@babel/helper-split-export-declaration": "^7.22.5",
+ "@babel/helper-validator-identifier": "^7.22.5",
+ "@babel/template": "^7.22.5",
+ "@babel/traverse": "^7.22.5",
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-optimise-call-expression": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz",
+ "integrity": "sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==",
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-plugin-utils": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz",
+ "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg==",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-remap-async-to-generator": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.5.tgz",
+ "integrity": "sha512-cU0Sq1Rf4Z55fgz7haOakIyM7+x/uCFwXpLPaeRzfoUtAEAuUZjZvFPjL/rk5rW693dIgn2hng1W7xbT7lWT4g==",
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-environment-visitor": "^7.22.5",
+ "@babel/helper-wrap-function": "^7.22.5",
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/helper-replace-supers": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.22.5.tgz",
+ "integrity": "sha512-aLdNM5I3kdI/V9xGNyKSF3X/gTyMUBohTZ+/3QdQKAA9vxIiy12E+8E2HoOP1/DjeqU+g6as35QHJNMDDYpuCg==",
+ "dependencies": {
+ "@babel/helper-environment-visitor": "^7.22.5",
+ "@babel/helper-member-expression-to-functions": "^7.22.5",
+ "@babel/helper-optimise-call-expression": "^7.22.5",
+ "@babel/template": "^7.22.5",
+ "@babel/traverse": "^7.22.5",
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-simple-access": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz",
+ "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==",
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-skip-transparent-expression-wrappers": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz",
+ "integrity": "sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==",
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-split-export-declaration": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.5.tgz",
+ "integrity": "sha512-thqK5QFghPKWLhAV321lxF95yCg2K3Ob5yw+M3VHWfdia0IkPXUtoLH8x/6Fh486QUvzhb8YOWHChTVen2/PoQ==",
+ "dependencies": {
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-string-parser": {
+ "version": "7.24.8",
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.8.tgz",
+ "integrity": "sha512-pO9KhhRcuUyGnJWwyEgnRJTSIZHiT+vMD0kPeD+so0l7mxkMT19g3pjY9GTnHySck/hDzq+dtW/4VgnMkippsQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-identifier": {
+ "version": "7.24.7",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz",
+ "integrity": "sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-validator-option": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.5.tgz",
+ "integrity": "sha512-R3oB6xlIVKUnxNUxbmgq7pKjxpru24zlimpE8WK47fACIlM0II/Hm1RS8IaOI7NgCr6LNS+jl5l75m20npAziw==",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helper-wrap-function": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.22.5.tgz",
+ "integrity": "sha512-bYqLIBSEshYcYQyfks8ewYA8S30yaGSeRslcvKMvoUk6HHPySbxHq9YRi6ghhzEU+yhQv9bP/jXnygkStOcqZw==",
+ "dependencies": {
+ "@babel/helper-function-name": "^7.22.5",
+ "@babel/template": "^7.22.5",
+ "@babel/traverse": "^7.22.5",
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/helpers": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.22.5.tgz",
+ "integrity": "sha512-pSXRmfE1vzcUIDFQcSGA5Mr+GxBV9oiRKDuDxXvWQQBCh8HoIjs/2DlDB7H8smac1IVrB9/xdXj2N3Wol9Cr+Q==",
+ "dependencies": {
+ "@babel/template": "^7.22.5",
+ "@babel/traverse": "^7.22.5",
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/highlight": {
+ "version": "7.24.7",
+ "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.24.7.tgz",
+ "integrity": "sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-validator-identifier": "^7.24.7",
+ "chalk": "^2.4.2",
+ "js-tokens": "^4.0.0",
+ "picocolors": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/highlight/node_modules/ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "license": "MIT",
+ "dependencies": {
+ "color-convert": "^1.9.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@babel/highlight/node_modules/chalk": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+ "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+ "license": "MIT",
+ "dependencies": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@babel/highlight/node_modules/color-convert": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+ "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+ "license": "MIT",
+ "dependencies": {
+ "color-name": "1.1.3"
+ }
+ },
+ "node_modules/@babel/highlight/node_modules/color-name": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+ "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==",
+ "license": "MIT"
+ },
+ "node_modules/@babel/highlight/node_modules/escape-string-regexp": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+ "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/@babel/highlight/node_modules/has-flag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+ "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@babel/highlight/node_modules/supports-color": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+ "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "license": "MIT",
+ "dependencies": {
+ "has-flag": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/@babel/parser": {
+ "version": "7.25.6",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.25.6.tgz",
+ "integrity": "sha512-trGdfBdbD0l1ZPmcJ83eNxB9rbEax4ALFTF7fN386TMYbeCQbyme5cOEXQhbGXKebwGaB/J52w1mrklMcbgy6Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/types": "^7.25.6"
+ },
+ "bin": {
+ "parser": "bin/babel-parser.js"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.22.5.tgz",
+ "integrity": "sha512-NP1M5Rf+u2Gw9qfSO4ihjcTGW5zXTi36ITLd4/EoAcEhIZ0yjMqmftDNl3QC19CX7olhrjpyU454g/2W7X0jvQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.22.5.tgz",
+ "integrity": "sha512-31Bb65aZaUwqCbWMnZPduIZxCBngHFlzyN6Dq6KAJjtx+lx6ohKHubc61OomYi7XwVD4Ol0XCVz4h+pYFR048g==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5",
+ "@babel/plugin-transform-optional-chaining": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.13.0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-object-rest-spread": {
+ "version": "7.12.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.12.1.tgz",
+ "integrity": "sha512-s6SowJIjzlhx8o7lsFx5zmY4At6CTtDvgNQDdPzkBQucle58A6b/TTeEBYtyDgmcXjUTM+vE8YOGHZzzbc/ioA==",
+ "deprecated": "This proposal has been merged to the ECMAScript standard and thus this plugin is no longer maintained. Please use @babel/plugin-transform-object-rest-spread instead.",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.10.4",
+ "@babel/plugin-syntax-object-rest-spread": "^7.8.0",
+ "@babel/plugin-transform-parameters": "^7.12.1"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-private-property-in-object": {
+ "version": "7.21.0-placeholder-for-preset-env.2",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz",
+ "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==",
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-proposal-unicode-property-regex": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.18.6.tgz",
+ "integrity": "sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w==",
+ "dependencies": {
+ "@babel/helper-create-regexp-features-plugin": "^7.18.6",
+ "@babel/helper-plugin-utils": "^7.18.6"
+ },
+ "engines": {
+ "node": ">=4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-async-generators": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz",
+ "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-class-properties": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz",
+ "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-class-static-block": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz",
+ "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.14.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-dynamic-import": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz",
+ "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-export-namespace-from": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz",
+ "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-import-assertions": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.22.5.tgz",
+ "integrity": "sha512-rdV97N7KqsRzeNGoWUOK6yUsWarLjE5Su/Snk9IYPU9CwkWHs4t+rTGOvffTR8XGkJMTAdLfO0xVnXm8wugIJg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-import-attributes": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.22.5.tgz",
+ "integrity": "sha512-KwvoWDeNKPETmozyFE0P2rOLqh39EoQHNjqizrI5B8Vt0ZNS7M56s7dAiAqbYfiAYOuIzIh96z3iR2ktgu3tEg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-import-meta": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz",
+ "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-json-strings": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz",
+ "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-jsx": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz",
+ "integrity": "sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-logical-assignment-operators": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz",
+ "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-nullish-coalescing-operator": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz",
+ "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-numeric-separator": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz",
+ "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-object-rest-spread": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz",
+ "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-optional-catch-binding": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz",
+ "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-optional-chaining": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz",
+ "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-private-property-in-object": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz",
+ "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.14.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-top-level-await": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz",
+ "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.14.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-typescript": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.22.5.tgz",
+ "integrity": "sha512-1mS2o03i7t1c6VzH6fdQ3OA8tcEIxwG18zIPRp+UY1Ihv6W+XZzBCVxExF9upussPXJ0xE9XRHwMoNs1ep/nRQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-syntax-unicode-sets-regex": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz",
+ "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==",
+ "dependencies": {
+ "@babel/helper-create-regexp-features-plugin": "^7.18.6",
+ "@babel/helper-plugin-utils": "^7.18.6"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-arrow-functions": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.22.5.tgz",
+ "integrity": "sha512-26lTNXoVRdAnsaDXPpvCNUq+OVWEVC6bx7Vvz9rC53F2bagUWW4u4ii2+h8Fejfh7RYqPxn+libeFBBck9muEw==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-async-generator-functions": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.22.5.tgz",
+ "integrity": "sha512-gGOEvFzm3fWoyD5uZq7vVTD57pPJ3PczPUD/xCFGjzBpUosnklmXyKnGQbbbGs1NPNPskFex0j93yKbHt0cHyg==",
+ "dependencies": {
+ "@babel/helper-environment-visitor": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-remap-async-to-generator": "^7.22.5",
+ "@babel/plugin-syntax-async-generators": "^7.8.4"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-async-to-generator": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.22.5.tgz",
+ "integrity": "sha512-b1A8D8ZzE/VhNDoV1MSJTnpKkCG5bJo+19R4o4oy03zM7ws8yEMK755j61Dc3EyvdysbqH5BOOTquJ7ZX9C6vQ==",
+ "dependencies": {
+ "@babel/helper-module-imports": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-remap-async-to-generator": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-block-scoped-functions": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.22.5.tgz",
+ "integrity": "sha512-tdXZ2UdknEKQWKJP1KMNmuF5Lx3MymtMN/pvA+p/VEkhK8jVcQ1fzSy8KM9qRYhAf2/lV33hoMPKI/xaI9sADA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-block-scoping": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.22.5.tgz",
+ "integrity": "sha512-EcACl1i5fSQ6bt+YGuU/XGCeZKStLmyVGytWkpyhCLeQVA0eu6Wtiw92V+I1T/hnezUv7j74dA/Ro69gWcU+hg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-class-properties": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.22.5.tgz",
+ "integrity": "sha512-nDkQ0NfkOhPTq8YCLiWNxp1+f9fCobEjCb0n8WdbNUBc4IB5V7P1QnX9IjpSoquKrXF5SKojHleVNs2vGeHCHQ==",
+ "dependencies": {
+ "@babel/helper-create-class-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-class-static-block": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.22.5.tgz",
+ "integrity": "sha512-SPToJ5eYZLxlnp1UzdARpOGeC2GbHvr9d/UV0EukuVx8atktg194oe+C5BqQ8jRTkgLRVOPYeXRSBg1IlMoVRA==",
+ "dependencies": {
+ "@babel/helper-create-class-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-class-static-block": "^7.14.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.12.0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-classes": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.22.5.tgz",
+ "integrity": "sha512-2edQhLfibpWpsVBx2n/GKOz6JdGQvLruZQfGr9l1qes2KQaWswjBzhQF7UDUZMNaMMQeYnQzxwOMPsbYF7wqPQ==",
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-compilation-targets": "^7.22.5",
+ "@babel/helper-environment-visitor": "^7.22.5",
+ "@babel/helper-function-name": "^7.22.5",
+ "@babel/helper-optimise-call-expression": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-replace-supers": "^7.22.5",
+ "@babel/helper-split-export-declaration": "^7.22.5",
+ "globals": "^11.1.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-computed-properties": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.22.5.tgz",
+ "integrity": "sha512-4GHWBgRf0krxPX+AaPtgBAlTgTeZmqDynokHOX7aqqAB4tHs3U2Y02zH6ETFdLZGcg9UQSD1WCmkVrE9ErHeOg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/template": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-destructuring": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.22.5.tgz",
+ "integrity": "sha512-GfqcFuGW8vnEqTUBM7UtPd5A4q797LTvvwKxXTgRsFjoqaJiEg9deBG6kWeQYkVEL569NpnmpC0Pkr/8BLKGnQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-dotall-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.22.5.tgz",
+ "integrity": "sha512-5/Yk9QxCQCl+sOIB1WelKnVRxTJDSAIxtJLL2/pqL14ZVlbH0fUQUZa/T5/UnQtBNgghR7mfB8ERBKyKPCi7Vw==",
+ "dependencies": {
+ "@babel/helper-create-regexp-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-duplicate-keys": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.22.5.tgz",
+ "integrity": "sha512-dEnYD+9BBgld5VBXHnF/DbYGp3fqGMsyxKbtD1mDyIA7AkTSpKXFhCVuj/oQVOoALfBs77DudA0BE4d5mcpmqw==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-dynamic-import": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.22.5.tgz",
+ "integrity": "sha512-0MC3ppTB1AMxd8fXjSrbPa7LT9hrImt+/fcj+Pg5YMD7UQyWp/02+JWpdnCymmsXwIx5Z+sYn1bwCn4ZJNvhqQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-dynamic-import": "^7.8.3"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-exponentiation-operator": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.22.5.tgz",
+ "integrity": "sha512-vIpJFNM/FjZ4rh1myqIya9jXwrwwgFRHPjT3DkUA9ZLHuzox8jiXkOLvwm1H+PQIP3CqfC++WPKeuDi0Sjdj1g==",
+ "dependencies": {
+ "@babel/helper-builder-binary-assignment-operator-visitor": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-export-namespace-from": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.22.5.tgz",
+ "integrity": "sha512-X4hhm7FRnPgd4nDA4b/5V280xCx6oL7Oob5+9qVS5C13Zq4bh1qq7LU0GgRU6b5dBWBvhGaXYVB4AcN6+ol6vg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-export-namespace-from": "^7.8.3"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-for-of": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.22.5.tgz",
+ "integrity": "sha512-3kxQjX1dU9uudwSshyLeEipvrLjBCVthCgeTp6CzE/9JYrlAIaeekVxRpCWsDDfYTfRZRoCeZatCQvwo+wvK8A==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-function-name": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.22.5.tgz",
+ "integrity": "sha512-UIzQNMS0p0HHiQm3oelztj+ECwFnj+ZRV4KnguvlsD2of1whUeM6o7wGNj6oLwcDoAXQ8gEqfgC24D+VdIcevg==",
+ "dependencies": {
+ "@babel/helper-compilation-targets": "^7.22.5",
+ "@babel/helper-function-name": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-json-strings": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.22.5.tgz",
+ "integrity": "sha512-DuCRB7fu8MyTLbEQd1ew3R85nx/88yMoqo2uPSjevMj3yoN7CDM8jkgrY0wmVxfJZyJ/B9fE1iq7EQppWQmR5A==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-json-strings": "^7.8.3"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-literals": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.22.5.tgz",
+ "integrity": "sha512-fTLj4D79M+mepcw3dgFBTIDYpbcB9Sm0bpm4ppXPaO+U+PKFFyV9MGRvS0gvGw62sd10kT5lRMKXAADb9pWy8g==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-logical-assignment-operators": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.22.5.tgz",
+ "integrity": "sha512-MQQOUW1KL8X0cDWfbwYP+TbVbZm16QmQXJQ+vndPtH/BoO0lOKpVoEDMI7+PskYxH+IiE0tS8xZye0qr1lGzSA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-member-expression-literals": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.22.5.tgz",
+ "integrity": "sha512-RZEdkNtzzYCFl9SE9ATaUMTj2hqMb4StarOJLrZRbqqU4HSBE7UlBw9WBWQiDzrJZJdUWiMTVDI6Gv/8DPvfew==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-modules-amd": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.22.5.tgz",
+ "integrity": "sha512-R+PTfLTcYEmb1+kK7FNkhQ1gP4KgjpSO6HfH9+f8/yfp2Nt3ggBjiVpRwmwTlfqZLafYKJACy36yDXlEmI9HjQ==",
+ "dependencies": {
+ "@babel/helper-module-transforms": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-modules-commonjs": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.22.5.tgz",
+ "integrity": "sha512-B4pzOXj+ONRmuaQTg05b3y/4DuFz3WcCNAXPLb2Q0GT0TrGKGxNKV4jwsXts+StaM0LQczZbOpj8o1DLPDJIiA==",
+ "dependencies": {
+ "@babel/helper-module-transforms": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-simple-access": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-modules-systemjs": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.22.5.tgz",
+ "integrity": "sha512-emtEpoaTMsOs6Tzz+nbmcePl6AKVtS1yC4YNAeMun9U8YCsgadPNxnOPQ8GhHFB2qdx+LZu9LgoC0Lthuu05DQ==",
+ "dependencies": {
+ "@babel/helper-hoist-variables": "^7.22.5",
+ "@babel/helper-module-transforms": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-validator-identifier": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-modules-umd": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.22.5.tgz",
+ "integrity": "sha512-+S6kzefN/E1vkSsKx8kmQuqeQsvCKCd1fraCM7zXm4SFoggI099Tr4G8U81+5gtMdUeMQ4ipdQffbKLX0/7dBQ==",
+ "dependencies": {
+ "@babel/helper-module-transforms": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-named-capturing-groups-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.22.5.tgz",
+ "integrity": "sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==",
+ "dependencies": {
+ "@babel/helper-create-regexp-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-new-target": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.22.5.tgz",
+ "integrity": "sha512-AsF7K0Fx/cNKVyk3a+DW0JLo+Ua598/NxMRvxDnkpCIGFh43+h/v2xyhRUYf6oD8gE4QtL83C7zZVghMjHd+iw==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-nullish-coalescing-operator": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.22.5.tgz",
+ "integrity": "sha512-6CF8g6z1dNYZ/VXok5uYkkBBICHZPiGEl7oDnAx2Mt1hlHVHOSIKWJaXHjQJA5VB43KZnXZDIexMchY4y2PGdA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-numeric-separator": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.22.5.tgz",
+ "integrity": "sha512-NbslED1/6M+sXiwwtcAB/nieypGw02Ejf4KtDeMkCEpP6gWFMX1wI9WKYua+4oBneCCEmulOkRpwywypVZzs/g==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-numeric-separator": "^7.10.4"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-object-rest-spread": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.22.5.tgz",
+ "integrity": "sha512-Kk3lyDmEslH9DnvCDA1s1kkd3YWQITiBOHngOtDL9Pt6BZjzqb6hiOlb8VfjiiQJ2unmegBqZu0rx5RxJb5vmQ==",
+ "dependencies": {
+ "@babel/compat-data": "^7.22.5",
+ "@babel/helper-compilation-targets": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-object-rest-spread": "^7.8.3",
+ "@babel/plugin-transform-parameters": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-object-super": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.22.5.tgz",
+ "integrity": "sha512-klXqyaT9trSjIUrcsYIfETAzmOEZL3cBYqOYLJxBHfMFFggmXOv+NYSX/Jbs9mzMVESw/WycLFPRx8ba/b2Ipw==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-replace-supers": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-optional-catch-binding": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.22.5.tgz",
+ "integrity": "sha512-pH8orJahy+hzZje5b8e2QIlBWQvGpelS76C63Z+jhZKsmzfNaPQ+LaW6dcJ9bxTpo1mtXbgHwy765Ro3jftmUg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-optional-catch-binding": "^7.8.3"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-optional-chaining": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.22.5.tgz",
+ "integrity": "sha512-AconbMKOMkyG+xCng2JogMCDcqW8wedQAqpVIL4cOSescZ7+iW8utC6YDZLMCSUIReEA733gzRSaOSXMAt/4WQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5",
+ "@babel/plugin-syntax-optional-chaining": "^7.8.3"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-parameters": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.22.5.tgz",
+ "integrity": "sha512-AVkFUBurORBREOmHRKo06FjHYgjrabpdqRSwq6+C7R5iTCZOsM4QbcB27St0a4U6fffyAOqh3s/qEfybAhfivg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-private-methods": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.22.5.tgz",
+ "integrity": "sha512-PPjh4gyrQnGe97JTalgRGMuU4icsZFnWkzicB/fUtzlKUqvsWBKEpPPfr5a2JiyirZkHxnAqkQMO5Z5B2kK3fA==",
+ "dependencies": {
+ "@babel/helper-create-class-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-private-property-in-object": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.22.5.tgz",
+ "integrity": "sha512-/9xnaTTJcVoBtSSmrVyhtSvO3kbqS2ODoh2juEU72c3aYonNF0OMGiaz2gjukyKM2wBBYJP38S4JiE0Wfb5VMQ==",
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-create-class-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-private-property-in-object": "^7.14.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-property-literals": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.22.5.tgz",
+ "integrity": "sha512-TiOArgddK3mK/x1Qwf5hay2pxI6wCZnvQqrFSqbtg1GLl2JcNMitVH/YnqjP+M31pLUeTfzY1HAXFDnUBV30rQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-react-constant-elements": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.22.5.tgz",
+ "integrity": "sha512-BF5SXoO+nX3h5OhlN78XbbDrBOffv+AxPP2ENaJOVqjWCgBDeOY3WcaUcddutGSfoap+5NEQ/q/4I3WZIvgkXA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-react-display-name": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.22.5.tgz",
+ "integrity": "sha512-PVk3WPYudRF5z4GKMEYUrLjPl38fJSKNaEOkFuoprioowGuWN6w2RKznuFNSlJx7pzzXXStPUnNSOEO0jL5EVw==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-react-jsx": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.22.5.tgz",
+ "integrity": "sha512-rog5gZaVbUip5iWDMTYbVM15XQq+RkUKhET/IHR6oizR+JEoN6CAfTTuHcK4vwUyzca30qqHqEpzBOnaRMWYMA==",
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-module-imports": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-jsx": "^7.22.5",
+ "@babel/types": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-react-jsx-development": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.22.5.tgz",
+ "integrity": "sha512-bDhuzwWMuInwCYeDeMzyi7TaBgRQei6DqxhbyniL7/VG4RSS7HtSL2QbY4eESy1KJqlWt8g3xeEBGPuo+XqC8A==",
+ "dependencies": {
+ "@babel/plugin-transform-react-jsx": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-react-pure-annotations": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.22.5.tgz",
+ "integrity": "sha512-gP4k85wx09q+brArVinTXhWiyzLl9UpmGva0+mWyKxk6JZequ05x3eUcIUE+FyttPKJFRRVtAvQaJ6YF9h1ZpA==",
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-regenerator": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.22.5.tgz",
+ "integrity": "sha512-rR7KePOE7gfEtNTh9Qw+iO3Q/e4DEsoQ+hdvM6QUDH7JRJ5qxq5AA52ZzBWbI5i9lfNuvySgOGP8ZN7LAmaiPw==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "regenerator-transform": "^0.15.1"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-reserved-words": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.22.5.tgz",
+ "integrity": "sha512-DTtGKFRQUDm8svigJzZHzb/2xatPc6TzNvAIJ5GqOKDsGFYgAskjRulbR/vGsPKq3OPqtexnz327qYpP57RFyA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-runtime": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.22.5.tgz",
+ "integrity": "sha512-bg4Wxd1FWeFx3daHFTWk1pkSWK/AyQuiyAoeZAOkAOUBjnZPH6KT7eMxouV47tQ6hl6ax2zyAWBdWZXbrvXlaw==",
+ "dependencies": {
+ "@babel/helper-module-imports": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "babel-plugin-polyfill-corejs2": "^0.4.3",
+ "babel-plugin-polyfill-corejs3": "^0.8.1",
+ "babel-plugin-polyfill-regenerator": "^0.5.0",
+ "semver": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-runtime/node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/@babel/plugin-transform-shorthand-properties": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.22.5.tgz",
+ "integrity": "sha512-vM4fq9IXHscXVKzDv5itkO1X52SmdFBFcMIBZ2FRn2nqVYqw6dBexUgMvAjHW+KXpPPViD/Yo3GrDEBaRC0QYA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-spread": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.22.5.tgz",
+ "integrity": "sha512-5ZzDQIGyvN4w8+dMmpohL6MBo+l2G7tfC/O2Dg7/hjpgeWvUx8FzfeOKxGog9IimPa4YekaQ9PlDqTLOljkcxg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-sticky-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.22.5.tgz",
+ "integrity": "sha512-zf7LuNpHG0iEeiyCNwX4j3gDg1jgt1k3ZdXBKbZSoA3BbGQGvMiSvfbZRR3Dr3aeJe3ooWFZxOOG3IRStYp2Bw==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-template-literals": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.22.5.tgz",
+ "integrity": "sha512-5ciOehRNf+EyUeewo8NkbQiUs4d6ZxiHo6BcBcnFlgiJfu16q0bQUw9Jvo0b0gBKFG1SMhDSjeKXSYuJLeFSMA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-typeof-symbol": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.22.5.tgz",
+ "integrity": "sha512-bYkI5lMzL4kPii4HHEEChkD0rkc+nvnlR6+o/qdqR6zrm0Sv/nodmyLhlq2DO0YKLUNd2VePmPRjJXSBh9OIdA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-typescript": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.22.5.tgz",
+ "integrity": "sha512-SMubA9S7Cb5sGSFFUlqxyClTA9zWJ8qGQrppNUm05LtFuN1ELRFNndkix4zUJrC9F+YivWwa1dHMSyo0e0N9dA==",
+ "dependencies": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-create-class-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-typescript": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-unicode-escapes": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.22.5.tgz",
+ "integrity": "sha512-biEmVg1IYB/raUO5wT1tgfacCef15Fbzhkx493D3urBI++6hpJ+RFG4SrWMn0NEZLfvilqKf3QDrRVZHo08FYg==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-unicode-property-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.22.5.tgz",
+ "integrity": "sha512-HCCIb+CbJIAE6sXn5CjFQXMwkCClcOfPCzTlilJ8cUatfzwHlWQkbtV0zD338u9dZskwvuOYTuuaMaA8J5EI5A==",
+ "dependencies": {
+ "@babel/helper-create-regexp-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-unicode-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.22.5.tgz",
+ "integrity": "sha512-028laaOKptN5vHJf9/Arr/HiJekMd41hOEZYvNsrsXqJ7YPYuX2bQxh31fkZzGmq3YqHRJzYFFAVYvKfMPKqyg==",
+ "dependencies": {
+ "@babel/helper-create-regexp-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/plugin-transform-unicode-sets-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.22.5.tgz",
+ "integrity": "sha512-lhMfi4FC15j13eKrh3DnYHjpGj6UKQHtNKTbtc1igvAhRy4+kLhV07OpLcsN0VgDEw/MjAvJO4BdMJsHwMhzCg==",
+ "dependencies": {
+ "@babel/helper-create-regexp-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0"
+ }
+ },
+ "node_modules/@babel/preset-env": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.22.5.tgz",
+ "integrity": "sha512-fj06hw89dpiZzGZtxn+QybifF07nNiZjZ7sazs2aVDcysAZVGjW7+7iFYxg6GLNM47R/thYfLdrXc+2f11Vi9A==",
+ "dependencies": {
+ "@babel/compat-data": "^7.22.5",
+ "@babel/helper-compilation-targets": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-validator-option": "^7.22.5",
+ "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.22.5",
+ "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.22.5",
+ "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2",
+ "@babel/plugin-syntax-async-generators": "^7.8.4",
+ "@babel/plugin-syntax-class-properties": "^7.12.13",
+ "@babel/plugin-syntax-class-static-block": "^7.14.5",
+ "@babel/plugin-syntax-dynamic-import": "^7.8.3",
+ "@babel/plugin-syntax-export-namespace-from": "^7.8.3",
+ "@babel/plugin-syntax-import-assertions": "^7.22.5",
+ "@babel/plugin-syntax-import-attributes": "^7.22.5",
+ "@babel/plugin-syntax-import-meta": "^7.10.4",
+ "@babel/plugin-syntax-json-strings": "^7.8.3",
+ "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4",
+ "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3",
+ "@babel/plugin-syntax-numeric-separator": "^7.10.4",
+ "@babel/plugin-syntax-object-rest-spread": "^7.8.3",
+ "@babel/plugin-syntax-optional-catch-binding": "^7.8.3",
+ "@babel/plugin-syntax-optional-chaining": "^7.8.3",
+ "@babel/plugin-syntax-private-property-in-object": "^7.14.5",
+ "@babel/plugin-syntax-top-level-await": "^7.14.5",
+ "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6",
+ "@babel/plugin-transform-arrow-functions": "^7.22.5",
+ "@babel/plugin-transform-async-generator-functions": "^7.22.5",
+ "@babel/plugin-transform-async-to-generator": "^7.22.5",
+ "@babel/plugin-transform-block-scoped-functions": "^7.22.5",
+ "@babel/plugin-transform-block-scoping": "^7.22.5",
+ "@babel/plugin-transform-class-properties": "^7.22.5",
+ "@babel/plugin-transform-class-static-block": "^7.22.5",
+ "@babel/plugin-transform-classes": "^7.22.5",
+ "@babel/plugin-transform-computed-properties": "^7.22.5",
+ "@babel/plugin-transform-destructuring": "^7.22.5",
+ "@babel/plugin-transform-dotall-regex": "^7.22.5",
+ "@babel/plugin-transform-duplicate-keys": "^7.22.5",
+ "@babel/plugin-transform-dynamic-import": "^7.22.5",
+ "@babel/plugin-transform-exponentiation-operator": "^7.22.5",
+ "@babel/plugin-transform-export-namespace-from": "^7.22.5",
+ "@babel/plugin-transform-for-of": "^7.22.5",
+ "@babel/plugin-transform-function-name": "^7.22.5",
+ "@babel/plugin-transform-json-strings": "^7.22.5",
+ "@babel/plugin-transform-literals": "^7.22.5",
+ "@babel/plugin-transform-logical-assignment-operators": "^7.22.5",
+ "@babel/plugin-transform-member-expression-literals": "^7.22.5",
+ "@babel/plugin-transform-modules-amd": "^7.22.5",
+ "@babel/plugin-transform-modules-commonjs": "^7.22.5",
+ "@babel/plugin-transform-modules-systemjs": "^7.22.5",
+ "@babel/plugin-transform-modules-umd": "^7.22.5",
+ "@babel/plugin-transform-named-capturing-groups-regex": "^7.22.5",
+ "@babel/plugin-transform-new-target": "^7.22.5",
+ "@babel/plugin-transform-nullish-coalescing-operator": "^7.22.5",
+ "@babel/plugin-transform-numeric-separator": "^7.22.5",
+ "@babel/plugin-transform-object-rest-spread": "^7.22.5",
+ "@babel/plugin-transform-object-super": "^7.22.5",
+ "@babel/plugin-transform-optional-catch-binding": "^7.22.5",
+ "@babel/plugin-transform-optional-chaining": "^7.22.5",
+ "@babel/plugin-transform-parameters": "^7.22.5",
+ "@babel/plugin-transform-private-methods": "^7.22.5",
+ "@babel/plugin-transform-private-property-in-object": "^7.22.5",
+ "@babel/plugin-transform-property-literals": "^7.22.5",
+ "@babel/plugin-transform-regenerator": "^7.22.5",
+ "@babel/plugin-transform-reserved-words": "^7.22.5",
+ "@babel/plugin-transform-shorthand-properties": "^7.22.5",
+ "@babel/plugin-transform-spread": "^7.22.5",
+ "@babel/plugin-transform-sticky-regex": "^7.22.5",
+ "@babel/plugin-transform-template-literals": "^7.22.5",
+ "@babel/plugin-transform-typeof-symbol": "^7.22.5",
+ "@babel/plugin-transform-unicode-escapes": "^7.22.5",
+ "@babel/plugin-transform-unicode-property-regex": "^7.22.5",
+ "@babel/plugin-transform-unicode-regex": "^7.22.5",
+ "@babel/plugin-transform-unicode-sets-regex": "^7.22.5",
+ "@babel/preset-modules": "^0.1.5",
+ "@babel/types": "^7.22.5",
+ "babel-plugin-polyfill-corejs2": "^0.4.3",
+ "babel-plugin-polyfill-corejs3": "^0.8.1",
+ "babel-plugin-polyfill-regenerator": "^0.5.0",
+ "core-js-compat": "^3.30.2",
+ "semver": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/preset-env/node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/@babel/preset-modules": {
+ "version": "0.1.5",
+ "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.5.tgz",
+ "integrity": "sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.0.0",
+ "@babel/plugin-proposal-unicode-property-regex": "^7.4.4",
+ "@babel/plugin-transform-dotall-regex": "^7.4.4",
+ "@babel/types": "^7.4.4",
+ "esutils": "^2.0.2"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/preset-react": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.22.5.tgz",
+ "integrity": "sha512-M+Is3WikOpEJHgR385HbuCITPTaPRaNkibTEa9oiofmJvIsrceb4yp9RL9Kb+TE8LznmeyZqpP+Lopwcx59xPQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-validator-option": "^7.22.5",
+ "@babel/plugin-transform-react-display-name": "^7.22.5",
+ "@babel/plugin-transform-react-jsx": "^7.22.5",
+ "@babel/plugin-transform-react-jsx-development": "^7.22.5",
+ "@babel/plugin-transform-react-pure-annotations": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/preset-typescript": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.22.5.tgz",
+ "integrity": "sha512-YbPaal9LxztSGhmndR46FmAbkJ/1fAsw293tSU+I5E5h+cnJ3d4GTwyUgGYmOXJYdGA+uNePle4qbaRzj2NISQ==",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-validator-option": "^7.22.5",
+ "@babel/plugin-syntax-jsx": "^7.22.5",
+ "@babel/plugin-transform-modules-commonjs": "^7.22.5",
+ "@babel/plugin-transform-typescript": "^7.22.5"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@babel/regjsgen": {
+ "version": "0.8.0",
+ "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz",
+ "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA=="
+ },
+ "node_modules/@babel/runtime": {
+ "version": "7.21.5",
+ "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.21.5.tgz",
+ "integrity": "sha512-8jI69toZqqcsnqGGqwGS4Qb1VwLOEp4hz+CXPywcvjs60u3B4Pom/U/7rm4W8tMOYEB+E9wgD0mW1l3r8qlI9Q==",
+ "dependencies": {
+ "regenerator-runtime": "^0.13.11"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/runtime-corejs3": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.22.5.tgz",
+ "integrity": "sha512-TNPDN6aBFaUox2Lu+H/Y1dKKQgr4ucz/FGyCz67RVYLsBpVpUFf1dDngzg+Od8aqbrqwyztkaZjtWCZEUOT8zA==",
+ "dependencies": {
+ "core-js-pure": "^3.30.2",
+ "regenerator-runtime": "^0.13.11"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/template": {
+ "version": "7.25.0",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.25.0.tgz",
+ "integrity": "sha512-aOOgh1/5XzKvg1jvVz7AVrx2piJ2XBi227DHmbY6y+bM9H2FlN+IfecYu4Xl0cNiiVejlsCri89LUsbj8vJD9Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.24.7",
+ "@babel/parser": "^7.25.0",
+ "@babel/types": "^7.25.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/traverse": {
+ "version": "7.25.6",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.25.6.tgz",
+ "integrity": "sha512-9Vrcx5ZW6UwK5tvqsj0nGpp/XzqthkT0dqIc9g1AdtygFToNtTF67XzYS//dm+SAK9cp3B9R4ZO/46p63SCjlQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.24.7",
+ "@babel/generator": "^7.25.6",
+ "@babel/parser": "^7.25.6",
+ "@babel/template": "^7.25.0",
+ "@babel/types": "^7.25.6",
+ "debug": "^4.3.1",
+ "globals": "^11.1.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@babel/types": {
+ "version": "7.25.6",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.25.6.tgz",
+ "integrity": "sha512-/l42B1qxpG6RdfYf343Uw1vmDjeNhneUXtzhojE7pDgfpEypmRhI6j1kr17XCVv4Cgl9HdAiQY2x0GwKm7rWCw==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-string-parser": "^7.24.8",
+ "@babel/helper-validator-identifier": "^7.24.7",
+ "to-fast-properties": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/@colors/colors": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz",
+ "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==",
+ "optional": true,
+ "engines": {
+ "node": ">=0.1.90"
+ }
+ },
+ "node_modules/@discoveryjs/json-ext": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz",
+ "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==",
+ "engines": {
+ "node": ">=10.0.0"
+ }
+ },
+ "node_modules/@docusaurus/core": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.4.3.tgz",
+ "integrity": "sha512-dWH5P7cgeNSIg9ufReX6gaCl/TmrGKD38Orbwuz05WPhAQtFXHd5B8Qym1TiXfvUNvwoYKkAJOJuGe8ou0Z7PA==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/core": "^7.18.6",
+ "@babel/generator": "^7.18.7",
+ "@babel/plugin-syntax-dynamic-import": "^7.8.3",
+ "@babel/plugin-transform-runtime": "^7.18.6",
+ "@babel/preset-env": "^7.18.6",
+ "@babel/preset-react": "^7.18.6",
+ "@babel/preset-typescript": "^7.18.6",
+ "@babel/runtime": "^7.18.6",
+ "@babel/runtime-corejs3": "^7.18.6",
+ "@babel/traverse": "^7.18.8",
+ "@docusaurus/cssnano-preset": "2.4.3",
+ "@docusaurus/logger": "2.4.3",
+ "@docusaurus/mdx-loader": "2.4.3",
+ "@docusaurus/react-loadable": "5.5.2",
+ "@docusaurus/utils": "2.4.3",
+ "@docusaurus/utils-common": "2.4.3",
+ "@docusaurus/utils-validation": "2.4.3",
+ "@slorber/static-site-generator-webpack-plugin": "^4.0.7",
+ "@svgr/webpack": "^6.2.1",
+ "autoprefixer": "^10.4.7",
+ "babel-loader": "^8.2.5",
+ "babel-plugin-dynamic-import-node": "^2.3.3",
+ "boxen": "^6.2.1",
+ "chalk": "^4.1.2",
+ "chokidar": "^3.5.3",
+ "clean-css": "^5.3.0",
+ "cli-table3": "^0.6.2",
+ "combine-promises": "^1.1.0",
+ "commander": "^5.1.0",
+ "copy-webpack-plugin": "^11.0.0",
+ "core-js": "^3.23.3",
+ "css-loader": "^6.7.1",
+ "css-minimizer-webpack-plugin": "^4.0.0",
+ "cssnano": "^5.1.12",
+ "del": "^6.1.1",
+ "detect-port": "^1.3.0",
+ "escape-html": "^1.0.3",
+ "eta": "^2.0.0",
+ "file-loader": "^6.2.0",
+ "fs-extra": "^10.1.0",
+ "html-minifier-terser": "^6.1.0",
+ "html-tags": "^3.2.0",
+ "html-webpack-plugin": "^5.5.0",
+ "import-fresh": "^3.3.0",
+ "leven": "^3.1.0",
+ "lodash": "^4.17.21",
+ "mini-css-extract-plugin": "^2.6.1",
+ "postcss": "^8.4.14",
+ "postcss-loader": "^7.0.0",
+ "prompts": "^2.4.2",
+ "react-dev-utils": "^12.0.1",
+ "react-helmet-async": "^1.3.0",
+ "react-loadable": "npm:@docusaurus/react-loadable@5.5.2",
+ "react-loadable-ssr-addon-v5-slorber": "^1.0.1",
+ "react-router": "^5.3.3",
+ "react-router-config": "^5.1.1",
+ "react-router-dom": "^5.3.3",
+ "rtl-detect": "^1.0.4",
+ "semver": "^7.3.7",
+ "serve-handler": "^6.1.3",
+ "shelljs": "^0.8.5",
+ "terser-webpack-plugin": "^5.3.3",
+ "tslib": "^2.4.0",
+ "update-notifier": "^5.1.0",
+ "url-loader": "^4.1.1",
+ "wait-on": "^6.0.1",
+ "webpack": "^5.73.0",
+ "webpack-bundle-analyzer": "^4.5.0",
+ "webpack-dev-server": "^4.9.3",
+ "webpack-merge": "^5.8.0",
+ "webpackbar": "^5.0.2"
+ },
+ "bin": {
+ "docusaurus": "bin/docusaurus.mjs"
+ },
+ "engines": {
+ "node": ">=16.14"
+ },
+ "peerDependencies": {
+ "react": "^16.8.4 || ^17.0.0",
+ "react-dom": "^16.8.4 || ^17.0.0"
+ }
+ },
+ "node_modules/@docusaurus/core/node_modules/commander": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz",
+ "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==",
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/@docusaurus/core/node_modules/fs-extra": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
+ "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
+ "dependencies": {
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@docusaurus/core/node_modules/jsonfile": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
+ "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
+ "dependencies": {
+ "universalify": "^2.0.0"
+ },
+ "optionalDependencies": {
+ "graceful-fs": "^4.1.6"
+ }
+ },
+ "node_modules/@docusaurus/core/node_modules/universalify": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz",
+ "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==",
+ "engines": {
+ "node": ">= 10.0.0"
+ }
+ },
+ "node_modules/@docusaurus/cssnano-preset": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.4.3.tgz",
+ "integrity": "sha512-ZvGSRCi7z9wLnZrXNPG6DmVPHdKGd8dIn9pYbEOFiYihfv4uDR3UtxogmKf+rT8ZlKFf5Lqne8E8nt08zNM8CA==",
+ "license": "MIT",
+ "dependencies": {
+ "cssnano-preset-advanced": "^5.3.8",
+ "postcss": "^8.4.14",
+ "postcss-sort-media-queries": "^4.2.1",
+ "tslib": "^2.4.0"
+ },
+ "engines": {
+ "node": ">=16.14"
+ }
+ },
+ "node_modules/@docusaurus/logger": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.4.3.tgz",
+ "integrity": "sha512-Zxws7r3yLufk9xM1zq9ged0YHs65mlRmtsobnFkdZTxWXdTYlWWLWdKyNKAsVC+D7zg+pv2fGbyabdOnyZOM3w==",
+ "license": "MIT",
+ "dependencies": {
+ "chalk": "^4.1.2",
+ "tslib": "^2.4.0"
+ },
+ "engines": {
+ "node": ">=16.14"
+ }
+ },
+ "node_modules/@docusaurus/mdx-loader": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.4.3.tgz",
+ "integrity": "sha512-b1+fDnWtl3GiqkL0BRjYtc94FZrcDDBV1j8446+4tptB9BAOlePwG2p/pK6vGvfL53lkOsszXMghr2g67M0vCw==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/parser": "^7.18.8",
+ "@babel/traverse": "^7.18.8",
+ "@docusaurus/logger": "2.4.3",
+ "@docusaurus/utils": "2.4.3",
+ "@mdx-js/mdx": "^1.6.22",
+ "escape-html": "^1.0.3",
+ "file-loader": "^6.2.0",
+ "fs-extra": "^10.1.0",
+ "image-size": "^1.0.1",
+ "mdast-util-to-string": "^2.0.0",
+ "remark-emoji": "^2.2.0",
+ "stringify-object": "^3.3.0",
+ "tslib": "^2.4.0",
+ "unified": "^9.2.2",
+ "unist-util-visit": "^2.0.3",
+ "url-loader": "^4.1.1",
+ "webpack": "^5.73.0"
+ },
+ "engines": {
+ "node": ">=16.14"
+ },
+ "peerDependencies": {
+ "react": "^16.8.4 || ^17.0.0",
+ "react-dom": "^16.8.4 || ^17.0.0"
+ }
+ },
+ "node_modules/@docusaurus/mdx-loader/node_modules/fs-extra": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
+ "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
+ "license": "MIT",
+ "dependencies": {
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@docusaurus/mdx-loader/node_modules/jsonfile": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
+ "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
+ "license": "MIT",
+ "dependencies": {
+ "universalify": "^2.0.0"
+ },
+ "optionalDependencies": {
+ "graceful-fs": "^4.1.6"
+ }
+ },
+ "node_modules/@docusaurus/mdx-loader/node_modules/universalify": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
+ "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 10.0.0"
+ }
+ },
+ "node_modules/@docusaurus/react-loadable": {
+ "version": "5.5.2",
+ "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz",
+ "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==",
+ "dependencies": {
+ "@types/react": "*",
+ "prop-types": "^15.6.2"
+ },
+ "peerDependencies": {
+ "react": "*"
+ }
+ },
+ "node_modules/@docusaurus/types": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.4.1.tgz",
+ "integrity": "sha512-0R+cbhpMkhbRXX138UOc/2XZFF8hiZa6ooZAEEJFp5scytzCw4tC1gChMFXrpa3d2tYE6AX8IrOEpSonLmfQuQ==",
+ "dependencies": {
+ "@types/history": "^4.7.11",
+ "@types/react": "*",
+ "commander": "^5.1.0",
+ "joi": "^17.6.0",
+ "react-helmet-async": "^1.3.0",
+ "utility-types": "^3.10.0",
+ "webpack": "^5.73.0",
+ "webpack-merge": "^5.8.0"
+ },
+ "peerDependencies": {
+ "react": "^16.8.4 || ^17.0.0",
+ "react-dom": "^16.8.4 || ^17.0.0"
+ }
+ },
+ "node_modules/@docusaurus/types/node_modules/commander": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz",
+ "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==",
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/@docusaurus/utils": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.4.3.tgz",
+ "integrity": "sha512-fKcXsjrD86Smxv8Pt0TBFqYieZZCPh4cbf9oszUq/AMhZn3ujwpKaVYZACPX8mmjtYx0JOgNx52CREBfiGQB4A==",
+ "license": "MIT",
+ "dependencies": {
+ "@docusaurus/logger": "2.4.3",
+ "@svgr/webpack": "^6.2.1",
+ "escape-string-regexp": "^4.0.0",
+ "file-loader": "^6.2.0",
+ "fs-extra": "^10.1.0",
+ "github-slugger": "^1.4.0",
+ "globby": "^11.1.0",
+ "gray-matter": "^4.0.3",
+ "js-yaml": "^4.1.0",
+ "lodash": "^4.17.21",
+ "micromatch": "^4.0.5",
+ "resolve-pathname": "^3.0.0",
+ "shelljs": "^0.8.5",
+ "tslib": "^2.4.0",
+ "url-loader": "^4.1.1",
+ "webpack": "^5.73.0"
+ },
+ "engines": {
+ "node": ">=16.14"
+ },
+ "peerDependencies": {
+ "@docusaurus/types": "*"
+ },
+ "peerDependenciesMeta": {
+ "@docusaurus/types": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@docusaurus/utils-common": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.4.3.tgz",
+ "integrity": "sha512-/jascp4GbLQCPVmcGkPzEQjNaAk3ADVfMtudk49Ggb+131B1WDD6HqlSmDf8MxGdy7Dja2gc+StHf01kiWoTDQ==",
+ "license": "MIT",
+ "dependencies": {
+ "tslib": "^2.4.0"
+ },
+ "engines": {
+ "node": ">=16.14"
+ },
+ "peerDependencies": {
+ "@docusaurus/types": "*"
+ },
+ "peerDependenciesMeta": {
+ "@docusaurus/types": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/@docusaurus/utils-validation": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.4.3.tgz",
+ "integrity": "sha512-G2+Vt3WR5E/9drAobP+hhZQMaswRwDlp6qOMi7o7ZypB+VO7N//DZWhZEwhcRGepMDJGQEwtPv7UxtYwPL9PBw==",
+ "license": "MIT",
+ "dependencies": {
+ "@docusaurus/logger": "2.4.3",
+ "@docusaurus/utils": "2.4.3",
+ "joi": "^17.6.0",
+ "js-yaml": "^4.1.0",
+ "tslib": "^2.4.0"
+ },
+ "engines": {
+ "node": ">=16.14"
+ }
+ },
+ "node_modules/@docusaurus/utils/node_modules/fs-extra": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
+ "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
+ "dependencies": {
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/@docusaurus/utils/node_modules/jsonfile": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
+ "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
+ "dependencies": {
+ "universalify": "^2.0.0"
+ },
+ "optionalDependencies": {
+ "graceful-fs": "^4.1.6"
+ }
+ },
+ "node_modules/@docusaurus/utils/node_modules/universalify": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz",
+ "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==",
+ "engines": {
+ "node": ">= 10.0.0"
+ }
+ },
+ "node_modules/@h2oai/makersaurus": {
+ "version": "0.8.4",
+ "resolved": "https://npm.pkg.github.com/download/@h2oai/makersaurus/0.8.4/c185e488ffe1d93da1cc547eeee2e489cfa253c5",
+ "integrity": "sha512-18Cl3gba9hzd7kr6jruzSasjh7VYjeap8KngNPHB/7rrVBHTgcqEcMu72aa7GghWStrwonGr+MB13mx+faOtzQ==",
+ "dependencies": {
+ "commander": "^9.4.1",
+ "handlebars": "^4.7.7",
+ "sync-directory": "^5.1.9",
+ "yup": "^0.32.11"
+ },
+ "bin": {
+ "makersaurus": "src/bin.js"
+ }
+ },
+ "node_modules/@hapi/hoek": {
+ "version": "9.3.0",
+ "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz",
+ "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ=="
+ },
+ "node_modules/@hapi/topo": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz",
+ "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==",
+ "dependencies": {
+ "@hapi/hoek": "^9.0.0"
+ }
+ },
+ "node_modules/@jest/schemas": {
+ "version": "29.4.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.4.3.tgz",
+ "integrity": "sha512-VLYKXQmtmuEz6IxJsrZwzG9NvtkQsWNnWMsKxqWNu3+CnfzJQhp0WDDKWLVV9hLKr0l3SLLFRqcYHjhtyuDVxg==",
+ "dependencies": {
+ "@sinclair/typebox": "^0.25.16"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jest/types": {
+ "version": "29.5.0",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.5.0.tgz",
+ "integrity": "sha512-qbu7kN6czmVRc3xWFQcAN03RAUamgppVUdXrvl1Wr3jlNF93o9mJbGcDWrwGB6ht44u7efB1qCFgVQmca24Uog==",
+ "dependencies": {
+ "@jest/schemas": "^29.4.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/@jridgewell/gen-mapping": {
+ "version": "0.3.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz",
+ "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==",
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/set-array": "^1.2.1",
+ "@jridgewell/sourcemap-codec": "^1.4.10",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/resolve-uri": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz",
+ "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w==",
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/set-array": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz",
+ "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/@jridgewell/source-map": {
+ "version": "0.3.3",
+ "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.3.tgz",
+ "integrity": "sha512-b+fsZXeLYi9fEULmfBrhxn4IrPlINf8fiNarzTof004v3lFdntdwa9PF7vFJqm3mg7s+ScJMxXaE3Acp1irZcg==",
+ "dependencies": {
+ "@jridgewell/gen-mapping": "^0.3.0",
+ "@jridgewell/trace-mapping": "^0.3.9"
+ }
+ },
+ "node_modules/@jridgewell/sourcemap-codec": {
+ "version": "1.4.15",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz",
+ "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg=="
+ },
+ "node_modules/@jridgewell/trace-mapping": {
+ "version": "0.3.25",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz",
+ "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/resolve-uri": "^3.1.0",
+ "@jridgewell/sourcemap-codec": "^1.4.14"
+ }
+ },
+ "node_modules/@leichtgewicht/ip-codec": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz",
+ "integrity": "sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A=="
+ },
+ "node_modules/@mdx-js/mdx": {
+ "version": "1.6.22",
+ "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-1.6.22.tgz",
+ "integrity": "sha512-AMxuLxPz2j5/6TpF/XSdKpQP1NlG0z11dFOlq+2IP/lSgl11GY8ji6S/rgsViN/L0BDvHvUMruRb7ub+24LUYA==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/core": "7.12.9",
+ "@babel/plugin-syntax-jsx": "7.12.1",
+ "@babel/plugin-syntax-object-rest-spread": "7.8.3",
+ "@mdx-js/util": "1.6.22",
+ "babel-plugin-apply-mdx-type-prop": "1.6.22",
+ "babel-plugin-extract-import-names": "1.6.22",
+ "camelcase-css": "2.0.1",
+ "detab": "2.0.4",
+ "hast-util-raw": "6.0.1",
+ "lodash.uniq": "4.5.0",
+ "mdast-util-to-hast": "10.0.1",
+ "remark-footnotes": "2.0.0",
+ "remark-mdx": "1.6.22",
+ "remark-parse": "8.0.3",
+ "remark-squeeze-paragraphs": "4.0.0",
+ "style-to-object": "0.3.0",
+ "unified": "9.2.0",
+ "unist-builder": "2.0.3",
+ "unist-util-visit": "2.0.3"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/@mdx-js/mdx/node_modules/@babel/core": {
+ "version": "7.12.9",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz",
+ "integrity": "sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.10.4",
+ "@babel/generator": "^7.12.5",
+ "@babel/helper-module-transforms": "^7.12.1",
+ "@babel/helpers": "^7.12.5",
+ "@babel/parser": "^7.12.7",
+ "@babel/template": "^7.12.7",
+ "@babel/traverse": "^7.12.9",
+ "@babel/types": "^7.12.7",
+ "convert-source-map": "^1.7.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.1",
+ "json5": "^2.1.2",
+ "lodash": "^4.17.19",
+ "resolve": "^1.3.2",
+ "semver": "^5.4.1",
+ "source-map": "^0.5.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/babel"
+ }
+ },
+ "node_modules/@mdx-js/mdx/node_modules/@babel/plugin-syntax-jsx": {
+ "version": "7.12.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz",
+ "integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@mdx-js/mdx/node_modules/semver": {
+ "version": "5.7.2",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz",
+ "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==",
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver"
+ }
+ },
+ "node_modules/@mdx-js/mdx/node_modules/source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==",
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/@mdx-js/mdx/node_modules/unified": {
+ "version": "9.2.0",
+ "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz",
+ "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==",
+ "license": "MIT",
+ "dependencies": {
+ "bail": "^1.0.0",
+ "extend": "^3.0.0",
+ "is-buffer": "^2.0.0",
+ "is-plain-obj": "^2.0.0",
+ "trough": "^1.0.0",
+ "vfile": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/@mdx-js/util": {
+ "version": "1.6.22",
+ "resolved": "https://registry.npmjs.org/@mdx-js/util/-/util-1.6.22.tgz",
+ "integrity": "sha512-H1rQc1ZOHANWBvPcW+JpGwr+juXSxM8Q8YCkm3GhZd8REu1fHR3z99CErO1p9pkcfcxZnMdIZdIsXkOHY0NilA==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/@nodelib/fs.scandir": {
+ "version": "2.1.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
+ "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
+ "dependencies": {
+ "@nodelib/fs.stat": "2.0.5",
+ "run-parallel": "^1.1.9"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@nodelib/fs.stat": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
+ "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==",
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@nodelib/fs.walk": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
+ "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
+ "dependencies": {
+ "@nodelib/fs.scandir": "2.1.5",
+ "fastq": "^1.6.0"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/@polka/url": {
+ "version": "1.0.0-next.21",
+ "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.21.tgz",
+ "integrity": "sha512-a5Sab1C4/icpTZVzZc5Ghpz88yQtGOyNqYXcZgOssB2uuAr+wF/MvN6bgtW32q7HHrvBki+BsZ0OuNv6EV3K9g=="
+ },
+ "node_modules/@sideway/address": {
+ "version": "4.1.4",
+ "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.4.tgz",
+ "integrity": "sha512-7vwq+rOHVWjyXxVlR76Agnvhy8I9rpzjosTESvmhNeXOXdZZB15Fl+TI9x1SiHZH5Jv2wTGduSxFDIaq0m3DUw==",
+ "dependencies": {
+ "@hapi/hoek": "^9.0.0"
+ }
+ },
+ "node_modules/@sideway/formula": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz",
+ "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg=="
+ },
+ "node_modules/@sideway/pinpoint": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz",
+ "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ=="
+ },
+ "node_modules/@sinclair/typebox": {
+ "version": "0.25.24",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.25.24.tgz",
+ "integrity": "sha512-XJfwUVUKDHF5ugKwIcxEgc9k8b7HbznCp6eUfWgu710hMPNIO4aw4/zB5RogDQz8nd6gyCDpU9O/m6qYEWY6yQ=="
+ },
+ "node_modules/@sindresorhus/is": {
+ "version": "0.14.0",
+ "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz",
+ "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/@slorber/static-site-generator-webpack-plugin": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/@slorber/static-site-generator-webpack-plugin/-/static-site-generator-webpack-plugin-4.0.7.tgz",
+ "integrity": "sha512-Ug7x6z5lwrz0WqdnNFOMYrDQNTPAprvHLSh6+/fmml3qUiz6l5eq+2MzLKWtn/q5K5NpSiFsZTP/fck/3vjSxA==",
+ "dependencies": {
+ "eval": "^0.1.8",
+ "p-map": "^4.0.0",
+ "webpack-sources": "^3.2.2"
+ },
+ "engines": {
+ "node": ">=14"
+ }
+ },
+ "node_modules/@svgr/babel-plugin-add-jsx-attribute": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz",
+ "integrity": "sha512-9PYGcXrAxitycIjRmZB+Q0JaN07GZIWaTBIGQzfaZv+qr1n8X1XUEJ5rZ/vx6OVD9RRYlrNnXWExQXcmZeD/BQ==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/gregberge"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@svgr/babel-plugin-remove-jsx-attribute": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz",
+ "integrity": "sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==",
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/gregberge"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@svgr/babel-plugin-remove-jsx-empty-expression": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz",
+ "integrity": "sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==",
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/gregberge"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@svgr/babel-plugin-replace-jsx-attribute-value": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-6.5.1.tgz",
+ "integrity": "sha512-8DPaVVE3fd5JKuIC29dqyMB54sA6mfgki2H2+swh+zNJoynC8pMPzOkidqHOSc6Wj032fhl8Z0TVn1GiPpAiJg==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/gregberge"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@svgr/babel-plugin-svg-dynamic-title": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-6.5.1.tgz",
+ "integrity": "sha512-FwOEi0Il72iAzlkaHrlemVurgSQRDFbk0OC8dSvD5fSBPHltNh7JtLsxmZUhjYBZo2PpcU/RJvvi6Q0l7O7ogw==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/gregberge"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@svgr/babel-plugin-svg-em-dimensions": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-6.5.1.tgz",
+ "integrity": "sha512-gWGsiwjb4tw+ITOJ86ndY/DZZ6cuXMNE/SjcDRg+HLuCmwpcjOktwRF9WgAiycTqJD/QXqL2f8IzE2Rzh7aVXA==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/gregberge"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@svgr/babel-plugin-transform-react-native-svg": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-6.5.1.tgz",
+ "integrity": "sha512-2jT3nTayyYP7kI6aGutkyfJ7UMGtuguD72OjeGLwVNyfPRBD8zQthlvL+fAbAKk5n9ZNcvFkp/b1lZ7VsYqVJg==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/gregberge"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@svgr/babel-plugin-transform-svg-component": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-6.5.1.tgz",
+ "integrity": "sha512-a1p6LF5Jt33O3rZoVRBqdxL350oge54iZWHNI6LJB5tQ7EelvD/Mb1mfBiZNAan0dt4i3VArkFRjA4iObuNykQ==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/gregberge"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@svgr/babel-preset": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-6.5.1.tgz",
+ "integrity": "sha512-6127fvO/FF2oi5EzSQOAjo1LE3OtNVh11R+/8FXa+mHx1ptAaS4cknIjnUA7e6j6fwGGJ17NzaTJFUwOV2zwCw==",
+ "dependencies": {
+ "@svgr/babel-plugin-add-jsx-attribute": "^6.5.1",
+ "@svgr/babel-plugin-remove-jsx-attribute": "*",
+ "@svgr/babel-plugin-remove-jsx-empty-expression": "*",
+ "@svgr/babel-plugin-replace-jsx-attribute-value": "^6.5.1",
+ "@svgr/babel-plugin-svg-dynamic-title": "^6.5.1",
+ "@svgr/babel-plugin-svg-em-dimensions": "^6.5.1",
+ "@svgr/babel-plugin-transform-react-native-svg": "^6.5.1",
+ "@svgr/babel-plugin-transform-svg-component": "^6.5.1"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/gregberge"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/@svgr/core": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/core/-/core-6.5.1.tgz",
+ "integrity": "sha512-/xdLSWxK5QkqG524ONSjvg3V/FkNyCv538OIBdQqPNaAta3AsXj/Bd2FbvR87yMbXO2hFSWiAe/Q6IkVPDw+mw==",
+ "dependencies": {
+ "@babel/core": "^7.19.6",
+ "@svgr/babel-preset": "^6.5.1",
+ "@svgr/plugin-jsx": "^6.5.1",
+ "camelcase": "^6.2.0",
+ "cosmiconfig": "^7.0.1"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/gregberge"
+ }
+ },
+ "node_modules/@svgr/hast-util-to-babel-ast": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-6.5.1.tgz",
+ "integrity": "sha512-1hnUxxjd83EAxbL4a0JDJoD3Dao3hmjvyvyEV8PzWmLK3B9m9NPlW7GKjFyoWE8nM7HnXzPcmmSyOW8yOddSXw==",
+ "dependencies": {
+ "@babel/types": "^7.20.0",
+ "entities": "^4.4.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/gregberge"
+ }
+ },
+ "node_modules/@svgr/plugin-jsx": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-6.5.1.tgz",
+ "integrity": "sha512-+UdQxI3jgtSjCykNSlEMuy1jSRQlGC7pqBCPvkG/2dATdWo082zHTTK3uhnAju2/6XpE6B5mZ3z4Z8Ns01S8Gw==",
+ "dependencies": {
+ "@babel/core": "^7.19.6",
+ "@svgr/babel-preset": "^6.5.1",
+ "@svgr/hast-util-to-babel-ast": "^6.5.1",
+ "svg-parser": "^2.0.4"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/gregberge"
+ },
+ "peerDependencies": {
+ "@svgr/core": "^6.0.0"
+ }
+ },
+ "node_modules/@svgr/plugin-svgo": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-6.5.1.tgz",
+ "integrity": "sha512-omvZKf8ixP9z6GWgwbtmP9qQMPX4ODXi+wzbVZgomNFsUIlHA1sf4fThdwTWSsZGgvGAG6yE+b/F5gWUkcZ/iQ==",
+ "dependencies": {
+ "cosmiconfig": "^7.0.1",
+ "deepmerge": "^4.2.2",
+ "svgo": "^2.8.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/gregberge"
+ },
+ "peerDependencies": {
+ "@svgr/core": "*"
+ }
+ },
+ "node_modules/@svgr/webpack": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-6.5.1.tgz",
+ "integrity": "sha512-cQ/AsnBkXPkEK8cLbv4Dm7JGXq2XrumKnL1dRpJD9rIO2fTIlJI9a1uCciYG1F2aUsox/hJQyNGbt3soDxSRkA==",
+ "dependencies": {
+ "@babel/core": "^7.19.6",
+ "@babel/plugin-transform-react-constant-elements": "^7.18.12",
+ "@babel/preset-env": "^7.19.4",
+ "@babel/preset-react": "^7.18.6",
+ "@babel/preset-typescript": "^7.18.6",
+ "@svgr/core": "^6.5.1",
+ "@svgr/plugin-jsx": "^6.5.1",
+ "@svgr/plugin-svgo": "^6.5.1"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/gregberge"
+ }
+ },
+ "node_modules/@szmarczak/http-timer": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz",
+ "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==",
+ "dependencies": {
+ "defer-to-connect": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/@trysound/sax": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz",
+ "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA==",
+ "engines": {
+ "node": ">=10.13.0"
+ }
+ },
+ "node_modules/@types/body-parser": {
+ "version": "1.19.2",
+ "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz",
+ "integrity": "sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==",
+ "dependencies": {
+ "@types/connect": "*",
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/bonjour": {
+ "version": "3.5.10",
+ "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.10.tgz",
+ "integrity": "sha512-p7ienRMiS41Nu2/igbJxxLDWrSZ0WxM8UQgCeO9KhoVF7cOVFkrKsiDr1EsJIla8vV3oEEjGcz11jc5yimhzZw==",
+ "dependencies": {
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/connect": {
+ "version": "3.4.35",
+ "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz",
+ "integrity": "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==",
+ "dependencies": {
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/connect-history-api-fallback": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.0.tgz",
+ "integrity": "sha512-4x5FkPpLipqwthjPsF7ZRbOv3uoLUFkTA9G9v583qi4pACvq0uTELrB8OLUzPWUI4IJIyvM85vzkV1nyiI2Lig==",
+ "dependencies": {
+ "@types/express-serve-static-core": "*",
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/estree": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz",
+ "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==",
+ "license": "MIT"
+ },
+ "node_modules/@types/express": {
+ "version": "4.17.17",
+ "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.17.tgz",
+ "integrity": "sha512-Q4FmmuLGBG58btUnfS1c1r/NQdlp3DMfGDGig8WhfpA2YRUtEkxAjkZb0yvplJGYdF1fsQ81iMDcH24sSCNC/Q==",
+ "dependencies": {
+ "@types/body-parser": "*",
+ "@types/express-serve-static-core": "^4.17.33",
+ "@types/qs": "*",
+ "@types/serve-static": "*"
+ }
+ },
+ "node_modules/@types/express-serve-static-core": {
+ "version": "4.17.35",
+ "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.35.tgz",
+ "integrity": "sha512-wALWQwrgiB2AWTT91CB62b6Yt0sNHpznUXeZEcnPU3DRdlDIz74x8Qg1UUYKSVFi+va5vKOLYRBI1bRKiLLKIg==",
+ "dependencies": {
+ "@types/node": "*",
+ "@types/qs": "*",
+ "@types/range-parser": "*",
+ "@types/send": "*"
+ }
+ },
+ "node_modules/@types/hast": {
+ "version": "2.3.10",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.10.tgz",
+ "integrity": "sha512-McWspRw8xx8J9HurkVBfYj0xKoE25tOFlHGdx4MJ5xORQrMGZNqJhVQWaIbm6Oyla5kYOXtDiopzKRJzEOkwJw==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2"
+ }
+ },
+ "node_modules/@types/history": {
+ "version": "4.7.11",
+ "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz",
+ "integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA=="
+ },
+ "node_modules/@types/html-minifier-terser": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz",
+ "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg=="
+ },
+ "node_modules/@types/http-proxy": {
+ "version": "1.17.11",
+ "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.11.tgz",
+ "integrity": "sha512-HC8G7c1WmaF2ekqpnFq626xd3Zz0uvaqFmBJNRZCGEZCXkvSdJoNFn/8Ygbd9fKNQj8UzLdCETaI0UWPAjK7IA==",
+ "dependencies": {
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/istanbul-lib-coverage": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz",
+ "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g=="
+ },
+ "node_modules/@types/istanbul-lib-report": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz",
+ "integrity": "sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg==",
+ "dependencies": {
+ "@types/istanbul-lib-coverage": "*"
+ }
+ },
+ "node_modules/@types/istanbul-reports": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.1.tgz",
+ "integrity": "sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw==",
+ "dependencies": {
+ "@types/istanbul-lib-report": "*"
+ }
+ },
+ "node_modules/@types/json-schema": {
+ "version": "7.0.12",
+ "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.12.tgz",
+ "integrity": "sha512-Hr5Jfhc9eYOQNPYO5WLDq/n4jqijdHNlDXjuAQkkt+mWdQR+XJToOHrsD4cPaMXpn6KO7y2+wM8AZEs8VpBLVA=="
+ },
+ "node_modules/@types/lodash": {
+ "version": "4.14.194",
+ "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.194.tgz",
+ "integrity": "sha512-r22s9tAS7imvBt2lyHC9B8AGwWnXaYb1tY09oyLkXDs4vArpYJzw09nj8MLx5VfciBPGIb+ZwG0ssYnEPJxn/g=="
+ },
+ "node_modules/@types/mdast": {
+ "version": "3.0.15",
+ "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.15.tgz",
+ "integrity": "sha512-LnwD+mUEfxWMa1QpDraczIn6k0Ee3SMicuYSSzS6ZYl2gKS09EClnJYGd8Du6rfc5r/GZEk5o1mRb8TaTj03sQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2"
+ }
+ },
+ "node_modules/@types/mime": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.2.tgz",
+ "integrity": "sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw=="
+ },
+ "node_modules/@types/node": {
+ "version": "20.2.5",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.2.5.tgz",
+ "integrity": "sha512-JJulVEQXmiY9Px5axXHeYGLSjhkZEnD+MDPDGbCbIAbMslkKwmygtZFy1X6s/075Yo94sf8GuSlFfPzysQrWZQ=="
+ },
+ "node_modules/@types/parse-json": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz",
+ "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA=="
+ },
+ "node_modules/@types/parse5": {
+ "version": "5.0.3",
+ "resolved": "https://registry.npmjs.org/@types/parse5/-/parse5-5.0.3.tgz",
+ "integrity": "sha512-kUNnecmtkunAoQ3CnjmMkzNU/gtxG8guhi+Fk2U/kOpIKjIMKnXGp4IJCgQJrXSgMsWYimYG4TGjz/UzbGEBTw==",
+ "license": "MIT"
+ },
+ "node_modules/@types/prop-types": {
+ "version": "15.7.5",
+ "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz",
+ "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w=="
+ },
+ "node_modules/@types/qs": {
+ "version": "6.9.7",
+ "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.7.tgz",
+ "integrity": "sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw=="
+ },
+ "node_modules/@types/range-parser": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz",
+ "integrity": "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw=="
+ },
+ "node_modules/@types/react": {
+ "version": "18.2.9",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.9.tgz",
+ "integrity": "sha512-pL3JAesUkF7PEQGxh5XOwdXGV907te6m1/Qe1ERJLgomojS6Ne790QiA7GUl434JEkFA2aAaB6qJ5z4e1zJn/w==",
+ "dependencies": {
+ "@types/prop-types": "*",
+ "@types/scheduler": "*",
+ "csstype": "^3.0.2"
+ }
+ },
+ "node_modules/@types/retry": {
+ "version": "0.12.0",
+ "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz",
+ "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA=="
+ },
+ "node_modules/@types/scheduler": {
+ "version": "0.16.3",
+ "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.3.tgz",
+ "integrity": "sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ=="
+ },
+ "node_modules/@types/send": {
+ "version": "0.17.1",
+ "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.1.tgz",
+ "integrity": "sha512-Cwo8LE/0rnvX7kIIa3QHCkcuF21c05Ayb0ZfxPiv0W8VRiZiNW/WuRupHKpqqGVGf7SUA44QSOUKaEd9lIrd/Q==",
+ "dependencies": {
+ "@types/mime": "^1",
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/serve-index": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.1.tgz",
+ "integrity": "sha512-d/Hs3nWDxNL2xAczmOVZNj92YZCS6RGxfBPjKzuu/XirCgXdpKEb88dYNbrYGint6IVWLNP+yonwVAuRC0T2Dg==",
+ "dependencies": {
+ "@types/express": "*"
+ }
+ },
+ "node_modules/@types/serve-static": {
+ "version": "1.15.1",
+ "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.1.tgz",
+ "integrity": "sha512-NUo5XNiAdULrJENtJXZZ3fHtfMolzZwczzBbnAeBbqBwG+LaG6YaJtuwzwGSQZ2wsCrxjEhNNjAkKigy3n8teQ==",
+ "dependencies": {
+ "@types/mime": "*",
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/sockjs": {
+ "version": "0.3.33",
+ "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.33.tgz",
+ "integrity": "sha512-f0KEEe05NvUnat+boPTZ0dgaLZ4SfSouXUgv5noUiefG2ajgKjmETo9ZJyuqsl7dfl2aHlLJUiki6B4ZYldiiw==",
+ "dependencies": {
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/unist": {
+ "version": "2.0.11",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz",
+ "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==",
+ "license": "MIT"
+ },
+ "node_modules/@types/ws": {
+ "version": "8.5.5",
+ "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.5.tgz",
+ "integrity": "sha512-lwhs8hktwxSjf9UaZ9tG5M03PGogvFaH8gUgLNbN9HKIg0dvv6q+gkSuJ8HN4/VbyxkuLzCjlN7GquQ0gUJfIg==",
+ "dependencies": {
+ "@types/node": "*"
+ }
+ },
+ "node_modules/@types/yargs": {
+ "version": "17.0.24",
+ "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.24.tgz",
+ "integrity": "sha512-6i0aC7jV6QzQB8ne1joVZ0eSFIstHsCrobmOtghM11yGlH0j43FKL2UhWdELkyps0zuf7qVTUVCCR+tgSlyLLw==",
+ "dependencies": {
+ "@types/yargs-parser": "*"
+ }
+ },
+ "node_modules/@types/yargs-parser": {
+ "version": "21.0.0",
+ "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.0.tgz",
+ "integrity": "sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA=="
+ },
+ "node_modules/@webassemblyjs/ast": {
+ "version": "1.12.1",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.12.1.tgz",
+ "integrity": "sha512-EKfMUOPRRUTy5UII4qJDGPpqfwjOmZ5jeGFwid9mnoqIFK+e0vqoi1qH56JpmZSzEL53jKnNzScdmftJyG5xWg==",
+ "license": "MIT",
+ "dependencies": {
+ "@webassemblyjs/helper-numbers": "1.11.6",
+ "@webassemblyjs/helper-wasm-bytecode": "1.11.6"
+ }
+ },
+ "node_modules/@webassemblyjs/floating-point-hex-parser": {
+ "version": "1.11.6",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz",
+ "integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw==",
+ "license": "MIT"
+ },
+ "node_modules/@webassemblyjs/helper-api-error": {
+ "version": "1.11.6",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz",
+ "integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q==",
+ "license": "MIT"
+ },
+ "node_modules/@webassemblyjs/helper-buffer": {
+ "version": "1.12.1",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.12.1.tgz",
+ "integrity": "sha512-nzJwQw99DNDKr9BVCOZcLuJJUlqkJh+kVzVl6Fmq/tI5ZtEyWT1KZMyOXltXLZJmDtvLCDgwsyrkohEtopTXCw==",
+ "license": "MIT"
+ },
+ "node_modules/@webassemblyjs/helper-numbers": {
+ "version": "1.11.6",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz",
+ "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==",
+ "license": "MIT",
+ "dependencies": {
+ "@webassemblyjs/floating-point-hex-parser": "1.11.6",
+ "@webassemblyjs/helper-api-error": "1.11.6",
+ "@xtuc/long": "4.2.2"
+ }
+ },
+ "node_modules/@webassemblyjs/helper-wasm-bytecode": {
+ "version": "1.11.6",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz",
+ "integrity": "sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA==",
+ "license": "MIT"
+ },
+ "node_modules/@webassemblyjs/helper-wasm-section": {
+ "version": "1.12.1",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.12.1.tgz",
+ "integrity": "sha512-Jif4vfB6FJlUlSbgEMHUyk1j234GTNG9dBJ4XJdOySoj518Xj0oGsNi59cUQF4RRMS9ouBUxDDdyBVfPTypa5g==",
+ "license": "MIT",
+ "dependencies": {
+ "@webassemblyjs/ast": "1.12.1",
+ "@webassemblyjs/helper-buffer": "1.12.1",
+ "@webassemblyjs/helper-wasm-bytecode": "1.11.6",
+ "@webassemblyjs/wasm-gen": "1.12.1"
+ }
+ },
+ "node_modules/@webassemblyjs/ieee754": {
+ "version": "1.11.6",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz",
+ "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==",
+ "license": "MIT",
+ "dependencies": {
+ "@xtuc/ieee754": "^1.2.0"
+ }
+ },
+ "node_modules/@webassemblyjs/leb128": {
+ "version": "1.11.6",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz",
+ "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==",
+ "license": "Apache-2.0",
+ "dependencies": {
+ "@xtuc/long": "4.2.2"
+ }
+ },
+ "node_modules/@webassemblyjs/utf8": {
+ "version": "1.11.6",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz",
+ "integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA==",
+ "license": "MIT"
+ },
+ "node_modules/@webassemblyjs/wasm-edit": {
+ "version": "1.12.1",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.12.1.tgz",
+ "integrity": "sha512-1DuwbVvADvS5mGnXbE+c9NfA8QRcZ6iKquqjjmR10k6o+zzsRVesil54DKexiowcFCPdr/Q0qaMgB01+SQ1u6g==",
+ "license": "MIT",
+ "dependencies": {
+ "@webassemblyjs/ast": "1.12.1",
+ "@webassemblyjs/helper-buffer": "1.12.1",
+ "@webassemblyjs/helper-wasm-bytecode": "1.11.6",
+ "@webassemblyjs/helper-wasm-section": "1.12.1",
+ "@webassemblyjs/wasm-gen": "1.12.1",
+ "@webassemblyjs/wasm-opt": "1.12.1",
+ "@webassemblyjs/wasm-parser": "1.12.1",
+ "@webassemblyjs/wast-printer": "1.12.1"
+ }
+ },
+ "node_modules/@webassemblyjs/wasm-gen": {
+ "version": "1.12.1",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.12.1.tgz",
+ "integrity": "sha512-TDq4Ojh9fcohAw6OIMXqiIcTq5KUXTGRkVxbSo1hQnSy6lAM5GSdfwWeSxpAo0YzgsgF182E/U0mDNhuA0tW7w==",
+ "license": "MIT",
+ "dependencies": {
+ "@webassemblyjs/ast": "1.12.1",
+ "@webassemblyjs/helper-wasm-bytecode": "1.11.6",
+ "@webassemblyjs/ieee754": "1.11.6",
+ "@webassemblyjs/leb128": "1.11.6",
+ "@webassemblyjs/utf8": "1.11.6"
+ }
+ },
+ "node_modules/@webassemblyjs/wasm-opt": {
+ "version": "1.12.1",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.12.1.tgz",
+ "integrity": "sha512-Jg99j/2gG2iaz3hijw857AVYekZe2SAskcqlWIZXjji5WStnOpVoat3gQfT/Q5tb2djnCjBtMocY/Su1GfxPBg==",
+ "license": "MIT",
+ "dependencies": {
+ "@webassemblyjs/ast": "1.12.1",
+ "@webassemblyjs/helper-buffer": "1.12.1",
+ "@webassemblyjs/wasm-gen": "1.12.1",
+ "@webassemblyjs/wasm-parser": "1.12.1"
+ }
+ },
+ "node_modules/@webassemblyjs/wasm-parser": {
+ "version": "1.12.1",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.12.1.tgz",
+ "integrity": "sha512-xikIi7c2FHXysxXe3COrVUPSheuBtpcfhbpFj4gmu7KRLYOzANztwUU0IbsqvMqzuNK2+glRGWCEqZo1WCLyAQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@webassemblyjs/ast": "1.12.1",
+ "@webassemblyjs/helper-api-error": "1.11.6",
+ "@webassemblyjs/helper-wasm-bytecode": "1.11.6",
+ "@webassemblyjs/ieee754": "1.11.6",
+ "@webassemblyjs/leb128": "1.11.6",
+ "@webassemblyjs/utf8": "1.11.6"
+ }
+ },
+ "node_modules/@webassemblyjs/wast-printer": {
+ "version": "1.12.1",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.12.1.tgz",
+ "integrity": "sha512-+X4WAlOisVWQMikjbcvY2e0rwPsKQ9F688lksZhBcPycBBuii3O7m8FACbDMWDojpAqvjIncrG8J0XHKyQfVeA==",
+ "license": "MIT",
+ "dependencies": {
+ "@webassemblyjs/ast": "1.12.1",
+ "@xtuc/long": "4.2.2"
+ }
+ },
+ "node_modules/@xtuc/ieee754": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz",
+ "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==",
+ "license": "BSD-3-Clause"
+ },
+ "node_modules/@xtuc/long": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz",
+ "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==",
+ "license": "Apache-2.0"
+ },
+ "node_modules/accepts": {
+ "version": "1.3.8",
+ "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
+ "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
+ "dependencies": {
+ "mime-types": "~2.1.34",
+ "negotiator": "0.6.3"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/accepts/node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/accepts/node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/acorn": {
+ "version": "8.8.2",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz",
+ "integrity": "sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==",
+ "bin": {
+ "acorn": "bin/acorn"
+ },
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/acorn-import-attributes": {
+ "version": "1.9.5",
+ "resolved": "https://registry.npmjs.org/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz",
+ "integrity": "sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==",
+ "license": "MIT",
+ "peerDependencies": {
+ "acorn": "^8"
+ }
+ },
+ "node_modules/acorn-walk": {
+ "version": "8.2.0",
+ "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz",
+ "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==",
+ "engines": {
+ "node": ">=0.4.0"
+ }
+ },
+ "node_modules/address": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/address/-/address-1.2.2.tgz",
+ "integrity": "sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==",
+ "engines": {
+ "node": ">= 10.0.0"
+ }
+ },
+ "node_modules/aggregate-error": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz",
+ "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==",
+ "dependencies": {
+ "clean-stack": "^2.0.0",
+ "indent-string": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/ajv": {
+ "version": "6.12.6",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+ "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.1",
+ "fast-json-stable-stringify": "^2.0.0",
+ "json-schema-traverse": "^0.4.1",
+ "uri-js": "^4.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
+ "node_modules/ajv-formats": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz",
+ "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==",
+ "dependencies": {
+ "ajv": "^8.0.0"
+ },
+ "peerDependencies": {
+ "ajv": "^8.0.0"
+ },
+ "peerDependenciesMeta": {
+ "ajv": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/ajv-formats/node_modules/ajv": {
+ "version": "8.12.0",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
+ "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.1",
+ "json-schema-traverse": "^1.0.0",
+ "require-from-string": "^2.0.2",
+ "uri-js": "^4.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
+ "node_modules/ajv-formats/node_modules/json-schema-traverse": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
+ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
+ },
+ "node_modules/ajv-keywords": {
+ "version": "3.5.2",
+ "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz",
+ "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==",
+ "peerDependencies": {
+ "ajv": "^6.9.1"
+ }
+ },
+ "node_modules/ansi-align": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz",
+ "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==",
+ "dependencies": {
+ "string-width": "^4.1.0"
+ }
+ },
+ "node_modules/ansi-align/node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
+ },
+ "node_modules/ansi-align/node_modules/string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/ansi-html-community": {
+ "version": "0.0.8",
+ "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz",
+ "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw==",
+ "engines": [
+ "node >= 0.8.0"
+ ],
+ "bin": {
+ "ansi-html": "bin/ansi-html"
+ }
+ },
+ "node_modules/ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "dependencies": {
+ "color-convert": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/anymatch": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
+ "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
+ "dependencies": {
+ "normalize-path": "^3.0.0",
+ "picomatch": "^2.0.4"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/argparse": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+ "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="
+ },
+ "node_modules/array-flatten": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz",
+ "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ=="
+ },
+ "node_modules/array-union": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz",
+ "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/at-least-node": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz",
+ "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==",
+ "engines": {
+ "node": ">= 4.0.0"
+ }
+ },
+ "node_modules/autoprefixer": {
+ "version": "10.4.20",
+ "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz",
+ "integrity": "sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==",
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/autoprefixer"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "browserslist": "^4.23.3",
+ "caniuse-lite": "^1.0.30001646",
+ "fraction.js": "^4.3.7",
+ "normalize-range": "^0.1.2",
+ "picocolors": "^1.0.1",
+ "postcss-value-parser": "^4.2.0"
+ },
+ "bin": {
+ "autoprefixer": "bin/autoprefixer"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ },
+ "peerDependencies": {
+ "postcss": "^8.1.0"
+ }
+ },
+ "node_modules/axios": {
+ "version": "0.25.0",
+ "resolved": "https://registry.npmjs.org/axios/-/axios-0.25.0.tgz",
+ "integrity": "sha512-cD8FOb0tRH3uuEe6+evtAbgJtfxr7ly3fQjYcMcuPlgkwVS9xboaVIpcDV+cYQe+yGykgwZCs1pzjntcGa6l5g==",
+ "dependencies": {
+ "follow-redirects": "^1.14.7"
+ }
+ },
+ "node_modules/babel-loader": {
+ "version": "8.3.0",
+ "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.3.0.tgz",
+ "integrity": "sha512-H8SvsMF+m9t15HNLMipppzkC+Y2Yq+v3SonZyU70RBL/h1gxPkH08Ot8pEE9Z4Kd+czyWJClmFS8qzIP9OZ04Q==",
+ "dependencies": {
+ "find-cache-dir": "^3.3.1",
+ "loader-utils": "^2.0.0",
+ "make-dir": "^3.1.0",
+ "schema-utils": "^2.6.5"
+ },
+ "engines": {
+ "node": ">= 8.9"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0",
+ "webpack": ">=2"
+ }
+ },
+ "node_modules/babel-plugin-apply-mdx-type-prop": {
+ "version": "1.6.22",
+ "resolved": "https://registry.npmjs.org/babel-plugin-apply-mdx-type-prop/-/babel-plugin-apply-mdx-type-prop-1.6.22.tgz",
+ "integrity": "sha512-VefL+8o+F/DfK24lPZMtJctrCVOfgbqLAGZSkxwhazQv4VxPg3Za/i40fu22KR2m8eEda+IfSOlPLUSIiLcnCQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "7.10.4",
+ "@mdx-js/util": "1.6.22"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.11.6"
+ }
+ },
+ "node_modules/babel-plugin-apply-mdx-type-prop/node_modules/@babel/helper-plugin-utils": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz",
+ "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==",
+ "license": "MIT"
+ },
+ "node_modules/babel-plugin-dynamic-import-node": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz",
+ "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==",
+ "dependencies": {
+ "object.assign": "^4.1.0"
+ }
+ },
+ "node_modules/babel-plugin-extract-import-names": {
+ "version": "1.6.22",
+ "resolved": "https://registry.npmjs.org/babel-plugin-extract-import-names/-/babel-plugin-extract-import-names-1.6.22.tgz",
+ "integrity": "sha512-yJ9BsJaISua7d8zNT7oRG1ZLBJCIdZ4PZqmH8qa9N5AK01ifk3fnkc98AXhtzE7UkfCsEumvoQWgoYLhOnJ7jQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "7.10.4"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/babel-plugin-extract-import-names/node_modules/@babel/helper-plugin-utils": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz",
+ "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==",
+ "license": "MIT"
+ },
+ "node_modules/babel-plugin-polyfill-corejs2": {
+ "version": "0.4.3",
+ "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.3.tgz",
+ "integrity": "sha512-bM3gHc337Dta490gg+/AseNB9L4YLHxq1nGKZZSHbhXv4aTYU2MD2cjza1Ru4S6975YLTaL1K8uJf6ukJhhmtw==",
+ "dependencies": {
+ "@babel/compat-data": "^7.17.7",
+ "@babel/helper-define-polyfill-provider": "^0.4.0",
+ "semver": "^6.1.1"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/babel-plugin-polyfill-corejs3": {
+ "version": "0.8.1",
+ "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.8.1.tgz",
+ "integrity": "sha512-ikFrZITKg1xH6pLND8zT14UPgjKHiGLqex7rGEZCH2EvhsneJaJPemmpQaIZV5AL03II+lXylw3UmddDK8RU5Q==",
+ "dependencies": {
+ "@babel/helper-define-polyfill-provider": "^0.4.0",
+ "core-js-compat": "^3.30.1"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/babel-plugin-polyfill-regenerator": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.5.0.tgz",
+ "integrity": "sha512-hDJtKjMLVa7Z+LwnTCxoDLQj6wdc+B8dun7ayF2fYieI6OzfuvcLMB32ihJZ4UhCBwNYGl5bg/x/P9cMdnkc2g==",
+ "dependencies": {
+ "@babel/helper-define-polyfill-provider": "^0.4.0"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/bail": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/bail/-/bail-1.0.5.tgz",
+ "integrity": "sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/balanced-match": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
+ },
+ "node_modules/batch": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz",
+ "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw=="
+ },
+ "node_modules/big.js": {
+ "version": "5.2.2",
+ "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz",
+ "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/binary-extensions": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz",
+ "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/body-parser": {
+ "version": "1.20.3",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz",
+ "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==",
+ "license": "MIT",
+ "dependencies": {
+ "bytes": "3.1.2",
+ "content-type": "~1.0.5",
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "http-errors": "2.0.0",
+ "iconv-lite": "0.4.24",
+ "on-finished": "2.4.1",
+ "qs": "6.13.0",
+ "raw-body": "2.5.2",
+ "type-is": "~1.6.18",
+ "unpipe": "1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8",
+ "npm": "1.2.8000 || >= 1.4.16"
+ }
+ },
+ "node_modules/body-parser/node_modules/bytes": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
+ "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/body-parser/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/body-parser/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
+ "license": "MIT"
+ },
+ "node_modules/body-parser/node_modules/qs": {
+ "version": "6.13.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz",
+ "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "side-channel": "^1.0.6"
+ },
+ "engines": {
+ "node": ">=0.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/bonjour-service": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.1.1.tgz",
+ "integrity": "sha512-Z/5lQRMOG9k7W+FkeGTNjh7htqn/2LMnfOvBZ8pynNZCM9MwkQkI3zeI4oz09uWdcgmgHugVvBqxGg4VQJ5PCg==",
+ "dependencies": {
+ "array-flatten": "^2.1.2",
+ "dns-equal": "^1.0.0",
+ "fast-deep-equal": "^3.1.3",
+ "multicast-dns": "^7.2.5"
+ }
+ },
+ "node_modules/boolbase": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz",
+ "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww=="
+ },
+ "node_modules/boxen": {
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/boxen/-/boxen-6.2.1.tgz",
+ "integrity": "sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw==",
+ "dependencies": {
+ "ansi-align": "^3.0.1",
+ "camelcase": "^6.2.0",
+ "chalk": "^4.1.2",
+ "cli-boxes": "^3.0.0",
+ "string-width": "^5.0.1",
+ "type-fest": "^2.5.0",
+ "widest-line": "^4.0.1",
+ "wrap-ansi": "^8.0.1"
+ },
+ "engines": {
+ "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/brace-expansion": {
+ "version": "1.1.11",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+ "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "dependencies": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "node_modules/braces": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
+ "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
+ "license": "MIT",
+ "dependencies": {
+ "fill-range": "^7.1.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/browserslist": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.3.tgz",
+ "integrity": "sha512-btwCFJVjI4YWDNfau8RhZ+B1Q/VLoUITrm3RlP6y1tYGWIOa+InuYiRGXUBXo8nA1qKmHMyLB/iVQg5TT4eFoA==",
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "caniuse-lite": "^1.0.30001646",
+ "electron-to-chromium": "^1.5.4",
+ "node-releases": "^2.0.18",
+ "update-browserslist-db": "^1.1.0"
+ },
+ "bin": {
+ "browserslist": "cli.js"
+ },
+ "engines": {
+ "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7"
+ }
+ },
+ "node_modules/buffer-from": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz",
+ "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ=="
+ },
+ "node_modules/bytes": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
+ "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/cacheable-request": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz",
+ "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==",
+ "dependencies": {
+ "clone-response": "^1.0.2",
+ "get-stream": "^5.1.0",
+ "http-cache-semantics": "^4.0.0",
+ "keyv": "^3.0.0",
+ "lowercase-keys": "^2.0.0",
+ "normalize-url": "^4.1.0",
+ "responselike": "^1.0.2"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/cacheable-request/node_modules/get-stream": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz",
+ "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==",
+ "dependencies": {
+ "pump": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/cacheable-request/node_modules/lowercase-keys": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz",
+ "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/cacheable-request/node_modules/normalize-url": {
+ "version": "4.5.1",
+ "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.1.tgz",
+ "integrity": "sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/call-bind": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz",
+ "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==",
+ "license": "MIT",
+ "dependencies": {
+ "es-define-property": "^1.0.0",
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "get-intrinsic": "^1.2.4",
+ "set-function-length": "^1.2.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/call-me-maybe": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.2.tgz",
+ "integrity": "sha512-HpX65o1Hnr9HH25ojC1YGs7HCQLq0GCOibSaWER0eNpgJ/Z1MZv2mTc7+xh6WOPxbRVcmgbv4hGU+uSQ/2xFZQ=="
+ },
+ "node_modules/callsites": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
+ "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/camel-case": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz",
+ "integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==",
+ "dependencies": {
+ "pascal-case": "^3.1.2",
+ "tslib": "^2.0.3"
+ }
+ },
+ "node_modules/camelcase": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz",
+ "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/camelcase-css": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz",
+ "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/caniuse-api": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz",
+ "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==",
+ "dependencies": {
+ "browserslist": "^4.0.0",
+ "caniuse-lite": "^1.0.0",
+ "lodash.memoize": "^4.1.2",
+ "lodash.uniq": "^4.5.0"
+ }
+ },
+ "node_modules/caniuse-lite": {
+ "version": "1.0.30001660",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001660.tgz",
+ "integrity": "sha512-GacvNTTuATm26qC74pt+ad1fW15mlQ/zuTzzY1ZoIzECTP8HURDfF43kNxPgf7H1jmelCBQTTbBNxdSXOA7Bqg==",
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/caniuse-lite"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "CC-BY-4.0"
+ },
+ "node_modules/ccount": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/ccount/-/ccount-1.1.0.tgz",
+ "integrity": "sha512-vlNK021QdI7PNeiUh/lKkC/mNHHfV0m/Ad5JoI0TYtlBnJAslM/JIkm/tGC88bkLIwO6OQ5uV6ztS6kVAtCDlg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "dependencies": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/chalk?sponsor=1"
+ }
+ },
+ "node_modules/character-entities": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz",
+ "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-entities-legacy": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz",
+ "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/character-reference-invalid": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz",
+ "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/chokidar": {
+ "version": "3.5.3",
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz",
+ "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==",
+ "funding": [
+ {
+ "type": "individual",
+ "url": "https://paulmillr.com/funding/"
+ }
+ ],
+ "dependencies": {
+ "anymatch": "~3.1.2",
+ "braces": "~3.0.2",
+ "glob-parent": "~5.1.2",
+ "is-binary-path": "~2.1.0",
+ "is-glob": "~4.0.1",
+ "normalize-path": "~3.0.0",
+ "readdirp": "~3.6.0"
+ },
+ "engines": {
+ "node": ">= 8.10.0"
+ },
+ "optionalDependencies": {
+ "fsevents": "~2.3.2"
+ }
+ },
+ "node_modules/chrome-trace-event": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz",
+ "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg==",
+ "engines": {
+ "node": ">=6.0"
+ }
+ },
+ "node_modules/ci-info": {
+ "version": "3.8.0",
+ "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz",
+ "integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/sibiraj-s"
+ }
+ ],
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/clean-css": {
+ "version": "5.3.2",
+ "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-5.3.2.tgz",
+ "integrity": "sha512-JVJbM+f3d3Q704rF4bqQ5UUyTtuJ0JRKNbTKVEeujCCBoMdkEi+V+e8oktO9qGQNSvHrFTM6JZRXrUvGR1czww==",
+ "dependencies": {
+ "source-map": "~0.6.0"
+ },
+ "engines": {
+ "node": ">= 10.0"
+ }
+ },
+ "node_modules/clean-stack": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz",
+ "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/cli-boxes": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz",
+ "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/cli-table3": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.3.tgz",
+ "integrity": "sha512-w5Jac5SykAeZJKntOxJCrm63Eg5/4dhMWIcuTbo9rpE+brgaSZo0RuNJZeOyMgsUdhDeojvgyQLmjI+K50ZGyg==",
+ "dependencies": {
+ "string-width": "^4.2.0"
+ },
+ "engines": {
+ "node": "10.* || >= 12.*"
+ },
+ "optionalDependencies": {
+ "@colors/colors": "1.5.0"
+ }
+ },
+ "node_modules/cli-table3/node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
+ },
+ "node_modules/cli-table3/node_modules/string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/clone-deep": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz",
+ "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==",
+ "dependencies": {
+ "is-plain-object": "^2.0.4",
+ "kind-of": "^6.0.2",
+ "shallow-clone": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/clone-response": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.3.tgz",
+ "integrity": "sha512-ROoL94jJH2dUVML2Y/5PEDNaSHgeOdSDicUyS7izcF63G6sTc/FTjLub4b8Il9S8S0beOfYt0TaA5qvFK+w0wA==",
+ "dependencies": {
+ "mimic-response": "^1.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/collapse-white-space": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-1.0.6.tgz",
+ "integrity": "sha512-jEovNnrhMuqyCcjfEJA56v0Xq8SkIoPKDyaHahwo3POf4qcSXqMYuwNcOTzp74vTsR9Tn08z4MxWqAhcekogkQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "dependencies": {
+ "color-name": "~1.1.4"
+ },
+ "engines": {
+ "node": ">=7.0.0"
+ }
+ },
+ "node_modules/color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
+ },
+ "node_modules/colord": {
+ "version": "2.9.3",
+ "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz",
+ "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw=="
+ },
+ "node_modules/colorette": {
+ "version": "2.0.20",
+ "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz",
+ "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w=="
+ },
+ "node_modules/combine-promises": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/combine-promises/-/combine-promises-1.1.0.tgz",
+ "integrity": "sha512-ZI9jvcLDxqwaXEixOhArm3r7ReIivsXkpbyEWyeOhzz1QS0iSgBPnWvEqvIQtYyamGCYA88gFhmUrs9hrrQ0pg==",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/comma-separated-tokens": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz",
+ "integrity": "sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/commander": {
+ "version": "9.5.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-9.5.0.tgz",
+ "integrity": "sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ==",
+ "engines": {
+ "node": "^12.20.0 || >=14"
+ }
+ },
+ "node_modules/commondir": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz",
+ "integrity": "sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg=="
+ },
+ "node_modules/compressible": {
+ "version": "2.0.18",
+ "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz",
+ "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==",
+ "dependencies": {
+ "mime-db": ">= 1.43.0 < 2"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/compressible/node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/compression": {
+ "version": "1.7.4",
+ "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz",
+ "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==",
+ "dependencies": {
+ "accepts": "~1.3.5",
+ "bytes": "3.0.0",
+ "compressible": "~2.0.16",
+ "debug": "2.6.9",
+ "on-headers": "~1.0.2",
+ "safe-buffer": "5.1.2",
+ "vary": "~1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/compression/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/compression/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+ },
+ "node_modules/compression/node_modules/safe-buffer": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
+ },
+ "node_modules/concat-map": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+ "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg=="
+ },
+ "node_modules/configstore": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz",
+ "integrity": "sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==",
+ "dependencies": {
+ "dot-prop": "^5.2.0",
+ "graceful-fs": "^4.1.2",
+ "make-dir": "^3.0.0",
+ "unique-string": "^2.0.0",
+ "write-file-atomic": "^3.0.0",
+ "xdg-basedir": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/connect-history-api-fallback": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz",
+ "integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA==",
+ "engines": {
+ "node": ">=0.8"
+ }
+ },
+ "node_modules/consola": {
+ "version": "2.15.3",
+ "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz",
+ "integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw=="
+ },
+ "node_modules/content-disposition": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz",
+ "integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/content-type": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
+ "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/convert-source-map": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz",
+ "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A=="
+ },
+ "node_modules/cookie": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz",
+ "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/cookie-signature": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
+ "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ=="
+ },
+ "node_modules/copy-webpack-plugin": {
+ "version": "11.0.0",
+ "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz",
+ "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==",
+ "dependencies": {
+ "fast-glob": "^3.2.11",
+ "glob-parent": "^6.0.1",
+ "globby": "^13.1.1",
+ "normalize-path": "^3.0.0",
+ "schema-utils": "^4.0.0",
+ "serialize-javascript": "^6.0.0"
+ },
+ "engines": {
+ "node": ">= 14.15.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ },
+ "peerDependencies": {
+ "webpack": "^5.1.0"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/ajv": {
+ "version": "8.12.0",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
+ "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.1",
+ "json-schema-traverse": "^1.0.0",
+ "require-from-string": "^2.0.2",
+ "uri-js": "^4.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/ajv-keywords": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz",
+ "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.3"
+ },
+ "peerDependencies": {
+ "ajv": "^8.8.2"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/glob-parent": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
+ "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==",
+ "dependencies": {
+ "is-glob": "^4.0.3"
+ },
+ "engines": {
+ "node": ">=10.13.0"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/globby": {
+ "version": "13.1.4",
+ "resolved": "https://registry.npmjs.org/globby/-/globby-13.1.4.tgz",
+ "integrity": "sha512-iui/IiiW+QrJ1X1hKH5qwlMQyv34wJAYwH1vrf8b9kBA4sNiif3gKsMHa+BrdnOpEudWjpotfa7LrTzB1ERS/g==",
+ "dependencies": {
+ "dir-glob": "^3.0.1",
+ "fast-glob": "^3.2.11",
+ "ignore": "^5.2.0",
+ "merge2": "^1.4.1",
+ "slash": "^4.0.0"
+ },
+ "engines": {
+ "node": "^12.20.0 || ^14.13.1 || >=16.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/json-schema-traverse": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
+ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
+ },
+ "node_modules/copy-webpack-plugin/node_modules/schema-utils": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz",
+ "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==",
+ "dependencies": {
+ "@types/json-schema": "^7.0.9",
+ "ajv": "^8.9.0",
+ "ajv-formats": "^2.1.1",
+ "ajv-keywords": "^5.1.0"
+ },
+ "engines": {
+ "node": ">= 12.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ }
+ },
+ "node_modules/copy-webpack-plugin/node_modules/slash": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz",
+ "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/core-js": {
+ "version": "3.30.2",
+ "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.30.2.tgz",
+ "integrity": "sha512-uBJiDmwqsbJCWHAwjrx3cvjbMXP7xD72Dmsn5LOJpiRmE3WbBbN5rCqQ2Qh6Ek6/eOrjlWngEynBWo4VxerQhg==",
+ "hasInstallScript": true,
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/core-js"
+ }
+ },
+ "node_modules/core-js-compat": {
+ "version": "3.30.2",
+ "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.30.2.tgz",
+ "integrity": "sha512-nriW1nuJjUgvkEjIot1Spwakz52V9YkYHZAQG6A1eCgC8AA1p0zngrQEP9R0+V6hji5XilWKG1Bd0YRppmGimA==",
+ "dependencies": {
+ "browserslist": "^4.21.5"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/core-js"
+ }
+ },
+ "node_modules/core-js-pure": {
+ "version": "3.30.2",
+ "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.30.2.tgz",
+ "integrity": "sha512-p/npFUJXXBkCCTIlEGBdghofn00jWG6ZOtdoIXSJmAu2QBvN0IqpZXWweOytcwE6cfx8ZvVUy1vw8zxhe4Y2vg==",
+ "hasInstallScript": true,
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/core-js"
+ }
+ },
+ "node_modules/core-util-is": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz",
+ "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ=="
+ },
+ "node_modules/cosmiconfig": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz",
+ "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==",
+ "dependencies": {
+ "@types/parse-json": "^4.0.0",
+ "import-fresh": "^3.2.1",
+ "parse-json": "^5.0.0",
+ "path-type": "^4.0.0",
+ "yaml": "^1.10.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/cross-spawn": {
+ "version": "7.0.3",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
+ "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
+ "dependencies": {
+ "path-key": "^3.1.0",
+ "shebang-command": "^2.0.0",
+ "which": "^2.0.1"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/crypto-random-string": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz",
+ "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/css-declaration-sorter": {
+ "version": "6.4.0",
+ "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-6.4.0.tgz",
+ "integrity": "sha512-jDfsatwWMWN0MODAFuHszfjphEXfNw9JUAhmY4pLu3TyTU+ohUpsbVtbU+1MZn4a47D9kqh03i4eyOm+74+zew==",
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ },
+ "peerDependencies": {
+ "postcss": "^8.0.9"
+ }
+ },
+ "node_modules/css-loader": {
+ "version": "6.8.1",
+ "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.8.1.tgz",
+ "integrity": "sha512-xDAXtEVGlD0gJ07iclwWVkLoZOpEvAWaSyf6W18S2pOC//K8+qUDIx8IIT3D+HjnmkJPQeesOPv5aiUaJsCM2g==",
+ "dependencies": {
+ "icss-utils": "^5.1.0",
+ "postcss": "^8.4.21",
+ "postcss-modules-extract-imports": "^3.0.0",
+ "postcss-modules-local-by-default": "^4.0.3",
+ "postcss-modules-scope": "^3.0.0",
+ "postcss-modules-values": "^4.0.0",
+ "postcss-value-parser": "^4.2.0",
+ "semver": "^7.3.8"
+ },
+ "engines": {
+ "node": ">= 12.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ },
+ "peerDependencies": {
+ "webpack": "^5.0.0"
+ }
+ },
+ "node_modules/css-minimizer-webpack-plugin": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-4.2.2.tgz",
+ "integrity": "sha512-s3Of/4jKfw1Hj9CxEO1E5oXhQAxlayuHO2y/ML+C6I9sQ7FdzfEV6QgMLN3vI+qFsjJGIAFLKtQK7t8BOXAIyA==",
+ "dependencies": {
+ "cssnano": "^5.1.8",
+ "jest-worker": "^29.1.2",
+ "postcss": "^8.4.17",
+ "schema-utils": "^4.0.0",
+ "serialize-javascript": "^6.0.0",
+ "source-map": "^0.6.1"
+ },
+ "engines": {
+ "node": ">= 14.15.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ },
+ "peerDependencies": {
+ "webpack": "^5.0.0"
+ },
+ "peerDependenciesMeta": {
+ "@parcel/css": {
+ "optional": true
+ },
+ "@swc/css": {
+ "optional": true
+ },
+ "clean-css": {
+ "optional": true
+ },
+ "csso": {
+ "optional": true
+ },
+ "esbuild": {
+ "optional": true
+ },
+ "lightningcss": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/css-minimizer-webpack-plugin/node_modules/ajv": {
+ "version": "8.12.0",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
+ "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.1",
+ "json-schema-traverse": "^1.0.0",
+ "require-from-string": "^2.0.2",
+ "uri-js": "^4.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
+ "node_modules/css-minimizer-webpack-plugin/node_modules/ajv-keywords": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz",
+ "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.3"
+ },
+ "peerDependencies": {
+ "ajv": "^8.8.2"
+ }
+ },
+ "node_modules/css-minimizer-webpack-plugin/node_modules/json-schema-traverse": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
+ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
+ },
+ "node_modules/css-minimizer-webpack-plugin/node_modules/schema-utils": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz",
+ "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==",
+ "dependencies": {
+ "@types/json-schema": "^7.0.9",
+ "ajv": "^8.9.0",
+ "ajv-formats": "^2.1.1",
+ "ajv-keywords": "^5.1.0"
+ },
+ "engines": {
+ "node": ">= 12.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ }
+ },
+ "node_modules/css-select": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz",
+ "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==",
+ "dependencies": {
+ "boolbase": "^1.0.0",
+ "css-what": "^6.0.1",
+ "domhandler": "^4.3.1",
+ "domutils": "^2.8.0",
+ "nth-check": "^2.0.1"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/fb55"
+ }
+ },
+ "node_modules/css-tree": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz",
+ "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==",
+ "dependencies": {
+ "mdn-data": "2.0.14",
+ "source-map": "^0.6.1"
+ },
+ "engines": {
+ "node": ">=8.0.0"
+ }
+ },
+ "node_modules/css-what": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz",
+ "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==",
+ "engines": {
+ "node": ">= 6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/fb55"
+ }
+ },
+ "node_modules/cssesc": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz",
+ "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==",
+ "bin": {
+ "cssesc": "bin/cssesc"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/cssnano": {
+ "version": "5.1.15",
+ "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-5.1.15.tgz",
+ "integrity": "sha512-j+BKgDcLDQA+eDifLx0EO4XSA56b7uut3BQFH+wbSaSTuGLuiyTa/wbRYthUXX8LC9mLg+WWKe8h+qJuwTAbHw==",
+ "dependencies": {
+ "cssnano-preset-default": "^5.2.14",
+ "lilconfig": "^2.0.3",
+ "yaml": "^1.10.2"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/cssnano"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/cssnano-preset-advanced": {
+ "version": "5.3.10",
+ "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-5.3.10.tgz",
+ "integrity": "sha512-fnYJyCS9jgMU+cmHO1rPSPf9axbQyD7iUhLO5Df6O4G+fKIOMps+ZbU0PdGFejFBBZ3Pftf18fn1eG7MAPUSWQ==",
+ "license": "MIT",
+ "dependencies": {
+ "autoprefixer": "^10.4.12",
+ "cssnano-preset-default": "^5.2.14",
+ "postcss-discard-unused": "^5.1.0",
+ "postcss-merge-idents": "^5.1.1",
+ "postcss-reduce-idents": "^5.2.0",
+ "postcss-zindex": "^5.1.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/cssnano-preset-default": {
+ "version": "5.2.14",
+ "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-5.2.14.tgz",
+ "integrity": "sha512-t0SFesj/ZV2OTylqQVOrFgEh5uanxbO6ZAdeCrNsUQ6fVuXwYTxJPNAGvGTxHbD68ldIJNec7PyYZDBrfDQ+6A==",
+ "dependencies": {
+ "css-declaration-sorter": "^6.3.1",
+ "cssnano-utils": "^3.1.0",
+ "postcss-calc": "^8.2.3",
+ "postcss-colormin": "^5.3.1",
+ "postcss-convert-values": "^5.1.3",
+ "postcss-discard-comments": "^5.1.2",
+ "postcss-discard-duplicates": "^5.1.0",
+ "postcss-discard-empty": "^5.1.1",
+ "postcss-discard-overridden": "^5.1.0",
+ "postcss-merge-longhand": "^5.1.7",
+ "postcss-merge-rules": "^5.1.4",
+ "postcss-minify-font-values": "^5.1.0",
+ "postcss-minify-gradients": "^5.1.1",
+ "postcss-minify-params": "^5.1.4",
+ "postcss-minify-selectors": "^5.2.1",
+ "postcss-normalize-charset": "^5.1.0",
+ "postcss-normalize-display-values": "^5.1.0",
+ "postcss-normalize-positions": "^5.1.1",
+ "postcss-normalize-repeat-style": "^5.1.1",
+ "postcss-normalize-string": "^5.1.0",
+ "postcss-normalize-timing-functions": "^5.1.0",
+ "postcss-normalize-unicode": "^5.1.1",
+ "postcss-normalize-url": "^5.1.0",
+ "postcss-normalize-whitespace": "^5.1.1",
+ "postcss-ordered-values": "^5.1.3",
+ "postcss-reduce-initial": "^5.1.2",
+ "postcss-reduce-transforms": "^5.1.0",
+ "postcss-svgo": "^5.1.0",
+ "postcss-unique-selectors": "^5.1.1"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/cssnano-utils": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-3.1.0.tgz",
+ "integrity": "sha512-JQNR19/YZhz4psLX/rQ9M83e3z2Wf/HdJbryzte4a3NSuafyp9w/I4U+hx5C2S9g41qlstH7DEWnZaaj83OuEA==",
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/csso": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz",
+ "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==",
+ "dependencies": {
+ "css-tree": "^1.1.2"
+ },
+ "engines": {
+ "node": ">=8.0.0"
+ }
+ },
+ "node_modules/csstype": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz",
+ "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ=="
+ },
+ "node_modules/debug": {
+ "version": "4.3.4",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
+ "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
+ "dependencies": {
+ "ms": "2.1.2"
+ },
+ "engines": {
+ "node": ">=6.0"
+ },
+ "peerDependenciesMeta": {
+ "supports-color": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/decompress-response": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz",
+ "integrity": "sha512-BzRPQuY1ip+qDonAOz42gRm/pg9F768C+npV/4JOsxRC2sq+Rlk+Q4ZCAsOhnIaMrgarILY+RMUIvMmmX1qAEA==",
+ "dependencies": {
+ "mimic-response": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/deep-extend": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
+ "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==",
+ "engines": {
+ "node": ">=4.0.0"
+ }
+ },
+ "node_modules/deepmerge": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz",
+ "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/default-gateway": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz",
+ "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==",
+ "dependencies": {
+ "execa": "^5.0.0"
+ },
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/defer-to-connect": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz",
+ "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ=="
+ },
+ "node_modules/define-data-property": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz",
+ "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==",
+ "license": "MIT",
+ "dependencies": {
+ "es-define-property": "^1.0.0",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/define-lazy-prop": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz",
+ "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/define-properties": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.0.tgz",
+ "integrity": "sha512-xvqAVKGfT1+UAvPwKTVw/njhdQ8ZhXK4lI0bCIuCMrp2up9nPnaDftrLtmpTazqd1o+UY4zgzU+avtMbDP+ldA==",
+ "dependencies": {
+ "has-property-descriptors": "^1.0.0",
+ "object-keys": "^1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/del": {
+ "version": "6.1.1",
+ "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz",
+ "integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==",
+ "dependencies": {
+ "globby": "^11.0.1",
+ "graceful-fs": "^4.2.4",
+ "is-glob": "^4.0.1",
+ "is-path-cwd": "^2.2.0",
+ "is-path-inside": "^3.0.2",
+ "p-map": "^4.0.0",
+ "rimraf": "^3.0.2",
+ "slash": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/destroy": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz",
+ "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8",
+ "npm": "1.2.8000 || >= 1.4.16"
+ }
+ },
+ "node_modules/detab": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/detab/-/detab-2.0.4.tgz",
+ "integrity": "sha512-8zdsQA5bIkoRECvCrNKPla84lyoR7DSAyf7p0YgXzBO9PDJx8KntPUay7NS6yp+KdxdVtiE5SpHKtbp2ZQyA9g==",
+ "license": "MIT",
+ "dependencies": {
+ "repeat-string": "^1.5.4"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/detect-node": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz",
+ "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g=="
+ },
+ "node_modules/detect-port": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.5.1.tgz",
+ "integrity": "sha512-aBzdj76lueB6uUst5iAs7+0H/oOjqI5D16XUWxlWMIMROhcM0rfsNVk93zTngq1dDNpoXRr++Sus7ETAExppAQ==",
+ "dependencies": {
+ "address": "^1.0.1",
+ "debug": "4"
+ },
+ "bin": {
+ "detect": "bin/detect-port.js",
+ "detect-port": "bin/detect-port.js"
+ }
+ },
+ "node_modules/detect-port-alt": {
+ "version": "1.1.6",
+ "resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz",
+ "integrity": "sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==",
+ "dependencies": {
+ "address": "^1.0.1",
+ "debug": "^2.6.0"
+ },
+ "bin": {
+ "detect": "bin/detect-port",
+ "detect-port": "bin/detect-port"
+ },
+ "engines": {
+ "node": ">= 4.2.1"
+ }
+ },
+ "node_modules/detect-port-alt/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/detect-port-alt/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+ },
+ "node_modules/dir-glob": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz",
+ "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==",
+ "dependencies": {
+ "path-type": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/dns-equal": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz",
+ "integrity": "sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg=="
+ },
+ "node_modules/dns-packet": {
+ "version": "5.6.0",
+ "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.0.tgz",
+ "integrity": "sha512-rza3UH1LwdHh9qyPXp8lkwpjSNk/AMD3dPytUoRoqnypDUhY0xvbdmVhWOfxO68frEfV9BU8V12Ez7ZsHGZpCQ==",
+ "dependencies": {
+ "@leichtgewicht/ip-codec": "^2.0.1"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/docusaurus-plugin-includes": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/docusaurus-plugin-includes/-/docusaurus-plugin-includes-1.1.5.tgz",
+ "integrity": "sha512-CqucZ7S820ayVP2s2Yq61lHezx0zMzt+qIPOmzmDv1k6Fru03+yU66mieSBvwN/B0w/g5M9xvjwDfLNU+eNtiQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@docusaurus/core": "^2.3.0",
+ "@docusaurus/types": "^2.3.0",
+ "@docusaurus/utils": "^2.3.0",
+ "fs-extra": "^10.0.0",
+ "path": "^0.12.7"
+ },
+ "engines": {
+ "node": ">=12.13.0"
+ },
+ "peerDependencies": {
+ "react": "^16.8.4 || ^17.0.0",
+ "react-dom": "^16.8.4 || ^17.0.0"
+ }
+ },
+ "node_modules/docusaurus-plugin-includes/node_modules/fs-extra": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
+ "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
+ "dependencies": {
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/docusaurus-plugin-includes/node_modules/jsonfile": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
+ "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
+ "dependencies": {
+ "universalify": "^2.0.0"
+ },
+ "optionalDependencies": {
+ "graceful-fs": "^4.1.6"
+ }
+ },
+ "node_modules/docusaurus-plugin-includes/node_modules/universalify": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz",
+ "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==",
+ "engines": {
+ "node": ">= 10.0.0"
+ }
+ },
+ "node_modules/dom-converter": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz",
+ "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==",
+ "dependencies": {
+ "utila": "~0.4"
+ }
+ },
+ "node_modules/dom-serializer": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz",
+ "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==",
+ "dependencies": {
+ "domelementtype": "^2.0.1",
+ "domhandler": "^4.2.0",
+ "entities": "^2.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1"
+ }
+ },
+ "node_modules/dom-serializer/node_modules/entities": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz",
+ "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==",
+ "funding": {
+ "url": "https://github.com/fb55/entities?sponsor=1"
+ }
+ },
+ "node_modules/domelementtype": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz",
+ "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/fb55"
+ }
+ ]
+ },
+ "node_modules/domhandler": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz",
+ "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==",
+ "dependencies": {
+ "domelementtype": "^2.2.0"
+ },
+ "engines": {
+ "node": ">= 4"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/domhandler?sponsor=1"
+ }
+ },
+ "node_modules/domutils": {
+ "version": "2.8.0",
+ "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz",
+ "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==",
+ "dependencies": {
+ "dom-serializer": "^1.0.1",
+ "domelementtype": "^2.2.0",
+ "domhandler": "^4.2.0"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/domutils?sponsor=1"
+ }
+ },
+ "node_modules/dot-case": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz",
+ "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==",
+ "dependencies": {
+ "no-case": "^3.0.4",
+ "tslib": "^2.0.3"
+ }
+ },
+ "node_modules/dot-prop": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz",
+ "integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==",
+ "dependencies": {
+ "is-obj": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/dot-prop/node_modules/is-obj": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz",
+ "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/duplexer": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz",
+ "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg=="
+ },
+ "node_modules/duplexer3": {
+ "version": "0.1.5",
+ "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.5.tgz",
+ "integrity": "sha512-1A8za6ws41LQgv9HrE/66jyC5yuSjQ3L/KOpFtoBilsAK2iA2wuS5rTt1OCzIvtS2V7nVmedsUU+DGRcjBmOYA=="
+ },
+ "node_modules/eastasianwidth": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
+ "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="
+ },
+ "node_modules/ee-first": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+ "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow=="
+ },
+ "node_modules/electron-to-chromium": {
+ "version": "1.5.19",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.19.tgz",
+ "integrity": "sha512-kpLJJi3zxTR1U828P+LIUDZ5ohixyo68/IcYOHLqnbTPr/wdgn4i1ECvmALN9E16JPA6cvCG5UG79gVwVdEK5w==",
+ "license": "ISC"
+ },
+ "node_modules/emoji-regex": {
+ "version": "9.2.2",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz",
+ "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="
+ },
+ "node_modules/emojis-list": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz",
+ "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==",
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/emoticon": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-3.2.0.tgz",
+ "integrity": "sha512-SNujglcLTTg+lDAcApPNgEdudaqQFiAbJCqzjNxJkvN9vAwCGi0uu8IUVvx+f16h+V44KCY6Y2yboroc9pilHg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/encodeurl": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
+ "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/end-of-stream": {
+ "version": "1.4.4",
+ "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
+ "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
+ "dependencies": {
+ "once": "^1.4.0"
+ }
+ },
+ "node_modules/enhanced-resolve": {
+ "version": "5.17.1",
+ "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz",
+ "integrity": "sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg==",
+ "license": "MIT",
+ "dependencies": {
+ "graceful-fs": "^4.2.4",
+ "tapable": "^2.2.0"
+ },
+ "engines": {
+ "node": ">=10.13.0"
+ }
+ },
+ "node_modules/entities": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz",
+ "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==",
+ "engines": {
+ "node": ">=0.12"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/entities?sponsor=1"
+ }
+ },
+ "node_modules/error-ex": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz",
+ "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==",
+ "dependencies": {
+ "is-arrayish": "^0.2.1"
+ }
+ },
+ "node_modules/es-define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz",
+ "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==",
+ "license": "MIT",
+ "dependencies": {
+ "get-intrinsic": "^1.2.4"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/es-module-lexer": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.2.1.tgz",
+ "integrity": "sha512-9978wrXM50Y4rTMmW5kXIC09ZdXQZqkE4mxhwkd8VbzsGkXGPgV4zWuqQJgCEzYngdo2dYDa0l8xhX4fkSwJSg=="
+ },
+ "node_modules/es6-promise": {
+ "version": "4.2.8",
+ "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz",
+ "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w=="
+ },
+ "node_modules/escalade": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
+ "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/escape-goat": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz",
+ "integrity": "sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/escape-html": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+ "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="
+ },
+ "node_modules/escape-string-regexp": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
+ "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/eslint-scope": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz",
+ "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==",
+ "dependencies": {
+ "esrecurse": "^4.3.0",
+ "estraverse": "^4.1.1"
+ },
+ "engines": {
+ "node": ">=8.0.0"
+ }
+ },
+ "node_modules/esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==",
+ "bin": {
+ "esparse": "bin/esparse.js",
+ "esvalidate": "bin/esvalidate.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/esrecurse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
+ "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
+ "dependencies": {
+ "estraverse": "^5.2.0"
+ },
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/esrecurse/node_modules/estraverse": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
+ "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==",
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/estraverse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz",
+ "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==",
+ "engines": {
+ "node": ">=4.0"
+ }
+ },
+ "node_modules/esutils": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
+ "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/eta": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/eta/-/eta-2.2.0.tgz",
+ "integrity": "sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g==",
+ "engines": {
+ "node": ">=6.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/eta-dev/eta?sponsor=1"
+ }
+ },
+ "node_modules/etag": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
+ "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/eval": {
+ "version": "0.1.8",
+ "resolved": "https://registry.npmjs.org/eval/-/eval-0.1.8.tgz",
+ "integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==",
+ "dependencies": {
+ "@types/node": "*",
+ "require-like": ">= 0.1.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/eventemitter3": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz",
+ "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw=="
+ },
+ "node_modules/events": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz",
+ "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==",
+ "engines": {
+ "node": ">=0.8.x"
+ }
+ },
+ "node_modules/execa": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz",
+ "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==",
+ "dependencies": {
+ "cross-spawn": "^7.0.3",
+ "get-stream": "^6.0.0",
+ "human-signals": "^2.1.0",
+ "is-stream": "^2.0.0",
+ "merge-stream": "^2.0.0",
+ "npm-run-path": "^4.0.1",
+ "onetime": "^5.1.2",
+ "signal-exit": "^3.0.3",
+ "strip-final-newline": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sindresorhus/execa?sponsor=1"
+ }
+ },
+ "node_modules/execa/node_modules/get-stream": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz",
+ "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/express": {
+ "version": "4.20.0",
+ "resolved": "https://registry.npmjs.org/express/-/express-4.20.0.tgz",
+ "integrity": "sha512-pLdae7I6QqShF5PnNTCVn4hI91Dx0Grkn2+IAsMTgMIKuQVte2dN9PeGSSAME2FR8anOhVA62QDIUaWVfEXVLw==",
+ "license": "MIT",
+ "dependencies": {
+ "accepts": "~1.3.8",
+ "array-flatten": "1.1.1",
+ "body-parser": "1.20.3",
+ "content-disposition": "0.5.4",
+ "content-type": "~1.0.4",
+ "cookie": "0.6.0",
+ "cookie-signature": "1.0.6",
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "encodeurl": "~2.0.0",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "finalhandler": "1.2.0",
+ "fresh": "0.5.2",
+ "http-errors": "2.0.0",
+ "merge-descriptors": "1.0.3",
+ "methods": "~1.1.2",
+ "on-finished": "2.4.1",
+ "parseurl": "~1.3.3",
+ "path-to-regexp": "0.1.10",
+ "proxy-addr": "~2.0.7",
+ "qs": "6.11.0",
+ "range-parser": "~1.2.1",
+ "safe-buffer": "5.2.1",
+ "send": "0.19.0",
+ "serve-static": "1.16.0",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "type-is": "~1.6.18",
+ "utils-merge": "1.0.1",
+ "vary": "~1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.10.0"
+ }
+ },
+ "node_modules/express/node_modules/array-flatten": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
+ "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg=="
+ },
+ "node_modules/express/node_modules/content-disposition": {
+ "version": "0.5.4",
+ "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
+ "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
+ "dependencies": {
+ "safe-buffer": "5.2.1"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/express/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/express/node_modules/encodeurl": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz",
+ "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/express/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+ },
+ "node_modules/express/node_modules/path-to-regexp": {
+ "version": "0.1.10",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz",
+ "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w==",
+ "license": "MIT"
+ },
+ "node_modules/express/node_modules/range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/extend": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+ "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==",
+ "license": "MIT"
+ },
+ "node_modules/extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==",
+ "dependencies": {
+ "is-extendable": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/fast-deep-equal": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="
+ },
+ "node_modules/fast-glob": {
+ "version": "3.2.12",
+ "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz",
+ "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==",
+ "dependencies": {
+ "@nodelib/fs.stat": "^2.0.2",
+ "@nodelib/fs.walk": "^1.2.3",
+ "glob-parent": "^5.1.2",
+ "merge2": "^1.3.0",
+ "micromatch": "^4.0.4"
+ },
+ "engines": {
+ "node": ">=8.6.0"
+ }
+ },
+ "node_modules/fast-json-stable-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw=="
+ },
+ "node_modules/fast-url-parser": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/fast-url-parser/-/fast-url-parser-1.1.3.tgz",
+ "integrity": "sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ==",
+ "dependencies": {
+ "punycode": "^1.3.2"
+ }
+ },
+ "node_modules/fastq": {
+ "version": "1.15.0",
+ "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz",
+ "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==",
+ "dependencies": {
+ "reusify": "^1.0.4"
+ }
+ },
+ "node_modules/faye-websocket": {
+ "version": "0.11.4",
+ "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz",
+ "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==",
+ "dependencies": {
+ "websocket-driver": ">=0.5.1"
+ },
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/file-loader": {
+ "version": "6.2.0",
+ "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz",
+ "integrity": "sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==",
+ "dependencies": {
+ "loader-utils": "^2.0.0",
+ "schema-utils": "^3.0.0"
+ },
+ "engines": {
+ "node": ">= 10.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ },
+ "peerDependencies": {
+ "webpack": "^4.0.0 || ^5.0.0"
+ }
+ },
+ "node_modules/file-loader/node_modules/schema-utils": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.2.0.tgz",
+ "integrity": "sha512-0zTyLGyDJYd/MBxG1AhJkKa6fpEBds4OQO2ut0w7OYG+ZGhGea09lijvzsqegYSik88zc7cUtIlnnO+/BvD6gQ==",
+ "dependencies": {
+ "@types/json-schema": "^7.0.8",
+ "ajv": "^6.12.5",
+ "ajv-keywords": "^3.5.2"
+ },
+ "engines": {
+ "node": ">= 10.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ }
+ },
+ "node_modules/filesize": {
+ "version": "8.0.7",
+ "resolved": "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz",
+ "integrity": "sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ==",
+ "engines": {
+ "node": ">= 0.4.0"
+ }
+ },
+ "node_modules/fill-range": {
+ "version": "7.1.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
+ "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
+ "license": "MIT",
+ "dependencies": {
+ "to-regex-range": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/finalhandler": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz",
+ "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==",
+ "dependencies": {
+ "debug": "2.6.9",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "on-finished": "2.4.1",
+ "parseurl": "~1.3.3",
+ "statuses": "2.0.1",
+ "unpipe": "~1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/finalhandler/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/finalhandler/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+ },
+ "node_modules/find-cache-dir": {
+ "version": "3.3.2",
+ "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.2.tgz",
+ "integrity": "sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==",
+ "dependencies": {
+ "commondir": "^1.0.1",
+ "make-dir": "^3.0.2",
+ "pkg-dir": "^4.1.0"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/avajs/find-cache-dir?sponsor=1"
+ }
+ },
+ "node_modules/find-up": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
+ "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
+ "dependencies": {
+ "locate-path": "^5.0.0",
+ "path-exists": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/follow-redirects": {
+ "version": "1.15.9",
+ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz",
+ "integrity": "sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==",
+ "funding": [
+ {
+ "type": "individual",
+ "url": "https://github.com/sponsors/RubenVerborgh"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=4.0"
+ },
+ "peerDependenciesMeta": {
+ "debug": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/fork-ts-checker-webpack-plugin": {
+ "version": "6.5.3",
+ "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz",
+ "integrity": "sha512-SbH/l9ikmMWycd5puHJKTkZJKddF4iRLyW3DeZ08HTI7NGyLS38MXd/KGgeWumQO7YNQbW2u/NtPT2YowbPaGQ==",
+ "dependencies": {
+ "@babel/code-frame": "^7.8.3",
+ "@types/json-schema": "^7.0.5",
+ "chalk": "^4.1.0",
+ "chokidar": "^3.4.2",
+ "cosmiconfig": "^6.0.0",
+ "deepmerge": "^4.2.2",
+ "fs-extra": "^9.0.0",
+ "glob": "^7.1.6",
+ "memfs": "^3.1.2",
+ "minimatch": "^3.0.4",
+ "schema-utils": "2.7.0",
+ "semver": "^7.3.2",
+ "tapable": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=10",
+ "yarn": ">=1.0.0"
+ },
+ "peerDependencies": {
+ "eslint": ">= 6",
+ "typescript": ">= 2.7",
+ "vue-template-compiler": "*",
+ "webpack": ">= 4"
+ },
+ "peerDependenciesMeta": {
+ "eslint": {
+ "optional": true
+ },
+ "vue-template-compiler": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/fork-ts-checker-webpack-plugin/node_modules/cosmiconfig": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-6.0.0.tgz",
+ "integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==",
+ "dependencies": {
+ "@types/parse-json": "^4.0.0",
+ "import-fresh": "^3.1.0",
+ "parse-json": "^5.0.0",
+ "path-type": "^4.0.0",
+ "yaml": "^1.7.2"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/fork-ts-checker-webpack-plugin/node_modules/fs-extra": {
+ "version": "9.1.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz",
+ "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==",
+ "dependencies": {
+ "at-least-node": "^1.0.0",
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/fork-ts-checker-webpack-plugin/node_modules/jsonfile": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
+ "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
+ "dependencies": {
+ "universalify": "^2.0.0"
+ },
+ "optionalDependencies": {
+ "graceful-fs": "^4.1.6"
+ }
+ },
+ "node_modules/fork-ts-checker-webpack-plugin/node_modules/schema-utils": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz",
+ "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==",
+ "dependencies": {
+ "@types/json-schema": "^7.0.4",
+ "ajv": "^6.12.2",
+ "ajv-keywords": "^3.4.1"
+ },
+ "engines": {
+ "node": ">= 8.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ }
+ },
+ "node_modules/fork-ts-checker-webpack-plugin/node_modules/tapable": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz",
+ "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/fork-ts-checker-webpack-plugin/node_modules/universalify": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz",
+ "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==",
+ "engines": {
+ "node": ">= 10.0.0"
+ }
+ },
+ "node_modules/forwarded": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
+ "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/fraction.js": {
+ "version": "4.3.7",
+ "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz",
+ "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==",
+ "license": "MIT",
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "type": "patreon",
+ "url": "https://github.com/sponsors/rawify"
+ }
+ },
+ "node_modules/fresh": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
+ "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/fs-extra": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz",
+ "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==",
+ "dependencies": {
+ "graceful-fs": "^4.1.2",
+ "jsonfile": "^4.0.0",
+ "universalify": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=6 <7 || >=8"
+ }
+ },
+ "node_modules/fs-monkey": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.4.tgz",
+ "integrity": "sha512-INM/fWAxMICjttnD0DX1rBvinKskj5G1w+oy/pnm9u/tSlnBrzFonJMcalKJ30P8RRsPzKcCG7Q8l0jx5Fh9YQ=="
+ },
+ "node_modules/fs.realpath": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+ "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw=="
+ },
+ "node_modules/fsevents": {
+ "version": "2.3.2",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
+ "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
+ "hasInstallScript": true,
+ "optional": true,
+ "os": [
+ "darwin"
+ ],
+ "engines": {
+ "node": "^8.16.0 || ^10.6.0 || >=11.0.0"
+ }
+ },
+ "node_modules/function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/gensync": {
+ "version": "1.0.0-beta.2",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==",
+ "engines": {
+ "node": ">=6.9.0"
+ }
+ },
+ "node_modules/get-intrinsic": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
+ "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
+ "license": "MIT",
+ "dependencies": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "has-proto": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.0"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/get-own-enumerable-property-symbols": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz",
+ "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g==",
+ "license": "ISC"
+ },
+ "node_modules/get-stream": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz",
+ "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==",
+ "dependencies": {
+ "pump": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/github-slugger": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz",
+ "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw=="
+ },
+ "node_modules/glob": {
+ "version": "7.2.3",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
+ "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+ "dependencies": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.1.1",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ },
+ "engines": {
+ "node": "*"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/glob-parent": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+ "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+ "dependencies": {
+ "is-glob": "^4.0.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/glob-to-regexp": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz",
+ "integrity": "sha512-Iozmtbqv0noj0uDDqoL0zNq0VBEfK2YFoMAZoxJe4cwphvLR+JskfF30QhXHOR4m3KrE6NLRYw+U9MRXvifyig=="
+ },
+ "node_modules/global-dirs": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz",
+ "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==",
+ "dependencies": {
+ "ini": "2.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/global-dirs/node_modules/ini": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz",
+ "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA==",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/global-modules": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz",
+ "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==",
+ "dependencies": {
+ "global-prefix": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/global-prefix": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz",
+ "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==",
+ "dependencies": {
+ "ini": "^1.3.5",
+ "kind-of": "^6.0.2",
+ "which": "^1.3.1"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/global-prefix/node_modules/which": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz",
+ "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==",
+ "dependencies": {
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "which": "bin/which"
+ }
+ },
+ "node_modules/globals": {
+ "version": "11.12.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
+ "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/globby": {
+ "version": "11.1.0",
+ "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz",
+ "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==",
+ "dependencies": {
+ "array-union": "^2.1.0",
+ "dir-glob": "^3.0.1",
+ "fast-glob": "^3.2.9",
+ "ignore": "^5.2.0",
+ "merge2": "^1.4.1",
+ "slash": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/gopd": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz",
+ "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==",
+ "license": "MIT",
+ "dependencies": {
+ "get-intrinsic": "^1.1.3"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/got": {
+ "version": "9.6.0",
+ "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz",
+ "integrity": "sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==",
+ "dependencies": {
+ "@sindresorhus/is": "^0.14.0",
+ "@szmarczak/http-timer": "^1.1.2",
+ "cacheable-request": "^6.0.0",
+ "decompress-response": "^3.3.0",
+ "duplexer3": "^0.1.4",
+ "get-stream": "^4.1.0",
+ "lowercase-keys": "^1.0.1",
+ "mimic-response": "^1.0.1",
+ "p-cancelable": "^1.0.0",
+ "to-readable-stream": "^1.0.0",
+ "url-parse-lax": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8.6"
+ }
+ },
+ "node_modules/graceful-fs": {
+ "version": "4.2.11",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
+ "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="
+ },
+ "node_modules/gray-matter": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz",
+ "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==",
+ "dependencies": {
+ "js-yaml": "^3.13.1",
+ "kind-of": "^6.0.2",
+ "section-matter": "^1.0.0",
+ "strip-bom-string": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=6.0"
+ }
+ },
+ "node_modules/gray-matter/node_modules/argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "dependencies": {
+ "sprintf-js": "~1.0.2"
+ }
+ },
+ "node_modules/gray-matter/node_modules/js-yaml": {
+ "version": "3.14.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
+ "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
+ "dependencies": {
+ "argparse": "^1.0.7",
+ "esprima": "^4.0.0"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
+ "node_modules/gzip-size": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz",
+ "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==",
+ "dependencies": {
+ "duplexer": "^0.1.2"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/handle-thing": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz",
+ "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg=="
+ },
+ "node_modules/handlebars": {
+ "version": "4.7.7",
+ "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.7.tgz",
+ "integrity": "sha512-aAcXm5OAfE/8IXkcZvCepKU3VzW1/39Fb5ZuqMtgI/hT8X2YgoMvBY5dLhq/cpOvw7Lk1nK/UF71aLG/ZnVYRA==",
+ "dependencies": {
+ "minimist": "^1.2.5",
+ "neo-async": "^2.6.0",
+ "source-map": "^0.6.1",
+ "wordwrap": "^1.0.0"
+ },
+ "bin": {
+ "handlebars": "bin/handlebars"
+ },
+ "engines": {
+ "node": ">=0.4.7"
+ },
+ "optionalDependencies": {
+ "uglify-js": "^3.1.4"
+ }
+ },
+ "node_modules/has": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
+ "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
+ "dependencies": {
+ "function-bind": "^1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.4.0"
+ }
+ },
+ "node_modules/has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/has-property-descriptors": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz",
+ "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==",
+ "license": "MIT",
+ "dependencies": {
+ "es-define-property": "^1.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz",
+ "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-symbols": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz",
+ "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/has-yarn": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-2.1.0.tgz",
+ "integrity": "sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "function-bind": "^1.1.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/hast-to-hyperscript": {
+ "version": "9.0.1",
+ "resolved": "https://registry.npmjs.org/hast-to-hyperscript/-/hast-to-hyperscript-9.0.1.tgz",
+ "integrity": "sha512-zQgLKqF+O2F72S1aa4y2ivxzSlko3MAvxkwG8ehGmNiqd98BIN3JM1rAJPmplEyLmGLO2QZYJtIneOSZ2YbJuA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.3",
+ "comma-separated-tokens": "^1.0.0",
+ "property-information": "^5.3.0",
+ "space-separated-tokens": "^1.0.0",
+ "style-to-object": "^0.3.0",
+ "unist-util-is": "^4.0.0",
+ "web-namespaces": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-from-parse5": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-6.0.1.tgz",
+ "integrity": "sha512-jeJUWiN5pSxW12Rh01smtVkZgZr33wBokLzKLwinYOUfSzm1Nl/c3GUGebDyOKjdsRgMvoVbV0VpAcpjF4NrJA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/parse5": "^5.0.0",
+ "hastscript": "^6.0.0",
+ "property-information": "^5.0.0",
+ "vfile": "^4.0.0",
+ "vfile-location": "^3.2.0",
+ "web-namespaces": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-parse-selector": {
+ "version": "2.2.5",
+ "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-2.2.5.tgz",
+ "integrity": "sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-raw": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-6.0.1.tgz",
+ "integrity": "sha512-ZMuiYA+UF7BXBtsTBNcLBF5HzXzkyE6MLzJnL605LKE8GJylNjGc4jjxazAHUtcwT5/CEt6afRKViYB4X66dig==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^2.0.0",
+ "hast-util-from-parse5": "^6.0.0",
+ "hast-util-to-parse5": "^6.0.0",
+ "html-void-elements": "^1.0.0",
+ "parse5": "^6.0.0",
+ "unist-util-position": "^3.0.0",
+ "vfile": "^4.0.0",
+ "web-namespaces": "^1.0.0",
+ "xtend": "^4.0.0",
+ "zwitch": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hast-util-to-parse5": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-6.0.0.tgz",
+ "integrity": "sha512-Lu5m6Lgm/fWuz8eWnrKezHtVY83JeRGaNQ2kn9aJgqaxvVkFCZQBEhgodZUDUvoodgyROHDb3r5IxAEdl6suJQ==",
+ "license": "MIT",
+ "dependencies": {
+ "hast-to-hyperscript": "^9.0.0",
+ "property-information": "^5.0.0",
+ "web-namespaces": "^1.0.0",
+ "xtend": "^4.0.0",
+ "zwitch": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/hastscript": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-6.0.0.tgz",
+ "integrity": "sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/hast": "^2.0.0",
+ "comma-separated-tokens": "^1.0.0",
+ "hast-util-parse-selector": "^2.0.0",
+ "property-information": "^5.0.0",
+ "space-separated-tokens": "^1.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/he": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz",
+ "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==",
+ "bin": {
+ "he": "bin/he"
+ }
+ },
+ "node_modules/history": {
+ "version": "4.10.1",
+ "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz",
+ "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==",
+ "dependencies": {
+ "@babel/runtime": "^7.1.2",
+ "loose-envify": "^1.2.0",
+ "resolve-pathname": "^3.0.0",
+ "tiny-invariant": "^1.0.2",
+ "tiny-warning": "^1.0.0",
+ "value-equal": "^1.0.1"
+ }
+ },
+ "node_modules/hoist-non-react-statics": {
+ "version": "3.3.2",
+ "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz",
+ "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==",
+ "dependencies": {
+ "react-is": "^16.7.0"
+ }
+ },
+ "node_modules/hpack.js": {
+ "version": "2.1.6",
+ "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz",
+ "integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==",
+ "dependencies": {
+ "inherits": "^2.0.1",
+ "obuf": "^1.0.0",
+ "readable-stream": "^2.0.1",
+ "wbuf": "^1.1.0"
+ }
+ },
+ "node_modules/hpack.js/node_modules/isarray": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+ "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ=="
+ },
+ "node_modules/hpack.js/node_modules/readable-stream": {
+ "version": "2.3.8",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz",
+ "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==",
+ "dependencies": {
+ "core-util-is": "~1.0.0",
+ "inherits": "~2.0.3",
+ "isarray": "~1.0.0",
+ "process-nextick-args": "~2.0.0",
+ "safe-buffer": "~5.1.1",
+ "string_decoder": "~1.1.1",
+ "util-deprecate": "~1.0.1"
+ }
+ },
+ "node_modules/hpack.js/node_modules/safe-buffer": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
+ },
+ "node_modules/hpack.js/node_modules/string_decoder": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+ "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
+ "dependencies": {
+ "safe-buffer": "~5.1.0"
+ }
+ },
+ "node_modules/html-entities": {
+ "version": "2.3.5",
+ "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.3.5.tgz",
+ "integrity": "sha512-72TJlcMkYsEJASa/3HnX7VT59htM7iSHbH59NSZbtc+22Ap0Txnlx91sfeB+/A7wNZg7UxtZdhAW4y+/jimrdg=="
+ },
+ "node_modules/html-minifier-terser": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz",
+ "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==",
+ "dependencies": {
+ "camel-case": "^4.1.2",
+ "clean-css": "^5.2.2",
+ "commander": "^8.3.0",
+ "he": "^1.2.0",
+ "param-case": "^3.0.4",
+ "relateurl": "^0.2.7",
+ "terser": "^5.10.0"
+ },
+ "bin": {
+ "html-minifier-terser": "cli.js"
+ },
+ "engines": {
+ "node": ">=12"
+ }
+ },
+ "node_modules/html-minifier-terser/node_modules/commander": {
+ "version": "8.3.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz",
+ "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==",
+ "engines": {
+ "node": ">= 12"
+ }
+ },
+ "node_modules/html-tags": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz",
+ "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ==",
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/html-void-elements": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-1.0.5.tgz",
+ "integrity": "sha512-uE/TxKuyNIcx44cIWnjr/rfIATDH7ZaOMmstu0CwhFG1Dunhlp4OC6/NMbhiwoq5BpW0ubi303qnEk/PZj614w==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/html-webpack-plugin": {
+ "version": "5.5.1",
+ "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.5.1.tgz",
+ "integrity": "sha512-cTUzZ1+NqjGEKjmVgZKLMdiFg3m9MdRXkZW2OEe69WYVi5ONLMmlnSZdXzGGMOq0C8jGDrL6EWyEDDUioHO/pA==",
+ "dependencies": {
+ "@types/html-minifier-terser": "^6.0.0",
+ "html-minifier-terser": "^6.0.2",
+ "lodash": "^4.17.21",
+ "pretty-error": "^4.0.0",
+ "tapable": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/html-webpack-plugin"
+ },
+ "peerDependencies": {
+ "webpack": "^5.20.0"
+ }
+ },
+ "node_modules/htmlparser2": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz",
+ "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==",
+ "funding": [
+ "https://github.com/fb55/htmlparser2?sponsor=1",
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/fb55"
+ }
+ ],
+ "dependencies": {
+ "domelementtype": "^2.0.1",
+ "domhandler": "^4.0.0",
+ "domutils": "^2.5.2",
+ "entities": "^2.0.0"
+ }
+ },
+ "node_modules/htmlparser2/node_modules/entities": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz",
+ "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==",
+ "funding": {
+ "url": "https://github.com/fb55/entities?sponsor=1"
+ }
+ },
+ "node_modules/http-cache-semantics": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz",
+ "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ=="
+ },
+ "node_modules/http-deceiver": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz",
+ "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw=="
+ },
+ "node_modules/http-errors": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz",
+ "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
+ "license": "MIT",
+ "dependencies": {
+ "depd": "2.0.0",
+ "inherits": "2.0.4",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "toidentifier": "1.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/http-parser-js": {
+ "version": "0.5.8",
+ "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz",
+ "integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q=="
+ },
+ "node_modules/http-proxy": {
+ "version": "1.18.1",
+ "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz",
+ "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==",
+ "dependencies": {
+ "eventemitter3": "^4.0.0",
+ "follow-redirects": "^1.0.0",
+ "requires-port": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=8.0.0"
+ }
+ },
+ "node_modules/http-proxy-middleware": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz",
+ "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==",
+ "dependencies": {
+ "@types/http-proxy": "^1.17.8",
+ "http-proxy": "^1.18.1",
+ "is-glob": "^4.0.1",
+ "is-plain-obj": "^3.0.0",
+ "micromatch": "^4.0.2"
+ },
+ "engines": {
+ "node": ">=12.0.0"
+ },
+ "peerDependencies": {
+ "@types/express": "^4.17.13"
+ },
+ "peerDependenciesMeta": {
+ "@types/express": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/http-proxy-middleware/node_modules/is-plain-obj": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz",
+ "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/human-signals": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz",
+ "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==",
+ "engines": {
+ "node": ">=10.17.0"
+ }
+ },
+ "node_modules/iconv-lite": {
+ "version": "0.4.24",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+ "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
+ "license": "MIT",
+ "dependencies": {
+ "safer-buffer": ">= 2.1.2 < 3"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/icss-utils": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz",
+ "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==",
+ "engines": {
+ "node": "^10 || ^12 || >= 14"
+ },
+ "peerDependencies": {
+ "postcss": "^8.1.0"
+ }
+ },
+ "node_modules/ignore": {
+ "version": "5.2.4",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz",
+ "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==",
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/image-size": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.1.1.tgz",
+ "integrity": "sha512-541xKlUw6jr/6gGuk92F+mYM5zaFAc5ahphvkqvNe2bQ6gVBkd6bfrmVJ2t4KDAfikAYZyIqTnktX3i6/aQDrQ==",
+ "license": "MIT",
+ "dependencies": {
+ "queue": "6.0.2"
+ },
+ "bin": {
+ "image-size": "bin/image-size.js"
+ },
+ "engines": {
+ "node": ">=16.x"
+ }
+ },
+ "node_modules/immer": {
+ "version": "9.0.21",
+ "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz",
+ "integrity": "sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA==",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/immer"
+ }
+ },
+ "node_modules/import-fresh": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz",
+ "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==",
+ "dependencies": {
+ "parent-module": "^1.0.0",
+ "resolve-from": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/import-lazy": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-2.1.0.tgz",
+ "integrity": "sha512-m7ZEHgtw69qOGw+jwxXkHlrlIPdTGkyh66zXZ1ajZbxkDBNjSY/LGbmjc7h0s2ELsUDTAhFr55TrPSSqJGPG0A==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/imurmurhash": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
+ "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==",
+ "engines": {
+ "node": ">=0.8.19"
+ }
+ },
+ "node_modules/indent-string": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz",
+ "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/inflight": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+ "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
+ "dependencies": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "node_modules/inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
+ },
+ "node_modules/ini": {
+ "version": "1.3.8",
+ "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz",
+ "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew=="
+ },
+ "node_modules/inline-style-parser": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz",
+ "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q==",
+ "license": "MIT"
+ },
+ "node_modules/interpret": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz",
+ "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==",
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/invariant": {
+ "version": "2.2.4",
+ "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz",
+ "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==",
+ "dependencies": {
+ "loose-envify": "^1.0.0"
+ }
+ },
+ "node_modules/ipaddr.js": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.1.0.tgz",
+ "integrity": "sha512-LlbxQ7xKzfBusov6UMi4MFpEg0m+mAm9xyNGEduwXMEDuf4WfzB/RZwMVYEd7IKGvh4IUkEXYxtAVu9T3OelJQ==",
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/is-absolute": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-absolute/-/is-absolute-1.0.0.tgz",
+ "integrity": "sha512-dOWoqflvcydARa360Gvv18DZ/gRuHKi2NU/wU5X1ZFzdYfH29nkiNZsF3mp4OJ3H4yo9Mx8A/uAGNzpzPN3yBA==",
+ "dependencies": {
+ "is-relative": "^1.0.0",
+ "is-windows": "^1.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-alphabetical": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz",
+ "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-alphanumerical": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz",
+ "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==",
+ "license": "MIT",
+ "dependencies": {
+ "is-alphabetical": "^1.0.0",
+ "is-decimal": "^1.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-arrayish": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
+ "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg=="
+ },
+ "node_modules/is-binary-path": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
+ "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
+ "dependencies": {
+ "binary-extensions": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-buffer": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz",
+ "integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "license": "MIT",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/is-ci": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz",
+ "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==",
+ "dependencies": {
+ "ci-info": "^2.0.0"
+ },
+ "bin": {
+ "is-ci": "bin.js"
+ }
+ },
+ "node_modules/is-ci/node_modules/ci-info": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz",
+ "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ=="
+ },
+ "node_modules/is-core-module": {
+ "version": "2.12.1",
+ "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.1.tgz",
+ "integrity": "sha512-Q4ZuBAe2FUsKtyQJoQHlvP8OvBERxO3jEmy1I7hcRXcJBGGHFh/aJBswbXuS9sgrDH2QUO8ilkwNPHvHMd8clg==",
+ "dependencies": {
+ "has": "^1.0.3"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/is-decimal": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz",
+ "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-docker": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz",
+ "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==",
+ "bin": {
+ "is-docker": "cli.js"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is-extendable": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz",
+ "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-extglob": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+ "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-glob": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
+ "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
+ "dependencies": {
+ "is-extglob": "^2.1.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-hexadecimal": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz",
+ "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-installed-globally": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz",
+ "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==",
+ "dependencies": {
+ "global-dirs": "^3.0.0",
+ "is-path-inside": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is-npm": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-5.0.0.tgz",
+ "integrity": "sha512-WW/rQLOazUq+ST/bCAVBp/2oMERWLsR7OrKyt052dNDk4DHcDE0/7QSXITlmi+VBcV13DfIbysG3tZJm5RfdBA==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.12.0"
+ }
+ },
+ "node_modules/is-obj": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz",
+ "integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-path-cwd": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz",
+ "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/is-path-inside": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz",
+ "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-plain-obj": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz",
+ "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-plain-object": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz",
+ "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==",
+ "dependencies": {
+ "isobject": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-regexp": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz",
+ "integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-relative": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-relative/-/is-relative-1.0.0.tgz",
+ "integrity": "sha512-Kw/ReK0iqwKeu0MITLFuj0jbPAmEiOsIwyIXvvbfa6QfmN9pkD1M+8pdk7Rl/dTKbH34/XBFMbgD4iMJhLQbGA==",
+ "dependencies": {
+ "is-unc-path": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-root": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz",
+ "integrity": "sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/is-stream": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz",
+ "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==",
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/is-typedarray": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz",
+ "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA=="
+ },
+ "node_modules/is-unc-path": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-unc-path/-/is-unc-path-1.0.0.tgz",
+ "integrity": "sha512-mrGpVd0fs7WWLfVsStvgF6iEJnbjDFZh9/emhRDcGWTduTfNHd9CHeUwH3gYIjdbwo4On6hunkztwOaAw0yllQ==",
+ "dependencies": {
+ "unc-path-regex": "^0.1.2"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-whitespace-character": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-whitespace-character/-/is-whitespace-character-1.0.4.tgz",
+ "integrity": "sha512-SDweEzfIZM0SJV0EUga669UTKlmL0Pq8Lno0QDQsPnvECB3IM2aP0gdx5TrU0A01MAPfViaZiI2V1QMZLaKK5w==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-windows": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz",
+ "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/is-word-character": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-word-character/-/is-word-character-1.0.4.tgz",
+ "integrity": "sha512-5SMO8RVennx3nZrqtKwCGyyetPE9VDba5ugvKLaD4KopPG5kR4mQ7tNt/r7feL5yt5h3lpuBbIUmCOG2eSzXHA==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/is-wsl": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz",
+ "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==",
+ "dependencies": {
+ "is-docker": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/is-yarn-global": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.3.0.tgz",
+ "integrity": "sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw=="
+ },
+ "node_modules/isarray": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+ "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ=="
+ },
+ "node_modules/isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="
+ },
+ "node_modules/isobject": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz",
+ "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/jest-util": {
+ "version": "29.5.0",
+ "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.5.0.tgz",
+ "integrity": "sha512-RYMgG/MTadOr5t8KdhejfvUU82MxsCu5MF6KuDUHl+NuwzUt+Sm6jJWxTJVrDR1j5M/gJVCPKQEpWXY+yIQ6lQ==",
+ "dependencies": {
+ "@jest/types": "^29.5.0",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "ci-info": "^3.2.0",
+ "graceful-fs": "^4.2.9",
+ "picomatch": "^2.2.3"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-worker": {
+ "version": "29.5.0",
+ "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.5.0.tgz",
+ "integrity": "sha512-NcrQnevGoSp4b5kg+akIpthoAFHxPBcb5P6mYPY0fUNT+sSvmtu6jlkEle3anczUKIKEbMxFimk9oTP/tpIPgA==",
+ "dependencies": {
+ "@types/node": "*",
+ "jest-util": "^29.5.0",
+ "merge-stream": "^2.0.0",
+ "supports-color": "^8.0.0"
+ },
+ "engines": {
+ "node": "^14.15.0 || ^16.10.0 || >=18.0.0"
+ }
+ },
+ "node_modules/jest-worker/node_modules/supports-color": {
+ "version": "8.1.1",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz",
+ "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/supports-color?sponsor=1"
+ }
+ },
+ "node_modules/jiti": {
+ "version": "1.18.2",
+ "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.18.2.tgz",
+ "integrity": "sha512-QAdOptna2NYiSSpv0O/BwoHBSmz4YhpzJHyi+fnMRTXFjp7B8i/YG5Z8IfusxB1ufjcD2Sre1F3R+nX3fvy7gg==",
+ "bin": {
+ "jiti": "bin/jiti.js"
+ }
+ },
+ "node_modules/joi": {
+ "version": "17.9.2",
+ "resolved": "https://registry.npmjs.org/joi/-/joi-17.9.2.tgz",
+ "integrity": "sha512-Itk/r+V4Dx0V3c7RLFdRh12IOjySm2/WGPMubBT92cQvRfYZhPM2W0hZlctjj72iES8jsRCwp7S/cRmWBnJ4nw==",
+ "dependencies": {
+ "@hapi/hoek": "^9.0.0",
+ "@hapi/topo": "^5.0.0",
+ "@sideway/address": "^4.1.3",
+ "@sideway/formula": "^3.0.1",
+ "@sideway/pinpoint": "^2.0.0"
+ }
+ },
+ "node_modules/js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
+ },
+ "node_modules/js-yaml": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
+ "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+ "dependencies": {
+ "argparse": "^2.0.1"
+ },
+ "bin": {
+ "js-yaml": "bin/js-yaml.js"
+ }
+ },
+ "node_modules/jsesc": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz",
+ "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==",
+ "bin": {
+ "jsesc": "bin/jsesc"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/json-buffer": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz",
+ "integrity": "sha512-CuUqjv0FUZIdXkHPI8MezCnFCdaTAacej1TZYulLoAg1h/PhwkdXFN4V/gzY4g+fMBCOV2xF+rp7t2XD2ns/NQ=="
+ },
+ "node_modules/json-parse-even-better-errors": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz",
+ "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w=="
+ },
+ "node_modules/json-schema-traverse": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+ "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="
+ },
+ "node_modules/json5": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
+ "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==",
+ "bin": {
+ "json5": "lib/cli.js"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/jsonfile": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
+ "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==",
+ "optionalDependencies": {
+ "graceful-fs": "^4.1.6"
+ }
+ },
+ "node_modules/keyv": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz",
+ "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==",
+ "dependencies": {
+ "json-buffer": "3.0.0"
+ }
+ },
+ "node_modules/kind-of": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
+ "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/kleur": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz",
+ "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/klona": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/klona/-/klona-2.0.6.tgz",
+ "integrity": "sha512-dhG34DXATL5hSxJbIexCft8FChFXtmskoZYnoPWjXQuebWYCNkVeV3KkGegCK9CP1oswI/vQibS2GY7Em/sJJA==",
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/latest-version": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz",
+ "integrity": "sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA==",
+ "dependencies": {
+ "package-json": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/launch-editor": {
+ "version": "2.6.0",
+ "resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.6.0.tgz",
+ "integrity": "sha512-JpDCcQnyAAzZZaZ7vEiSqL690w7dAEyLao+KC96zBplnYbJS7TYNjvM3M7y3dGz+v7aIsJk3hllWuc0kWAjyRQ==",
+ "dependencies": {
+ "picocolors": "^1.0.0",
+ "shell-quote": "^1.7.3"
+ }
+ },
+ "node_modules/leven": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz",
+ "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/lilconfig": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz",
+ "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/lines-and-columns": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
+ "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg=="
+ },
+ "node_modules/loader-runner": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz",
+ "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==",
+ "engines": {
+ "node": ">=6.11.5"
+ }
+ },
+ "node_modules/loader-utils": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz",
+ "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==",
+ "dependencies": {
+ "big.js": "^5.2.2",
+ "emojis-list": "^3.0.0",
+ "json5": "^2.1.2"
+ },
+ "engines": {
+ "node": ">=8.9.0"
+ }
+ },
+ "node_modules/locate-path": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
+ "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
+ "dependencies": {
+ "p-locate": "^4.1.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/lodash": {
+ "version": "4.17.21",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
+ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="
+ },
+ "node_modules/lodash-es": {
+ "version": "4.17.21",
+ "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz",
+ "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw=="
+ },
+ "node_modules/lodash.debounce": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz",
+ "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow=="
+ },
+ "node_modules/lodash.memoize": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz",
+ "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag=="
+ },
+ "node_modules/lodash.uniq": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz",
+ "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ=="
+ },
+ "node_modules/loose-envify": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
+ "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
+ "dependencies": {
+ "js-tokens": "^3.0.0 || ^4.0.0"
+ },
+ "bin": {
+ "loose-envify": "cli.js"
+ }
+ },
+ "node_modules/lower-case": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz",
+ "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==",
+ "dependencies": {
+ "tslib": "^2.0.3"
+ }
+ },
+ "node_modules/lowercase-keys": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz",
+ "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/lru-cache": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
+ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
+ "dependencies": {
+ "yallist": "^3.0.2"
+ }
+ },
+ "node_modules/make-dir": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz",
+ "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==",
+ "dependencies": {
+ "semver": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/make-dir/node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/markdown-escapes": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/markdown-escapes/-/markdown-escapes-1.0.4.tgz",
+ "integrity": "sha512-8z4efJYk43E0upd0NbVXwgSTQs6cT3T06etieCMEg7dRbzCbxUCK/GHlX8mhHRDcp+OLlHkPKsvqQTCvsRl2cg==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/mdast-squeeze-paragraphs": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-squeeze-paragraphs/-/mdast-squeeze-paragraphs-4.0.0.tgz",
+ "integrity": "sha512-zxdPn69hkQ1rm4J+2Cs2j6wDEv7O17TfXTJ33tl/+JPIoEmtV9t2ZzBM5LPHE8QlHsmVD8t3vPKCyY3oH+H8MQ==",
+ "license": "MIT",
+ "dependencies": {
+ "unist-util-remove": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-definitions": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-definitions/-/mdast-util-definitions-4.0.0.tgz",
+ "integrity": "sha512-k8AJ6aNnUkB7IE+5azR9h81O5EQ/cTDXtWdMq9Kk5KcEW/8ritU5CeLg/9HhOC++nALHBlaogJ5jz0Ybk3kPMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "unist-util-visit": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-hast": {
+ "version": "10.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-10.0.1.tgz",
+ "integrity": "sha512-BW3LM9SEMnjf4HXXVApZMt8gLQWVNXc3jryK0nJu/rOXPOnlkUjmdkDlmxMirpbU9ILncGFIwLH/ubnWBbcdgA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/mdast": "^3.0.0",
+ "@types/unist": "^2.0.0",
+ "mdast-util-definitions": "^4.0.0",
+ "mdurl": "^1.0.0",
+ "unist-builder": "^2.0.0",
+ "unist-util-generated": "^1.0.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdast-util-to-string": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz",
+ "integrity": "sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/mdn-data": {
+ "version": "2.0.14",
+ "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz",
+ "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow=="
+ },
+ "node_modules/mdurl": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz",
+ "integrity": "sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g==",
+ "license": "MIT"
+ },
+ "node_modules/media-typer": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+ "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/memfs": {
+ "version": "3.5.3",
+ "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz",
+ "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==",
+ "dependencies": {
+ "fs-monkey": "^1.0.4"
+ },
+ "engines": {
+ "node": ">= 4.0.0"
+ }
+ },
+ "node_modules/merge-descriptors": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz",
+ "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==",
+ "license": "MIT",
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/merge-stream": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
+ "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w=="
+ },
+ "node_modules/merge2": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
+ "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==",
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/methods": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
+ "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/micromatch": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
+ "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
+ "license": "MIT",
+ "dependencies": {
+ "braces": "^3.0.3",
+ "picomatch": "^2.3.1"
+ },
+ "engines": {
+ "node": ">=8.6"
+ }
+ },
+ "node_modules/mime": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
+ "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==",
+ "license": "MIT",
+ "bin": {
+ "mime": "cli.js"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/mime-db": {
+ "version": "1.33.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz",
+ "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mime-types": {
+ "version": "2.1.18",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz",
+ "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==",
+ "dependencies": {
+ "mime-db": "~1.33.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/mimic-fn": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz",
+ "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/mimic-response": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz",
+ "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/mini-css-extract-plugin": {
+ "version": "2.7.6",
+ "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.7.6.tgz",
+ "integrity": "sha512-Qk7HcgaPkGG6eD77mLvZS1nmxlao3j+9PkrT9Uc7HAE1id3F41+DdBRYRYkbyfNRGzm8/YWtzhw7nVPmwhqTQw==",
+ "dependencies": {
+ "schema-utils": "^4.0.0"
+ },
+ "engines": {
+ "node": ">= 12.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ },
+ "peerDependencies": {
+ "webpack": "^5.0.0"
+ }
+ },
+ "node_modules/mini-css-extract-plugin/node_modules/ajv": {
+ "version": "8.12.0",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
+ "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.1",
+ "json-schema-traverse": "^1.0.0",
+ "require-from-string": "^2.0.2",
+ "uri-js": "^4.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
+ "node_modules/mini-css-extract-plugin/node_modules/ajv-keywords": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz",
+ "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.3"
+ },
+ "peerDependencies": {
+ "ajv": "^8.8.2"
+ }
+ },
+ "node_modules/mini-css-extract-plugin/node_modules/json-schema-traverse": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
+ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
+ },
+ "node_modules/mini-css-extract-plugin/node_modules/schema-utils": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz",
+ "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==",
+ "dependencies": {
+ "@types/json-schema": "^7.0.9",
+ "ajv": "^8.9.0",
+ "ajv-formats": "^2.1.1",
+ "ajv-keywords": "^5.1.0"
+ },
+ "engines": {
+ "node": ">= 12.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ }
+ },
+ "node_modules/minimalistic-assert": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz",
+ "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A=="
+ },
+ "node_modules/minimatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "dependencies": {
+ "brace-expansion": "^1.1.7"
+ },
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/minimist": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz",
+ "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/mrmime": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-1.0.1.tgz",
+ "integrity": "sha512-hzzEagAgDyoU1Q6yg5uI+AorQgdvMCur3FcKf7NhMKWsaYg+RnbTyHRa/9IlLF9rf455MOCtcqqrQQ83pPP7Uw==",
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "node_modules/multicast-dns": {
+ "version": "7.2.5",
+ "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz",
+ "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==",
+ "dependencies": {
+ "dns-packet": "^5.2.2",
+ "thunky": "^1.0.2"
+ },
+ "bin": {
+ "multicast-dns": "cli.js"
+ }
+ },
+ "node_modules/nanoclone": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/nanoclone/-/nanoclone-0.2.1.tgz",
+ "integrity": "sha512-wynEP02LmIbLpcYw8uBKpcfF6dmg2vcpKqxeH5UcoKEYdExslsdUA4ugFauuaeYdTB76ez6gJW8XAZ6CgkXYxA=="
+ },
+ "node_modules/nanoid": {
+ "version": "3.3.7",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz",
+ "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "bin": {
+ "nanoid": "bin/nanoid.cjs"
+ },
+ "engines": {
+ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
+ }
+ },
+ "node_modules/negotiator": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
+ "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/neo-async": {
+ "version": "2.6.2",
+ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz",
+ "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw=="
+ },
+ "node_modules/no-case": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz",
+ "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==",
+ "dependencies": {
+ "lower-case": "^2.0.2",
+ "tslib": "^2.0.3"
+ }
+ },
+ "node_modules/node-emoji": {
+ "version": "1.11.0",
+ "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.11.0.tgz",
+ "integrity": "sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A==",
+ "license": "MIT",
+ "dependencies": {
+ "lodash": "^4.17.21"
+ }
+ },
+ "node_modules/node-forge": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz",
+ "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA==",
+ "engines": {
+ "node": ">= 6.13.0"
+ }
+ },
+ "node_modules/node-releases": {
+ "version": "2.0.18",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz",
+ "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==",
+ "license": "MIT"
+ },
+ "node_modules/normalize-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
+ "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/normalize-range": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz",
+ "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/normalize-url": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz",
+ "integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/npm-run-path": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz",
+ "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==",
+ "dependencies": {
+ "path-key": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/nth-check": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz",
+ "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==",
+ "dependencies": {
+ "boolbase": "^1.0.0"
+ },
+ "funding": {
+ "url": "https://github.com/fb55/nth-check?sponsor=1"
+ }
+ },
+ "node_modules/object-assign": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+ "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/object-inspect": {
+ "version": "1.13.2",
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz",
+ "integrity": "sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/object-keys": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
+ "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==",
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/object.assign": {
+ "version": "4.1.4",
+ "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz",
+ "integrity": "sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==",
+ "dependencies": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.4",
+ "has-symbols": "^1.0.3",
+ "object-keys": "^1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/obuf": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz",
+ "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg=="
+ },
+ "node_modules/on-finished": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
+ "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
+ "dependencies": {
+ "ee-first": "1.1.1"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/on-headers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz",
+ "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
+ "dependencies": {
+ "wrappy": "1"
+ }
+ },
+ "node_modules/onetime": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
+ "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==",
+ "dependencies": {
+ "mimic-fn": "^2.1.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/open": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz",
+ "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==",
+ "dependencies": {
+ "define-lazy-prop": "^2.0.0",
+ "is-docker": "^2.1.1",
+ "is-wsl": "^2.2.0"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/opener": {
+ "version": "1.5.2",
+ "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz",
+ "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A==",
+ "bin": {
+ "opener": "bin/opener-bin.js"
+ }
+ },
+ "node_modules/p-cancelable": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz",
+ "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/p-limit": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
+ "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
+ "dependencies": {
+ "p-try": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/p-locate": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
+ "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
+ "dependencies": {
+ "p-limit": "^2.2.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/p-map": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz",
+ "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==",
+ "dependencies": {
+ "aggregate-error": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/p-retry": {
+ "version": "4.6.2",
+ "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz",
+ "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==",
+ "dependencies": {
+ "@types/retry": "0.12.0",
+ "retry": "^0.13.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/p-try": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz",
+ "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/package-json": {
+ "version": "6.5.0",
+ "resolved": "https://registry.npmjs.org/package-json/-/package-json-6.5.0.tgz",
+ "integrity": "sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ==",
+ "dependencies": {
+ "got": "^9.6.0",
+ "registry-auth-token": "^4.0.0",
+ "registry-url": "^5.0.0",
+ "semver": "^6.2.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/package-json/node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/param-case": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz",
+ "integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==",
+ "dependencies": {
+ "dot-case": "^3.0.4",
+ "tslib": "^2.0.3"
+ }
+ },
+ "node_modules/parent-module": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
+ "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
+ "dependencies": {
+ "callsites": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/parse-entities": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz",
+ "integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==",
+ "license": "MIT",
+ "dependencies": {
+ "character-entities": "^1.0.0",
+ "character-entities-legacy": "^1.0.0",
+ "character-reference-invalid": "^1.0.0",
+ "is-alphanumerical": "^1.0.0",
+ "is-decimal": "^1.0.0",
+ "is-hexadecimal": "^1.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/parse-json": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz",
+ "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==",
+ "dependencies": {
+ "@babel/code-frame": "^7.0.0",
+ "error-ex": "^1.3.1",
+ "json-parse-even-better-errors": "^2.3.0",
+ "lines-and-columns": "^1.1.6"
+ },
+ "engines": {
+ "node": ">=8"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/parse5": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz",
+ "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw==",
+ "license": "MIT"
+ },
+ "node_modules/parseurl": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
+ "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/pascal-case": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz",
+ "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==",
+ "dependencies": {
+ "no-case": "^3.0.4",
+ "tslib": "^2.0.3"
+ }
+ },
+ "node_modules/path": {
+ "version": "0.12.7",
+ "resolved": "https://registry.npmjs.org/path/-/path-0.12.7.tgz",
+ "integrity": "sha512-aXXC6s+1w7otVF9UletFkFcDsJeO7lSZBPUQhtb5O0xJe8LtYhj/GxldoL09bBj9+ZmE2hNoHqQSFMN5fikh4Q==",
+ "dependencies": {
+ "process": "^0.11.1",
+ "util": "^0.10.3"
+ }
+ },
+ "node_modules/path-exists": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
+ "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-is-absolute": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+ "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/path-is-inside": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz",
+ "integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w=="
+ },
+ "node_modules/path-key": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/path-parse": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
+ "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="
+ },
+ "node_modules/path-to-regexp": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.9.0.tgz",
+ "integrity": "sha512-xIp7/apCFJuUHdDLWe8O1HIkb0kQrOMb/0u6FXQjemHn/ii5LrIzU6bdECnsiTF/GjZkMEKg1xdiZwNqDYlZ6g==",
+ "license": "MIT",
+ "dependencies": {
+ "isarray": "0.0.1"
+ }
+ },
+ "node_modules/path-type": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz",
+ "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/picocolors": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz",
+ "integrity": "sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw==",
+ "license": "ISC"
+ },
+ "node_modules/picomatch": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
+ "engines": {
+ "node": ">=8.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/jonschlinkert"
+ }
+ },
+ "node_modules/pkg-dir": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz",
+ "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==",
+ "dependencies": {
+ "find-up": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/pkg-up": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-3.1.0.tgz",
+ "integrity": "sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==",
+ "dependencies": {
+ "find-up": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/pkg-up/node_modules/find-up": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz",
+ "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==",
+ "dependencies": {
+ "locate-path": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/pkg-up/node_modules/locate-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz",
+ "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==",
+ "dependencies": {
+ "p-locate": "^3.0.0",
+ "path-exists": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/pkg-up/node_modules/p-locate": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz",
+ "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==",
+ "dependencies": {
+ "p-limit": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/pkg-up/node_modules/path-exists": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz",
+ "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/postcss": {
+ "version": "8.4.45",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.45.tgz",
+ "integrity": "sha512-7KTLTdzdZZYscUc65XmjFiB73vBhBfbPztCYdUNvlaso9PrzjzcmjqBPR0lNGkcVlcO4BjiO5rK/qNz+XAen1Q==",
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/postcss/"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/postcss"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "nanoid": "^3.3.7",
+ "picocolors": "^1.0.1",
+ "source-map-js": "^1.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14"
+ }
+ },
+ "node_modules/postcss-calc": {
+ "version": "8.2.4",
+ "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-8.2.4.tgz",
+ "integrity": "sha512-SmWMSJmB8MRnnULldx0lQIyhSNvuDl9HfrZkaqqE/WHAhToYsAvDq+yAsA/kIyINDszOp3Rh0GFoNuH5Ypsm3Q==",
+ "dependencies": {
+ "postcss-selector-parser": "^6.0.9",
+ "postcss-value-parser": "^4.2.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.2"
+ }
+ },
+ "node_modules/postcss-colormin": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-5.3.1.tgz",
+ "integrity": "sha512-UsWQG0AqTFQmpBegeLLc1+c3jIqBNB0zlDGRWR+dQ3pRKJL1oeMzyqmH3o2PIfn9MBdNrVPWhDbT769LxCTLJQ==",
+ "dependencies": {
+ "browserslist": "^4.21.4",
+ "caniuse-api": "^3.0.0",
+ "colord": "^2.9.1",
+ "postcss-value-parser": "^4.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-convert-values": {
+ "version": "5.1.3",
+ "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-5.1.3.tgz",
+ "integrity": "sha512-82pC1xkJZtcJEfiLw6UXnXVXScgtBrjlO5CBmuDQc+dlb88ZYheFsjTn40+zBVi3DkfF7iezO0nJUPLcJK3pvA==",
+ "dependencies": {
+ "browserslist": "^4.21.4",
+ "postcss-value-parser": "^4.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-discard-comments": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-5.1.2.tgz",
+ "integrity": "sha512-+L8208OVbHVF2UQf1iDmRcbdjJkuBF6IS29yBDSiWUIzpYaAhtNl6JYnYm12FnkeCwQqF5LeklOu6rAqgfBZqQ==",
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-discard-duplicates": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-5.1.0.tgz",
+ "integrity": "sha512-zmX3IoSI2aoenxHV6C7plngHWWhUOV3sP1T8y2ifzxzbtnuhk1EdPwm0S1bIUNaJ2eNbWeGLEwzw8huPD67aQw==",
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-discard-empty": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-5.1.1.tgz",
+ "integrity": "sha512-zPz4WljiSuLWsI0ir4Mcnr4qQQ5e1Ukc3i7UfE2XcrwKK2LIPIqE5jxMRxO6GbI3cv//ztXDsXwEWT3BHOGh3A==",
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-discard-overridden": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-5.1.0.tgz",
+ "integrity": "sha512-21nOL7RqWR1kasIVdKs8HNqQJhFxLsyRfAnUDm4Fe4t4mCWL9OJiHvlHPjcd8zc5Myu89b/7wZDnOSjFgeWRtw==",
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-discard-unused": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-5.1.0.tgz",
+ "integrity": "sha512-KwLWymI9hbwXmJa0dkrzpRbSJEh0vVUd7r8t0yOGPcfKzyJJxFM8kLyC5Ev9avji6nY95pOp1W6HqIrfT+0VGw==",
+ "license": "MIT",
+ "dependencies": {
+ "postcss-selector-parser": "^6.0.5"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-loader": {
+ "version": "7.3.2",
+ "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.3.2.tgz",
+ "integrity": "sha512-c7qDlXErX6n0VT+LUsW+nwefVtTu3ORtVvK8EXuUIDcxo+b/euYqpuHlJAvePb0Af5e8uMjR/13e0lTuYifaig==",
+ "dependencies": {
+ "cosmiconfig": "^8.1.3",
+ "jiti": "^1.18.2",
+ "klona": "^2.0.6",
+ "semver": "^7.3.8"
+ },
+ "engines": {
+ "node": ">= 14.15.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ },
+ "peerDependencies": {
+ "postcss": "^7.0.0 || ^8.0.1",
+ "webpack": "^5.0.0"
+ }
+ },
+ "node_modules/postcss-loader/node_modules/cosmiconfig": {
+ "version": "8.2.0",
+ "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.2.0.tgz",
+ "integrity": "sha512-3rTMnFJA1tCOPwRxtgF4wd7Ab2qvDbL8jX+3smjIbS4HlZBagTlpERbdN7iAbWlrfxE3M8c27kTwTawQ7st+OQ==",
+ "dependencies": {
+ "import-fresh": "^3.2.1",
+ "js-yaml": "^4.1.0",
+ "parse-json": "^5.0.0",
+ "path-type": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=14"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/d-fischer"
+ }
+ },
+ "node_modules/postcss-merge-idents": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-5.1.1.tgz",
+ "integrity": "sha512-pCijL1TREiCoog5nQp7wUe+TUonA2tC2sQ54UGeMmryK3UFGIYKqDyjnqd6RcuI4znFn9hWSLNN8xKE/vWcUQw==",
+ "license": "MIT",
+ "dependencies": {
+ "cssnano-utils": "^3.1.0",
+ "postcss-value-parser": "^4.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-merge-longhand": {
+ "version": "5.1.7",
+ "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-5.1.7.tgz",
+ "integrity": "sha512-YCI9gZB+PLNskrK0BB3/2OzPnGhPkBEwmwhfYk1ilBHYVAZB7/tkTHFBAnCrvBBOmeYyMYw3DMjT55SyxMBzjQ==",
+ "dependencies": {
+ "postcss-value-parser": "^4.2.0",
+ "stylehacks": "^5.1.1"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-merge-rules": {
+ "version": "5.1.4",
+ "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-5.1.4.tgz",
+ "integrity": "sha512-0R2IuYpgU93y9lhVbO/OylTtKMVcHb67zjWIfCiKR9rWL3GUk1677LAqD/BcHizukdZEjT8Ru3oHRoAYoJy44g==",
+ "dependencies": {
+ "browserslist": "^4.21.4",
+ "caniuse-api": "^3.0.0",
+ "cssnano-utils": "^3.1.0",
+ "postcss-selector-parser": "^6.0.5"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-minify-font-values": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-5.1.0.tgz",
+ "integrity": "sha512-el3mYTgx13ZAPPirSVsHqFzl+BBBDrXvbySvPGFnQcTI4iNslrPaFq4muTkLZmKlGk4gyFAYUBMH30+HurREyA==",
+ "dependencies": {
+ "postcss-value-parser": "^4.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-minify-gradients": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-5.1.1.tgz",
+ "integrity": "sha512-VGvXMTpCEo4qHTNSa9A0a3D+dxGFZCYwR6Jokk+/3oB6flu2/PnPXAh2x7x52EkY5xlIHLm+Le8tJxe/7TNhzw==",
+ "dependencies": {
+ "colord": "^2.9.1",
+ "cssnano-utils": "^3.1.0",
+ "postcss-value-parser": "^4.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-minify-params": {
+ "version": "5.1.4",
+ "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-5.1.4.tgz",
+ "integrity": "sha512-+mePA3MgdmVmv6g+30rn57USjOGSAyuxUmkfiWpzalZ8aiBkdPYjXWtHuwJGm1v5Ojy0Z0LaSYhHaLJQB0P8Jw==",
+ "dependencies": {
+ "browserslist": "^4.21.4",
+ "cssnano-utils": "^3.1.0",
+ "postcss-value-parser": "^4.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-minify-selectors": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-5.2.1.tgz",
+ "integrity": "sha512-nPJu7OjZJTsVUmPdm2TcaiohIwxP+v8ha9NehQ2ye9szv4orirRU3SDdtUmKH+10nzn0bAyOXZ0UEr7OpvLehg==",
+ "dependencies": {
+ "postcss-selector-parser": "^6.0.5"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-modules-extract-imports": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz",
+ "integrity": "sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw==",
+ "engines": {
+ "node": "^10 || ^12 || >= 14"
+ },
+ "peerDependencies": {
+ "postcss": "^8.1.0"
+ }
+ },
+ "node_modules/postcss-modules-local-by-default": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.3.tgz",
+ "integrity": "sha512-2/u2zraspoACtrbFRnTijMiQtb4GW4BvatjaG/bCjYQo8kLTdevCUlwuBHx2sCnSyrI3x3qj4ZK1j5LQBgzmwA==",
+ "dependencies": {
+ "icss-utils": "^5.0.0",
+ "postcss-selector-parser": "^6.0.2",
+ "postcss-value-parser": "^4.1.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >= 14"
+ },
+ "peerDependencies": {
+ "postcss": "^8.1.0"
+ }
+ },
+ "node_modules/postcss-modules-scope": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.0.0.tgz",
+ "integrity": "sha512-hncihwFA2yPath8oZ15PZqvWGkWf+XUfQgUGamS4LqoP1anQLOsOJw0vr7J7IwLpoY9fatA2qiGUGmuZL0Iqlg==",
+ "dependencies": {
+ "postcss-selector-parser": "^6.0.4"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >= 14"
+ },
+ "peerDependencies": {
+ "postcss": "^8.1.0"
+ }
+ },
+ "node_modules/postcss-modules-values": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz",
+ "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==",
+ "dependencies": {
+ "icss-utils": "^5.0.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >= 14"
+ },
+ "peerDependencies": {
+ "postcss": "^8.1.0"
+ }
+ },
+ "node_modules/postcss-normalize-charset": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-5.1.0.tgz",
+ "integrity": "sha512-mSgUJ+pd/ldRGVx26p2wz9dNZ7ji6Pn8VWBajMXFf8jk7vUoSrZ2lt/wZR7DtlZYKesmZI680qjr2CeFF2fbUg==",
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-normalize-display-values": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-5.1.0.tgz",
+ "integrity": "sha512-WP4KIM4o2dazQXWmFaqMmcvsKmhdINFblgSeRgn8BJ6vxaMyaJkwAzpPpuvSIoG/rmX3M+IrRZEz2H0glrQNEA==",
+ "dependencies": {
+ "postcss-value-parser": "^4.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-normalize-positions": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-5.1.1.tgz",
+ "integrity": "sha512-6UpCb0G4eofTCQLFVuI3EVNZzBNPiIKcA1AKVka+31fTVySphr3VUgAIULBhxZkKgwLImhzMR2Bw1ORK+37INg==",
+ "dependencies": {
+ "postcss-value-parser": "^4.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-normalize-repeat-style": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-5.1.1.tgz",
+ "integrity": "sha512-mFpLspGWkQtBcWIRFLmewo8aC3ImN2i/J3v8YCFUwDnPu3Xz4rLohDO26lGjwNsQxB3YF0KKRwspGzE2JEuS0g==",
+ "dependencies": {
+ "postcss-value-parser": "^4.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-normalize-string": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-5.1.0.tgz",
+ "integrity": "sha512-oYiIJOf4T9T1N4i+abeIc7Vgm/xPCGih4bZz5Nm0/ARVJ7K6xrDlLwvwqOydvyL3RHNf8qZk6vo3aatiw/go3w==",
+ "dependencies": {
+ "postcss-value-parser": "^4.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-normalize-timing-functions": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-5.1.0.tgz",
+ "integrity": "sha512-DOEkzJ4SAXv5xkHl0Wa9cZLF3WCBhF3o1SKVxKQAa+0pYKlueTpCgvkFAHfk+Y64ezX9+nITGrDZeVGgITJXjg==",
+ "dependencies": {
+ "postcss-value-parser": "^4.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-normalize-unicode": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.1.tgz",
+ "integrity": "sha512-qnCL5jzkNUmKVhZoENp1mJiGNPcsJCs1aaRmURmeJGES23Z/ajaln+EPTD+rBeNkSryI+2WTdW+lwcVdOikrpA==",
+ "dependencies": {
+ "browserslist": "^4.21.4",
+ "postcss-value-parser": "^4.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-normalize-url": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-5.1.0.tgz",
+ "integrity": "sha512-5upGeDO+PVthOxSmds43ZeMeZfKH+/DKgGRD7TElkkyS46JXAUhMzIKiCa7BabPeIy3AQcTkXwVVN7DbqsiCew==",
+ "dependencies": {
+ "normalize-url": "^6.0.1",
+ "postcss-value-parser": "^4.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-normalize-whitespace": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-5.1.1.tgz",
+ "integrity": "sha512-83ZJ4t3NUDETIHTa3uEg6asWjSBYL5EdkVB0sDncx9ERzOKBVJIUeDO9RyA9Zwtig8El1d79HBp0JEi8wvGQnA==",
+ "dependencies": {
+ "postcss-value-parser": "^4.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-ordered-values": {
+ "version": "5.1.3",
+ "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-5.1.3.tgz",
+ "integrity": "sha512-9UO79VUhPwEkzbb3RNpqqghc6lcYej1aveQteWY+4POIwlqkYE21HKWaLDF6lWNuqCobEAyTovVhtI32Rbv2RQ==",
+ "dependencies": {
+ "cssnano-utils": "^3.1.0",
+ "postcss-value-parser": "^4.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-reduce-idents": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-5.2.0.tgz",
+ "integrity": "sha512-BTrLjICoSB6gxbc58D5mdBK8OhXRDqud/zodYfdSi52qvDHdMwk+9kB9xsM8yJThH/sZU5A6QVSmMmaN001gIg==",
+ "license": "MIT",
+ "dependencies": {
+ "postcss-value-parser": "^4.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-reduce-initial": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-5.1.2.tgz",
+ "integrity": "sha512-dE/y2XRaqAi6OvjzD22pjTUQ8eOfc6m/natGHgKFBK9DxFmIm69YmaRVQrGgFlEfc1HePIurY0TmDeROK05rIg==",
+ "dependencies": {
+ "browserslist": "^4.21.4",
+ "caniuse-api": "^3.0.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-reduce-transforms": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-5.1.0.tgz",
+ "integrity": "sha512-2fbdbmgir5AvpW9RLtdONx1QoYG2/EtqpNQbFASDlixBbAYuTcJ0dECwlqNqH7VbaUnEnh8SrxOe2sRIn24XyQ==",
+ "dependencies": {
+ "postcss-value-parser": "^4.2.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-selector-parser": {
+ "version": "6.0.13",
+ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.13.tgz",
+ "integrity": "sha512-EaV1Gl4mUEV4ddhDnv/xtj7sxwrwxdetHdWUGnT4VJQf+4d05v6lHYZr8N573k5Z0BViss7BDhfWtKS3+sfAqQ==",
+ "dependencies": {
+ "cssesc": "^3.0.0",
+ "util-deprecate": "^1.0.2"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/postcss-sort-media-queries": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.4.1.tgz",
+ "integrity": "sha512-QDESFzDDGKgpiIh4GYXsSy6sek2yAwQx1JASl5AxBtU1Lq2JfKBljIPNdil989NcSKRQX1ToiaKphImtBuhXWw==",
+ "license": "MIT",
+ "dependencies": {
+ "sort-css-media-queries": "2.1.0"
+ },
+ "engines": {
+ "node": ">=10.0.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.4.16"
+ }
+ },
+ "node_modules/postcss-svgo": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-5.1.0.tgz",
+ "integrity": "sha512-D75KsH1zm5ZrHyxPakAxJWtkyXew5qwS70v56exwvw542d9CRtTo78K0WeFxZB4G7JXKKMbEZtZayTGdIky/eA==",
+ "dependencies": {
+ "postcss-value-parser": "^4.2.0",
+ "svgo": "^2.7.0"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-unique-selectors": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-5.1.1.tgz",
+ "integrity": "sha512-5JiODlELrz8L2HwxfPnhOWZYWDxVHWL83ufOv84NrcgipI7TaeRsatAhK4Tr2/ZiYldpK/wBvw5BD3qfaK96GA==",
+ "dependencies": {
+ "postcss-selector-parser": "^6.0.5"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/postcss-value-parser": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz",
+ "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ=="
+ },
+ "node_modules/postcss-zindex": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-5.1.0.tgz",
+ "integrity": "sha512-fgFMf0OtVSBR1va1JNHYgMxYk73yhn/qb4uQDq1DLGYolz8gHCyr/sesEuGUaYs58E3ZJRcpoGuPVoB7Meiq9A==",
+ "license": "MIT",
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/prepend-http": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz",
+ "integrity": "sha512-ravE6m9Atw9Z/jjttRUZ+clIXogdghyZAuWJ3qEzjT+jI/dL1ifAqhZeC5VHzQp1MSt1+jxKkFNemj/iO7tVUA==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/pretty-error": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz",
+ "integrity": "sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==",
+ "dependencies": {
+ "lodash": "^4.17.20",
+ "renderkid": "^3.0.0"
+ }
+ },
+ "node_modules/pretty-time": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz",
+ "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/process": {
+ "version": "0.11.10",
+ "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz",
+ "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==",
+ "engines": {
+ "node": ">= 0.6.0"
+ }
+ },
+ "node_modules/process-nextick-args": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
+ "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag=="
+ },
+ "node_modules/prompts": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz",
+ "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==",
+ "dependencies": {
+ "kleur": "^3.0.3",
+ "sisteransi": "^1.0.5"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/prop-types": {
+ "version": "15.8.1",
+ "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
+ "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==",
+ "dependencies": {
+ "loose-envify": "^1.4.0",
+ "object-assign": "^4.1.1",
+ "react-is": "^16.13.1"
+ }
+ },
+ "node_modules/property-expr": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/property-expr/-/property-expr-2.0.5.tgz",
+ "integrity": "sha512-IJUkICM5dP5znhCckHSv30Q4b5/JA5enCtkRHYaOVOAocnH/1BQEYTC5NMfT3AVl/iXKdr3aqQbQn9DxyWknwA=="
+ },
+ "node_modules/property-information": {
+ "version": "5.6.0",
+ "resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz",
+ "integrity": "sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==",
+ "license": "MIT",
+ "dependencies": {
+ "xtend": "^4.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/proxy-addr": {
+ "version": "2.0.7",
+ "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
+ "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
+ "dependencies": {
+ "forwarded": "0.2.0",
+ "ipaddr.js": "1.9.1"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/proxy-addr/node_modules/ipaddr.js": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
+ "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==",
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/pump": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
+ "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
+ "dependencies": {
+ "end-of-stream": "^1.1.0",
+ "once": "^1.3.1"
+ }
+ },
+ "node_modules/punycode": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz",
+ "integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ=="
+ },
+ "node_modules/pupa": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.1.1.tgz",
+ "integrity": "sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A==",
+ "dependencies": {
+ "escape-goat": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/qs": {
+ "version": "6.11.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz",
+ "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==",
+ "license": "BSD-3-Clause",
+ "dependencies": {
+ "side-channel": "^1.0.4"
+ },
+ "engines": {
+ "node": ">=0.6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/queue": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/queue/-/queue-6.0.2.tgz",
+ "integrity": "sha512-iHZWu+q3IdFZFX36ro/lKBkSvfkztY5Y7HMiPlOUjhupPcG2JMfst2KKEpu5XndviX/3UhFbRngUPNKtgvtZiA==",
+ "license": "MIT",
+ "dependencies": {
+ "inherits": "~2.0.3"
+ }
+ },
+ "node_modules/queue-microtask": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
+ "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ]
+ },
+ "node_modules/randombytes": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz",
+ "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==",
+ "dependencies": {
+ "safe-buffer": "^5.1.0"
+ }
+ },
+ "node_modules/range-parser": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz",
+ "integrity": "sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/raw-body": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz",
+ "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==",
+ "license": "MIT",
+ "dependencies": {
+ "bytes": "3.1.2",
+ "http-errors": "2.0.0",
+ "iconv-lite": "0.4.24",
+ "unpipe": "1.0.0"
+ },
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/raw-body/node_modules/bytes": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
+ "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/rc": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz",
+ "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==",
+ "dependencies": {
+ "deep-extend": "^0.6.0",
+ "ini": "~1.3.0",
+ "minimist": "^1.2.0",
+ "strip-json-comments": "~2.0.1"
+ },
+ "bin": {
+ "rc": "cli.js"
+ }
+ },
+ "node_modules/react": {
+ "version": "17.0.2",
+ "resolved": "https://registry.npmjs.org/react/-/react-17.0.2.tgz",
+ "integrity": "sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA==",
+ "peer": true,
+ "dependencies": {
+ "loose-envify": "^1.1.0",
+ "object-assign": "^4.1.1"
+ },
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/react-dev-utils": {
+ "version": "12.0.1",
+ "resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-12.0.1.tgz",
+ "integrity": "sha512-84Ivxmr17KjUupyqzFode6xKhjwuEJDROWKJy/BthkL7Wn6NJ8h4WE6k/exAv6ImS+0oZLRRW5j/aINMHyeGeQ==",
+ "dependencies": {
+ "@babel/code-frame": "^7.16.0",
+ "address": "^1.1.2",
+ "browserslist": "^4.18.1",
+ "chalk": "^4.1.2",
+ "cross-spawn": "^7.0.3",
+ "detect-port-alt": "^1.1.6",
+ "escape-string-regexp": "^4.0.0",
+ "filesize": "^8.0.6",
+ "find-up": "^5.0.0",
+ "fork-ts-checker-webpack-plugin": "^6.5.0",
+ "global-modules": "^2.0.0",
+ "globby": "^11.0.4",
+ "gzip-size": "^6.0.0",
+ "immer": "^9.0.7",
+ "is-root": "^2.1.0",
+ "loader-utils": "^3.2.0",
+ "open": "^8.4.0",
+ "pkg-up": "^3.1.0",
+ "prompts": "^2.4.2",
+ "react-error-overlay": "^6.0.11",
+ "recursive-readdir": "^2.2.2",
+ "shell-quote": "^1.7.3",
+ "strip-ansi": "^6.0.1",
+ "text-table": "^0.2.0"
+ },
+ "engines": {
+ "node": ">=14"
+ }
+ },
+ "node_modules/react-dev-utils/node_modules/find-up": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz",
+ "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==",
+ "dependencies": {
+ "locate-path": "^6.0.0",
+ "path-exists": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/react-dev-utils/node_modules/loader-utils": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.2.1.tgz",
+ "integrity": "sha512-ZvFw1KWS3GVyYBYb7qkmRM/WwL2TQQBxgCK62rlvm4WpVQ23Nb4tYjApUlfjrEGvOs7KHEsmyUn75OHZrJMWPw==",
+ "engines": {
+ "node": ">= 12.13.0"
+ }
+ },
+ "node_modules/react-dev-utils/node_modules/locate-path": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
+ "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==",
+ "dependencies": {
+ "p-locate": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/react-dev-utils/node_modules/p-limit": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
+ "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
+ "dependencies": {
+ "yocto-queue": "^0.1.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/react-dev-utils/node_modules/p-locate": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz",
+ "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==",
+ "dependencies": {
+ "p-limit": "^3.0.2"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/react-dom": {
+ "version": "17.0.2",
+ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-17.0.2.tgz",
+ "integrity": "sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA==",
+ "peer": true,
+ "dependencies": {
+ "loose-envify": "^1.1.0",
+ "object-assign": "^4.1.1",
+ "scheduler": "^0.20.2"
+ },
+ "peerDependencies": {
+ "react": "17.0.2"
+ }
+ },
+ "node_modules/react-error-overlay": {
+ "version": "6.0.11",
+ "resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz",
+ "integrity": "sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg=="
+ },
+ "node_modules/react-fast-compare": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.2.tgz",
+ "integrity": "sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ=="
+ },
+ "node_modules/react-helmet-async": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/react-helmet-async/-/react-helmet-async-1.3.0.tgz",
+ "integrity": "sha512-9jZ57/dAn9t3q6hneQS0wukqC2ENOBgMNVEhb/ZG9ZSxUetzVIw4iAmEU38IaVg3QGYauQPhSeUTuIUtFglWpg==",
+ "dependencies": {
+ "@babel/runtime": "^7.12.5",
+ "invariant": "^2.2.4",
+ "prop-types": "^15.7.2",
+ "react-fast-compare": "^3.2.0",
+ "shallowequal": "^1.1.0"
+ },
+ "peerDependencies": {
+ "react": "^16.6.0 || ^17.0.0 || ^18.0.0",
+ "react-dom": "^16.6.0 || ^17.0.0 || ^18.0.0"
+ }
+ },
+ "node_modules/react-is": {
+ "version": "16.13.1",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
+ "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="
+ },
+ "node_modules/react-loadable": {
+ "name": "@docusaurus/react-loadable",
+ "version": "5.5.2",
+ "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz",
+ "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==",
+ "dependencies": {
+ "@types/react": "*",
+ "prop-types": "^15.6.2"
+ },
+ "peerDependencies": {
+ "react": "*"
+ }
+ },
+ "node_modules/react-loadable-ssr-addon-v5-slorber": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.1.tgz",
+ "integrity": "sha512-lq3Lyw1lGku8zUEJPDxsNm1AfYHBrO9Y1+olAYwpUJ2IGFBskM0DMKok97A6LWUpHm+o7IvQBOWu9MLenp9Z+A==",
+ "dependencies": {
+ "@babel/runtime": "^7.10.3"
+ },
+ "engines": {
+ "node": ">=10.13.0"
+ },
+ "peerDependencies": {
+ "react-loadable": "*",
+ "webpack": ">=4.41.1 || 5.x"
+ }
+ },
+ "node_modules/react-router": {
+ "version": "5.3.4",
+ "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz",
+ "integrity": "sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==",
+ "dependencies": {
+ "@babel/runtime": "^7.12.13",
+ "history": "^4.9.0",
+ "hoist-non-react-statics": "^3.1.0",
+ "loose-envify": "^1.3.1",
+ "path-to-regexp": "^1.7.0",
+ "prop-types": "^15.6.2",
+ "react-is": "^16.6.0",
+ "tiny-invariant": "^1.0.2",
+ "tiny-warning": "^1.0.0"
+ },
+ "peerDependencies": {
+ "react": ">=15"
+ }
+ },
+ "node_modules/react-router-config": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz",
+ "integrity": "sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg==",
+ "dependencies": {
+ "@babel/runtime": "^7.1.2"
+ },
+ "peerDependencies": {
+ "react": ">=15",
+ "react-router": ">=5"
+ }
+ },
+ "node_modules/react-router-dom": {
+ "version": "5.3.4",
+ "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.3.4.tgz",
+ "integrity": "sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==",
+ "dependencies": {
+ "@babel/runtime": "^7.12.13",
+ "history": "^4.9.0",
+ "loose-envify": "^1.3.1",
+ "prop-types": "^15.6.2",
+ "react-router": "5.3.4",
+ "tiny-invariant": "^1.0.2",
+ "tiny-warning": "^1.0.0"
+ },
+ "peerDependencies": {
+ "react": ">=15"
+ }
+ },
+ "node_modules/readable-stream": {
+ "version": "3.6.2",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz",
+ "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==",
+ "dependencies": {
+ "inherits": "^2.0.3",
+ "string_decoder": "^1.1.1",
+ "util-deprecate": "^1.0.1"
+ },
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/readdir-enhanced": {
+ "version": "1.5.2",
+ "resolved": "https://registry.npmjs.org/readdir-enhanced/-/readdir-enhanced-1.5.2.tgz",
+ "integrity": "sha512-oncAoS9LLjy/+DeZfSAdZBI/iFJGcPCOp44RPFI6FIMHuxt5CC5P0cUZ9mET+EZB9ONhcEvAids/lVRkj0sTHw==",
+ "dependencies": {
+ "call-me-maybe": "^1.0.1",
+ "es6-promise": "^4.1.0",
+ "glob-to-regexp": "^0.3.0"
+ }
+ },
+ "node_modules/readdirp": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
+ "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
+ "dependencies": {
+ "picomatch": "^2.2.1"
+ },
+ "engines": {
+ "node": ">=8.10.0"
+ }
+ },
+ "node_modules/rechoir": {
+ "version": "0.6.2",
+ "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz",
+ "integrity": "sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==",
+ "dependencies": {
+ "resolve": "^1.1.6"
+ },
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/recursive-readdir": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz",
+ "integrity": "sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA==",
+ "dependencies": {
+ "minimatch": "^3.0.5"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/regenerate": {
+ "version": "1.4.2",
+ "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz",
+ "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A=="
+ },
+ "node_modules/regenerate-unicode-properties": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.0.tgz",
+ "integrity": "sha512-d1VudCLoIGitcU/hEg2QqvyGZQmdC0Lf8BqdOMXGFSvJP4bNV1+XqbPQeHHLD51Jh4QJJ225dlIFvY4Ly6MXmQ==",
+ "dependencies": {
+ "regenerate": "^1.4.2"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/regenerator-runtime": {
+ "version": "0.13.11",
+ "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz",
+ "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg=="
+ },
+ "node_modules/regenerator-transform": {
+ "version": "0.15.1",
+ "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.1.tgz",
+ "integrity": "sha512-knzmNAcuyxV+gQCufkYcvOqX/qIIfHLv0u5x79kRxuGojfYVky1f15TzZEu2Avte8QGepvUNTnLskf8E6X6Vyg==",
+ "dependencies": {
+ "@babel/runtime": "^7.8.4"
+ }
+ },
+ "node_modules/regexpu-core": {
+ "version": "5.3.2",
+ "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz",
+ "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==",
+ "dependencies": {
+ "@babel/regjsgen": "^0.8.0",
+ "regenerate": "^1.4.2",
+ "regenerate-unicode-properties": "^10.1.0",
+ "regjsparser": "^0.9.1",
+ "unicode-match-property-ecmascript": "^2.0.0",
+ "unicode-match-property-value-ecmascript": "^2.1.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/registry-auth-token": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.2.tgz",
+ "integrity": "sha512-PC5ZysNb42zpFME6D/XlIgtNGdTl8bBOCw90xQLVMpzuuubJKYDWFAEuUNc+Cn8Z8724tg2SDhDRrkVEsqfDMg==",
+ "dependencies": {
+ "rc": "1.2.8"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/registry-url": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-5.1.0.tgz",
+ "integrity": "sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw==",
+ "dependencies": {
+ "rc": "^1.2.8"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/regjsparser": {
+ "version": "0.9.1",
+ "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz",
+ "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==",
+ "dependencies": {
+ "jsesc": "~0.5.0"
+ },
+ "bin": {
+ "regjsparser": "bin/parser"
+ }
+ },
+ "node_modules/regjsparser/node_modules/jsesc": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz",
+ "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA==",
+ "bin": {
+ "jsesc": "bin/jsesc"
+ }
+ },
+ "node_modules/relateurl": {
+ "version": "0.2.7",
+ "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz",
+ "integrity": "sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog==",
+ "engines": {
+ "node": ">= 0.10"
+ }
+ },
+ "node_modules/remark-emoji": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-2.2.0.tgz",
+ "integrity": "sha512-P3cj9s5ggsUvWw5fS2uzCHJMGuXYRb0NnZqYlNecewXt8QBU9n5vW3DUUKOhepS8F9CwdMx9B8a3i7pqFWAI5w==",
+ "license": "MIT",
+ "dependencies": {
+ "emoticon": "^3.2.0",
+ "node-emoji": "^1.10.0",
+ "unist-util-visit": "^2.0.3"
+ }
+ },
+ "node_modules/remark-footnotes": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/remark-footnotes/-/remark-footnotes-2.0.0.tgz",
+ "integrity": "sha512-3Clt8ZMH75Ayjp9q4CorNeyjwIxHFcTkaektplKGl2A1jNGEUey8cKL0ZC5vJwfcD5GFGsNLImLG/NGzWIzoMQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-mdx": {
+ "version": "1.6.22",
+ "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-1.6.22.tgz",
+ "integrity": "sha512-phMHBJgeV76uyFkH4rvzCftLfKCr2RZuF+/gmVcaKrpsihyzmhXjA0BEMDaPTXG5y8qZOKPVo83NAOX01LPnOQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/core": "7.12.9",
+ "@babel/helper-plugin-utils": "7.10.4",
+ "@babel/plugin-proposal-object-rest-spread": "7.12.1",
+ "@babel/plugin-syntax-jsx": "7.12.1",
+ "@mdx-js/util": "1.6.22",
+ "is-alphabetical": "1.0.4",
+ "remark-parse": "8.0.3",
+ "unified": "9.2.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-mdx/node_modules/@babel/core": {
+ "version": "7.12.9",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz",
+ "integrity": "sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/code-frame": "^7.10.4",
+ "@babel/generator": "^7.12.5",
+ "@babel/helper-module-transforms": "^7.12.1",
+ "@babel/helpers": "^7.12.5",
+ "@babel/parser": "^7.12.7",
+ "@babel/template": "^7.12.7",
+ "@babel/traverse": "^7.12.9",
+ "@babel/types": "^7.12.7",
+ "convert-source-map": "^1.7.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.1",
+ "json5": "^2.1.2",
+ "lodash": "^4.17.19",
+ "resolve": "^1.3.2",
+ "semver": "^5.4.1",
+ "source-map": "^0.5.0"
+ },
+ "engines": {
+ "node": ">=6.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/babel"
+ }
+ },
+ "node_modules/remark-mdx/node_modules/@babel/helper-plugin-utils": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz",
+ "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg==",
+ "license": "MIT"
+ },
+ "node_modules/remark-mdx/node_modules/@babel/plugin-syntax-jsx": {
+ "version": "7.12.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz",
+ "integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==",
+ "license": "MIT",
+ "dependencies": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ },
+ "peerDependencies": {
+ "@babel/core": "^7.0.0-0"
+ }
+ },
+ "node_modules/remark-mdx/node_modules/semver": {
+ "version": "5.7.2",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz",
+ "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==",
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver"
+ }
+ },
+ "node_modules/remark-mdx/node_modules/source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==",
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/remark-mdx/node_modules/unified": {
+ "version": "9.2.0",
+ "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz",
+ "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==",
+ "license": "MIT",
+ "dependencies": {
+ "bail": "^1.0.0",
+ "extend": "^3.0.0",
+ "is-buffer": "^2.0.0",
+ "is-plain-obj": "^2.0.0",
+ "trough": "^1.0.0",
+ "vfile": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-parse": {
+ "version": "8.0.3",
+ "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-8.0.3.tgz",
+ "integrity": "sha512-E1K9+QLGgggHxCQtLt++uXltxEprmWzNfg+MxpfHsZlrddKzZ/hZyWHDbK3/Ap8HJQqYJRXP+jHczdL6q6i85Q==",
+ "license": "MIT",
+ "dependencies": {
+ "ccount": "^1.0.0",
+ "collapse-white-space": "^1.0.2",
+ "is-alphabetical": "^1.0.0",
+ "is-decimal": "^1.0.0",
+ "is-whitespace-character": "^1.0.0",
+ "is-word-character": "^1.0.0",
+ "markdown-escapes": "^1.0.0",
+ "parse-entities": "^2.0.0",
+ "repeat-string": "^1.5.4",
+ "state-toggle": "^1.0.0",
+ "trim": "0.0.1",
+ "trim-trailing-lines": "^1.0.0",
+ "unherit": "^1.0.4",
+ "unist-util-remove-position": "^2.0.0",
+ "vfile-location": "^3.0.0",
+ "xtend": "^4.0.1"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/remark-squeeze-paragraphs": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/remark-squeeze-paragraphs/-/remark-squeeze-paragraphs-4.0.0.tgz",
+ "integrity": "sha512-8qRqmL9F4nuLPIgl92XUuxI3pFxize+F1H0e/W3llTk0UsjJaj01+RrirkMw7P21RKe4X6goQhYRSvNWX+70Rw==",
+ "license": "MIT",
+ "dependencies": {
+ "mdast-squeeze-paragraphs": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/renderkid": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-3.0.0.tgz",
+ "integrity": "sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==",
+ "dependencies": {
+ "css-select": "^4.1.3",
+ "dom-converter": "^0.2.0",
+ "htmlparser2": "^6.1.0",
+ "lodash": "^4.17.21",
+ "strip-ansi": "^6.0.1"
+ }
+ },
+ "node_modules/repeat-string": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz",
+ "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.10"
+ }
+ },
+ "node_modules/require-from-string": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
+ "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/require-like": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/require-like/-/require-like-0.1.2.tgz",
+ "integrity": "sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A==",
+ "engines": {
+ "node": "*"
+ }
+ },
+ "node_modules/requires-port": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz",
+ "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ=="
+ },
+ "node_modules/resolve": {
+ "version": "1.22.2",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.2.tgz",
+ "integrity": "sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g==",
+ "dependencies": {
+ "is-core-module": "^2.11.0",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ },
+ "bin": {
+ "resolve": "bin/resolve"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/resolve-from": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
+ "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/resolve-pathname": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz",
+ "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng=="
+ },
+ "node_modules/responselike": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz",
+ "integrity": "sha512-/Fpe5guzJk1gPqdJLJR5u7eG/gNY4nImjbRDaVWVMRhne55TCmj2i9Q+54PBRfatRC8v/rIiv9BN0pMd9OV5EQ==",
+ "dependencies": {
+ "lowercase-keys": "^1.0.0"
+ }
+ },
+ "node_modules/retry": {
+ "version": "0.13.1",
+ "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz",
+ "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==",
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/reusify": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz",
+ "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==",
+ "engines": {
+ "iojs": ">=1.0.0",
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/rimraf": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
+ "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
+ "dependencies": {
+ "glob": "^7.1.3"
+ },
+ "bin": {
+ "rimraf": "bin.js"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/isaacs"
+ }
+ },
+ "node_modules/rtl-detect": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/rtl-detect/-/rtl-detect-1.0.4.tgz",
+ "integrity": "sha512-EBR4I2VDSSYr7PkBmFy04uhycIpDKp+21p/jARYXlCSjQksTBQcJ0HFUPOO79EPPH5JS6VAhiIQbycf0O3JAxQ=="
+ },
+ "node_modules/run-parallel": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
+ "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ],
+ "dependencies": {
+ "queue-microtask": "^1.2.2"
+ }
+ },
+ "node_modules/rxjs": {
+ "version": "7.8.1",
+ "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz",
+ "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==",
+ "dependencies": {
+ "tslib": "^2.1.0"
+ }
+ },
+ "node_modules/safe-buffer": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+ "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/feross"
+ },
+ {
+ "type": "patreon",
+ "url": "https://www.patreon.com/feross"
+ },
+ {
+ "type": "consulting",
+ "url": "https://feross.org/support"
+ }
+ ]
+ },
+ "node_modules/safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
+ "license": "MIT"
+ },
+ "node_modules/scheduler": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.20.2.tgz",
+ "integrity": "sha512-2eWfGgAqqWFGqtdMmcL5zCMK1U8KlXv8SQFGglL3CEtd0aDVDWgeF/YoCmvln55m5zSk3J/20hTaSBeSObsQDQ==",
+ "peer": true,
+ "dependencies": {
+ "loose-envify": "^1.1.0",
+ "object-assign": "^4.1.1"
+ }
+ },
+ "node_modules/schema-utils": {
+ "version": "2.7.1",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.1.tgz",
+ "integrity": "sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg==",
+ "dependencies": {
+ "@types/json-schema": "^7.0.5",
+ "ajv": "^6.12.4",
+ "ajv-keywords": "^3.5.2"
+ },
+ "engines": {
+ "node": ">= 8.9.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ }
+ },
+ "node_modules/section-matter": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz",
+ "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==",
+ "dependencies": {
+ "extend-shallow": "^2.0.1",
+ "kind-of": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/select-hose": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz",
+ "integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg=="
+ },
+ "node_modules/selfsigned": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.1.1.tgz",
+ "integrity": "sha512-GSL3aowiF7wa/WtSFwnUrludWFoNhftq8bUkH9pkzjpN2XSPOAYEgg6e0sS9s0rZwgJzJiQRPU18A6clnoW5wQ==",
+ "dependencies": {
+ "node-forge": "^1"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/semver": {
+ "version": "7.6.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz",
+ "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==",
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/semver-diff": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-3.1.1.tgz",
+ "integrity": "sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==",
+ "dependencies": {
+ "semver": "^6.3.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/semver-diff/node_modules/semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
+ "license": "ISC",
+ "bin": {
+ "semver": "bin/semver.js"
+ }
+ },
+ "node_modules/send": {
+ "version": "0.19.0",
+ "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz",
+ "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==",
+ "license": "MIT",
+ "dependencies": {
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "fresh": "0.5.2",
+ "http-errors": "2.0.0",
+ "mime": "1.6.0",
+ "ms": "2.1.3",
+ "on-finished": "2.4.1",
+ "range-parser": "~1.2.1",
+ "statuses": "2.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/send/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/send/node_modules/debug/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
+ "license": "MIT"
+ },
+ "node_modules/send/node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "license": "MIT"
+ },
+ "node_modules/send/node_modules/range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/serialize-javascript": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.1.tgz",
+ "integrity": "sha512-owoXEFjWRllis8/M1Q+Cw5k8ZH40e3zhp/ovX+Xr/vi1qj6QesbyXXViFbpNvWvPNAD62SutwEXavefrLJWj7w==",
+ "dependencies": {
+ "randombytes": "^2.1.0"
+ }
+ },
+ "node_modules/serve-handler": {
+ "version": "6.1.5",
+ "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.5.tgz",
+ "integrity": "sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg==",
+ "dependencies": {
+ "bytes": "3.0.0",
+ "content-disposition": "0.5.2",
+ "fast-url-parser": "1.1.3",
+ "mime-types": "2.1.18",
+ "minimatch": "3.1.2",
+ "path-is-inside": "1.0.2",
+ "path-to-regexp": "2.2.1",
+ "range-parser": "1.2.0"
+ }
+ },
+ "node_modules/serve-handler/node_modules/path-to-regexp": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz",
+ "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ=="
+ },
+ "node_modules/serve-index": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz",
+ "integrity": "sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==",
+ "dependencies": {
+ "accepts": "~1.3.4",
+ "batch": "0.6.1",
+ "debug": "2.6.9",
+ "escape-html": "~1.0.3",
+ "http-errors": "~1.6.2",
+ "mime-types": "~2.1.17",
+ "parseurl": "~1.3.2"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/serve-index/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/serve-index/node_modules/depd": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
+ "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/serve-index/node_modules/http-errors": {
+ "version": "1.6.3",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
+ "integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==",
+ "dependencies": {
+ "depd": "~1.1.2",
+ "inherits": "2.0.3",
+ "setprototypeof": "1.1.0",
+ "statuses": ">= 1.4.0 < 2"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/serve-index/node_modules/inherits": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+ "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw=="
+ },
+ "node_modules/serve-index/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+ },
+ "node_modules/serve-index/node_modules/setprototypeof": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz",
+ "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ=="
+ },
+ "node_modules/serve-index/node_modules/statuses": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz",
+ "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/serve-static": {
+ "version": "1.16.0",
+ "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.0.tgz",
+ "integrity": "sha512-pDLK8zwl2eKaYrs8mrPZBJua4hMplRWJ1tIFksVC3FtBEBnl8dxgeHtsaMS8DhS9i4fLObaon6ABoc4/hQGdPA==",
+ "license": "MIT",
+ "dependencies": {
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "parseurl": "~1.3.3",
+ "send": "0.18.0"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/serve-static/node_modules/debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "license": "MIT",
+ "dependencies": {
+ "ms": "2.0.0"
+ }
+ },
+ "node_modules/serve-static/node_modules/debug/node_modules/ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==",
+ "license": "MIT"
+ },
+ "node_modules/serve-static/node_modules/ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
+ "license": "MIT"
+ },
+ "node_modules/serve-static/node_modules/range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/serve-static/node_modules/send": {
+ "version": "0.18.0",
+ "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz",
+ "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==",
+ "license": "MIT",
+ "dependencies": {
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "fresh": "0.5.2",
+ "http-errors": "2.0.0",
+ "mime": "1.6.0",
+ "ms": "2.1.3",
+ "on-finished": "2.4.1",
+ "range-parser": "~1.2.1",
+ "statuses": "2.0.1"
+ },
+ "engines": {
+ "node": ">= 0.8.0"
+ }
+ },
+ "node_modules/set-function-length": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz",
+ "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==",
+ "license": "MIT",
+ "dependencies": {
+ "define-data-property": "^1.1.4",
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "get-intrinsic": "^1.2.4",
+ "gopd": "^1.0.1",
+ "has-property-descriptors": "^1.0.2"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ }
+ },
+ "node_modules/setprototypeof": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz",
+ "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==",
+ "license": "ISC"
+ },
+ "node_modules/shallow-clone": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz",
+ "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==",
+ "dependencies": {
+ "kind-of": "^6.0.2"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/shallowequal": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz",
+ "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ=="
+ },
+ "node_modules/shebang-command": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+ "dependencies": {
+ "shebang-regex": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/shebang-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/shell-quote": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.1.tgz",
+ "integrity": "sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==",
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/shelljs": {
+ "version": "0.8.5",
+ "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.5.tgz",
+ "integrity": "sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==",
+ "dependencies": {
+ "glob": "^7.0.0",
+ "interpret": "^1.0.0",
+ "rechoir": "^0.6.2"
+ },
+ "bin": {
+ "shjs": "bin/shjs"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/side-channel": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz",
+ "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==",
+ "license": "MIT",
+ "dependencies": {
+ "call-bind": "^1.0.7",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.4",
+ "object-inspect": "^1.13.1"
+ },
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/signal-exit": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
+ "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="
+ },
+ "node_modules/sirv": {
+ "version": "1.0.19",
+ "resolved": "https://registry.npmjs.org/sirv/-/sirv-1.0.19.tgz",
+ "integrity": "sha512-JuLThK3TnZG1TAKDwNIqNq6QA2afLOCcm+iE8D1Kj3GA40pSPsxQjjJl0J8X3tsR7T+CP1GavpzLwYkgVLWrZQ==",
+ "dependencies": {
+ "@polka/url": "^1.0.0-next.20",
+ "mrmime": "^1.0.0",
+ "totalist": "^1.0.0"
+ },
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/sisteransi": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz",
+ "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg=="
+ },
+ "node_modules/slash": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
+ "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/sockjs": {
+ "version": "0.3.24",
+ "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz",
+ "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==",
+ "dependencies": {
+ "faye-websocket": "^0.11.3",
+ "uuid": "^8.3.2",
+ "websocket-driver": "^0.7.4"
+ }
+ },
+ "node_modules/sort-css-media-queries": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.1.0.tgz",
+ "integrity": "sha512-IeWvo8NkNiY2vVYdPa27MCQiR0MN0M80johAYFVxWWXQ44KU84WNxjslwBHmc/7ZL2ccwkM7/e6S5aiKZXm7jA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 6.3.0"
+ }
+ },
+ "node_modules/source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/source-map-js": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
+ "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==",
+ "license": "BSD-3-Clause",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/source-map-support": {
+ "version": "0.5.21",
+ "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz",
+ "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==",
+ "dependencies": {
+ "buffer-from": "^1.0.0",
+ "source-map": "^0.6.0"
+ }
+ },
+ "node_modules/space-separated-tokens": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz",
+ "integrity": "sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/spdy": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz",
+ "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==",
+ "dependencies": {
+ "debug": "^4.1.0",
+ "handle-thing": "^2.0.0",
+ "http-deceiver": "^1.2.7",
+ "select-hose": "^2.0.0",
+ "spdy-transport": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=6.0.0"
+ }
+ },
+ "node_modules/spdy-transport": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz",
+ "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==",
+ "dependencies": {
+ "debug": "^4.1.0",
+ "detect-node": "^2.0.4",
+ "hpack.js": "^2.1.6",
+ "obuf": "^1.1.2",
+ "readable-stream": "^3.0.6",
+ "wbuf": "^1.7.3"
+ }
+ },
+ "node_modules/sprintf-js": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g=="
+ },
+ "node_modules/stable": {
+ "version": "0.1.8",
+ "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz",
+ "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w==",
+ "deprecated": "Modern JS already guarantees Array#sort() is a stable sort, so this library is deprecated. See the compatibility table on MDN: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/sort#browser_compatibility"
+ },
+ "node_modules/state-toggle": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/state-toggle/-/state-toggle-1.0.3.tgz",
+ "integrity": "sha512-d/5Z4/2iiCnHw6Xzghyhb+GcmF89bxwgXG60wjIiZaxnymbyOmI8Hk4VqHXiVVp6u2ysaskFfXg3ekCj4WNftQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/std-env": {
+ "version": "3.3.3",
+ "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.3.3.tgz",
+ "integrity": "sha512-Rz6yejtVyWnVjC1RFvNmYL10kgjC49EOghxWn0RFqlCHGFpQx+Xe7yW3I4ceK1SGrWIGMjD5Kbue8W/udkbMJg=="
+ },
+ "node_modules/string_decoder": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz",
+ "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==",
+ "dependencies": {
+ "safe-buffer": "~5.2.0"
+ }
+ },
+ "node_modules/string-width": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
+ "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==",
+ "dependencies": {
+ "eastasianwidth": "^0.2.0",
+ "emoji-regex": "^9.2.2",
+ "strip-ansi": "^7.0.1"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/string-width/node_modules/ansi-regex": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz",
+ "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-regex?sponsor=1"
+ }
+ },
+ "node_modules/string-width/node_modules/strip-ansi": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
+ "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
+ "dependencies": {
+ "ansi-regex": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/strip-ansi?sponsor=1"
+ }
+ },
+ "node_modules/stringify-object": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz",
+ "integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==",
+ "license": "BSD-2-Clause",
+ "dependencies": {
+ "get-own-enumerable-property-symbols": "^3.0.0",
+ "is-obj": "^1.0.1",
+ "is-regexp": "^1.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "dependencies": {
+ "ansi-regex": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/strip-bom-string": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz",
+ "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/strip-final-newline": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz",
+ "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/strip-json-comments": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz",
+ "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/style-to-object": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.3.0.tgz",
+ "integrity": "sha512-CzFnRRXhzWIdItT3OmF8SQfWyahHhjq3HwcMNCNLn+N7klOOqPjMeG/4JSu77D7ypZdGvSzvkrbyeTMizz2VrA==",
+ "license": "MIT",
+ "dependencies": {
+ "inline-style-parser": "0.1.1"
+ }
+ },
+ "node_modules/stylehacks": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-5.1.1.tgz",
+ "integrity": "sha512-sBpcd5Hx7G6seo7b1LkpttvTz7ikD0LlH5RmdcBNb6fFR0Fl7LQwHDFr300q4cwUqi+IYrFGmsIHieMBfnN/Bw==",
+ "dependencies": {
+ "browserslist": "^4.21.4",
+ "postcss-selector-parser": "^6.0.4"
+ },
+ "engines": {
+ "node": "^10 || ^12 || >=14.0"
+ },
+ "peerDependencies": {
+ "postcss": "^8.2.15"
+ }
+ },
+ "node_modules/supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/supports-preserve-symlinks-flag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
+ "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==",
+ "engines": {
+ "node": ">= 0.4"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/ljharb"
+ }
+ },
+ "node_modules/svg-parser": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz",
+ "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ=="
+ },
+ "node_modules/svgo": {
+ "version": "2.8.0",
+ "resolved": "https://registry.npmjs.org/svgo/-/svgo-2.8.0.tgz",
+ "integrity": "sha512-+N/Q9kV1+F+UeWYoSiULYo4xYSDQlTgb+ayMobAXPwMnLvop7oxKMo9OzIrX5x3eS4L4f2UHhc9axXwY8DpChg==",
+ "dependencies": {
+ "@trysound/sax": "0.2.0",
+ "commander": "^7.2.0",
+ "css-select": "^4.1.3",
+ "css-tree": "^1.1.3",
+ "csso": "^4.2.0",
+ "picocolors": "^1.0.0",
+ "stable": "^0.1.8"
+ },
+ "bin": {
+ "svgo": "bin/svgo"
+ },
+ "engines": {
+ "node": ">=10.13.0"
+ }
+ },
+ "node_modules/svgo/node_modules/commander": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz",
+ "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==",
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/sync-directory": {
+ "version": "5.1.9",
+ "resolved": "https://registry.npmjs.org/sync-directory/-/sync-directory-5.1.9.tgz",
+ "integrity": "sha512-0942RssO+NrIjDcaNiXUH/NQoAamURT1zpzN/uB8fgyetDM8NtPPOQNax3+BuNUfw/2JcaEXrAz567DokNq0lw==",
+ "dependencies": {
+ "chokidar": "^3.3.1",
+ "commander": "^6.2.0",
+ "fs-extra": "^7.0.1",
+ "is-absolute": "^1.0.0",
+ "readdir-enhanced": "^1.5.2"
+ },
+ "bin": {
+ "syncdir": "cmd.js"
+ }
+ },
+ "node_modules/sync-directory/node_modules/commander": {
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-6.2.1.tgz",
+ "integrity": "sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA==",
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/tapable": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz",
+ "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/terser": {
+ "version": "5.32.0",
+ "resolved": "https://registry.npmjs.org/terser/-/terser-5.32.0.tgz",
+ "integrity": "sha512-v3Gtw3IzpBJ0ugkxEX8U0W6+TnPKRRCWGh1jC/iM/e3Ki5+qvO1L1EAZ56bZasc64aXHwRHNIQEzm6//i5cemQ==",
+ "license": "BSD-2-Clause",
+ "dependencies": {
+ "@jridgewell/source-map": "^0.3.3",
+ "acorn": "^8.8.2",
+ "commander": "^2.20.0",
+ "source-map-support": "~0.5.20"
+ },
+ "bin": {
+ "terser": "bin/terser"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/terser-webpack-plugin": {
+ "version": "5.3.10",
+ "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz",
+ "integrity": "sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==",
+ "license": "MIT",
+ "dependencies": {
+ "@jridgewell/trace-mapping": "^0.3.20",
+ "jest-worker": "^27.4.5",
+ "schema-utils": "^3.1.1",
+ "serialize-javascript": "^6.0.1",
+ "terser": "^5.26.0"
+ },
+ "engines": {
+ "node": ">= 10.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ },
+ "peerDependencies": {
+ "webpack": "^5.1.0"
+ },
+ "peerDependenciesMeta": {
+ "@swc/core": {
+ "optional": true
+ },
+ "esbuild": {
+ "optional": true
+ },
+ "uglify-js": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/terser-webpack-plugin/node_modules/jest-worker": {
+ "version": "27.5.1",
+ "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz",
+ "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==",
+ "dependencies": {
+ "@types/node": "*",
+ "merge-stream": "^2.0.0",
+ "supports-color": "^8.0.0"
+ },
+ "engines": {
+ "node": ">= 10.13.0"
+ }
+ },
+ "node_modules/terser-webpack-plugin/node_modules/schema-utils": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.2.0.tgz",
+ "integrity": "sha512-0zTyLGyDJYd/MBxG1AhJkKa6fpEBds4OQO2ut0w7OYG+ZGhGea09lijvzsqegYSik88zc7cUtIlnnO+/BvD6gQ==",
+ "dependencies": {
+ "@types/json-schema": "^7.0.8",
+ "ajv": "^6.12.5",
+ "ajv-keywords": "^3.5.2"
+ },
+ "engines": {
+ "node": ">= 10.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ }
+ },
+ "node_modules/terser-webpack-plugin/node_modules/supports-color": {
+ "version": "8.1.1",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz",
+ "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==",
+ "dependencies": {
+ "has-flag": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/supports-color?sponsor=1"
+ }
+ },
+ "node_modules/terser/node_modules/commander": {
+ "version": "2.20.3",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz",
+ "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ=="
+ },
+ "node_modules/text-table": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
+ "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw=="
+ },
+ "node_modules/thunky": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz",
+ "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA=="
+ },
+ "node_modules/tiny-invariant": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.1.tgz",
+ "integrity": "sha512-AD5ih2NlSssTCwsMznbvwMZpJ1cbhkGd2uueNxzv2jDlEeZdU04JQfRnggJQ8DrcVBGjAsCKwFBbDlVNtEMlzw=="
+ },
+ "node_modules/tiny-warning": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz",
+ "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA=="
+ },
+ "node_modules/to-fast-properties": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
+ "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/to-readable-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz",
+ "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "license": "MIT",
+ "dependencies": {
+ "is-number": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=8.0"
+ }
+ },
+ "node_modules/toidentifier": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",
+ "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.6"
+ }
+ },
+ "node_modules/toposort": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/toposort/-/toposort-2.0.2.tgz",
+ "integrity": "sha512-0a5EOkAUp8D4moMi2W8ZF8jcga7BgZd91O/yabJCFY8az+XSzeGyTKs0Aoo897iV1Nj6guFq8orWDS96z91oGg=="
+ },
+ "node_modules/totalist": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/totalist/-/totalist-1.1.0.tgz",
+ "integrity": "sha512-gduQwd1rOdDMGxFG1gEvhV88Oirdo2p+KjoYFU7k2g+i7n6AFFbDQ5kMPUsW0pNbfQsB/cwXvT1i4Bue0s9g5g==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/trim": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/trim/-/trim-0.0.1.tgz",
+ "integrity": "sha512-YzQV+TZg4AxpKxaTHK3c3D+kRDCGVEE7LemdlQZoQXn0iennk10RsIoY6ikzAqJTc9Xjl9C1/waHom/J86ziAQ==",
+ "deprecated": "Use String.prototype.trim() instead"
+ },
+ "node_modules/trim-trailing-lines": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/trim-trailing-lines/-/trim-trailing-lines-1.1.4.tgz",
+ "integrity": "sha512-rjUWSqnfTNrjbB9NQWfPMH/xRK1deHeGsHoVfpxJ++XeYXE0d6B1En37AHfw3jtfTU7dzMzZL2jjpe8Qb5gLIQ==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/trough": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/trough/-/trough-1.0.5.tgz",
+ "integrity": "sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/tslib": {
+ "version": "2.5.3",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.5.3.tgz",
+ "integrity": "sha512-mSxlJJwl3BMEQCUNnxXBU9jP4JBktcEGhURcPR6VQVlnP0FdDEsIaz0C35dXNGLyRfrATNofF0F5p2KPxQgB+w=="
+ },
+ "node_modules/type-fest": {
+ "version": "2.19.0",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz",
+ "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==",
+ "engines": {
+ "node": ">=12.20"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/type-is": {
+ "version": "1.6.18",
+ "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
+ "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
+ "license": "MIT",
+ "dependencies": {
+ "media-typer": "0.3.0",
+ "mime-types": "~2.1.24"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/type-is/node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "license": "MIT",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/type-is/node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "license": "MIT",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/typedarray-to-buffer": {
+ "version": "3.1.5",
+ "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz",
+ "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==",
+ "dependencies": {
+ "is-typedarray": "^1.0.0"
+ }
+ },
+ "node_modules/typescript": {
+ "version": "5.1.3",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.1.3.tgz",
+ "integrity": "sha512-XH627E9vkeqhlZFQuL+UsyAXEnibT0kWR2FWONlr4sTjvxyJYnyefgrkyECLzM5NenmKzRAy2rR/OlYLA1HkZw==",
+ "peer": true,
+ "bin": {
+ "tsc": "bin/tsc",
+ "tsserver": "bin/tsserver"
+ },
+ "engines": {
+ "node": ">=14.17"
+ }
+ },
+ "node_modules/uglify-js": {
+ "version": "3.17.4",
+ "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.17.4.tgz",
+ "integrity": "sha512-T9q82TJI9e/C1TAxYvfb16xO120tMVFZrGA3f9/P4424DNu6ypK103y0GPFVa17yotwSyZW5iYXgjYHkGrJW/g==",
+ "optional": true,
+ "bin": {
+ "uglifyjs": "bin/uglifyjs"
+ },
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/unc-path-regex": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/unc-path-regex/-/unc-path-regex-0.1.2.tgz",
+ "integrity": "sha512-eXL4nmJT7oCpkZsHZUOJo8hcX3GbsiDOa0Qu9F646fi8dT3XuSVopVqAcEiVzSKKH7UoDti23wNX3qGFxcW5Qg==",
+ "engines": {
+ "node": ">=0.10.0"
+ }
+ },
+ "node_modules/unherit": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/unherit/-/unherit-1.1.3.tgz",
+ "integrity": "sha512-Ft16BJcnapDKp0+J/rqFC3Rrk6Y/Ng4nzsC028k2jdDII/rdZ7Wd3pPT/6+vIIxRagwRc9K0IUX0Ra4fKvw+WQ==",
+ "license": "MIT",
+ "dependencies": {
+ "inherits": "^2.0.0",
+ "xtend": "^4.0.0"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/unicode-canonical-property-names-ecmascript": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz",
+ "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/unicode-match-property-ecmascript": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz",
+ "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==",
+ "dependencies": {
+ "unicode-canonical-property-names-ecmascript": "^2.0.0",
+ "unicode-property-aliases-ecmascript": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/unicode-match-property-value-ecmascript": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz",
+ "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/unicode-property-aliases-ecmascript": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz",
+ "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==",
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/unified": {
+ "version": "9.2.2",
+ "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz",
+ "integrity": "sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==",
+ "license": "MIT",
+ "dependencies": {
+ "bail": "^1.0.0",
+ "extend": "^3.0.0",
+ "is-buffer": "^2.0.0",
+ "is-plain-obj": "^2.0.0",
+ "trough": "^1.0.0",
+ "vfile": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unique-string": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz",
+ "integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==",
+ "dependencies": {
+ "crypto-random-string": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/unist-builder": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/unist-builder/-/unist-builder-2.0.3.tgz",
+ "integrity": "sha512-f98yt5pnlMWlzP539tPc4grGMsFaQQlP/vM396b00jngsiINumNmsY8rkXjfoi1c6QaM8nQ3vaGDuoKWbe/1Uw==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-generated": {
+ "version": "1.1.6",
+ "resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-1.1.6.tgz",
+ "integrity": "sha512-cln2Mm1/CZzN5ttGK7vkoGw+RZ8VcUH6BtGbq98DDtRGquAAOXig1mrBQYelOwMXYS8rK+vZDyyojSjp7JX+Lg==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-is": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.1.0.tgz",
+ "integrity": "sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-position": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-3.1.0.tgz",
+ "integrity": "sha512-w+PkwCbYSFw8vpgWD0v7zRCl1FpY3fjDSQ3/N/wNd9Ffa4gPi8+4keqt99N3XW6F99t/mUzp2xAhNmfKWp95QA==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-remove": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/unist-util-remove/-/unist-util-remove-2.1.0.tgz",
+ "integrity": "sha512-J8NYPyBm4baYLdCbjmf1bhPu45Cr1MWTm77qd9istEkzWpnN6O9tMsEbB2JhNnBCqGENRqEWomQ+He6au0B27Q==",
+ "license": "MIT",
+ "dependencies": {
+ "unist-util-is": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-remove-position": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-2.0.1.tgz",
+ "integrity": "sha512-fDZsLYIe2uT+oGFnuZmy73K6ZxOPG/Qcm+w7jbEjaFcJgbQ6cqjs/eSPzXhsmGpAsWPkqZM9pYjww5QTn3LHMA==",
+ "license": "MIT",
+ "dependencies": {
+ "unist-util-visit": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-stringify-position": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz",
+ "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.2"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz",
+ "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^4.0.0",
+ "unist-util-visit-parents": "^3.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/unist-util-visit-parents": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz",
+ "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^4.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/universalify": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
+ "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==",
+ "engines": {
+ "node": ">= 4.0.0"
+ }
+ },
+ "node_modules/unpipe": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
+ "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/update-browserslist-db": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz",
+ "integrity": "sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ==",
+ "funding": [
+ {
+ "type": "opencollective",
+ "url": "https://opencollective.com/browserslist"
+ },
+ {
+ "type": "tidelift",
+ "url": "https://tidelift.com/funding/github/npm/browserslist"
+ },
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/ai"
+ }
+ ],
+ "license": "MIT",
+ "dependencies": {
+ "escalade": "^3.1.2",
+ "picocolors": "^1.0.1"
+ },
+ "bin": {
+ "update-browserslist-db": "cli.js"
+ },
+ "peerDependencies": {
+ "browserslist": ">= 4.21.0"
+ }
+ },
+ "node_modules/update-notifier": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-5.1.0.tgz",
+ "integrity": "sha512-ItnICHbeMh9GqUy31hFPrD1kcuZ3rpxDZbf4KUDavXwS0bW5m7SLbDQpGX3UYr072cbrF5hFUs3r5tUsPwjfHw==",
+ "dependencies": {
+ "boxen": "^5.0.0",
+ "chalk": "^4.1.0",
+ "configstore": "^5.0.1",
+ "has-yarn": "^2.1.0",
+ "import-lazy": "^2.1.0",
+ "is-ci": "^2.0.0",
+ "is-installed-globally": "^0.4.0",
+ "is-npm": "^5.0.0",
+ "is-yarn-global": "^0.3.0",
+ "latest-version": "^5.1.0",
+ "pupa": "^2.1.1",
+ "semver": "^7.3.4",
+ "semver-diff": "^3.1.1",
+ "xdg-basedir": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/yeoman/update-notifier?sponsor=1"
+ }
+ },
+ "node_modules/update-notifier/node_modules/boxen": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/boxen/-/boxen-5.1.2.tgz",
+ "integrity": "sha512-9gYgQKXx+1nP8mP7CzFyaUARhg7D3n1dF/FnErWmu9l6JvGpNUN278h0aSb+QjoiKSWG+iZ3uHrcqk0qrY9RQQ==",
+ "dependencies": {
+ "ansi-align": "^3.0.0",
+ "camelcase": "^6.2.0",
+ "chalk": "^4.1.0",
+ "cli-boxes": "^2.2.1",
+ "string-width": "^4.2.2",
+ "type-fest": "^0.20.2",
+ "widest-line": "^3.1.0",
+ "wrap-ansi": "^7.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/update-notifier/node_modules/cli-boxes": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz",
+ "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw==",
+ "engines": {
+ "node": ">=6"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/update-notifier/node_modules/emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
+ },
+ "node_modules/update-notifier/node_modules/string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "dependencies": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/update-notifier/node_modules/type-fest": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz",
+ "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/update-notifier/node_modules/widest-line": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz",
+ "integrity": "sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==",
+ "dependencies": {
+ "string-width": "^4.0.0"
+ },
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/update-notifier/node_modules/wrap-ansi": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
+ "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+ "dependencies": {
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
+ },
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+ }
+ },
+ "node_modules/uri-js": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
+ "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
+ "dependencies": {
+ "punycode": "^2.1.0"
+ }
+ },
+ "node_modules/uri-js/node_modules/punycode": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz",
+ "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
+ "node_modules/url-loader": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-4.1.1.tgz",
+ "integrity": "sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA==",
+ "dependencies": {
+ "loader-utils": "^2.0.0",
+ "mime-types": "^2.1.27",
+ "schema-utils": "^3.0.0"
+ },
+ "engines": {
+ "node": ">= 10.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ },
+ "peerDependencies": {
+ "file-loader": "*",
+ "webpack": "^4.0.0 || ^5.0.0"
+ },
+ "peerDependenciesMeta": {
+ "file-loader": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/url-loader/node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/url-loader/node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/url-loader/node_modules/schema-utils": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.2.0.tgz",
+ "integrity": "sha512-0zTyLGyDJYd/MBxG1AhJkKa6fpEBds4OQO2ut0w7OYG+ZGhGea09lijvzsqegYSik88zc7cUtIlnnO+/BvD6gQ==",
+ "dependencies": {
+ "@types/json-schema": "^7.0.8",
+ "ajv": "^6.12.5",
+ "ajv-keywords": "^3.5.2"
+ },
+ "engines": {
+ "node": ">= 10.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ }
+ },
+ "node_modules/url-parse-lax": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz",
+ "integrity": "sha512-NjFKA0DidqPa5ciFcSrXnAltTtzz84ogy+NebPvfEgAck0+TNg4UJ4IN+fB7zRZfbgUf0syOo9MDxFkDSMuFaQ==",
+ "dependencies": {
+ "prepend-http": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=4"
+ }
+ },
+ "node_modules/util": {
+ "version": "0.10.4",
+ "resolved": "https://registry.npmjs.org/util/-/util-0.10.4.tgz",
+ "integrity": "sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==",
+ "dependencies": {
+ "inherits": "2.0.3"
+ }
+ },
+ "node_modules/util-deprecate": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+ "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="
+ },
+ "node_modules/util/node_modules/inherits": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+ "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw=="
+ },
+ "node_modules/utila": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz",
+ "integrity": "sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA=="
+ },
+ "node_modules/utility-types": {
+ "version": "3.10.0",
+ "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.10.0.tgz",
+ "integrity": "sha512-O11mqxmi7wMKCo6HKFt5AhO4BwY3VV68YU07tgxfz8zJTIxr4BpsezN49Ffwy9j3ZpwwJp4fkRwjRzq3uWE6Rg==",
+ "engines": {
+ "node": ">= 4"
+ }
+ },
+ "node_modules/utils-merge": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
+ "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==",
+ "engines": {
+ "node": ">= 0.4.0"
+ }
+ },
+ "node_modules/uuid": {
+ "version": "8.3.2",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz",
+ "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==",
+ "bin": {
+ "uuid": "dist/bin/uuid"
+ }
+ },
+ "node_modules/value-equal": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz",
+ "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw=="
+ },
+ "node_modules/vary": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
+ "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==",
+ "engines": {
+ "node": ">= 0.8"
+ }
+ },
+ "node_modules/vfile": {
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-4.2.1.tgz",
+ "integrity": "sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "is-buffer": "^2.0.0",
+ "unist-util-stringify-position": "^2.0.0",
+ "vfile-message": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vfile-location": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-3.2.0.tgz",
+ "integrity": "sha512-aLEIZKv/oxuCDZ8lkJGhuhztf/BW4M+iHdCwglA/eWc+vtuRFJj8EtgceYFX4LRjOhCAAiNHsKGssC6onJ+jbA==",
+ "license": "MIT",
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/vfile-message": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-2.0.4.tgz",
+ "integrity": "sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/unist": "^2.0.0",
+ "unist-util-stringify-position": "^2.0.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/unified"
+ }
+ },
+ "node_modules/wait-on": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/wait-on/-/wait-on-6.0.1.tgz",
+ "integrity": "sha512-zht+KASY3usTY5u2LgaNqn/Cd8MukxLGjdcZxT2ns5QzDmTFc4XoWBgC+C/na+sMRZTuVygQoMYwdcVjHnYIVw==",
+ "dependencies": {
+ "axios": "^0.25.0",
+ "joi": "^17.6.0",
+ "lodash": "^4.17.21",
+ "minimist": "^1.2.5",
+ "rxjs": "^7.5.4"
+ },
+ "bin": {
+ "wait-on": "bin/wait-on"
+ },
+ "engines": {
+ "node": ">=10.0.0"
+ }
+ },
+ "node_modules/watchpack": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.2.tgz",
+ "integrity": "sha512-TnbFSbcOCcDgjZ4piURLCbJ3nJhznVh9kw6F6iokjiFPl8ONxe9A6nMDVXDiNbrSfLILs6vB07F7wLBrwPYzJw==",
+ "license": "MIT",
+ "dependencies": {
+ "glob-to-regexp": "^0.4.1",
+ "graceful-fs": "^4.1.2"
+ },
+ "engines": {
+ "node": ">=10.13.0"
+ }
+ },
+ "node_modules/watchpack/node_modules/glob-to-regexp": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz",
+ "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==",
+ "license": "BSD-2-Clause"
+ },
+ "node_modules/wbuf": {
+ "version": "1.7.3",
+ "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz",
+ "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==",
+ "dependencies": {
+ "minimalistic-assert": "^1.0.0"
+ }
+ },
+ "node_modules/web-namespaces": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-1.1.4.tgz",
+ "integrity": "sha512-wYxSGajtmoP4WxfejAPIr4l0fVh+jeMXZb08wNc0tMg6xsfZXj3cECqIK0G7ZAqUq0PP8WlMDtaOGVBTAWztNw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ },
+ "node_modules/webpack": {
+ "version": "5.94.0",
+ "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.94.0.tgz",
+ "integrity": "sha512-KcsGn50VT+06JH/iunZJedYGUJS5FGjow8wb9c0v5n1Om8O1g4L6LjtfxwlXIATopoQu+vOXXa7gYisWxCoPyg==",
+ "license": "MIT",
+ "dependencies": {
+ "@types/estree": "^1.0.5",
+ "@webassemblyjs/ast": "^1.12.1",
+ "@webassemblyjs/wasm-edit": "^1.12.1",
+ "@webassemblyjs/wasm-parser": "^1.12.1",
+ "acorn": "^8.7.1",
+ "acorn-import-attributes": "^1.9.5",
+ "browserslist": "^4.21.10",
+ "chrome-trace-event": "^1.0.2",
+ "enhanced-resolve": "^5.17.1",
+ "es-module-lexer": "^1.2.1",
+ "eslint-scope": "5.1.1",
+ "events": "^3.2.0",
+ "glob-to-regexp": "^0.4.1",
+ "graceful-fs": "^4.2.11",
+ "json-parse-even-better-errors": "^2.3.1",
+ "loader-runner": "^4.2.0",
+ "mime-types": "^2.1.27",
+ "neo-async": "^2.6.2",
+ "schema-utils": "^3.2.0",
+ "tapable": "^2.1.1",
+ "terser-webpack-plugin": "^5.3.10",
+ "watchpack": "^2.4.1",
+ "webpack-sources": "^3.2.3"
+ },
+ "bin": {
+ "webpack": "bin/webpack.js"
+ },
+ "engines": {
+ "node": ">=10.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ },
+ "peerDependenciesMeta": {
+ "webpack-cli": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/webpack-bundle-analyzer": {
+ "version": "4.9.0",
+ "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.9.0.tgz",
+ "integrity": "sha512-+bXGmO1LyiNx0i9enBu3H8mv42sj/BJWhZNFwjz92tVnBa9J3JMGo2an2IXlEleoDOPn/Hofl5hr/xCpObUDtw==",
+ "dependencies": {
+ "@discoveryjs/json-ext": "0.5.7",
+ "acorn": "^8.0.4",
+ "acorn-walk": "^8.0.0",
+ "chalk": "^4.1.0",
+ "commander": "^7.2.0",
+ "gzip-size": "^6.0.0",
+ "lodash": "^4.17.20",
+ "opener": "^1.5.2",
+ "sirv": "^1.0.7",
+ "ws": "^7.3.1"
+ },
+ "bin": {
+ "webpack-bundle-analyzer": "lib/bin/analyzer.js"
+ },
+ "engines": {
+ "node": ">= 10.13.0"
+ }
+ },
+ "node_modules/webpack-bundle-analyzer/node_modules/commander": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz",
+ "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==",
+ "engines": {
+ "node": ">= 10"
+ }
+ },
+ "node_modules/webpack-dev-middleware": {
+ "version": "5.3.4",
+ "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz",
+ "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==",
+ "license": "MIT",
+ "dependencies": {
+ "colorette": "^2.0.10",
+ "memfs": "^3.4.3",
+ "mime-types": "^2.1.31",
+ "range-parser": "^1.2.1",
+ "schema-utils": "^4.0.0"
+ },
+ "engines": {
+ "node": ">= 12.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ },
+ "peerDependencies": {
+ "webpack": "^4.0.0 || ^5.0.0"
+ }
+ },
+ "node_modules/webpack-dev-middleware/node_modules/ajv": {
+ "version": "8.12.0",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
+ "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.1",
+ "json-schema-traverse": "^1.0.0",
+ "require-from-string": "^2.0.2",
+ "uri-js": "^4.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
+ "node_modules/webpack-dev-middleware/node_modules/ajv-keywords": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz",
+ "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.3"
+ },
+ "peerDependencies": {
+ "ajv": "^8.8.2"
+ }
+ },
+ "node_modules/webpack-dev-middleware/node_modules/json-schema-traverse": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
+ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
+ },
+ "node_modules/webpack-dev-middleware/node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/webpack-dev-middleware/node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/webpack-dev-middleware/node_modules/range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/webpack-dev-middleware/node_modules/schema-utils": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz",
+ "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==",
+ "dependencies": {
+ "@types/json-schema": "^7.0.9",
+ "ajv": "^8.9.0",
+ "ajv-formats": "^2.1.1",
+ "ajv-keywords": "^5.1.0"
+ },
+ "engines": {
+ "node": ">= 12.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ }
+ },
+ "node_modules/webpack-dev-server": {
+ "version": "4.15.0",
+ "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.0.tgz",
+ "integrity": "sha512-HmNB5QeSl1KpulTBQ8UT4FPrByYyaLxpJoQ0+s7EvUrMc16m0ZS1sgb1XGqzmgCPk0c9y+aaXxn11tbLzuM7NQ==",
+ "dependencies": {
+ "@types/bonjour": "^3.5.9",
+ "@types/connect-history-api-fallback": "^1.3.5",
+ "@types/express": "^4.17.13",
+ "@types/serve-index": "^1.9.1",
+ "@types/serve-static": "^1.13.10",
+ "@types/sockjs": "^0.3.33",
+ "@types/ws": "^8.5.1",
+ "ansi-html-community": "^0.0.8",
+ "bonjour-service": "^1.0.11",
+ "chokidar": "^3.5.3",
+ "colorette": "^2.0.10",
+ "compression": "^1.7.4",
+ "connect-history-api-fallback": "^2.0.0",
+ "default-gateway": "^6.0.3",
+ "express": "^4.17.3",
+ "graceful-fs": "^4.2.6",
+ "html-entities": "^2.3.2",
+ "http-proxy-middleware": "^2.0.3",
+ "ipaddr.js": "^2.0.1",
+ "launch-editor": "^2.6.0",
+ "open": "^8.0.9",
+ "p-retry": "^4.5.0",
+ "rimraf": "^3.0.2",
+ "schema-utils": "^4.0.0",
+ "selfsigned": "^2.1.1",
+ "serve-index": "^1.9.1",
+ "sockjs": "^0.3.24",
+ "spdy": "^4.0.2",
+ "webpack-dev-middleware": "^5.3.1",
+ "ws": "^8.13.0"
+ },
+ "bin": {
+ "webpack-dev-server": "bin/webpack-dev-server.js"
+ },
+ "engines": {
+ "node": ">= 12.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ },
+ "peerDependencies": {
+ "webpack": "^4.37.0 || ^5.0.0"
+ },
+ "peerDependenciesMeta": {
+ "webpack": {
+ "optional": true
+ },
+ "webpack-cli": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/webpack-dev-server/node_modules/ajv": {
+ "version": "8.12.0",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
+ "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.1",
+ "json-schema-traverse": "^1.0.0",
+ "require-from-string": "^2.0.2",
+ "uri-js": "^4.2.2"
+ },
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/epoberezkin"
+ }
+ },
+ "node_modules/webpack-dev-server/node_modules/ajv-keywords": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz",
+ "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==",
+ "dependencies": {
+ "fast-deep-equal": "^3.1.3"
+ },
+ "peerDependencies": {
+ "ajv": "^8.8.2"
+ }
+ },
+ "node_modules/webpack-dev-server/node_modules/json-schema-traverse": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
+ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
+ },
+ "node_modules/webpack-dev-server/node_modules/schema-utils": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz",
+ "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==",
+ "dependencies": {
+ "@types/json-schema": "^7.0.9",
+ "ajv": "^8.9.0",
+ "ajv-formats": "^2.1.1",
+ "ajv-keywords": "^5.1.0"
+ },
+ "engines": {
+ "node": ">= 12.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ }
+ },
+ "node_modules/webpack-dev-server/node_modules/ws": {
+ "version": "8.18.0",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz",
+ "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=10.0.0"
+ },
+ "peerDependencies": {
+ "bufferutil": "^4.0.1",
+ "utf-8-validate": ">=5.0.2"
+ },
+ "peerDependenciesMeta": {
+ "bufferutil": {
+ "optional": true
+ },
+ "utf-8-validate": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/webpack-merge": {
+ "version": "5.9.0",
+ "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.9.0.tgz",
+ "integrity": "sha512-6NbRQw4+Sy50vYNTw7EyOn41OZItPiXB8GNv3INSoe3PSFaHJEz3SHTrYVaRm2LilNGnFUzh0FAwqPEmU/CwDg==",
+ "dependencies": {
+ "clone-deep": "^4.0.1",
+ "wildcard": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=10.0.0"
+ }
+ },
+ "node_modules/webpack-sources": {
+ "version": "3.2.3",
+ "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz",
+ "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==",
+ "engines": {
+ "node": ">=10.13.0"
+ }
+ },
+ "node_modules/webpack/node_modules/glob-to-regexp": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz",
+ "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw=="
+ },
+ "node_modules/webpack/node_modules/mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==",
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/webpack/node_modules/mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "dependencies": {
+ "mime-db": "1.52.0"
+ },
+ "engines": {
+ "node": ">= 0.6"
+ }
+ },
+ "node_modules/webpack/node_modules/schema-utils": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.2.0.tgz",
+ "integrity": "sha512-0zTyLGyDJYd/MBxG1AhJkKa6fpEBds4OQO2ut0w7OYG+ZGhGea09lijvzsqegYSik88zc7cUtIlnnO+/BvD6gQ==",
+ "dependencies": {
+ "@types/json-schema": "^7.0.8",
+ "ajv": "^6.12.5",
+ "ajv-keywords": "^3.5.2"
+ },
+ "engines": {
+ "node": ">= 10.13.0"
+ },
+ "funding": {
+ "type": "opencollective",
+ "url": "https://opencollective.com/webpack"
+ }
+ },
+ "node_modules/webpackbar": {
+ "version": "5.0.2",
+ "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-5.0.2.tgz",
+ "integrity": "sha512-BmFJo7veBDgQzfWXl/wwYXr/VFus0614qZ8i9znqcl9fnEdiVkdbi0TedLQ6xAK92HZHDJ0QmyQ0fmuZPAgCYQ==",
+ "dependencies": {
+ "chalk": "^4.1.0",
+ "consola": "^2.15.3",
+ "pretty-time": "^1.1.0",
+ "std-env": "^3.0.1"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "peerDependencies": {
+ "webpack": "3 || 4 || 5"
+ }
+ },
+ "node_modules/websocket-driver": {
+ "version": "0.7.4",
+ "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz",
+ "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==",
+ "dependencies": {
+ "http-parser-js": ">=0.5.1",
+ "safe-buffer": ">=5.1.0",
+ "websocket-extensions": ">=0.1.1"
+ },
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/websocket-extensions": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz",
+ "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg==",
+ "engines": {
+ "node": ">=0.8.0"
+ }
+ },
+ "node_modules/which": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+ "dependencies": {
+ "isexe": "^2.0.0"
+ },
+ "bin": {
+ "node-which": "bin/node-which"
+ },
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/widest-line": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz",
+ "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==",
+ "dependencies": {
+ "string-width": "^5.0.1"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/wildcard": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.1.tgz",
+ "integrity": "sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ=="
+ },
+ "node_modules/wordwrap": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz",
+ "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q=="
+ },
+ "node_modules/wrap-ansi": {
+ "version": "8.1.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz",
+ "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==",
+ "dependencies": {
+ "ansi-styles": "^6.1.0",
+ "string-width": "^5.0.1",
+ "strip-ansi": "^7.0.1"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/wrap-ansi?sponsor=1"
+ }
+ },
+ "node_modules/wrap-ansi/node_modules/ansi-regex": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz",
+ "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-regex?sponsor=1"
+ }
+ },
+ "node_modules/wrap-ansi/node_modules/ansi-styles": {
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
+ "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/ansi-styles?sponsor=1"
+ }
+ },
+ "node_modules/wrap-ansi/node_modules/strip-ansi": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
+ "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
+ "dependencies": {
+ "ansi-regex": "^6.0.1"
+ },
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/chalk/strip-ansi?sponsor=1"
+ }
+ },
+ "node_modules/wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="
+ },
+ "node_modules/write-file-atomic": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz",
+ "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==",
+ "dependencies": {
+ "imurmurhash": "^0.1.4",
+ "is-typedarray": "^1.0.0",
+ "signal-exit": "^3.0.2",
+ "typedarray-to-buffer": "^3.1.5"
+ }
+ },
+ "node_modules/ws": {
+ "version": "7.5.10",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz",
+ "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=8.3.0"
+ },
+ "peerDependencies": {
+ "bufferutil": "^4.0.1",
+ "utf-8-validate": "^5.0.2"
+ },
+ "peerDependenciesMeta": {
+ "bufferutil": {
+ "optional": true
+ },
+ "utf-8-validate": {
+ "optional": true
+ }
+ }
+ },
+ "node_modules/xdg-basedir": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz",
+ "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q==",
+ "engines": {
+ "node": ">=8"
+ }
+ },
+ "node_modules/xtend": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
+ "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==",
+ "license": "MIT",
+ "engines": {
+ "node": ">=0.4"
+ }
+ },
+ "node_modules/yallist": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
+ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="
+ },
+ "node_modules/yaml": {
+ "version": "1.10.2",
+ "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz",
+ "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==",
+ "engines": {
+ "node": ">= 6"
+ }
+ },
+ "node_modules/yocto-queue": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",
+ "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==",
+ "engines": {
+ "node": ">=10"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
+ "node_modules/yup": {
+ "version": "0.32.11",
+ "resolved": "https://registry.npmjs.org/yup/-/yup-0.32.11.tgz",
+ "integrity": "sha512-Z2Fe1bn+eLstG8DRR6FTavGD+MeAwyfmouhHsIUgaADz8jvFKbO/fXc2trJKZg+5EBjh4gGm3iU/t3onKlXHIg==",
+ "dependencies": {
+ "@babel/runtime": "^7.15.4",
+ "@types/lodash": "^4.14.175",
+ "lodash": "^4.17.21",
+ "lodash-es": "^4.17.21",
+ "nanoclone": "^0.2.1",
+ "property-expr": "^2.0.4",
+ "toposort": "^2.0.2"
+ },
+ "engines": {
+ "node": ">=10"
+ }
+ },
+ "node_modules/zwitch": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-1.0.5.tgz",
+ "integrity": "sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw==",
+ "license": "MIT",
+ "funding": {
+ "type": "github",
+ "url": "https://github.com/sponsors/wooorm"
+ }
+ }
+ },
+ "dependencies": {
+ "@ampproject/remapping": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.2.1.tgz",
+ "integrity": "sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==",
+ "requires": {
+ "@jridgewell/gen-mapping": "^0.3.0",
+ "@jridgewell/trace-mapping": "^0.3.9"
+ }
+ },
+ "@babel/code-frame": {
+ "version": "7.24.7",
+ "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.24.7.tgz",
+ "integrity": "sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==",
+ "requires": {
+ "@babel/highlight": "^7.24.7",
+ "picocolors": "^1.0.0"
+ }
+ },
+ "@babel/compat-data": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.22.5.tgz",
+ "integrity": "sha512-4Jc/YuIaYqKnDDz892kPIledykKg12Aw1PYX5i/TY28anJtacvM1Rrr8wbieB9GfEJwlzqT0hUEao0CxEebiDA=="
+ },
+ "@babel/core": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.22.5.tgz",
+ "integrity": "sha512-SBuTAjg91A3eKOvD+bPEz3LlhHZRNu1nFOVts9lzDJTXshHTjII0BAtDS3Y2DAkdZdDKWVZGVwkDfc4Clxn1dg==",
+ "requires": {
+ "@ampproject/remapping": "^2.2.0",
+ "@babel/code-frame": "^7.22.5",
+ "@babel/generator": "^7.22.5",
+ "@babel/helper-compilation-targets": "^7.22.5",
+ "@babel/helper-module-transforms": "^7.22.5",
+ "@babel/helpers": "^7.22.5",
+ "@babel/parser": "^7.22.5",
+ "@babel/template": "^7.22.5",
+ "@babel/traverse": "^7.22.5",
+ "@babel/types": "^7.22.5",
+ "convert-source-map": "^1.7.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.2",
+ "json5": "^2.2.2",
+ "semver": "^6.3.0"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="
+ }
+ }
+ },
+ "@babel/generator": {
+ "version": "7.25.6",
+ "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.25.6.tgz",
+ "integrity": "sha512-VPC82gr1seXOpkjAAKoLhP50vx4vGNlF4msF64dSFq1P8RfB+QAuJWGHPXXPc8QyfVWwwB/TNNU4+ayZmHNbZw==",
+ "requires": {
+ "@babel/types": "^7.25.6",
+ "@jridgewell/gen-mapping": "^0.3.5",
+ "@jridgewell/trace-mapping": "^0.3.25",
+ "jsesc": "^2.5.1"
+ }
+ },
+ "@babel/helper-annotate-as-pure": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.22.5.tgz",
+ "integrity": "sha512-LvBTxu8bQSQkcyKOU+a1btnNFQ1dMAd0R6PyW3arXes06F6QLWLIrd681bxRPIXlrMGR3XYnW9JyML7dP3qgxg==",
+ "requires": {
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-builder-binary-assignment-operator-visitor": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-builder-binary-assignment-operator-visitor/-/helper-builder-binary-assignment-operator-visitor-7.22.5.tgz",
+ "integrity": "sha512-m1EP3lVOPptR+2DwD125gziZNcmoNSHGmJROKoy87loWUQyJaVXDgpmruWqDARZSmtYQ+Dl25okU8+qhVzuykw==",
+ "requires": {
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-compilation-targets": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.22.5.tgz",
+ "integrity": "sha512-Ji+ywpHeuqxB8WDxraCiqR0xfhYjiDE/e6k7FuIaANnoOFxAHskHChz4vA1mJC9Lbm01s1PVAGhQY4FUKSkGZw==",
+ "requires": {
+ "@babel/compat-data": "^7.22.5",
+ "@babel/helper-validator-option": "^7.22.5",
+ "browserslist": "^4.21.3",
+ "lru-cache": "^5.1.1",
+ "semver": "^6.3.0"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="
+ }
+ }
+ },
+ "@babel/helper-create-class-features-plugin": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.22.5.tgz",
+ "integrity": "sha512-xkb58MyOYIslxu3gKmVXmjTtUPvBU4odYzbiIQbWwLKIHCsx6UGZGX6F1IznMFVnDdirseUZopzN+ZRt8Xb33Q==",
+ "requires": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-environment-visitor": "^7.22.5",
+ "@babel/helper-function-name": "^7.22.5",
+ "@babel/helper-member-expression-to-functions": "^7.22.5",
+ "@babel/helper-optimise-call-expression": "^7.22.5",
+ "@babel/helper-replace-supers": "^7.22.5",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5",
+ "@babel/helper-split-export-declaration": "^7.22.5",
+ "semver": "^6.3.0"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="
+ }
+ }
+ },
+ "@babel/helper-create-regexp-features-plugin": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.22.5.tgz",
+ "integrity": "sha512-1VpEFOIbMRaXyDeUwUfmTIxExLwQ+zkW+Bh5zXpApA3oQedBx9v/updixWxnx/bZpKw7u8VxWjb/qWpIcmPq8A==",
+ "requires": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "regexpu-core": "^5.3.1",
+ "semver": "^6.3.0"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="
+ }
+ }
+ },
+ "@babel/helper-define-polyfill-provider": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.4.0.tgz",
+ "integrity": "sha512-RnanLx5ETe6aybRi1cO/edaRH+bNYWaryCEmjDDYyNr4wnSzyOp8T0dWipmqVHKEY3AbVKUom50AKSlj1zmKbg==",
+ "requires": {
+ "@babel/helper-compilation-targets": "^7.17.7",
+ "@babel/helper-plugin-utils": "^7.16.7",
+ "debug": "^4.1.1",
+ "lodash.debounce": "^4.0.8",
+ "resolve": "^1.14.2",
+ "semver": "^6.1.2"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="
+ }
+ }
+ },
+ "@babel/helper-environment-visitor": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-environment-visitor/-/helper-environment-visitor-7.22.5.tgz",
+ "integrity": "sha512-XGmhECfVA/5sAt+H+xpSg0mfrHq6FzNr9Oxh7PSEBBRUb/mL7Kz3NICXb194rCqAEdxkhPT1a88teizAFyvk8Q=="
+ },
+ "@babel/helper-function-name": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.22.5.tgz",
+ "integrity": "sha512-wtHSq6jMRE3uF2otvfuD3DIvVhOsSNshQl0Qrd7qC9oQJzHvOL4qQXlQn2916+CXGywIjpGuIkoyZRRxHPiNQQ==",
+ "requires": {
+ "@babel/template": "^7.22.5",
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-hoist-variables": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-hoist-variables/-/helper-hoist-variables-7.22.5.tgz",
+ "integrity": "sha512-wGjk9QZVzvknA6yKIUURb8zY3grXCcOZt+/7Wcy8O2uctxhplmUPkOdlgoNhmdVee2c92JXbf1xpMtVNbfoxRw==",
+ "requires": {
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-member-expression-to-functions": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.22.5.tgz",
+ "integrity": "sha512-aBiH1NKMG0H2cGZqspNvsaBe6wNGjbJjuLy29aU+eDZjSbbN53BaxlpB02xm9v34pLTZ1nIQPFYn2qMZoa5BQQ==",
+ "requires": {
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-module-imports": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.22.5.tgz",
+ "integrity": "sha512-8Dl6+HD/cKifutF5qGd/8ZJi84QeAKh+CEe1sBzz8UayBBGg1dAIJrdHOcOM5b2MpzWL2yuotJTtGjETq0qjXg==",
+ "requires": {
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-module-transforms": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.22.5.tgz",
+ "integrity": "sha512-+hGKDt/Ze8GFExiVHno/2dvG5IdstpzCq0y4Qc9OJ25D4q3pKfiIP/4Vp3/JvhDkLKsDK2api3q3fpIgiIF5bw==",
+ "requires": {
+ "@babel/helper-environment-visitor": "^7.22.5",
+ "@babel/helper-module-imports": "^7.22.5",
+ "@babel/helper-simple-access": "^7.22.5",
+ "@babel/helper-split-export-declaration": "^7.22.5",
+ "@babel/helper-validator-identifier": "^7.22.5",
+ "@babel/template": "^7.22.5",
+ "@babel/traverse": "^7.22.5",
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-optimise-call-expression": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.22.5.tgz",
+ "integrity": "sha512-HBwaojN0xFRx4yIvpwGqxiV2tUfl7401jlok564NgB9EHS1y6QT17FmKWm4ztqjeVdXLuC4fSvHc5ePpQjoTbw==",
+ "requires": {
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-plugin-utils": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.22.5.tgz",
+ "integrity": "sha512-uLls06UVKgFG9QD4OeFYLEGteMIAa5kpTPcFL28yuCIIzsf6ZyKZMllKVOCZFhiZ5ptnwX4mtKdWCBE/uT4amg=="
+ },
+ "@babel/helper-remap-async-to-generator": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.22.5.tgz",
+ "integrity": "sha512-cU0Sq1Rf4Z55fgz7haOakIyM7+x/uCFwXpLPaeRzfoUtAEAuUZjZvFPjL/rk5rW693dIgn2hng1W7xbT7lWT4g==",
+ "requires": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-environment-visitor": "^7.22.5",
+ "@babel/helper-wrap-function": "^7.22.5",
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-replace-supers": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.22.5.tgz",
+ "integrity": "sha512-aLdNM5I3kdI/V9xGNyKSF3X/gTyMUBohTZ+/3QdQKAA9vxIiy12E+8E2HoOP1/DjeqU+g6as35QHJNMDDYpuCg==",
+ "requires": {
+ "@babel/helper-environment-visitor": "^7.22.5",
+ "@babel/helper-member-expression-to-functions": "^7.22.5",
+ "@babel/helper-optimise-call-expression": "^7.22.5",
+ "@babel/template": "^7.22.5",
+ "@babel/traverse": "^7.22.5",
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-simple-access": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-simple-access/-/helper-simple-access-7.22.5.tgz",
+ "integrity": "sha512-n0H99E/K+Bika3++WNL17POvo4rKWZ7lZEp1Q+fStVbUi8nxPQEBOlTmCOxW/0JsS56SKKQ+ojAe2pHKJHN35w==",
+ "requires": {
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-skip-transparent-expression-wrappers": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.22.5.tgz",
+ "integrity": "sha512-tK14r66JZKiC43p8Ki33yLBVJKlQDFoA8GYN67lWCDCqoL6EMMSuM9b+Iff2jHaM/RRFYl7K+iiru7hbRqNx8Q==",
+ "requires": {
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-split-export-declaration": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-split-export-declaration/-/helper-split-export-declaration-7.22.5.tgz",
+ "integrity": "sha512-thqK5QFghPKWLhAV321lxF95yCg2K3Ob5yw+M3VHWfdia0IkPXUtoLH8x/6Fh486QUvzhb8YOWHChTVen2/PoQ==",
+ "requires": {
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helper-string-parser": {
+ "version": "7.24.8",
+ "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.24.8.tgz",
+ "integrity": "sha512-pO9KhhRcuUyGnJWwyEgnRJTSIZHiT+vMD0kPeD+so0l7mxkMT19g3pjY9GTnHySck/hDzq+dtW/4VgnMkippsQ=="
+ },
+ "@babel/helper-validator-identifier": {
+ "version": "7.24.7",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz",
+ "integrity": "sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w=="
+ },
+ "@babel/helper-validator-option": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-validator-option/-/helper-validator-option-7.22.5.tgz",
+ "integrity": "sha512-R3oB6xlIVKUnxNUxbmgq7pKjxpru24zlimpE8WK47fACIlM0II/Hm1RS8IaOI7NgCr6LNS+jl5l75m20npAziw=="
+ },
+ "@babel/helper-wrap-function": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.22.5.tgz",
+ "integrity": "sha512-bYqLIBSEshYcYQyfks8ewYA8S30yaGSeRslcvKMvoUk6HHPySbxHq9YRi6ghhzEU+yhQv9bP/jXnygkStOcqZw==",
+ "requires": {
+ "@babel/helper-function-name": "^7.22.5",
+ "@babel/template": "^7.22.5",
+ "@babel/traverse": "^7.22.5",
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/helpers": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.22.5.tgz",
+ "integrity": "sha512-pSXRmfE1vzcUIDFQcSGA5Mr+GxBV9oiRKDuDxXvWQQBCh8HoIjs/2DlDB7H8smac1IVrB9/xdXj2N3Wol9Cr+Q==",
+ "requires": {
+ "@babel/template": "^7.22.5",
+ "@babel/traverse": "^7.22.5",
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/highlight": {
+ "version": "7.24.7",
+ "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.24.7.tgz",
+ "integrity": "sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==",
+ "requires": {
+ "@babel/helper-validator-identifier": "^7.24.7",
+ "chalk": "^2.4.2",
+ "js-tokens": "^4.0.0",
+ "picocolors": "^1.0.0"
+ },
+ "dependencies": {
+ "ansi-styles": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz",
+ "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==",
+ "requires": {
+ "color-convert": "^1.9.0"
+ }
+ },
+ "chalk": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz",
+ "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==",
+ "requires": {
+ "ansi-styles": "^3.2.1",
+ "escape-string-regexp": "^1.0.5",
+ "supports-color": "^5.3.0"
+ }
+ },
+ "color-convert": {
+ "version": "1.9.3",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz",
+ "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==",
+ "requires": {
+ "color-name": "1.1.3"
+ }
+ },
+ "color-name": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz",
+ "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw=="
+ },
+ "escape-string-regexp": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz",
+ "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg=="
+ },
+ "has-flag": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz",
+ "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw=="
+ },
+ "supports-color": {
+ "version": "5.5.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz",
+ "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==",
+ "requires": {
+ "has-flag": "^3.0.0"
+ }
+ }
+ }
+ },
+ "@babel/parser": {
+ "version": "7.25.6",
+ "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.25.6.tgz",
+ "integrity": "sha512-trGdfBdbD0l1ZPmcJ83eNxB9rbEax4ALFTF7fN386TMYbeCQbyme5cOEXQhbGXKebwGaB/J52w1mrklMcbgy6Q==",
+ "requires": {
+ "@babel/types": "^7.25.6"
+ }
+ },
+ "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.22.5.tgz",
+ "integrity": "sha512-NP1M5Rf+u2Gw9qfSO4ihjcTGW5zXTi36ITLd4/EoAcEhIZ0yjMqmftDNl3QC19CX7olhrjpyU454g/2W7X0jvQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.22.5.tgz",
+ "integrity": "sha512-31Bb65aZaUwqCbWMnZPduIZxCBngHFlzyN6Dq6KAJjtx+lx6ohKHubc61OomYi7XwVD4Ol0XCVz4h+pYFR048g==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5",
+ "@babel/plugin-transform-optional-chaining": "^7.22.5"
+ }
+ },
+ "@babel/plugin-proposal-object-rest-spread": {
+ "version": "7.12.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.12.1.tgz",
+ "integrity": "sha512-s6SowJIjzlhx8o7lsFx5zmY4At6CTtDvgNQDdPzkBQucle58A6b/TTeEBYtyDgmcXjUTM+vE8YOGHZzzbc/ioA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.10.4",
+ "@babel/plugin-syntax-object-rest-spread": "^7.8.0",
+ "@babel/plugin-transform-parameters": "^7.12.1"
+ }
+ },
+ "@babel/plugin-proposal-private-property-in-object": {
+ "version": "7.21.0-placeholder-for-preset-env.2",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz",
+ "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==",
+ "requires": {}
+ },
+ "@babel/plugin-proposal-unicode-property-regex": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.18.6.tgz",
+ "integrity": "sha512-2BShG/d5yoZyXZfVePH91urL5wTG6ASZU9M4o03lKK8u8UW1y08OMttBSOADTcJrnPMpvDXRG3G8fyLh4ovs8w==",
+ "requires": {
+ "@babel/helper-create-regexp-features-plugin": "^7.18.6",
+ "@babel/helper-plugin-utils": "^7.18.6"
+ }
+ },
+ "@babel/plugin-syntax-async-generators": {
+ "version": "7.8.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-async-generators/-/plugin-syntax-async-generators-7.8.4.tgz",
+ "integrity": "sha512-tycmZxkGfZaxhMRbXlPXuVFpdWlXpir2W4AMhSJgRKzk/eDlIXOhb2LHWoLpDF7TEHylV5zNhykX6KAgHJmTNw==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-class-properties": {
+ "version": "7.12.13",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-properties/-/plugin-syntax-class-properties-7.12.13.tgz",
+ "integrity": "sha512-fm4idjKla0YahUNgFNLCB0qySdsoPiZP3iQE3rky0mBUtMZ23yDJ9SJdg6dXTSDnulOVqiF3Hgr9nbXvXTQZYA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.12.13"
+ }
+ },
+ "@babel/plugin-syntax-class-static-block": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-class-static-block/-/plugin-syntax-class-static-block-7.14.5.tgz",
+ "integrity": "sha512-b+YyPmr6ldyNnM6sqYeMWE+bgJcJpO6yS4QD7ymxgH34GBPNDM/THBh8iunyvKIZztiwLH4CJZ0RxTk9emgpjw==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.14.5"
+ }
+ },
+ "@babel/plugin-syntax-dynamic-import": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-dynamic-import/-/plugin-syntax-dynamic-import-7.8.3.tgz",
+ "integrity": "sha512-5gdGbFon+PszYzqs83S3E5mpi7/y/8M9eC90MRTZfduQOYW76ig6SOSPNe41IG5LoP3FGBn2N0RjVDSQiS94kQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-export-namespace-from": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-export-namespace-from/-/plugin-syntax-export-namespace-from-7.8.3.tgz",
+ "integrity": "sha512-MXf5laXo6c1IbEbegDmzGPwGNTsHZmEy6QGznu5Sh2UCWvueywb2ee+CCE4zQiZstxU9BMoQO9i6zUFSY0Kj0Q==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.3"
+ }
+ },
+ "@babel/plugin-syntax-import-assertions": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.22.5.tgz",
+ "integrity": "sha512-rdV97N7KqsRzeNGoWUOK6yUsWarLjE5Su/Snk9IYPU9CwkWHs4t+rTGOvffTR8XGkJMTAdLfO0xVnXm8wugIJg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-syntax-import-attributes": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.22.5.tgz",
+ "integrity": "sha512-KwvoWDeNKPETmozyFE0P2rOLqh39EoQHNjqizrI5B8Vt0ZNS7M56s7dAiAqbYfiAYOuIzIh96z3iR2ktgu3tEg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-syntax-import-meta": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-import-meta/-/plugin-syntax-import-meta-7.10.4.tgz",
+ "integrity": "sha512-Yqfm+XDx0+Prh3VSeEQCPU81yC+JWZ2pDPFSS4ZdpfZhp4MkFMaDC1UqseovEKwSUpnIL7+vK+Clp7bfh0iD7g==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ }
+ },
+ "@babel/plugin-syntax-json-strings": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-json-strings/-/plugin-syntax-json-strings-7.8.3.tgz",
+ "integrity": "sha512-lY6kdGpWHvjoe2vk4WrAapEuBR69EMxZl+RoGRhrFGNYVK8mOPAW8VfbT/ZgrFbXlDNiiaxQnAtgVCZ6jv30EA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-jsx": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.22.5.tgz",
+ "integrity": "sha512-gvyP4hZrgrs/wWMaocvxZ44Hw0b3W8Pe+cMxc8V1ULQ07oh8VNbIRaoD1LRZVTvD+0nieDKjfgKg89sD7rrKrg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-syntax-logical-assignment-operators": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-logical-assignment-operators/-/plugin-syntax-logical-assignment-operators-7.10.4.tgz",
+ "integrity": "sha512-d8waShlpFDinQ5MtvGU9xDAOzKH47+FFoney2baFIoMr952hKOLp1HR7VszoZvOsV/4+RRszNY7D17ba0te0ig==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ }
+ },
+ "@babel/plugin-syntax-nullish-coalescing-operator": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz",
+ "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-numeric-separator": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.10.4.tgz",
+ "integrity": "sha512-9H6YdfkcK/uOnY/K7/aA2xpzaAgkQn37yzWUMRK7OaPOqOpGS1+n0H5hxT9AUw9EsSjPW8SVyMJwYRtWs3X3ug==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ }
+ },
+ "@babel/plugin-syntax-object-rest-spread": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz",
+ "integrity": "sha512-XoqMijGZb9y3y2XskN+P1wUGiVwWZ5JmoDRwx5+3GmEplNyVM2s2Dg8ILFQm8rWM48orGy5YpI5Bl8U1y7ydlA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-optional-catch-binding": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-catch-binding/-/plugin-syntax-optional-catch-binding-7.8.3.tgz",
+ "integrity": "sha512-6VPD0Pc1lpTqw0aKoeRTMiB+kWhAoT24PA+ksWSBrFtl5SIRVpZlwN3NNPQjehA2E/91FV3RjLWoVTglWcSV3Q==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-optional-chaining": {
+ "version": "7.8.3",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz",
+ "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.8.0"
+ }
+ },
+ "@babel/plugin-syntax-private-property-in-object": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-private-property-in-object/-/plugin-syntax-private-property-in-object-7.14.5.tgz",
+ "integrity": "sha512-0wVnp9dxJ72ZUJDV27ZfbSj6iHLoytYZmh3rFcxNnvsJF3ktkzLDZPy/mA17HGsaQT3/DQsWYX1f1QGWkCoVUg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.14.5"
+ }
+ },
+ "@babel/plugin-syntax-top-level-await": {
+ "version": "7.14.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.14.5.tgz",
+ "integrity": "sha512-hx++upLv5U1rgYfwe1xBQUhRmU41NEvpUvrp8jkrSCdvGSnM5/qdRMtylJ6PG5OFkBaHkbTAKTnd3/YyESRHFw==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.14.5"
+ }
+ },
+ "@babel/plugin-syntax-typescript": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-typescript/-/plugin-syntax-typescript-7.22.5.tgz",
+ "integrity": "sha512-1mS2o03i7t1c6VzH6fdQ3OA8tcEIxwG18zIPRp+UY1Ihv6W+XZzBCVxExF9upussPXJ0xE9XRHwMoNs1ep/nRQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-syntax-unicode-sets-regex": {
+ "version": "7.18.6",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz",
+ "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==",
+ "requires": {
+ "@babel/helper-create-regexp-features-plugin": "^7.18.6",
+ "@babel/helper-plugin-utils": "^7.18.6"
+ }
+ },
+ "@babel/plugin-transform-arrow-functions": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.22.5.tgz",
+ "integrity": "sha512-26lTNXoVRdAnsaDXPpvCNUq+OVWEVC6bx7Vvz9rC53F2bagUWW4u4ii2+h8Fejfh7RYqPxn+libeFBBck9muEw==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-async-generator-functions": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.22.5.tgz",
+ "integrity": "sha512-gGOEvFzm3fWoyD5uZq7vVTD57pPJ3PczPUD/xCFGjzBpUosnklmXyKnGQbbbGs1NPNPskFex0j93yKbHt0cHyg==",
+ "requires": {
+ "@babel/helper-environment-visitor": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-remap-async-to-generator": "^7.22.5",
+ "@babel/plugin-syntax-async-generators": "^7.8.4"
+ }
+ },
+ "@babel/plugin-transform-async-to-generator": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.22.5.tgz",
+ "integrity": "sha512-b1A8D8ZzE/VhNDoV1MSJTnpKkCG5bJo+19R4o4oy03zM7ws8yEMK755j61Dc3EyvdysbqH5BOOTquJ7ZX9C6vQ==",
+ "requires": {
+ "@babel/helper-module-imports": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-remap-async-to-generator": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-block-scoped-functions": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.22.5.tgz",
+ "integrity": "sha512-tdXZ2UdknEKQWKJP1KMNmuF5Lx3MymtMN/pvA+p/VEkhK8jVcQ1fzSy8KM9qRYhAf2/lV33hoMPKI/xaI9sADA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-block-scoping": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.22.5.tgz",
+ "integrity": "sha512-EcACl1i5fSQ6bt+YGuU/XGCeZKStLmyVGytWkpyhCLeQVA0eu6Wtiw92V+I1T/hnezUv7j74dA/Ro69gWcU+hg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-class-properties": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.22.5.tgz",
+ "integrity": "sha512-nDkQ0NfkOhPTq8YCLiWNxp1+f9fCobEjCb0n8WdbNUBc4IB5V7P1QnX9IjpSoquKrXF5SKojHleVNs2vGeHCHQ==",
+ "requires": {
+ "@babel/helper-create-class-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-class-static-block": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.22.5.tgz",
+ "integrity": "sha512-SPToJ5eYZLxlnp1UzdARpOGeC2GbHvr9d/UV0EukuVx8atktg194oe+C5BqQ8jRTkgLRVOPYeXRSBg1IlMoVRA==",
+ "requires": {
+ "@babel/helper-create-class-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-class-static-block": "^7.14.5"
+ }
+ },
+ "@babel/plugin-transform-classes": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.22.5.tgz",
+ "integrity": "sha512-2edQhLfibpWpsVBx2n/GKOz6JdGQvLruZQfGr9l1qes2KQaWswjBzhQF7UDUZMNaMMQeYnQzxwOMPsbYF7wqPQ==",
+ "requires": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-compilation-targets": "^7.22.5",
+ "@babel/helper-environment-visitor": "^7.22.5",
+ "@babel/helper-function-name": "^7.22.5",
+ "@babel/helper-optimise-call-expression": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-replace-supers": "^7.22.5",
+ "@babel/helper-split-export-declaration": "^7.22.5",
+ "globals": "^11.1.0"
+ }
+ },
+ "@babel/plugin-transform-computed-properties": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.22.5.tgz",
+ "integrity": "sha512-4GHWBgRf0krxPX+AaPtgBAlTgTeZmqDynokHOX7aqqAB4tHs3U2Y02zH6ETFdLZGcg9UQSD1WCmkVrE9ErHeOg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/template": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-destructuring": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.22.5.tgz",
+ "integrity": "sha512-GfqcFuGW8vnEqTUBM7UtPd5A4q797LTvvwKxXTgRsFjoqaJiEg9deBG6kWeQYkVEL569NpnmpC0Pkr/8BLKGnQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-dotall-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.22.5.tgz",
+ "integrity": "sha512-5/Yk9QxCQCl+sOIB1WelKnVRxTJDSAIxtJLL2/pqL14ZVlbH0fUQUZa/T5/UnQtBNgghR7mfB8ERBKyKPCi7Vw==",
+ "requires": {
+ "@babel/helper-create-regexp-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-duplicate-keys": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.22.5.tgz",
+ "integrity": "sha512-dEnYD+9BBgld5VBXHnF/DbYGp3fqGMsyxKbtD1mDyIA7AkTSpKXFhCVuj/oQVOoALfBs77DudA0BE4d5mcpmqw==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-dynamic-import": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.22.5.tgz",
+ "integrity": "sha512-0MC3ppTB1AMxd8fXjSrbPa7LT9hrImt+/fcj+Pg5YMD7UQyWp/02+JWpdnCymmsXwIx5Z+sYn1bwCn4ZJNvhqQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-dynamic-import": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-exponentiation-operator": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.22.5.tgz",
+ "integrity": "sha512-vIpJFNM/FjZ4rh1myqIya9jXwrwwgFRHPjT3DkUA9ZLHuzox8jiXkOLvwm1H+PQIP3CqfC++WPKeuDi0Sjdj1g==",
+ "requires": {
+ "@babel/helper-builder-binary-assignment-operator-visitor": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-export-namespace-from": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.22.5.tgz",
+ "integrity": "sha512-X4hhm7FRnPgd4nDA4b/5V280xCx6oL7Oob5+9qVS5C13Zq4bh1qq7LU0GgRU6b5dBWBvhGaXYVB4AcN6+ol6vg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-export-namespace-from": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-for-of": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.22.5.tgz",
+ "integrity": "sha512-3kxQjX1dU9uudwSshyLeEipvrLjBCVthCgeTp6CzE/9JYrlAIaeekVxRpCWsDDfYTfRZRoCeZatCQvwo+wvK8A==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-function-name": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.22.5.tgz",
+ "integrity": "sha512-UIzQNMS0p0HHiQm3oelztj+ECwFnj+ZRV4KnguvlsD2of1whUeM6o7wGNj6oLwcDoAXQ8gEqfgC24D+VdIcevg==",
+ "requires": {
+ "@babel/helper-compilation-targets": "^7.22.5",
+ "@babel/helper-function-name": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-json-strings": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.22.5.tgz",
+ "integrity": "sha512-DuCRB7fu8MyTLbEQd1ew3R85nx/88yMoqo2uPSjevMj3yoN7CDM8jkgrY0wmVxfJZyJ/B9fE1iq7EQppWQmR5A==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-json-strings": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-literals": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-literals/-/plugin-transform-literals-7.22.5.tgz",
+ "integrity": "sha512-fTLj4D79M+mepcw3dgFBTIDYpbcB9Sm0bpm4ppXPaO+U+PKFFyV9MGRvS0gvGw62sd10kT5lRMKXAADb9pWy8g==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-logical-assignment-operators": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.22.5.tgz",
+ "integrity": "sha512-MQQOUW1KL8X0cDWfbwYP+TbVbZm16QmQXJQ+vndPtH/BoO0lOKpVoEDMI7+PskYxH+IiE0tS8xZye0qr1lGzSA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4"
+ }
+ },
+ "@babel/plugin-transform-member-expression-literals": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.22.5.tgz",
+ "integrity": "sha512-RZEdkNtzzYCFl9SE9ATaUMTj2hqMb4StarOJLrZRbqqU4HSBE7UlBw9WBWQiDzrJZJdUWiMTVDI6Gv/8DPvfew==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-modules-amd": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.22.5.tgz",
+ "integrity": "sha512-R+PTfLTcYEmb1+kK7FNkhQ1gP4KgjpSO6HfH9+f8/yfp2Nt3ggBjiVpRwmwTlfqZLafYKJACy36yDXlEmI9HjQ==",
+ "requires": {
+ "@babel/helper-module-transforms": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-modules-commonjs": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.22.5.tgz",
+ "integrity": "sha512-B4pzOXj+ONRmuaQTg05b3y/4DuFz3WcCNAXPLb2Q0GT0TrGKGxNKV4jwsXts+StaM0LQczZbOpj8o1DLPDJIiA==",
+ "requires": {
+ "@babel/helper-module-transforms": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-simple-access": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-modules-systemjs": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.22.5.tgz",
+ "integrity": "sha512-emtEpoaTMsOs6Tzz+nbmcePl6AKVtS1yC4YNAeMun9U8YCsgadPNxnOPQ8GhHFB2qdx+LZu9LgoC0Lthuu05DQ==",
+ "requires": {
+ "@babel/helper-hoist-variables": "^7.22.5",
+ "@babel/helper-module-transforms": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-validator-identifier": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-modules-umd": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.22.5.tgz",
+ "integrity": "sha512-+S6kzefN/E1vkSsKx8kmQuqeQsvCKCd1fraCM7zXm4SFoggI099Tr4G8U81+5gtMdUeMQ4ipdQffbKLX0/7dBQ==",
+ "requires": {
+ "@babel/helper-module-transforms": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-named-capturing-groups-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.22.5.tgz",
+ "integrity": "sha512-YgLLKmS3aUBhHaxp5hi1WJTgOUb/NCuDHzGT9z9WTt3YG+CPRhJs6nprbStx6DnWM4dh6gt7SU3sZodbZ08adQ==",
+ "requires": {
+ "@babel/helper-create-regexp-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-new-target": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.22.5.tgz",
+ "integrity": "sha512-AsF7K0Fx/cNKVyk3a+DW0JLo+Ua598/NxMRvxDnkpCIGFh43+h/v2xyhRUYf6oD8gE4QtL83C7zZVghMjHd+iw==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-nullish-coalescing-operator": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.22.5.tgz",
+ "integrity": "sha512-6CF8g6z1dNYZ/VXok5uYkkBBICHZPiGEl7oDnAx2Mt1hlHVHOSIKWJaXHjQJA5VB43KZnXZDIexMchY4y2PGdA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-numeric-separator": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.22.5.tgz",
+ "integrity": "sha512-NbslED1/6M+sXiwwtcAB/nieypGw02Ejf4KtDeMkCEpP6gWFMX1wI9WKYua+4oBneCCEmulOkRpwywypVZzs/g==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-numeric-separator": "^7.10.4"
+ }
+ },
+ "@babel/plugin-transform-object-rest-spread": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.22.5.tgz",
+ "integrity": "sha512-Kk3lyDmEslH9DnvCDA1s1kkd3YWQITiBOHngOtDL9Pt6BZjzqb6hiOlb8VfjiiQJ2unmegBqZu0rx5RxJb5vmQ==",
+ "requires": {
+ "@babel/compat-data": "^7.22.5",
+ "@babel/helper-compilation-targets": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-object-rest-spread": "^7.8.3",
+ "@babel/plugin-transform-parameters": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-object-super": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.22.5.tgz",
+ "integrity": "sha512-klXqyaT9trSjIUrcsYIfETAzmOEZL3cBYqOYLJxBHfMFFggmXOv+NYSX/Jbs9mzMVESw/WycLFPRx8ba/b2Ipw==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-replace-supers": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-optional-catch-binding": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.22.5.tgz",
+ "integrity": "sha512-pH8orJahy+hzZje5b8e2QIlBWQvGpelS76C63Z+jhZKsmzfNaPQ+LaW6dcJ9bxTpo1mtXbgHwy765Ro3jftmUg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-optional-catch-binding": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-optional-chaining": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.22.5.tgz",
+ "integrity": "sha512-AconbMKOMkyG+xCng2JogMCDcqW8wedQAqpVIL4cOSescZ7+iW8utC6YDZLMCSUIReEA733gzRSaOSXMAt/4WQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5",
+ "@babel/plugin-syntax-optional-chaining": "^7.8.3"
+ }
+ },
+ "@babel/plugin-transform-parameters": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.22.5.tgz",
+ "integrity": "sha512-AVkFUBurORBREOmHRKo06FjHYgjrabpdqRSwq6+C7R5iTCZOsM4QbcB27St0a4U6fffyAOqh3s/qEfybAhfivg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-private-methods": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.22.5.tgz",
+ "integrity": "sha512-PPjh4gyrQnGe97JTalgRGMuU4icsZFnWkzicB/fUtzlKUqvsWBKEpPPfr5a2JiyirZkHxnAqkQMO5Z5B2kK3fA==",
+ "requires": {
+ "@babel/helper-create-class-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-private-property-in-object": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.22.5.tgz",
+ "integrity": "sha512-/9xnaTTJcVoBtSSmrVyhtSvO3kbqS2ODoh2juEU72c3aYonNF0OMGiaz2gjukyKM2wBBYJP38S4JiE0Wfb5VMQ==",
+ "requires": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-create-class-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-private-property-in-object": "^7.14.5"
+ }
+ },
+ "@babel/plugin-transform-property-literals": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.22.5.tgz",
+ "integrity": "sha512-TiOArgddK3mK/x1Qwf5hay2pxI6wCZnvQqrFSqbtg1GLl2JcNMitVH/YnqjP+M31pLUeTfzY1HAXFDnUBV30rQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-react-constant-elements": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-constant-elements/-/plugin-transform-react-constant-elements-7.22.5.tgz",
+ "integrity": "sha512-BF5SXoO+nX3h5OhlN78XbbDrBOffv+AxPP2ENaJOVqjWCgBDeOY3WcaUcddutGSfoap+5NEQ/q/4I3WZIvgkXA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-react-display-name": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-display-name/-/plugin-transform-react-display-name-7.22.5.tgz",
+ "integrity": "sha512-PVk3WPYudRF5z4GKMEYUrLjPl38fJSKNaEOkFuoprioowGuWN6w2RKznuFNSlJx7pzzXXStPUnNSOEO0jL5EVw==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-react-jsx": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx/-/plugin-transform-react-jsx-7.22.5.tgz",
+ "integrity": "sha512-rog5gZaVbUip5iWDMTYbVM15XQq+RkUKhET/IHR6oizR+JEoN6CAfTTuHcK4vwUyzca30qqHqEpzBOnaRMWYMA==",
+ "requires": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-module-imports": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-jsx": "^7.22.5",
+ "@babel/types": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-react-jsx-development": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-jsx-development/-/plugin-transform-react-jsx-development-7.22.5.tgz",
+ "integrity": "sha512-bDhuzwWMuInwCYeDeMzyi7TaBgRQei6DqxhbyniL7/VG4RSS7HtSL2QbY4eESy1KJqlWt8g3xeEBGPuo+XqC8A==",
+ "requires": {
+ "@babel/plugin-transform-react-jsx": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-react-pure-annotations": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-react-pure-annotations/-/plugin-transform-react-pure-annotations-7.22.5.tgz",
+ "integrity": "sha512-gP4k85wx09q+brArVinTXhWiyzLl9UpmGva0+mWyKxk6JZequ05x3eUcIUE+FyttPKJFRRVtAvQaJ6YF9h1ZpA==",
+ "requires": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-regenerator": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.22.5.tgz",
+ "integrity": "sha512-rR7KePOE7gfEtNTh9Qw+iO3Q/e4DEsoQ+hdvM6QUDH7JRJ5qxq5AA52ZzBWbI5i9lfNuvySgOGP8ZN7LAmaiPw==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "regenerator-transform": "^0.15.1"
+ }
+ },
+ "@babel/plugin-transform-reserved-words": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.22.5.tgz",
+ "integrity": "sha512-DTtGKFRQUDm8svigJzZHzb/2xatPc6TzNvAIJ5GqOKDsGFYgAskjRulbR/vGsPKq3OPqtexnz327qYpP57RFyA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-runtime": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.22.5.tgz",
+ "integrity": "sha512-bg4Wxd1FWeFx3daHFTWk1pkSWK/AyQuiyAoeZAOkAOUBjnZPH6KT7eMxouV47tQ6hl6ax2zyAWBdWZXbrvXlaw==",
+ "requires": {
+ "@babel/helper-module-imports": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "babel-plugin-polyfill-corejs2": "^0.4.3",
+ "babel-plugin-polyfill-corejs3": "^0.8.1",
+ "babel-plugin-polyfill-regenerator": "^0.5.0",
+ "semver": "^6.3.0"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="
+ }
+ }
+ },
+ "@babel/plugin-transform-shorthand-properties": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.22.5.tgz",
+ "integrity": "sha512-vM4fq9IXHscXVKzDv5itkO1X52SmdFBFcMIBZ2FRn2nqVYqw6dBexUgMvAjHW+KXpPPViD/Yo3GrDEBaRC0QYA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-spread": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-spread/-/plugin-transform-spread-7.22.5.tgz",
+ "integrity": "sha512-5ZzDQIGyvN4w8+dMmpohL6MBo+l2G7tfC/O2Dg7/hjpgeWvUx8FzfeOKxGog9IimPa4YekaQ9PlDqTLOljkcxg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-skip-transparent-expression-wrappers": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-sticky-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.22.5.tgz",
+ "integrity": "sha512-zf7LuNpHG0iEeiyCNwX4j3gDg1jgt1k3ZdXBKbZSoA3BbGQGvMiSvfbZRR3Dr3aeJe3ooWFZxOOG3IRStYp2Bw==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-template-literals": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.22.5.tgz",
+ "integrity": "sha512-5ciOehRNf+EyUeewo8NkbQiUs4d6ZxiHo6BcBcnFlgiJfu16q0bQUw9Jvo0b0gBKFG1SMhDSjeKXSYuJLeFSMA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-typeof-symbol": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.22.5.tgz",
+ "integrity": "sha512-bYkI5lMzL4kPii4HHEEChkD0rkc+nvnlR6+o/qdqR6zrm0Sv/nodmyLhlq2DO0YKLUNd2VePmPRjJXSBh9OIdA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-typescript": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-typescript/-/plugin-transform-typescript-7.22.5.tgz",
+ "integrity": "sha512-SMubA9S7Cb5sGSFFUlqxyClTA9zWJ8qGQrppNUm05LtFuN1ELRFNndkix4zUJrC9F+YivWwa1dHMSyo0e0N9dA==",
+ "requires": {
+ "@babel/helper-annotate-as-pure": "^7.22.5",
+ "@babel/helper-create-class-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/plugin-syntax-typescript": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-unicode-escapes": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.22.5.tgz",
+ "integrity": "sha512-biEmVg1IYB/raUO5wT1tgfacCef15Fbzhkx493D3urBI++6hpJ+RFG4SrWMn0NEZLfvilqKf3QDrRVZHo08FYg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-unicode-property-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.22.5.tgz",
+ "integrity": "sha512-HCCIb+CbJIAE6sXn5CjFQXMwkCClcOfPCzTlilJ8cUatfzwHlWQkbtV0zD338u9dZskwvuOYTuuaMaA8J5EI5A==",
+ "requires": {
+ "@babel/helper-create-regexp-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-unicode-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.22.5.tgz",
+ "integrity": "sha512-028laaOKptN5vHJf9/Arr/HiJekMd41hOEZYvNsrsXqJ7YPYuX2bQxh31fkZzGmq3YqHRJzYFFAVYvKfMPKqyg==",
+ "requires": {
+ "@babel/helper-create-regexp-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/plugin-transform-unicode-sets-regex": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.22.5.tgz",
+ "integrity": "sha512-lhMfi4FC15j13eKrh3DnYHjpGj6UKQHtNKTbtc1igvAhRy4+kLhV07OpLcsN0VgDEw/MjAvJO4BdMJsHwMhzCg==",
+ "requires": {
+ "@babel/helper-create-regexp-features-plugin": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5"
+ }
+ },
+ "@babel/preset-env": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.22.5.tgz",
+ "integrity": "sha512-fj06hw89dpiZzGZtxn+QybifF07nNiZjZ7sazs2aVDcysAZVGjW7+7iFYxg6GLNM47R/thYfLdrXc+2f11Vi9A==",
+ "requires": {
+ "@babel/compat-data": "^7.22.5",
+ "@babel/helper-compilation-targets": "^7.22.5",
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-validator-option": "^7.22.5",
+ "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.22.5",
+ "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.22.5",
+ "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2",
+ "@babel/plugin-syntax-async-generators": "^7.8.4",
+ "@babel/plugin-syntax-class-properties": "^7.12.13",
+ "@babel/plugin-syntax-class-static-block": "^7.14.5",
+ "@babel/plugin-syntax-dynamic-import": "^7.8.3",
+ "@babel/plugin-syntax-export-namespace-from": "^7.8.3",
+ "@babel/plugin-syntax-import-assertions": "^7.22.5",
+ "@babel/plugin-syntax-import-attributes": "^7.22.5",
+ "@babel/plugin-syntax-import-meta": "^7.10.4",
+ "@babel/plugin-syntax-json-strings": "^7.8.3",
+ "@babel/plugin-syntax-logical-assignment-operators": "^7.10.4",
+ "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.3",
+ "@babel/plugin-syntax-numeric-separator": "^7.10.4",
+ "@babel/plugin-syntax-object-rest-spread": "^7.8.3",
+ "@babel/plugin-syntax-optional-catch-binding": "^7.8.3",
+ "@babel/plugin-syntax-optional-chaining": "^7.8.3",
+ "@babel/plugin-syntax-private-property-in-object": "^7.14.5",
+ "@babel/plugin-syntax-top-level-await": "^7.14.5",
+ "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6",
+ "@babel/plugin-transform-arrow-functions": "^7.22.5",
+ "@babel/plugin-transform-async-generator-functions": "^7.22.5",
+ "@babel/plugin-transform-async-to-generator": "^7.22.5",
+ "@babel/plugin-transform-block-scoped-functions": "^7.22.5",
+ "@babel/plugin-transform-block-scoping": "^7.22.5",
+ "@babel/plugin-transform-class-properties": "^7.22.5",
+ "@babel/plugin-transform-class-static-block": "^7.22.5",
+ "@babel/plugin-transform-classes": "^7.22.5",
+ "@babel/plugin-transform-computed-properties": "^7.22.5",
+ "@babel/plugin-transform-destructuring": "^7.22.5",
+ "@babel/plugin-transform-dotall-regex": "^7.22.5",
+ "@babel/plugin-transform-duplicate-keys": "^7.22.5",
+ "@babel/plugin-transform-dynamic-import": "^7.22.5",
+ "@babel/plugin-transform-exponentiation-operator": "^7.22.5",
+ "@babel/plugin-transform-export-namespace-from": "^7.22.5",
+ "@babel/plugin-transform-for-of": "^7.22.5",
+ "@babel/plugin-transform-function-name": "^7.22.5",
+ "@babel/plugin-transform-json-strings": "^7.22.5",
+ "@babel/plugin-transform-literals": "^7.22.5",
+ "@babel/plugin-transform-logical-assignment-operators": "^7.22.5",
+ "@babel/plugin-transform-member-expression-literals": "^7.22.5",
+ "@babel/plugin-transform-modules-amd": "^7.22.5",
+ "@babel/plugin-transform-modules-commonjs": "^7.22.5",
+ "@babel/plugin-transform-modules-systemjs": "^7.22.5",
+ "@babel/plugin-transform-modules-umd": "^7.22.5",
+ "@babel/plugin-transform-named-capturing-groups-regex": "^7.22.5",
+ "@babel/plugin-transform-new-target": "^7.22.5",
+ "@babel/plugin-transform-nullish-coalescing-operator": "^7.22.5",
+ "@babel/plugin-transform-numeric-separator": "^7.22.5",
+ "@babel/plugin-transform-object-rest-spread": "^7.22.5",
+ "@babel/plugin-transform-object-super": "^7.22.5",
+ "@babel/plugin-transform-optional-catch-binding": "^7.22.5",
+ "@babel/plugin-transform-optional-chaining": "^7.22.5",
+ "@babel/plugin-transform-parameters": "^7.22.5",
+ "@babel/plugin-transform-private-methods": "^7.22.5",
+ "@babel/plugin-transform-private-property-in-object": "^7.22.5",
+ "@babel/plugin-transform-property-literals": "^7.22.5",
+ "@babel/plugin-transform-regenerator": "^7.22.5",
+ "@babel/plugin-transform-reserved-words": "^7.22.5",
+ "@babel/plugin-transform-shorthand-properties": "^7.22.5",
+ "@babel/plugin-transform-spread": "^7.22.5",
+ "@babel/plugin-transform-sticky-regex": "^7.22.5",
+ "@babel/plugin-transform-template-literals": "^7.22.5",
+ "@babel/plugin-transform-typeof-symbol": "^7.22.5",
+ "@babel/plugin-transform-unicode-escapes": "^7.22.5",
+ "@babel/plugin-transform-unicode-property-regex": "^7.22.5",
+ "@babel/plugin-transform-unicode-regex": "^7.22.5",
+ "@babel/plugin-transform-unicode-sets-regex": "^7.22.5",
+ "@babel/preset-modules": "^0.1.5",
+ "@babel/types": "^7.22.5",
+ "babel-plugin-polyfill-corejs2": "^0.4.3",
+ "babel-plugin-polyfill-corejs3": "^0.8.1",
+ "babel-plugin-polyfill-regenerator": "^0.5.0",
+ "core-js-compat": "^3.30.2",
+ "semver": "^6.3.0"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="
+ }
+ }
+ },
+ "@babel/preset-modules": {
+ "version": "0.1.5",
+ "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.5.tgz",
+ "integrity": "sha512-A57th6YRG7oR3cq/yt/Y84MvGgE0eJG2F1JLhKuyG+jFxEgrd/HAMJatiFtmOiZurz+0DkrvbheCLaV5f2JfjA==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.0.0",
+ "@babel/plugin-proposal-unicode-property-regex": "^7.4.4",
+ "@babel/plugin-transform-dotall-regex": "^7.4.4",
+ "@babel/types": "^7.4.4",
+ "esutils": "^2.0.2"
+ }
+ },
+ "@babel/preset-react": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/preset-react/-/preset-react-7.22.5.tgz",
+ "integrity": "sha512-M+Is3WikOpEJHgR385HbuCITPTaPRaNkibTEa9oiofmJvIsrceb4yp9RL9Kb+TE8LznmeyZqpP+Lopwcx59xPQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-validator-option": "^7.22.5",
+ "@babel/plugin-transform-react-display-name": "^7.22.5",
+ "@babel/plugin-transform-react-jsx": "^7.22.5",
+ "@babel/plugin-transform-react-jsx-development": "^7.22.5",
+ "@babel/plugin-transform-react-pure-annotations": "^7.22.5"
+ }
+ },
+ "@babel/preset-typescript": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/preset-typescript/-/preset-typescript-7.22.5.tgz",
+ "integrity": "sha512-YbPaal9LxztSGhmndR46FmAbkJ/1fAsw293tSU+I5E5h+cnJ3d4GTwyUgGYmOXJYdGA+uNePle4qbaRzj2NISQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.22.5",
+ "@babel/helper-validator-option": "^7.22.5",
+ "@babel/plugin-syntax-jsx": "^7.22.5",
+ "@babel/plugin-transform-modules-commonjs": "^7.22.5",
+ "@babel/plugin-transform-typescript": "^7.22.5"
+ }
+ },
+ "@babel/regjsgen": {
+ "version": "0.8.0",
+ "resolved": "https://registry.npmjs.org/@babel/regjsgen/-/regjsgen-0.8.0.tgz",
+ "integrity": "sha512-x/rqGMdzj+fWZvCOYForTghzbtqPDZ5gPwaoNGHdgDfF2QA/XZbCBp4Moo5scrkAMPhB7z26XM/AaHuIJdgauA=="
+ },
+ "@babel/runtime": {
+ "version": "7.21.5",
+ "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.21.5.tgz",
+ "integrity": "sha512-8jI69toZqqcsnqGGqwGS4Qb1VwLOEp4hz+CXPywcvjs60u3B4Pom/U/7rm4W8tMOYEB+E9wgD0mW1l3r8qlI9Q==",
+ "requires": {
+ "regenerator-runtime": "^0.13.11"
+ }
+ },
+ "@babel/runtime-corejs3": {
+ "version": "7.22.5",
+ "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.22.5.tgz",
+ "integrity": "sha512-TNPDN6aBFaUox2Lu+H/Y1dKKQgr4ucz/FGyCz67RVYLsBpVpUFf1dDngzg+Od8aqbrqwyztkaZjtWCZEUOT8zA==",
+ "requires": {
+ "core-js-pure": "^3.30.2",
+ "regenerator-runtime": "^0.13.11"
+ }
+ },
+ "@babel/template": {
+ "version": "7.25.0",
+ "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.25.0.tgz",
+ "integrity": "sha512-aOOgh1/5XzKvg1jvVz7AVrx2piJ2XBi227DHmbY6y+bM9H2FlN+IfecYu4Xl0cNiiVejlsCri89LUsbj8vJD9Q==",
+ "requires": {
+ "@babel/code-frame": "^7.24.7",
+ "@babel/parser": "^7.25.0",
+ "@babel/types": "^7.25.0"
+ }
+ },
+ "@babel/traverse": {
+ "version": "7.25.6",
+ "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.25.6.tgz",
+ "integrity": "sha512-9Vrcx5ZW6UwK5tvqsj0nGpp/XzqthkT0dqIc9g1AdtygFToNtTF67XzYS//dm+SAK9cp3B9R4ZO/46p63SCjlQ==",
+ "requires": {
+ "@babel/code-frame": "^7.24.7",
+ "@babel/generator": "^7.25.6",
+ "@babel/parser": "^7.25.6",
+ "@babel/template": "^7.25.0",
+ "@babel/types": "^7.25.6",
+ "debug": "^4.3.1",
+ "globals": "^11.1.0"
+ }
+ },
+ "@babel/types": {
+ "version": "7.25.6",
+ "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.25.6.tgz",
+ "integrity": "sha512-/l42B1qxpG6RdfYf343Uw1vmDjeNhneUXtzhojE7pDgfpEypmRhI6j1kr17XCVv4Cgl9HdAiQY2x0GwKm7rWCw==",
+ "requires": {
+ "@babel/helper-string-parser": "^7.24.8",
+ "@babel/helper-validator-identifier": "^7.24.7",
+ "to-fast-properties": "^2.0.0"
+ }
+ },
+ "@colors/colors": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz",
+ "integrity": "sha512-ooWCrlZP11i8GImSjTHYHLkvFDP48nS4+204nGb1RiX/WXYHmJA2III9/e2DWVabCESdW7hBAEzHRqUn9OUVvQ==",
+ "optional": true
+ },
+ "@discoveryjs/json-ext": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz",
+ "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw=="
+ },
+ "@docusaurus/core": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-2.4.3.tgz",
+ "integrity": "sha512-dWH5P7cgeNSIg9ufReX6gaCl/TmrGKD38Orbwuz05WPhAQtFXHd5B8Qym1TiXfvUNvwoYKkAJOJuGe8ou0Z7PA==",
+ "requires": {
+ "@babel/core": "^7.18.6",
+ "@babel/generator": "^7.18.7",
+ "@babel/plugin-syntax-dynamic-import": "^7.8.3",
+ "@babel/plugin-transform-runtime": "^7.18.6",
+ "@babel/preset-env": "^7.18.6",
+ "@babel/preset-react": "^7.18.6",
+ "@babel/preset-typescript": "^7.18.6",
+ "@babel/runtime": "^7.18.6",
+ "@babel/runtime-corejs3": "^7.18.6",
+ "@babel/traverse": "^7.18.8",
+ "@docusaurus/cssnano-preset": "2.4.3",
+ "@docusaurus/logger": "2.4.3",
+ "@docusaurus/mdx-loader": "2.4.3",
+ "@docusaurus/react-loadable": "5.5.2",
+ "@docusaurus/utils": "2.4.3",
+ "@docusaurus/utils-common": "2.4.3",
+ "@docusaurus/utils-validation": "2.4.3",
+ "@slorber/static-site-generator-webpack-plugin": "^4.0.7",
+ "@svgr/webpack": "^6.2.1",
+ "autoprefixer": "^10.4.7",
+ "babel-loader": "^8.2.5",
+ "babel-plugin-dynamic-import-node": "^2.3.3",
+ "boxen": "^6.2.1",
+ "chalk": "^4.1.2",
+ "chokidar": "^3.5.3",
+ "clean-css": "^5.3.0",
+ "cli-table3": "^0.6.2",
+ "combine-promises": "^1.1.0",
+ "commander": "^5.1.0",
+ "copy-webpack-plugin": "^11.0.0",
+ "core-js": "^3.23.3",
+ "css-loader": "^6.7.1",
+ "css-minimizer-webpack-plugin": "^4.0.0",
+ "cssnano": "^5.1.12",
+ "del": "^6.1.1",
+ "detect-port": "^1.3.0",
+ "escape-html": "^1.0.3",
+ "eta": "^2.0.0",
+ "file-loader": "^6.2.0",
+ "fs-extra": "^10.1.0",
+ "html-minifier-terser": "^6.1.0",
+ "html-tags": "^3.2.0",
+ "html-webpack-plugin": "^5.5.0",
+ "import-fresh": "^3.3.0",
+ "leven": "^3.1.0",
+ "lodash": "^4.17.21",
+ "mini-css-extract-plugin": "^2.6.1",
+ "postcss": "^8.4.14",
+ "postcss-loader": "^7.0.0",
+ "prompts": "^2.4.2",
+ "react-dev-utils": "^12.0.1",
+ "react-helmet-async": "^1.3.0",
+ "react-loadable": "npm:@docusaurus/react-loadable@5.5.2",
+ "react-loadable-ssr-addon-v5-slorber": "^1.0.1",
+ "react-router": "^5.3.3",
+ "react-router-config": "^5.1.1",
+ "react-router-dom": "^5.3.3",
+ "rtl-detect": "^1.0.4",
+ "semver": "^7.3.7",
+ "serve-handler": "^6.1.3",
+ "shelljs": "^0.8.5",
+ "terser-webpack-plugin": "^5.3.3",
+ "tslib": "^2.4.0",
+ "update-notifier": "^5.1.0",
+ "url-loader": "^4.1.1",
+ "wait-on": "^6.0.1",
+ "webpack": "^5.73.0",
+ "webpack-bundle-analyzer": "^4.5.0",
+ "webpack-dev-server": "^4.9.3",
+ "webpack-merge": "^5.8.0",
+ "webpackbar": "^5.0.2"
+ },
+ "dependencies": {
+ "commander": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz",
+ "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg=="
+ },
+ "fs-extra": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
+ "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
+ "requires": {
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
+ }
+ },
+ "jsonfile": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
+ "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
+ "requires": {
+ "graceful-fs": "^4.1.6",
+ "universalify": "^2.0.0"
+ }
+ },
+ "universalify": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz",
+ "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ=="
+ }
+ }
+ },
+ "@docusaurus/cssnano-preset": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-2.4.3.tgz",
+ "integrity": "sha512-ZvGSRCi7z9wLnZrXNPG6DmVPHdKGd8dIn9pYbEOFiYihfv4uDR3UtxogmKf+rT8ZlKFf5Lqne8E8nt08zNM8CA==",
+ "requires": {
+ "cssnano-preset-advanced": "^5.3.8",
+ "postcss": "^8.4.14",
+ "postcss-sort-media-queries": "^4.2.1",
+ "tslib": "^2.4.0"
+ }
+ },
+ "@docusaurus/logger": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-2.4.3.tgz",
+ "integrity": "sha512-Zxws7r3yLufk9xM1zq9ged0YHs65mlRmtsobnFkdZTxWXdTYlWWLWdKyNKAsVC+D7zg+pv2fGbyabdOnyZOM3w==",
+ "requires": {
+ "chalk": "^4.1.2",
+ "tslib": "^2.4.0"
+ }
+ },
+ "@docusaurus/mdx-loader": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-2.4.3.tgz",
+ "integrity": "sha512-b1+fDnWtl3GiqkL0BRjYtc94FZrcDDBV1j8446+4tptB9BAOlePwG2p/pK6vGvfL53lkOsszXMghr2g67M0vCw==",
+ "requires": {
+ "@babel/parser": "^7.18.8",
+ "@babel/traverse": "^7.18.8",
+ "@docusaurus/logger": "2.4.3",
+ "@docusaurus/utils": "2.4.3",
+ "@mdx-js/mdx": "^1.6.22",
+ "escape-html": "^1.0.3",
+ "file-loader": "^6.2.0",
+ "fs-extra": "^10.1.0",
+ "image-size": "^1.0.1",
+ "mdast-util-to-string": "^2.0.0",
+ "remark-emoji": "^2.2.0",
+ "stringify-object": "^3.3.0",
+ "tslib": "^2.4.0",
+ "unified": "^9.2.2",
+ "unist-util-visit": "^2.0.3",
+ "url-loader": "^4.1.1",
+ "webpack": "^5.73.0"
+ },
+ "dependencies": {
+ "fs-extra": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
+ "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
+ "requires": {
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
+ }
+ },
+ "jsonfile": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
+ "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
+ "requires": {
+ "graceful-fs": "^4.1.6",
+ "universalify": "^2.0.0"
+ }
+ },
+ "universalify": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
+ "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw=="
+ }
+ }
+ },
+ "@docusaurus/react-loadable": {
+ "version": "5.5.2",
+ "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz",
+ "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==",
+ "requires": {
+ "@types/react": "*",
+ "prop-types": "^15.6.2"
+ }
+ },
+ "@docusaurus/types": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-2.4.1.tgz",
+ "integrity": "sha512-0R+cbhpMkhbRXX138UOc/2XZFF8hiZa6ooZAEEJFp5scytzCw4tC1gChMFXrpa3d2tYE6AX8IrOEpSonLmfQuQ==",
+ "requires": {
+ "@types/history": "^4.7.11",
+ "@types/react": "*",
+ "commander": "^5.1.0",
+ "joi": "^17.6.0",
+ "react-helmet-async": "^1.3.0",
+ "utility-types": "^3.10.0",
+ "webpack": "^5.73.0",
+ "webpack-merge": "^5.8.0"
+ },
+ "dependencies": {
+ "commander": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz",
+ "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg=="
+ }
+ }
+ },
+ "@docusaurus/utils": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-2.4.3.tgz",
+ "integrity": "sha512-fKcXsjrD86Smxv8Pt0TBFqYieZZCPh4cbf9oszUq/AMhZn3ujwpKaVYZACPX8mmjtYx0JOgNx52CREBfiGQB4A==",
+ "requires": {
+ "@docusaurus/logger": "2.4.3",
+ "@svgr/webpack": "^6.2.1",
+ "escape-string-regexp": "^4.0.0",
+ "file-loader": "^6.2.0",
+ "fs-extra": "^10.1.0",
+ "github-slugger": "^1.4.0",
+ "globby": "^11.1.0",
+ "gray-matter": "^4.0.3",
+ "js-yaml": "^4.1.0",
+ "lodash": "^4.17.21",
+ "micromatch": "^4.0.5",
+ "resolve-pathname": "^3.0.0",
+ "shelljs": "^0.8.5",
+ "tslib": "^2.4.0",
+ "url-loader": "^4.1.1",
+ "webpack": "^5.73.0"
+ },
+ "dependencies": {
+ "fs-extra": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
+ "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
+ "requires": {
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
+ }
+ },
+ "jsonfile": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
+ "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
+ "requires": {
+ "graceful-fs": "^4.1.6",
+ "universalify": "^2.0.0"
+ }
+ },
+ "universalify": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz",
+ "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ=="
+ }
+ }
+ },
+ "@docusaurus/utils-common": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-2.4.3.tgz",
+ "integrity": "sha512-/jascp4GbLQCPVmcGkPzEQjNaAk3ADVfMtudk49Ggb+131B1WDD6HqlSmDf8MxGdy7Dja2gc+StHf01kiWoTDQ==",
+ "requires": {
+ "tslib": "^2.4.0"
+ }
+ },
+ "@docusaurus/utils-validation": {
+ "version": "2.4.3",
+ "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-2.4.3.tgz",
+ "integrity": "sha512-G2+Vt3WR5E/9drAobP+hhZQMaswRwDlp6qOMi7o7ZypB+VO7N//DZWhZEwhcRGepMDJGQEwtPv7UxtYwPL9PBw==",
+ "requires": {
+ "@docusaurus/logger": "2.4.3",
+ "@docusaurus/utils": "2.4.3",
+ "joi": "^17.6.0",
+ "js-yaml": "^4.1.0",
+ "tslib": "^2.4.0"
+ }
+ },
+ "@h2oai/makersaurus": {
+ "version": "0.8.4",
+ "resolved": "https://npm.pkg.github.com/download/@h2oai/makersaurus/0.8.4/c185e488ffe1d93da1cc547eeee2e489cfa253c5",
+ "integrity": "sha512-18Cl3gba9hzd7kr6jruzSasjh7VYjeap8KngNPHB/7rrVBHTgcqEcMu72aa7GghWStrwonGr+MB13mx+faOtzQ==",
+ "requires": {
+ "commander": "^9.4.1",
+ "handlebars": "^4.7.7",
+ "sync-directory": "^5.1.9",
+ "yup": "^0.32.11"
+ }
+ },
+ "@hapi/hoek": {
+ "version": "9.3.0",
+ "resolved": "https://registry.npmjs.org/@hapi/hoek/-/hoek-9.3.0.tgz",
+ "integrity": "sha512-/c6rf4UJlmHlC9b5BaNvzAcFv7HZ2QHaV0D4/HNlBdvFnvQq8RI4kYdhyPCl7Xj+oWvTWQ8ujhqS53LIgAe6KQ=="
+ },
+ "@hapi/topo": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/@hapi/topo/-/topo-5.1.0.tgz",
+ "integrity": "sha512-foQZKJig7Ob0BMAYBfcJk8d77QtOe7Wo4ox7ff1lQYoNNAb6jwcY1ncdoy2e9wQZzvNy7ODZCYJkK8kzmcAnAg==",
+ "requires": {
+ "@hapi/hoek": "^9.0.0"
+ }
+ },
+ "@jest/schemas": {
+ "version": "29.4.3",
+ "resolved": "https://registry.npmjs.org/@jest/schemas/-/schemas-29.4.3.tgz",
+ "integrity": "sha512-VLYKXQmtmuEz6IxJsrZwzG9NvtkQsWNnWMsKxqWNu3+CnfzJQhp0WDDKWLVV9hLKr0l3SLLFRqcYHjhtyuDVxg==",
+ "requires": {
+ "@sinclair/typebox": "^0.25.16"
+ }
+ },
+ "@jest/types": {
+ "version": "29.5.0",
+ "resolved": "https://registry.npmjs.org/@jest/types/-/types-29.5.0.tgz",
+ "integrity": "sha512-qbu7kN6czmVRc3xWFQcAN03RAUamgppVUdXrvl1Wr3jlNF93o9mJbGcDWrwGB6ht44u7efB1qCFgVQmca24Uog==",
+ "requires": {
+ "@jest/schemas": "^29.4.3",
+ "@types/istanbul-lib-coverage": "^2.0.0",
+ "@types/istanbul-reports": "^3.0.0",
+ "@types/node": "*",
+ "@types/yargs": "^17.0.8",
+ "chalk": "^4.0.0"
+ }
+ },
+ "@jridgewell/gen-mapping": {
+ "version": "0.3.5",
+ "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.5.tgz",
+ "integrity": "sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==",
+ "requires": {
+ "@jridgewell/set-array": "^1.2.1",
+ "@jridgewell/sourcemap-codec": "^1.4.10",
+ "@jridgewell/trace-mapping": "^0.3.24"
+ }
+ },
+ "@jridgewell/resolve-uri": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.0.tgz",
+ "integrity": "sha512-F2msla3tad+Mfht5cJq7LSXcdudKTWCVYUgw6pLFOOHSTtZlj6SWNYAp+AhuqLmWdBO2X5hPrLcu8cVP8fy28w=="
+ },
+ "@jridgewell/set-array": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/@jridgewell/set-array/-/set-array-1.2.1.tgz",
+ "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A=="
+ },
+ "@jridgewell/source-map": {
+ "version": "0.3.3",
+ "resolved": "https://registry.npmjs.org/@jridgewell/source-map/-/source-map-0.3.3.tgz",
+ "integrity": "sha512-b+fsZXeLYi9fEULmfBrhxn4IrPlINf8fiNarzTof004v3lFdntdwa9PF7vFJqm3mg7s+ScJMxXaE3Acp1irZcg==",
+ "requires": {
+ "@jridgewell/gen-mapping": "^0.3.0",
+ "@jridgewell/trace-mapping": "^0.3.9"
+ }
+ },
+ "@jridgewell/sourcemap-codec": {
+ "version": "1.4.15",
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.4.15.tgz",
+ "integrity": "sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg=="
+ },
+ "@jridgewell/trace-mapping": {
+ "version": "0.3.25",
+ "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz",
+ "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==",
+ "requires": {
+ "@jridgewell/resolve-uri": "^3.1.0",
+ "@jridgewell/sourcemap-codec": "^1.4.14"
+ }
+ },
+ "@leichtgewicht/ip-codec": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/@leichtgewicht/ip-codec/-/ip-codec-2.0.4.tgz",
+ "integrity": "sha512-Hcv+nVC0kZnQ3tD9GVu5xSMR4VVYOteQIr/hwFPVEvPdlXqgGEuRjiheChHgdM+JyqdgNcmzZOX/tnl0JOiI7A=="
+ },
+ "@mdx-js/mdx": {
+ "version": "1.6.22",
+ "resolved": "https://registry.npmjs.org/@mdx-js/mdx/-/mdx-1.6.22.tgz",
+ "integrity": "sha512-AMxuLxPz2j5/6TpF/XSdKpQP1NlG0z11dFOlq+2IP/lSgl11GY8ji6S/rgsViN/L0BDvHvUMruRb7ub+24LUYA==",
+ "requires": {
+ "@babel/core": "7.12.9",
+ "@babel/plugin-syntax-jsx": "7.12.1",
+ "@babel/plugin-syntax-object-rest-spread": "7.8.3",
+ "@mdx-js/util": "1.6.22",
+ "babel-plugin-apply-mdx-type-prop": "1.6.22",
+ "babel-plugin-extract-import-names": "1.6.22",
+ "camelcase-css": "2.0.1",
+ "detab": "2.0.4",
+ "hast-util-raw": "6.0.1",
+ "lodash.uniq": "4.5.0",
+ "mdast-util-to-hast": "10.0.1",
+ "remark-footnotes": "2.0.0",
+ "remark-mdx": "1.6.22",
+ "remark-parse": "8.0.3",
+ "remark-squeeze-paragraphs": "4.0.0",
+ "style-to-object": "0.3.0",
+ "unified": "9.2.0",
+ "unist-builder": "2.0.3",
+ "unist-util-visit": "2.0.3"
+ },
+ "dependencies": {
+ "@babel/core": {
+ "version": "7.12.9",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz",
+ "integrity": "sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==",
+ "requires": {
+ "@babel/code-frame": "^7.10.4",
+ "@babel/generator": "^7.12.5",
+ "@babel/helper-module-transforms": "^7.12.1",
+ "@babel/helpers": "^7.12.5",
+ "@babel/parser": "^7.12.7",
+ "@babel/template": "^7.12.7",
+ "@babel/traverse": "^7.12.9",
+ "@babel/types": "^7.12.7",
+ "convert-source-map": "^1.7.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.1",
+ "json5": "^2.1.2",
+ "lodash": "^4.17.19",
+ "resolve": "^1.3.2",
+ "semver": "^5.4.1",
+ "source-map": "^0.5.0"
+ }
+ },
+ "@babel/plugin-syntax-jsx": {
+ "version": "7.12.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz",
+ "integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ }
+ },
+ "semver": {
+ "version": "5.7.2",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz",
+ "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g=="
+ },
+ "source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ=="
+ },
+ "unified": {
+ "version": "9.2.0",
+ "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz",
+ "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==",
+ "requires": {
+ "bail": "^1.0.0",
+ "extend": "^3.0.0",
+ "is-buffer": "^2.0.0",
+ "is-plain-obj": "^2.0.0",
+ "trough": "^1.0.0",
+ "vfile": "^4.0.0"
+ }
+ }
+ }
+ },
+ "@mdx-js/util": {
+ "version": "1.6.22",
+ "resolved": "https://registry.npmjs.org/@mdx-js/util/-/util-1.6.22.tgz",
+ "integrity": "sha512-H1rQc1ZOHANWBvPcW+JpGwr+juXSxM8Q8YCkm3GhZd8REu1fHR3z99CErO1p9pkcfcxZnMdIZdIsXkOHY0NilA=="
+ },
+ "@nodelib/fs.scandir": {
+ "version": "2.1.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
+ "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==",
+ "requires": {
+ "@nodelib/fs.stat": "2.0.5",
+ "run-parallel": "^1.1.9"
+ }
+ },
+ "@nodelib/fs.stat": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz",
+ "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A=="
+ },
+ "@nodelib/fs.walk": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz",
+ "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==",
+ "requires": {
+ "@nodelib/fs.scandir": "2.1.5",
+ "fastq": "^1.6.0"
+ }
+ },
+ "@polka/url": {
+ "version": "1.0.0-next.21",
+ "resolved": "https://registry.npmjs.org/@polka/url/-/url-1.0.0-next.21.tgz",
+ "integrity": "sha512-a5Sab1C4/icpTZVzZc5Ghpz88yQtGOyNqYXcZgOssB2uuAr+wF/MvN6bgtW32q7HHrvBki+BsZ0OuNv6EV3K9g=="
+ },
+ "@sideway/address": {
+ "version": "4.1.4",
+ "resolved": "https://registry.npmjs.org/@sideway/address/-/address-4.1.4.tgz",
+ "integrity": "sha512-7vwq+rOHVWjyXxVlR76Agnvhy8I9rpzjosTESvmhNeXOXdZZB15Fl+TI9x1SiHZH5Jv2wTGduSxFDIaq0m3DUw==",
+ "requires": {
+ "@hapi/hoek": "^9.0.0"
+ }
+ },
+ "@sideway/formula": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@sideway/formula/-/formula-3.0.1.tgz",
+ "integrity": "sha512-/poHZJJVjx3L+zVD6g9KgHfYnb443oi7wLu/XKojDviHy6HOEOA6z1Trk5aR1dGcmPenJEgb2sK2I80LeS3MIg=="
+ },
+ "@sideway/pinpoint": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/@sideway/pinpoint/-/pinpoint-2.0.0.tgz",
+ "integrity": "sha512-RNiOoTPkptFtSVzQevY/yWtZwf/RxyVnPy/OcA9HBM3MlGDnBEYL5B41H0MTn0Uec8Hi+2qUtTfG2WWZBmMejQ=="
+ },
+ "@sinclair/typebox": {
+ "version": "0.25.24",
+ "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.25.24.tgz",
+ "integrity": "sha512-XJfwUVUKDHF5ugKwIcxEgc9k8b7HbznCp6eUfWgu710hMPNIO4aw4/zB5RogDQz8nd6gyCDpU9O/m6qYEWY6yQ=="
+ },
+ "@sindresorhus/is": {
+ "version": "0.14.0",
+ "resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-0.14.0.tgz",
+ "integrity": "sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ=="
+ },
+ "@slorber/static-site-generator-webpack-plugin": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/@slorber/static-site-generator-webpack-plugin/-/static-site-generator-webpack-plugin-4.0.7.tgz",
+ "integrity": "sha512-Ug7x6z5lwrz0WqdnNFOMYrDQNTPAprvHLSh6+/fmml3qUiz6l5eq+2MzLKWtn/q5K5NpSiFsZTP/fck/3vjSxA==",
+ "requires": {
+ "eval": "^0.1.8",
+ "p-map": "^4.0.0",
+ "webpack-sources": "^3.2.2"
+ }
+ },
+ "@svgr/babel-plugin-add-jsx-attribute": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-add-jsx-attribute/-/babel-plugin-add-jsx-attribute-6.5.1.tgz",
+ "integrity": "sha512-9PYGcXrAxitycIjRmZB+Q0JaN07GZIWaTBIGQzfaZv+qr1n8X1XUEJ5rZ/vx6OVD9RRYlrNnXWExQXcmZeD/BQ==",
+ "requires": {}
+ },
+ "@svgr/babel-plugin-remove-jsx-attribute": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-attribute/-/babel-plugin-remove-jsx-attribute-8.0.0.tgz",
+ "integrity": "sha512-BcCkm/STipKvbCl6b7QFrMh/vx00vIP63k2eM66MfHJzPr6O2U0jYEViXkHJWqXqQYjdeA9cuCl5KWmlwjDvbA==",
+ "requires": {}
+ },
+ "@svgr/babel-plugin-remove-jsx-empty-expression": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-remove-jsx-empty-expression/-/babel-plugin-remove-jsx-empty-expression-8.0.0.tgz",
+ "integrity": "sha512-5BcGCBfBxB5+XSDSWnhTThfI9jcO5f0Ai2V24gZpG+wXF14BzwxxdDb4g6trdOux0rhibGs385BeFMSmxtS3uA==",
+ "requires": {}
+ },
+ "@svgr/babel-plugin-replace-jsx-attribute-value": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-replace-jsx-attribute-value/-/babel-plugin-replace-jsx-attribute-value-6.5.1.tgz",
+ "integrity": "sha512-8DPaVVE3fd5JKuIC29dqyMB54sA6mfgki2H2+swh+zNJoynC8pMPzOkidqHOSc6Wj032fhl8Z0TVn1GiPpAiJg==",
+ "requires": {}
+ },
+ "@svgr/babel-plugin-svg-dynamic-title": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-dynamic-title/-/babel-plugin-svg-dynamic-title-6.5.1.tgz",
+ "integrity": "sha512-FwOEi0Il72iAzlkaHrlemVurgSQRDFbk0OC8dSvD5fSBPHltNh7JtLsxmZUhjYBZo2PpcU/RJvvi6Q0l7O7ogw==",
+ "requires": {}
+ },
+ "@svgr/babel-plugin-svg-em-dimensions": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-svg-em-dimensions/-/babel-plugin-svg-em-dimensions-6.5.1.tgz",
+ "integrity": "sha512-gWGsiwjb4tw+ITOJ86ndY/DZZ6cuXMNE/SjcDRg+HLuCmwpcjOktwRF9WgAiycTqJD/QXqL2f8IzE2Rzh7aVXA==",
+ "requires": {}
+ },
+ "@svgr/babel-plugin-transform-react-native-svg": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-react-native-svg/-/babel-plugin-transform-react-native-svg-6.5.1.tgz",
+ "integrity": "sha512-2jT3nTayyYP7kI6aGutkyfJ7UMGtuguD72OjeGLwVNyfPRBD8zQthlvL+fAbAKk5n9ZNcvFkp/b1lZ7VsYqVJg==",
+ "requires": {}
+ },
+ "@svgr/babel-plugin-transform-svg-component": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-plugin-transform-svg-component/-/babel-plugin-transform-svg-component-6.5.1.tgz",
+ "integrity": "sha512-a1p6LF5Jt33O3rZoVRBqdxL350oge54iZWHNI6LJB5tQ7EelvD/Mb1mfBiZNAan0dt4i3VArkFRjA4iObuNykQ==",
+ "requires": {}
+ },
+ "@svgr/babel-preset": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/babel-preset/-/babel-preset-6.5.1.tgz",
+ "integrity": "sha512-6127fvO/FF2oi5EzSQOAjo1LE3OtNVh11R+/8FXa+mHx1ptAaS4cknIjnUA7e6j6fwGGJ17NzaTJFUwOV2zwCw==",
+ "requires": {
+ "@svgr/babel-plugin-add-jsx-attribute": "^6.5.1",
+ "@svgr/babel-plugin-remove-jsx-attribute": "*",
+ "@svgr/babel-plugin-remove-jsx-empty-expression": "*",
+ "@svgr/babel-plugin-replace-jsx-attribute-value": "^6.5.1",
+ "@svgr/babel-plugin-svg-dynamic-title": "^6.5.1",
+ "@svgr/babel-plugin-svg-em-dimensions": "^6.5.1",
+ "@svgr/babel-plugin-transform-react-native-svg": "^6.5.1",
+ "@svgr/babel-plugin-transform-svg-component": "^6.5.1"
+ }
+ },
+ "@svgr/core": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/core/-/core-6.5.1.tgz",
+ "integrity": "sha512-/xdLSWxK5QkqG524ONSjvg3V/FkNyCv538OIBdQqPNaAta3AsXj/Bd2FbvR87yMbXO2hFSWiAe/Q6IkVPDw+mw==",
+ "requires": {
+ "@babel/core": "^7.19.6",
+ "@svgr/babel-preset": "^6.5.1",
+ "@svgr/plugin-jsx": "^6.5.1",
+ "camelcase": "^6.2.0",
+ "cosmiconfig": "^7.0.1"
+ }
+ },
+ "@svgr/hast-util-to-babel-ast": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/hast-util-to-babel-ast/-/hast-util-to-babel-ast-6.5.1.tgz",
+ "integrity": "sha512-1hnUxxjd83EAxbL4a0JDJoD3Dao3hmjvyvyEV8PzWmLK3B9m9NPlW7GKjFyoWE8nM7HnXzPcmmSyOW8yOddSXw==",
+ "requires": {
+ "@babel/types": "^7.20.0",
+ "entities": "^4.4.0"
+ }
+ },
+ "@svgr/plugin-jsx": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/plugin-jsx/-/plugin-jsx-6.5.1.tgz",
+ "integrity": "sha512-+UdQxI3jgtSjCykNSlEMuy1jSRQlGC7pqBCPvkG/2dATdWo082zHTTK3uhnAju2/6XpE6B5mZ3z4Z8Ns01S8Gw==",
+ "requires": {
+ "@babel/core": "^7.19.6",
+ "@svgr/babel-preset": "^6.5.1",
+ "@svgr/hast-util-to-babel-ast": "^6.5.1",
+ "svg-parser": "^2.0.4"
+ }
+ },
+ "@svgr/plugin-svgo": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/plugin-svgo/-/plugin-svgo-6.5.1.tgz",
+ "integrity": "sha512-omvZKf8ixP9z6GWgwbtmP9qQMPX4ODXi+wzbVZgomNFsUIlHA1sf4fThdwTWSsZGgvGAG6yE+b/F5gWUkcZ/iQ==",
+ "requires": {
+ "cosmiconfig": "^7.0.1",
+ "deepmerge": "^4.2.2",
+ "svgo": "^2.8.0"
+ }
+ },
+ "@svgr/webpack": {
+ "version": "6.5.1",
+ "resolved": "https://registry.npmjs.org/@svgr/webpack/-/webpack-6.5.1.tgz",
+ "integrity": "sha512-cQ/AsnBkXPkEK8cLbv4Dm7JGXq2XrumKnL1dRpJD9rIO2fTIlJI9a1uCciYG1F2aUsox/hJQyNGbt3soDxSRkA==",
+ "requires": {
+ "@babel/core": "^7.19.6",
+ "@babel/plugin-transform-react-constant-elements": "^7.18.12",
+ "@babel/preset-env": "^7.19.4",
+ "@babel/preset-react": "^7.18.6",
+ "@babel/preset-typescript": "^7.18.6",
+ "@svgr/core": "^6.5.1",
+ "@svgr/plugin-jsx": "^6.5.1",
+ "@svgr/plugin-svgo": "^6.5.1"
+ }
+ },
+ "@szmarczak/http-timer": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-1.1.2.tgz",
+ "integrity": "sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA==",
+ "requires": {
+ "defer-to-connect": "^1.0.1"
+ }
+ },
+ "@trysound/sax": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/@trysound/sax/-/sax-0.2.0.tgz",
+ "integrity": "sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA=="
+ },
+ "@types/body-parser": {
+ "version": "1.19.2",
+ "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.2.tgz",
+ "integrity": "sha512-ALYone6pm6QmwZoAgeyNksccT9Q4AWZQ6PvfwR37GT6r6FWUPguq6sUmNGSMV2Wr761oQoBxwGGa6DR5o1DC9g==",
+ "requires": {
+ "@types/connect": "*",
+ "@types/node": "*"
+ }
+ },
+ "@types/bonjour": {
+ "version": "3.5.10",
+ "resolved": "https://registry.npmjs.org/@types/bonjour/-/bonjour-3.5.10.tgz",
+ "integrity": "sha512-p7ienRMiS41Nu2/igbJxxLDWrSZ0WxM8UQgCeO9KhoVF7cOVFkrKsiDr1EsJIla8vV3oEEjGcz11jc5yimhzZw==",
+ "requires": {
+ "@types/node": "*"
+ }
+ },
+ "@types/connect": {
+ "version": "3.4.35",
+ "resolved": "https://registry.npmjs.org/@types/connect/-/connect-3.4.35.tgz",
+ "integrity": "sha512-cdeYyv4KWoEgpBISTxWvqYsVy444DOqehiF3fM3ne10AmJ62RSyNkUnxMJXHQWRQQX2eR94m5y1IZyDwBjV9FQ==",
+ "requires": {
+ "@types/node": "*"
+ }
+ },
+ "@types/connect-history-api-fallback": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/@types/connect-history-api-fallback/-/connect-history-api-fallback-1.5.0.tgz",
+ "integrity": "sha512-4x5FkPpLipqwthjPsF7ZRbOv3uoLUFkTA9G9v583qi4pACvq0uTELrB8OLUzPWUI4IJIyvM85vzkV1nyiI2Lig==",
+ "requires": {
+ "@types/express-serve-static-core": "*",
+ "@types/node": "*"
+ }
+ },
+ "@types/estree": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz",
+ "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw=="
+ },
+ "@types/express": {
+ "version": "4.17.17",
+ "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.17.tgz",
+ "integrity": "sha512-Q4FmmuLGBG58btUnfS1c1r/NQdlp3DMfGDGig8WhfpA2YRUtEkxAjkZb0yvplJGYdF1fsQ81iMDcH24sSCNC/Q==",
+ "requires": {
+ "@types/body-parser": "*",
+ "@types/express-serve-static-core": "^4.17.33",
+ "@types/qs": "*",
+ "@types/serve-static": "*"
+ }
+ },
+ "@types/express-serve-static-core": {
+ "version": "4.17.35",
+ "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.17.35.tgz",
+ "integrity": "sha512-wALWQwrgiB2AWTT91CB62b6Yt0sNHpznUXeZEcnPU3DRdlDIz74x8Qg1UUYKSVFi+va5vKOLYRBI1bRKiLLKIg==",
+ "requires": {
+ "@types/node": "*",
+ "@types/qs": "*",
+ "@types/range-parser": "*",
+ "@types/send": "*"
+ }
+ },
+ "@types/hast": {
+ "version": "2.3.10",
+ "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.10.tgz",
+ "integrity": "sha512-McWspRw8xx8J9HurkVBfYj0xKoE25tOFlHGdx4MJ5xORQrMGZNqJhVQWaIbm6Oyla5kYOXtDiopzKRJzEOkwJw==",
+ "requires": {
+ "@types/unist": "^2"
+ }
+ },
+ "@types/history": {
+ "version": "4.7.11",
+ "resolved": "https://registry.npmjs.org/@types/history/-/history-4.7.11.tgz",
+ "integrity": "sha512-qjDJRrmvBMiTx+jyLxvLfJU7UznFuokDv4f3WRuriHKERccVpFU+8XMQUAbDzoiJCsmexxRExQeMwwCdamSKDA=="
+ },
+ "@types/html-minifier-terser": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/@types/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz",
+ "integrity": "sha512-oh/6byDPnL1zeNXFrDXFLyZjkr1MsBG667IM792caf1L2UPOOMf65NFzjUH/ltyfwjAGfs1rsX1eftK0jC/KIg=="
+ },
+ "@types/http-proxy": {
+ "version": "1.17.11",
+ "resolved": "https://registry.npmjs.org/@types/http-proxy/-/http-proxy-1.17.11.tgz",
+ "integrity": "sha512-HC8G7c1WmaF2ekqpnFq626xd3Zz0uvaqFmBJNRZCGEZCXkvSdJoNFn/8Ygbd9fKNQj8UzLdCETaI0UWPAjK7IA==",
+ "requires": {
+ "@types/node": "*"
+ }
+ },
+ "@types/istanbul-lib-coverage": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-lib-coverage/-/istanbul-lib-coverage-2.0.4.tgz",
+ "integrity": "sha512-z/QT1XN4K4KYuslS23k62yDIDLwLFkzxOuMplDtObz0+y7VqJCaO2o+SPwHCvLFZh7xazvvoor2tA/hPz9ee7g=="
+ },
+ "@types/istanbul-lib-report": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-lib-report/-/istanbul-lib-report-3.0.0.tgz",
+ "integrity": "sha512-plGgXAPfVKFoYfa9NpYDAkseG+g6Jr294RqeqcqDixSbU34MZVJRi/P+7Y8GDpzkEwLaGZZOpKIEmeVZNtKsrg==",
+ "requires": {
+ "@types/istanbul-lib-coverage": "*"
+ }
+ },
+ "@types/istanbul-reports": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/@types/istanbul-reports/-/istanbul-reports-3.0.1.tgz",
+ "integrity": "sha512-c3mAZEuK0lvBp8tmuL74XRKn1+y2dcwOUpH7x4WrF6gk1GIgiluDRgMYQtw2OFcBvAJWlt6ASU3tSqxp0Uu0Aw==",
+ "requires": {
+ "@types/istanbul-lib-report": "*"
+ }
+ },
+ "@types/json-schema": {
+ "version": "7.0.12",
+ "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.12.tgz",
+ "integrity": "sha512-Hr5Jfhc9eYOQNPYO5WLDq/n4jqijdHNlDXjuAQkkt+mWdQR+XJToOHrsD4cPaMXpn6KO7y2+wM8AZEs8VpBLVA=="
+ },
+ "@types/lodash": {
+ "version": "4.14.194",
+ "resolved": "https://registry.npmjs.org/@types/lodash/-/lodash-4.14.194.tgz",
+ "integrity": "sha512-r22s9tAS7imvBt2lyHC9B8AGwWnXaYb1tY09oyLkXDs4vArpYJzw09nj8MLx5VfciBPGIb+ZwG0ssYnEPJxn/g=="
+ },
+ "@types/mdast": {
+ "version": "3.0.15",
+ "resolved": "https://registry.npmjs.org/@types/mdast/-/mdast-3.0.15.tgz",
+ "integrity": "sha512-LnwD+mUEfxWMa1QpDraczIn6k0Ee3SMicuYSSzS6ZYl2gKS09EClnJYGd8Du6rfc5r/GZEk5o1mRb8TaTj03sQ==",
+ "requires": {
+ "@types/unist": "^2"
+ }
+ },
+ "@types/mime": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/@types/mime/-/mime-1.3.2.tgz",
+ "integrity": "sha512-YATxVxgRqNH6nHEIsvg6k2Boc1JHI9ZbH5iWFFv/MTkchz3b1ieGDa5T0a9RznNdI0KhVbdbWSN+KWWrQZRxTw=="
+ },
+ "@types/node": {
+ "version": "20.2.5",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-20.2.5.tgz",
+ "integrity": "sha512-JJulVEQXmiY9Px5axXHeYGLSjhkZEnD+MDPDGbCbIAbMslkKwmygtZFy1X6s/075Yo94sf8GuSlFfPzysQrWZQ=="
+ },
+ "@types/parse-json": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.0.tgz",
+ "integrity": "sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA=="
+ },
+ "@types/parse5": {
+ "version": "5.0.3",
+ "resolved": "https://registry.npmjs.org/@types/parse5/-/parse5-5.0.3.tgz",
+ "integrity": "sha512-kUNnecmtkunAoQ3CnjmMkzNU/gtxG8guhi+Fk2U/kOpIKjIMKnXGp4IJCgQJrXSgMsWYimYG4TGjz/UzbGEBTw=="
+ },
+ "@types/prop-types": {
+ "version": "15.7.5",
+ "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz",
+ "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w=="
+ },
+ "@types/qs": {
+ "version": "6.9.7",
+ "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.7.tgz",
+ "integrity": "sha512-FGa1F62FT09qcrueBA6qYTrJPVDzah9a+493+o2PCXsesWHIn27G98TsSMs3WPNbZIEj4+VJf6saSFpvD+3Zsw=="
+ },
+ "@types/range-parser": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/@types/range-parser/-/range-parser-1.2.4.tgz",
+ "integrity": "sha512-EEhsLsD6UsDM1yFhAvy0Cjr6VwmpMWqFBCb9w07wVugF7w9nfajxLuVmngTIpgS6svCnm6Vaw+MZhoDCKnOfsw=="
+ },
+ "@types/react": {
+ "version": "18.2.9",
+ "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.9.tgz",
+ "integrity": "sha512-pL3JAesUkF7PEQGxh5XOwdXGV907te6m1/Qe1ERJLgomojS6Ne790QiA7GUl434JEkFA2aAaB6qJ5z4e1zJn/w==",
+ "requires": {
+ "@types/prop-types": "*",
+ "@types/scheduler": "*",
+ "csstype": "^3.0.2"
+ }
+ },
+ "@types/retry": {
+ "version": "0.12.0",
+ "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz",
+ "integrity": "sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA=="
+ },
+ "@types/scheduler": {
+ "version": "0.16.3",
+ "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.3.tgz",
+ "integrity": "sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ=="
+ },
+ "@types/send": {
+ "version": "0.17.1",
+ "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.1.tgz",
+ "integrity": "sha512-Cwo8LE/0rnvX7kIIa3QHCkcuF21c05Ayb0ZfxPiv0W8VRiZiNW/WuRupHKpqqGVGf7SUA44QSOUKaEd9lIrd/Q==",
+ "requires": {
+ "@types/mime": "^1",
+ "@types/node": "*"
+ }
+ },
+ "@types/serve-index": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/@types/serve-index/-/serve-index-1.9.1.tgz",
+ "integrity": "sha512-d/Hs3nWDxNL2xAczmOVZNj92YZCS6RGxfBPjKzuu/XirCgXdpKEb88dYNbrYGint6IVWLNP+yonwVAuRC0T2Dg==",
+ "requires": {
+ "@types/express": "*"
+ }
+ },
+ "@types/serve-static": {
+ "version": "1.15.1",
+ "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.1.tgz",
+ "integrity": "sha512-NUo5XNiAdULrJENtJXZZ3fHtfMolzZwczzBbnAeBbqBwG+LaG6YaJtuwzwGSQZ2wsCrxjEhNNjAkKigy3n8teQ==",
+ "requires": {
+ "@types/mime": "*",
+ "@types/node": "*"
+ }
+ },
+ "@types/sockjs": {
+ "version": "0.3.33",
+ "resolved": "https://registry.npmjs.org/@types/sockjs/-/sockjs-0.3.33.tgz",
+ "integrity": "sha512-f0KEEe05NvUnat+boPTZ0dgaLZ4SfSouXUgv5noUiefG2ajgKjmETo9ZJyuqsl7dfl2aHlLJUiki6B4ZYldiiw==",
+ "requires": {
+ "@types/node": "*"
+ }
+ },
+ "@types/unist": {
+ "version": "2.0.11",
+ "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.11.tgz",
+ "integrity": "sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA=="
+ },
+ "@types/ws": {
+ "version": "8.5.5",
+ "resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.5.tgz",
+ "integrity": "sha512-lwhs8hktwxSjf9UaZ9tG5M03PGogvFaH8gUgLNbN9HKIg0dvv6q+gkSuJ8HN4/VbyxkuLzCjlN7GquQ0gUJfIg==",
+ "requires": {
+ "@types/node": "*"
+ }
+ },
+ "@types/yargs": {
+ "version": "17.0.24",
+ "resolved": "https://registry.npmjs.org/@types/yargs/-/yargs-17.0.24.tgz",
+ "integrity": "sha512-6i0aC7jV6QzQB8ne1joVZ0eSFIstHsCrobmOtghM11yGlH0j43FKL2UhWdELkyps0zuf7qVTUVCCR+tgSlyLLw==",
+ "requires": {
+ "@types/yargs-parser": "*"
+ }
+ },
+ "@types/yargs-parser": {
+ "version": "21.0.0",
+ "resolved": "https://registry.npmjs.org/@types/yargs-parser/-/yargs-parser-21.0.0.tgz",
+ "integrity": "sha512-iO9ZQHkZxHn4mSakYV0vFHAVDyEOIJQrV2uZ06HxEPcx+mt8swXoZHIbaaJ2crJYFfErySgktuTZ3BeLz+XmFA=="
+ },
+ "@webassemblyjs/ast": {
+ "version": "1.12.1",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.12.1.tgz",
+ "integrity": "sha512-EKfMUOPRRUTy5UII4qJDGPpqfwjOmZ5jeGFwid9mnoqIFK+e0vqoi1qH56JpmZSzEL53jKnNzScdmftJyG5xWg==",
+ "requires": {
+ "@webassemblyjs/helper-numbers": "1.11.6",
+ "@webassemblyjs/helper-wasm-bytecode": "1.11.6"
+ }
+ },
+ "@webassemblyjs/floating-point-hex-parser": {
+ "version": "1.11.6",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.11.6.tgz",
+ "integrity": "sha512-ejAj9hfRJ2XMsNHk/v6Fu2dGS+i4UaXBXGemOfQ/JfQ6mdQg/WXtwleQRLLS4OvfDhv8rYnVwH27YJLMyYsxhw=="
+ },
+ "@webassemblyjs/helper-api-error": {
+ "version": "1.11.6",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.11.6.tgz",
+ "integrity": "sha512-o0YkoP4pVu4rN8aTJgAyj9hC2Sv5UlkzCHhxqWj8butaLvnpdc2jOwh4ewE6CX0txSfLn/UYaV/pheS2Txg//Q=="
+ },
+ "@webassemblyjs/helper-buffer": {
+ "version": "1.12.1",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.12.1.tgz",
+ "integrity": "sha512-nzJwQw99DNDKr9BVCOZcLuJJUlqkJh+kVzVl6Fmq/tI5ZtEyWT1KZMyOXltXLZJmDtvLCDgwsyrkohEtopTXCw=="
+ },
+ "@webassemblyjs/helper-numbers": {
+ "version": "1.11.6",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-numbers/-/helper-numbers-1.11.6.tgz",
+ "integrity": "sha512-vUIhZ8LZoIWHBohiEObxVm6hwP034jwmc9kuq5GdHZH0wiLVLIPcMCdpJzG4C11cHoQ25TFIQj9kaVADVX7N3g==",
+ "requires": {
+ "@webassemblyjs/floating-point-hex-parser": "1.11.6",
+ "@webassemblyjs/helper-api-error": "1.11.6",
+ "@xtuc/long": "4.2.2"
+ }
+ },
+ "@webassemblyjs/helper-wasm-bytecode": {
+ "version": "1.11.6",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.11.6.tgz",
+ "integrity": "sha512-sFFHKwcmBprO9e7Icf0+gddyWYDViL8bpPjJJl0WHxCdETktXdmtWLGVzoHbqUcY4Be1LkNfwTmXOJUFZYSJdA=="
+ },
+ "@webassemblyjs/helper-wasm-section": {
+ "version": "1.12.1",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.12.1.tgz",
+ "integrity": "sha512-Jif4vfB6FJlUlSbgEMHUyk1j234GTNG9dBJ4XJdOySoj518Xj0oGsNi59cUQF4RRMS9ouBUxDDdyBVfPTypa5g==",
+ "requires": {
+ "@webassemblyjs/ast": "1.12.1",
+ "@webassemblyjs/helper-buffer": "1.12.1",
+ "@webassemblyjs/helper-wasm-bytecode": "1.11.6",
+ "@webassemblyjs/wasm-gen": "1.12.1"
+ }
+ },
+ "@webassemblyjs/ieee754": {
+ "version": "1.11.6",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.11.6.tgz",
+ "integrity": "sha512-LM4p2csPNvbij6U1f19v6WR56QZ8JcHg3QIJTlSwzFcmx6WSORicYj6I63f9yU1kEUtrpG+kjkiIAkevHpDXrg==",
+ "requires": {
+ "@xtuc/ieee754": "^1.2.0"
+ }
+ },
+ "@webassemblyjs/leb128": {
+ "version": "1.11.6",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.11.6.tgz",
+ "integrity": "sha512-m7a0FhE67DQXgouf1tbN5XQcdWoNgaAuoULHIfGFIEVKA6tu/edls6XnIlkmS6FrXAquJRPni3ZZKjw6FSPjPQ==",
+ "requires": {
+ "@xtuc/long": "4.2.2"
+ }
+ },
+ "@webassemblyjs/utf8": {
+ "version": "1.11.6",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.11.6.tgz",
+ "integrity": "sha512-vtXf2wTQ3+up9Zsg8sa2yWiQpzSsMyXj0qViVP6xKGCUT8p8YJ6HqI7l5eCnWx1T/FYdsv07HQs2wTFbbof/RA=="
+ },
+ "@webassemblyjs/wasm-edit": {
+ "version": "1.12.1",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.12.1.tgz",
+ "integrity": "sha512-1DuwbVvADvS5mGnXbE+c9NfA8QRcZ6iKquqjjmR10k6o+zzsRVesil54DKexiowcFCPdr/Q0qaMgB01+SQ1u6g==",
+ "requires": {
+ "@webassemblyjs/ast": "1.12.1",
+ "@webassemblyjs/helper-buffer": "1.12.1",
+ "@webassemblyjs/helper-wasm-bytecode": "1.11.6",
+ "@webassemblyjs/helper-wasm-section": "1.12.1",
+ "@webassemblyjs/wasm-gen": "1.12.1",
+ "@webassemblyjs/wasm-opt": "1.12.1",
+ "@webassemblyjs/wasm-parser": "1.12.1",
+ "@webassemblyjs/wast-printer": "1.12.1"
+ }
+ },
+ "@webassemblyjs/wasm-gen": {
+ "version": "1.12.1",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.12.1.tgz",
+ "integrity": "sha512-TDq4Ojh9fcohAw6OIMXqiIcTq5KUXTGRkVxbSo1hQnSy6lAM5GSdfwWeSxpAo0YzgsgF182E/U0mDNhuA0tW7w==",
+ "requires": {
+ "@webassemblyjs/ast": "1.12.1",
+ "@webassemblyjs/helper-wasm-bytecode": "1.11.6",
+ "@webassemblyjs/ieee754": "1.11.6",
+ "@webassemblyjs/leb128": "1.11.6",
+ "@webassemblyjs/utf8": "1.11.6"
+ }
+ },
+ "@webassemblyjs/wasm-opt": {
+ "version": "1.12.1",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.12.1.tgz",
+ "integrity": "sha512-Jg99j/2gG2iaz3hijw857AVYekZe2SAskcqlWIZXjji5WStnOpVoat3gQfT/Q5tb2djnCjBtMocY/Su1GfxPBg==",
+ "requires": {
+ "@webassemblyjs/ast": "1.12.1",
+ "@webassemblyjs/helper-buffer": "1.12.1",
+ "@webassemblyjs/wasm-gen": "1.12.1",
+ "@webassemblyjs/wasm-parser": "1.12.1"
+ }
+ },
+ "@webassemblyjs/wasm-parser": {
+ "version": "1.12.1",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.12.1.tgz",
+ "integrity": "sha512-xikIi7c2FHXysxXe3COrVUPSheuBtpcfhbpFj4gmu7KRLYOzANztwUU0IbsqvMqzuNK2+glRGWCEqZo1WCLyAQ==",
+ "requires": {
+ "@webassemblyjs/ast": "1.12.1",
+ "@webassemblyjs/helper-api-error": "1.11.6",
+ "@webassemblyjs/helper-wasm-bytecode": "1.11.6",
+ "@webassemblyjs/ieee754": "1.11.6",
+ "@webassemblyjs/leb128": "1.11.6",
+ "@webassemblyjs/utf8": "1.11.6"
+ }
+ },
+ "@webassemblyjs/wast-printer": {
+ "version": "1.12.1",
+ "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.12.1.tgz",
+ "integrity": "sha512-+X4WAlOisVWQMikjbcvY2e0rwPsKQ9F688lksZhBcPycBBuii3O7m8FACbDMWDojpAqvjIncrG8J0XHKyQfVeA==",
+ "requires": {
+ "@webassemblyjs/ast": "1.12.1",
+ "@xtuc/long": "4.2.2"
+ }
+ },
+ "@xtuc/ieee754": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/@xtuc/ieee754/-/ieee754-1.2.0.tgz",
+ "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA=="
+ },
+ "@xtuc/long": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/@xtuc/long/-/long-4.2.2.tgz",
+ "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ=="
+ },
+ "accepts": {
+ "version": "1.3.8",
+ "resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
+ "integrity": "sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==",
+ "requires": {
+ "mime-types": "~2.1.34",
+ "negotiator": "0.6.3"
+ },
+ "dependencies": {
+ "mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="
+ },
+ "mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "requires": {
+ "mime-db": "1.52.0"
+ }
+ }
+ }
+ },
+ "acorn": {
+ "version": "8.8.2",
+ "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.8.2.tgz",
+ "integrity": "sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw=="
+ },
+ "acorn-import-attributes": {
+ "version": "1.9.5",
+ "resolved": "https://registry.npmjs.org/acorn-import-attributes/-/acorn-import-attributes-1.9.5.tgz",
+ "integrity": "sha512-n02Vykv5uA3eHGM/Z2dQrcD56kL8TyDb2p1+0P83PClMnC/nc+anbQRhIOWnSq4Ke/KvDPrY3C9hDtC/A3eHnQ==",
+ "requires": {}
+ },
+ "acorn-walk": {
+ "version": "8.2.0",
+ "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.2.0.tgz",
+ "integrity": "sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA=="
+ },
+ "address": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/address/-/address-1.2.2.tgz",
+ "integrity": "sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA=="
+ },
+ "aggregate-error": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/aggregate-error/-/aggregate-error-3.1.0.tgz",
+ "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==",
+ "requires": {
+ "clean-stack": "^2.0.0",
+ "indent-string": "^4.0.0"
+ }
+ },
+ "ajv": {
+ "version": "6.12.6",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
+ "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+ "requires": {
+ "fast-deep-equal": "^3.1.1",
+ "fast-json-stable-stringify": "^2.0.0",
+ "json-schema-traverse": "^0.4.1",
+ "uri-js": "^4.2.2"
+ }
+ },
+ "ajv-formats": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/ajv-formats/-/ajv-formats-2.1.1.tgz",
+ "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==",
+ "requires": {
+ "ajv": "^8.0.0"
+ },
+ "dependencies": {
+ "ajv": {
+ "version": "8.12.0",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
+ "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
+ "requires": {
+ "fast-deep-equal": "^3.1.1",
+ "json-schema-traverse": "^1.0.0",
+ "require-from-string": "^2.0.2",
+ "uri-js": "^4.2.2"
+ }
+ },
+ "json-schema-traverse": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
+ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
+ }
+ }
+ },
+ "ajv-keywords": {
+ "version": "3.5.2",
+ "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-3.5.2.tgz",
+ "integrity": "sha512-5p6WTN0DdTGVQk6VjcEju19IgaHudalcfabD7yhDGeA6bcQnmL+CpveLJq/3hvfwd1aof6L386Ougkx6RfyMIQ==",
+ "requires": {}
+ },
+ "ansi-align": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-align/-/ansi-align-3.0.1.tgz",
+ "integrity": "sha512-IOfwwBF5iczOjp/WeY4YxyjqAFMQoZufdQWDd19SEExbVLNXqvpzSJ/M7Za4/sCPmQ0+GRquoA7bGcINcxew6w==",
+ "requires": {
+ "string-width": "^4.1.0"
+ },
+ "dependencies": {
+ "emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
+ },
+ "string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "requires": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ }
+ }
+ }
+ },
+ "ansi-html-community": {
+ "version": "0.0.8",
+ "resolved": "https://registry.npmjs.org/ansi-html-community/-/ansi-html-community-0.0.8.tgz",
+ "integrity": "sha512-1APHAyr3+PCamwNw3bXCPp4HFLONZt/yIH0sZp0/469KWNTEy+qN5jQ3GVX6DMZ1UXAi34yVwtTeaG/HpBuuzw=="
+ },
+ "ansi-regex": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz",
+ "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ=="
+ },
+ "ansi-styles": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz",
+ "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==",
+ "requires": {
+ "color-convert": "^2.0.1"
+ }
+ },
+ "anymatch": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz",
+ "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==",
+ "requires": {
+ "normalize-path": "^3.0.0",
+ "picomatch": "^2.0.4"
+ }
+ },
+ "argparse": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+ "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q=="
+ },
+ "array-flatten": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-2.1.2.tgz",
+ "integrity": "sha512-hNfzcOV8W4NdualtqBFPyVO+54DSJuZGY9qT4pRroB6S9e3iiido2ISIC5h9R2sPJ8H3FHCIiEnsv1lPXO3KtQ=="
+ },
+ "array-union": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz",
+ "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw=="
+ },
+ "at-least-node": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz",
+ "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg=="
+ },
+ "autoprefixer": {
+ "version": "10.4.20",
+ "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.20.tgz",
+ "integrity": "sha512-XY25y5xSv/wEoqzDyXXME4AFfkZI0P23z6Fs3YgymDnKJkCGOnkL0iTxCa85UTqaSgfcqyf3UA6+c7wUvx/16g==",
+ "requires": {
+ "browserslist": "^4.23.3",
+ "caniuse-lite": "^1.0.30001646",
+ "fraction.js": "^4.3.7",
+ "normalize-range": "^0.1.2",
+ "picocolors": "^1.0.1",
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "axios": {
+ "version": "0.25.0",
+ "resolved": "https://registry.npmjs.org/axios/-/axios-0.25.0.tgz",
+ "integrity": "sha512-cD8FOb0tRH3uuEe6+evtAbgJtfxr7ly3fQjYcMcuPlgkwVS9xboaVIpcDV+cYQe+yGykgwZCs1pzjntcGa6l5g==",
+ "requires": {
+ "follow-redirects": "^1.14.7"
+ }
+ },
+ "babel-loader": {
+ "version": "8.3.0",
+ "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.3.0.tgz",
+ "integrity": "sha512-H8SvsMF+m9t15HNLMipppzkC+Y2Yq+v3SonZyU70RBL/h1gxPkH08Ot8pEE9Z4Kd+czyWJClmFS8qzIP9OZ04Q==",
+ "requires": {
+ "find-cache-dir": "^3.3.1",
+ "loader-utils": "^2.0.0",
+ "make-dir": "^3.1.0",
+ "schema-utils": "^2.6.5"
+ }
+ },
+ "babel-plugin-apply-mdx-type-prop": {
+ "version": "1.6.22",
+ "resolved": "https://registry.npmjs.org/babel-plugin-apply-mdx-type-prop/-/babel-plugin-apply-mdx-type-prop-1.6.22.tgz",
+ "integrity": "sha512-VefL+8o+F/DfK24lPZMtJctrCVOfgbqLAGZSkxwhazQv4VxPg3Za/i40fu22KR2m8eEda+IfSOlPLUSIiLcnCQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "7.10.4",
+ "@mdx-js/util": "1.6.22"
+ },
+ "dependencies": {
+ "@babel/helper-plugin-utils": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz",
+ "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg=="
+ }
+ }
+ },
+ "babel-plugin-dynamic-import-node": {
+ "version": "2.3.3",
+ "resolved": "https://registry.npmjs.org/babel-plugin-dynamic-import-node/-/babel-plugin-dynamic-import-node-2.3.3.tgz",
+ "integrity": "sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==",
+ "requires": {
+ "object.assign": "^4.1.0"
+ }
+ },
+ "babel-plugin-extract-import-names": {
+ "version": "1.6.22",
+ "resolved": "https://registry.npmjs.org/babel-plugin-extract-import-names/-/babel-plugin-extract-import-names-1.6.22.tgz",
+ "integrity": "sha512-yJ9BsJaISua7d8zNT7oRG1ZLBJCIdZ4PZqmH8qa9N5AK01ifk3fnkc98AXhtzE7UkfCsEumvoQWgoYLhOnJ7jQ==",
+ "requires": {
+ "@babel/helper-plugin-utils": "7.10.4"
+ },
+ "dependencies": {
+ "@babel/helper-plugin-utils": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz",
+ "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg=="
+ }
+ }
+ },
+ "babel-plugin-polyfill-corejs2": {
+ "version": "0.4.3",
+ "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.3.tgz",
+ "integrity": "sha512-bM3gHc337Dta490gg+/AseNB9L4YLHxq1nGKZZSHbhXv4aTYU2MD2cjza1Ru4S6975YLTaL1K8uJf6ukJhhmtw==",
+ "requires": {
+ "@babel/compat-data": "^7.17.7",
+ "@babel/helper-define-polyfill-provider": "^0.4.0",
+ "semver": "^6.1.1"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="
+ }
+ }
+ },
+ "babel-plugin-polyfill-corejs3": {
+ "version": "0.8.1",
+ "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.8.1.tgz",
+ "integrity": "sha512-ikFrZITKg1xH6pLND8zT14UPgjKHiGLqex7rGEZCH2EvhsneJaJPemmpQaIZV5AL03II+lXylw3UmddDK8RU5Q==",
+ "requires": {
+ "@babel/helper-define-polyfill-provider": "^0.4.0",
+ "core-js-compat": "^3.30.1"
+ }
+ },
+ "babel-plugin-polyfill-regenerator": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.5.0.tgz",
+ "integrity": "sha512-hDJtKjMLVa7Z+LwnTCxoDLQj6wdc+B8dun7ayF2fYieI6OzfuvcLMB32ihJZ4UhCBwNYGl5bg/x/P9cMdnkc2g==",
+ "requires": {
+ "@babel/helper-define-polyfill-provider": "^0.4.0"
+ }
+ },
+ "bail": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/bail/-/bail-1.0.5.tgz",
+ "integrity": "sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ=="
+ },
+ "balanced-match": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
+ "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
+ },
+ "batch": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/batch/-/batch-0.6.1.tgz",
+ "integrity": "sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw=="
+ },
+ "big.js": {
+ "version": "5.2.2",
+ "resolved": "https://registry.npmjs.org/big.js/-/big.js-5.2.2.tgz",
+ "integrity": "sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ=="
+ },
+ "binary-extensions": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.2.0.tgz",
+ "integrity": "sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA=="
+ },
+ "body-parser": {
+ "version": "1.20.3",
+ "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz",
+ "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==",
+ "requires": {
+ "bytes": "3.1.2",
+ "content-type": "~1.0.5",
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "http-errors": "2.0.0",
+ "iconv-lite": "0.4.24",
+ "on-finished": "2.4.1",
+ "qs": "6.13.0",
+ "raw-body": "2.5.2",
+ "type-is": "~1.6.18",
+ "unpipe": "1.0.0"
+ },
+ "dependencies": {
+ "bytes": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
+ "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg=="
+ },
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+ },
+ "qs": {
+ "version": "6.13.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz",
+ "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==",
+ "requires": {
+ "side-channel": "^1.0.6"
+ }
+ }
+ }
+ },
+ "bonjour-service": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/bonjour-service/-/bonjour-service-1.1.1.tgz",
+ "integrity": "sha512-Z/5lQRMOG9k7W+FkeGTNjh7htqn/2LMnfOvBZ8pynNZCM9MwkQkI3zeI4oz09uWdcgmgHugVvBqxGg4VQJ5PCg==",
+ "requires": {
+ "array-flatten": "^2.1.2",
+ "dns-equal": "^1.0.0",
+ "fast-deep-equal": "^3.1.3",
+ "multicast-dns": "^7.2.5"
+ }
+ },
+ "boolbase": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz",
+ "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww=="
+ },
+ "boxen": {
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/boxen/-/boxen-6.2.1.tgz",
+ "integrity": "sha512-H4PEsJXfFI/Pt8sjDWbHlQPx4zL/bvSQjcilJmaulGt5mLDorHOHpmdXAJcBcmru7PhYSp/cDMWRko4ZUMFkSw==",
+ "requires": {
+ "ansi-align": "^3.0.1",
+ "camelcase": "^6.2.0",
+ "chalk": "^4.1.2",
+ "cli-boxes": "^3.0.0",
+ "string-width": "^5.0.1",
+ "type-fest": "^2.5.0",
+ "widest-line": "^4.0.1",
+ "wrap-ansi": "^8.0.1"
+ }
+ },
+ "brace-expansion": {
+ "version": "1.1.11",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
+ "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
+ "requires": {
+ "balanced-match": "^1.0.0",
+ "concat-map": "0.0.1"
+ }
+ },
+ "braces": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
+ "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
+ "requires": {
+ "fill-range": "^7.1.1"
+ }
+ },
+ "browserslist": {
+ "version": "4.23.3",
+ "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.23.3.tgz",
+ "integrity": "sha512-btwCFJVjI4YWDNfau8RhZ+B1Q/VLoUITrm3RlP6y1tYGWIOa+InuYiRGXUBXo8nA1qKmHMyLB/iVQg5TT4eFoA==",
+ "requires": {
+ "caniuse-lite": "^1.0.30001646",
+ "electron-to-chromium": "^1.5.4",
+ "node-releases": "^2.0.18",
+ "update-browserslist-db": "^1.1.0"
+ }
+ },
+ "buffer-from": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.2.tgz",
+ "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ=="
+ },
+ "bytes": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.0.0.tgz",
+ "integrity": "sha512-pMhOfFDPiv9t5jjIXkHosWmkSyQbvsgEVNkz0ERHbuLh2T/7j4Mqqpz523Fe8MVY89KC6Sh/QfS2sM+SjgFDcw=="
+ },
+ "cacheable-request": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-6.1.0.tgz",
+ "integrity": "sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg==",
+ "requires": {
+ "clone-response": "^1.0.2",
+ "get-stream": "^5.1.0",
+ "http-cache-semantics": "^4.0.0",
+ "keyv": "^3.0.0",
+ "lowercase-keys": "^2.0.0",
+ "normalize-url": "^4.1.0",
+ "responselike": "^1.0.2"
+ },
+ "dependencies": {
+ "get-stream": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz",
+ "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==",
+ "requires": {
+ "pump": "^3.0.0"
+ }
+ },
+ "lowercase-keys": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz",
+ "integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA=="
+ },
+ "normalize-url": {
+ "version": "4.5.1",
+ "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-4.5.1.tgz",
+ "integrity": "sha512-9UZCFRHQdNrfTpGg8+1INIg93B6zE0aXMVFkw1WFwvO4SlZywU6aLg5Of0Ap/PgcbSw4LNxvMWXMeugwMCX0AA=="
+ }
+ }
+ },
+ "call-bind": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.7.tgz",
+ "integrity": "sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==",
+ "requires": {
+ "es-define-property": "^1.0.0",
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "get-intrinsic": "^1.2.4",
+ "set-function-length": "^1.2.1"
+ }
+ },
+ "call-me-maybe": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/call-me-maybe/-/call-me-maybe-1.0.2.tgz",
+ "integrity": "sha512-HpX65o1Hnr9HH25ojC1YGs7HCQLq0GCOibSaWER0eNpgJ/Z1MZv2mTc7+xh6WOPxbRVcmgbv4hGU+uSQ/2xFZQ=="
+ },
+ "callsites": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz",
+ "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ=="
+ },
+ "camel-case": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/camel-case/-/camel-case-4.1.2.tgz",
+ "integrity": "sha512-gxGWBrTT1JuMx6R+o5PTXMmUnhnVzLQ9SNutD4YqKtI6ap897t3tKECYla6gCWEkplXnlNybEkZg9GEGxKFCgw==",
+ "requires": {
+ "pascal-case": "^3.1.2",
+ "tslib": "^2.0.3"
+ }
+ },
+ "camelcase": {
+ "version": "6.3.0",
+ "resolved": "https://registry.npmjs.org/camelcase/-/camelcase-6.3.0.tgz",
+ "integrity": "sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA=="
+ },
+ "camelcase-css": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz",
+ "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA=="
+ },
+ "caniuse-api": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/caniuse-api/-/caniuse-api-3.0.0.tgz",
+ "integrity": "sha512-bsTwuIg/BZZK/vreVTYYbSWoe2F+71P7K5QGEX+pT250DZbfU1MQ5prOKpPR+LL6uWKK3KMwMCAS74QB3Um1uw==",
+ "requires": {
+ "browserslist": "^4.0.0",
+ "caniuse-lite": "^1.0.0",
+ "lodash.memoize": "^4.1.2",
+ "lodash.uniq": "^4.5.0"
+ }
+ },
+ "caniuse-lite": {
+ "version": "1.0.30001660",
+ "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001660.tgz",
+ "integrity": "sha512-GacvNTTuATm26qC74pt+ad1fW15mlQ/zuTzzY1ZoIzECTP8HURDfF43kNxPgf7H1jmelCBQTTbBNxdSXOA7Bqg=="
+ },
+ "ccount": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/ccount/-/ccount-1.1.0.tgz",
+ "integrity": "sha512-vlNK021QdI7PNeiUh/lKkC/mNHHfV0m/Ad5JoI0TYtlBnJAslM/JIkm/tGC88bkLIwO6OQ5uV6ztS6kVAtCDlg=="
+ },
+ "chalk": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
+ "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
+ "requires": {
+ "ansi-styles": "^4.1.0",
+ "supports-color": "^7.1.0"
+ }
+ },
+ "character-entities": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz",
+ "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw=="
+ },
+ "character-entities-legacy": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz",
+ "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA=="
+ },
+ "character-reference-invalid": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz",
+ "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg=="
+ },
+ "chokidar": {
+ "version": "3.5.3",
+ "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz",
+ "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==",
+ "requires": {
+ "anymatch": "~3.1.2",
+ "braces": "~3.0.2",
+ "fsevents": "~2.3.2",
+ "glob-parent": "~5.1.2",
+ "is-binary-path": "~2.1.0",
+ "is-glob": "~4.0.1",
+ "normalize-path": "~3.0.0",
+ "readdirp": "~3.6.0"
+ }
+ },
+ "chrome-trace-event": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/chrome-trace-event/-/chrome-trace-event-1.0.3.tgz",
+ "integrity": "sha512-p3KULyQg4S7NIHixdwbGX+nFHkoBiA4YQmyWtjb8XngSKV124nJmRysgAeujbUVb15vh+RvFUfCPqU7rXk+hZg=="
+ },
+ "ci-info": {
+ "version": "3.8.0",
+ "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.8.0.tgz",
+ "integrity": "sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw=="
+ },
+ "clean-css": {
+ "version": "5.3.2",
+ "resolved": "https://registry.npmjs.org/clean-css/-/clean-css-5.3.2.tgz",
+ "integrity": "sha512-JVJbM+f3d3Q704rF4bqQ5UUyTtuJ0JRKNbTKVEeujCCBoMdkEi+V+e8oktO9qGQNSvHrFTM6JZRXrUvGR1czww==",
+ "requires": {
+ "source-map": "~0.6.0"
+ }
+ },
+ "clean-stack": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/clean-stack/-/clean-stack-2.2.0.tgz",
+ "integrity": "sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A=="
+ },
+ "cli-boxes": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-3.0.0.tgz",
+ "integrity": "sha512-/lzGpEWL/8PfI0BmBOPRwp0c/wFNX1RdUML3jK/RcSBA9T8mZDdQpqYBKtCFTOfQbwPqWEOpjqW+Fnayc0969g=="
+ },
+ "cli-table3": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.3.tgz",
+ "integrity": "sha512-w5Jac5SykAeZJKntOxJCrm63Eg5/4dhMWIcuTbo9rpE+brgaSZo0RuNJZeOyMgsUdhDeojvgyQLmjI+K50ZGyg==",
+ "requires": {
+ "@colors/colors": "1.5.0",
+ "string-width": "^4.2.0"
+ },
+ "dependencies": {
+ "emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
+ },
+ "string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "requires": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ }
+ }
+ }
+ },
+ "clone-deep": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/clone-deep/-/clone-deep-4.0.1.tgz",
+ "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==",
+ "requires": {
+ "is-plain-object": "^2.0.4",
+ "kind-of": "^6.0.2",
+ "shallow-clone": "^3.0.0"
+ }
+ },
+ "clone-response": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.3.tgz",
+ "integrity": "sha512-ROoL94jJH2dUVML2Y/5PEDNaSHgeOdSDicUyS7izcF63G6sTc/FTjLub4b8Il9S8S0beOfYt0TaA5qvFK+w0wA==",
+ "requires": {
+ "mimic-response": "^1.0.0"
+ }
+ },
+ "collapse-white-space": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/collapse-white-space/-/collapse-white-space-1.0.6.tgz",
+ "integrity": "sha512-jEovNnrhMuqyCcjfEJA56v0Xq8SkIoPKDyaHahwo3POf4qcSXqMYuwNcOTzp74vTsR9Tn08z4MxWqAhcekogkQ=="
+ },
+ "color-convert": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz",
+ "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==",
+ "requires": {
+ "color-name": "~1.1.4"
+ }
+ },
+ "color-name": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz",
+ "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA=="
+ },
+ "colord": {
+ "version": "2.9.3",
+ "resolved": "https://registry.npmjs.org/colord/-/colord-2.9.3.tgz",
+ "integrity": "sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw=="
+ },
+ "colorette": {
+ "version": "2.0.20",
+ "resolved": "https://registry.npmjs.org/colorette/-/colorette-2.0.20.tgz",
+ "integrity": "sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w=="
+ },
+ "combine-promises": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/combine-promises/-/combine-promises-1.1.0.tgz",
+ "integrity": "sha512-ZI9jvcLDxqwaXEixOhArm3r7ReIivsXkpbyEWyeOhzz1QS0iSgBPnWvEqvIQtYyamGCYA88gFhmUrs9hrrQ0pg=="
+ },
+ "comma-separated-tokens": {
+ "version": "1.0.8",
+ "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz",
+ "integrity": "sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw=="
+ },
+ "commander": {
+ "version": "9.5.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-9.5.0.tgz",
+ "integrity": "sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ=="
+ },
+ "commondir": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/commondir/-/commondir-1.0.1.tgz",
+ "integrity": "sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg=="
+ },
+ "compressible": {
+ "version": "2.0.18",
+ "resolved": "https://registry.npmjs.org/compressible/-/compressible-2.0.18.tgz",
+ "integrity": "sha512-AF3r7P5dWxL8MxyITRMlORQNaOA2IkAFaTr4k7BUumjPtRpGDTZpl0Pb1XCO6JeDCBdp126Cgs9sMxqSjgYyRg==",
+ "requires": {
+ "mime-db": ">= 1.43.0 < 2"
+ },
+ "dependencies": {
+ "mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="
+ }
+ }
+ },
+ "compression": {
+ "version": "1.7.4",
+ "resolved": "https://registry.npmjs.org/compression/-/compression-1.7.4.tgz",
+ "integrity": "sha512-jaSIDzP9pZVS4ZfQ+TzvtiWhdpFhE2RDHz8QJkpX9SIpLq88VueF5jJw6t+6CUQcAoA6t+x89MLrWAqpfDE8iQ==",
+ "requires": {
+ "accepts": "~1.3.5",
+ "bytes": "3.0.0",
+ "compressible": "~2.0.16",
+ "debug": "2.6.9",
+ "on-headers": "~1.0.2",
+ "safe-buffer": "5.1.2",
+ "vary": "~1.1.2"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+ },
+ "safe-buffer": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
+ }
+ }
+ },
+ "concat-map": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
+ "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg=="
+ },
+ "configstore": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz",
+ "integrity": "sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==",
+ "requires": {
+ "dot-prop": "^5.2.0",
+ "graceful-fs": "^4.1.2",
+ "make-dir": "^3.0.0",
+ "unique-string": "^2.0.0",
+ "write-file-atomic": "^3.0.0",
+ "xdg-basedir": "^4.0.0"
+ }
+ },
+ "connect-history-api-fallback": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/connect-history-api-fallback/-/connect-history-api-fallback-2.0.0.tgz",
+ "integrity": "sha512-U73+6lQFmfiNPrYbXqr6kZ1i1wiRqXnp2nhMsINseWXO8lDau0LGEffJ8kQi4EjLZympVgRdvqjAgiZ1tgzDDA=="
+ },
+ "consola": {
+ "version": "2.15.3",
+ "resolved": "https://registry.npmjs.org/consola/-/consola-2.15.3.tgz",
+ "integrity": "sha512-9vAdYbHj6x2fLKC4+oPH0kFzY/orMZyG2Aj+kNylHxKGJ/Ed4dpNyAQYwJOdqO4zdM7XpVHmyejQDcQHrnuXbw=="
+ },
+ "content-disposition": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.2.tgz",
+ "integrity": "sha512-kRGRZw3bLlFISDBgwTSA1TMBFN6J6GWDeubmDE3AF+3+yXL8hTWv8r5rkLbqYXY4RjPk/EzHnClI3zQf1cFmHA=="
+ },
+ "content-type": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/content-type/-/content-type-1.0.5.tgz",
+ "integrity": "sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA=="
+ },
+ "convert-source-map": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz",
+ "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A=="
+ },
+ "cookie": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.6.0.tgz",
+ "integrity": "sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw=="
+ },
+ "cookie-signature": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz",
+ "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ=="
+ },
+ "copy-webpack-plugin": {
+ "version": "11.0.0",
+ "resolved": "https://registry.npmjs.org/copy-webpack-plugin/-/copy-webpack-plugin-11.0.0.tgz",
+ "integrity": "sha512-fX2MWpamkW0hZxMEg0+mYnA40LTosOSa5TqZ9GYIBzyJa9C3QUaMPSE2xAi/buNr8u89SfD9wHSQVBzrRa/SOQ==",
+ "requires": {
+ "fast-glob": "^3.2.11",
+ "glob-parent": "^6.0.1",
+ "globby": "^13.1.1",
+ "normalize-path": "^3.0.0",
+ "schema-utils": "^4.0.0",
+ "serialize-javascript": "^6.0.0"
+ },
+ "dependencies": {
+ "ajv": {
+ "version": "8.12.0",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
+ "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
+ "requires": {
+ "fast-deep-equal": "^3.1.1",
+ "json-schema-traverse": "^1.0.0",
+ "require-from-string": "^2.0.2",
+ "uri-js": "^4.2.2"
+ }
+ },
+ "ajv-keywords": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz",
+ "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==",
+ "requires": {
+ "fast-deep-equal": "^3.1.3"
+ }
+ },
+ "glob-parent": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz",
+ "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==",
+ "requires": {
+ "is-glob": "^4.0.3"
+ }
+ },
+ "globby": {
+ "version": "13.1.4",
+ "resolved": "https://registry.npmjs.org/globby/-/globby-13.1.4.tgz",
+ "integrity": "sha512-iui/IiiW+QrJ1X1hKH5qwlMQyv34wJAYwH1vrf8b9kBA4sNiif3gKsMHa+BrdnOpEudWjpotfa7LrTzB1ERS/g==",
+ "requires": {
+ "dir-glob": "^3.0.1",
+ "fast-glob": "^3.2.11",
+ "ignore": "^5.2.0",
+ "merge2": "^1.4.1",
+ "slash": "^4.0.0"
+ }
+ },
+ "json-schema-traverse": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
+ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
+ },
+ "schema-utils": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz",
+ "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==",
+ "requires": {
+ "@types/json-schema": "^7.0.9",
+ "ajv": "^8.9.0",
+ "ajv-formats": "^2.1.1",
+ "ajv-keywords": "^5.1.0"
+ }
+ },
+ "slash": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-4.0.0.tgz",
+ "integrity": "sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew=="
+ }
+ }
+ },
+ "core-js": {
+ "version": "3.30.2",
+ "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.30.2.tgz",
+ "integrity": "sha512-uBJiDmwqsbJCWHAwjrx3cvjbMXP7xD72Dmsn5LOJpiRmE3WbBbN5rCqQ2Qh6Ek6/eOrjlWngEynBWo4VxerQhg=="
+ },
+ "core-js-compat": {
+ "version": "3.30.2",
+ "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.30.2.tgz",
+ "integrity": "sha512-nriW1nuJjUgvkEjIot1Spwakz52V9YkYHZAQG6A1eCgC8AA1p0zngrQEP9R0+V6hji5XilWKG1Bd0YRppmGimA==",
+ "requires": {
+ "browserslist": "^4.21.5"
+ }
+ },
+ "core-js-pure": {
+ "version": "3.30.2",
+ "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.30.2.tgz",
+ "integrity": "sha512-p/npFUJXXBkCCTIlEGBdghofn00jWG6ZOtdoIXSJmAu2QBvN0IqpZXWweOytcwE6cfx8ZvVUy1vw8zxhe4Y2vg=="
+ },
+ "core-util-is": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz",
+ "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ=="
+ },
+ "cosmiconfig": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz",
+ "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==",
+ "requires": {
+ "@types/parse-json": "^4.0.0",
+ "import-fresh": "^3.2.1",
+ "parse-json": "^5.0.0",
+ "path-type": "^4.0.0",
+ "yaml": "^1.10.0"
+ }
+ },
+ "cross-spawn": {
+ "version": "7.0.3",
+ "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
+ "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
+ "requires": {
+ "path-key": "^3.1.0",
+ "shebang-command": "^2.0.0",
+ "which": "^2.0.1"
+ }
+ },
+ "crypto-random-string": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/crypto-random-string/-/crypto-random-string-2.0.0.tgz",
+ "integrity": "sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA=="
+ },
+ "css-declaration-sorter": {
+ "version": "6.4.0",
+ "resolved": "https://registry.npmjs.org/css-declaration-sorter/-/css-declaration-sorter-6.4.0.tgz",
+ "integrity": "sha512-jDfsatwWMWN0MODAFuHszfjphEXfNw9JUAhmY4pLu3TyTU+ohUpsbVtbU+1MZn4a47D9kqh03i4eyOm+74+zew==",
+ "requires": {}
+ },
+ "css-loader": {
+ "version": "6.8.1",
+ "resolved": "https://registry.npmjs.org/css-loader/-/css-loader-6.8.1.tgz",
+ "integrity": "sha512-xDAXtEVGlD0gJ07iclwWVkLoZOpEvAWaSyf6W18S2pOC//K8+qUDIx8IIT3D+HjnmkJPQeesOPv5aiUaJsCM2g==",
+ "requires": {
+ "icss-utils": "^5.1.0",
+ "postcss": "^8.4.21",
+ "postcss-modules-extract-imports": "^3.0.0",
+ "postcss-modules-local-by-default": "^4.0.3",
+ "postcss-modules-scope": "^3.0.0",
+ "postcss-modules-values": "^4.0.0",
+ "postcss-value-parser": "^4.2.0",
+ "semver": "^7.3.8"
+ }
+ },
+ "css-minimizer-webpack-plugin": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/css-minimizer-webpack-plugin/-/css-minimizer-webpack-plugin-4.2.2.tgz",
+ "integrity": "sha512-s3Of/4jKfw1Hj9CxEO1E5oXhQAxlayuHO2y/ML+C6I9sQ7FdzfEV6QgMLN3vI+qFsjJGIAFLKtQK7t8BOXAIyA==",
+ "requires": {
+ "cssnano": "^5.1.8",
+ "jest-worker": "^29.1.2",
+ "postcss": "^8.4.17",
+ "schema-utils": "^4.0.0",
+ "serialize-javascript": "^6.0.0",
+ "source-map": "^0.6.1"
+ },
+ "dependencies": {
+ "ajv": {
+ "version": "8.12.0",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
+ "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
+ "requires": {
+ "fast-deep-equal": "^3.1.1",
+ "json-schema-traverse": "^1.0.0",
+ "require-from-string": "^2.0.2",
+ "uri-js": "^4.2.2"
+ }
+ },
+ "ajv-keywords": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz",
+ "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==",
+ "requires": {
+ "fast-deep-equal": "^3.1.3"
+ }
+ },
+ "json-schema-traverse": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
+ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
+ },
+ "schema-utils": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz",
+ "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==",
+ "requires": {
+ "@types/json-schema": "^7.0.9",
+ "ajv": "^8.9.0",
+ "ajv-formats": "^2.1.1",
+ "ajv-keywords": "^5.1.0"
+ }
+ }
+ }
+ },
+ "css-select": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/css-select/-/css-select-4.3.0.tgz",
+ "integrity": "sha512-wPpOYtnsVontu2mODhA19JrqWxNsfdatRKd64kmpRbQgh1KtItko5sTnEpPdpSaJszTOhEMlF/RPz28qj4HqhQ==",
+ "requires": {
+ "boolbase": "^1.0.0",
+ "css-what": "^6.0.1",
+ "domhandler": "^4.3.1",
+ "domutils": "^2.8.0",
+ "nth-check": "^2.0.1"
+ }
+ },
+ "css-tree": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.1.3.tgz",
+ "integrity": "sha512-tRpdppF7TRazZrjJ6v3stzv93qxRcSsFmW6cX0Zm2NVKpxE1WV1HblnghVv9TreireHkqI/VDEsfolRF1p6y7Q==",
+ "requires": {
+ "mdn-data": "2.0.14",
+ "source-map": "^0.6.1"
+ }
+ },
+ "css-what": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz",
+ "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw=="
+ },
+ "cssesc": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz",
+ "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg=="
+ },
+ "cssnano": {
+ "version": "5.1.15",
+ "resolved": "https://registry.npmjs.org/cssnano/-/cssnano-5.1.15.tgz",
+ "integrity": "sha512-j+BKgDcLDQA+eDifLx0EO4XSA56b7uut3BQFH+wbSaSTuGLuiyTa/wbRYthUXX8LC9mLg+WWKe8h+qJuwTAbHw==",
+ "requires": {
+ "cssnano-preset-default": "^5.2.14",
+ "lilconfig": "^2.0.3",
+ "yaml": "^1.10.2"
+ }
+ },
+ "cssnano-preset-advanced": {
+ "version": "5.3.10",
+ "resolved": "https://registry.npmjs.org/cssnano-preset-advanced/-/cssnano-preset-advanced-5.3.10.tgz",
+ "integrity": "sha512-fnYJyCS9jgMU+cmHO1rPSPf9axbQyD7iUhLO5Df6O4G+fKIOMps+ZbU0PdGFejFBBZ3Pftf18fn1eG7MAPUSWQ==",
+ "requires": {
+ "autoprefixer": "^10.4.12",
+ "cssnano-preset-default": "^5.2.14",
+ "postcss-discard-unused": "^5.1.0",
+ "postcss-merge-idents": "^5.1.1",
+ "postcss-reduce-idents": "^5.2.0",
+ "postcss-zindex": "^5.1.0"
+ }
+ },
+ "cssnano-preset-default": {
+ "version": "5.2.14",
+ "resolved": "https://registry.npmjs.org/cssnano-preset-default/-/cssnano-preset-default-5.2.14.tgz",
+ "integrity": "sha512-t0SFesj/ZV2OTylqQVOrFgEh5uanxbO6ZAdeCrNsUQ6fVuXwYTxJPNAGvGTxHbD68ldIJNec7PyYZDBrfDQ+6A==",
+ "requires": {
+ "css-declaration-sorter": "^6.3.1",
+ "cssnano-utils": "^3.1.0",
+ "postcss-calc": "^8.2.3",
+ "postcss-colormin": "^5.3.1",
+ "postcss-convert-values": "^5.1.3",
+ "postcss-discard-comments": "^5.1.2",
+ "postcss-discard-duplicates": "^5.1.0",
+ "postcss-discard-empty": "^5.1.1",
+ "postcss-discard-overridden": "^5.1.0",
+ "postcss-merge-longhand": "^5.1.7",
+ "postcss-merge-rules": "^5.1.4",
+ "postcss-minify-font-values": "^5.1.0",
+ "postcss-minify-gradients": "^5.1.1",
+ "postcss-minify-params": "^5.1.4",
+ "postcss-minify-selectors": "^5.2.1",
+ "postcss-normalize-charset": "^5.1.0",
+ "postcss-normalize-display-values": "^5.1.0",
+ "postcss-normalize-positions": "^5.1.1",
+ "postcss-normalize-repeat-style": "^5.1.1",
+ "postcss-normalize-string": "^5.1.0",
+ "postcss-normalize-timing-functions": "^5.1.0",
+ "postcss-normalize-unicode": "^5.1.1",
+ "postcss-normalize-url": "^5.1.0",
+ "postcss-normalize-whitespace": "^5.1.1",
+ "postcss-ordered-values": "^5.1.3",
+ "postcss-reduce-initial": "^5.1.2",
+ "postcss-reduce-transforms": "^5.1.0",
+ "postcss-svgo": "^5.1.0",
+ "postcss-unique-selectors": "^5.1.1"
+ }
+ },
+ "cssnano-utils": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/cssnano-utils/-/cssnano-utils-3.1.0.tgz",
+ "integrity": "sha512-JQNR19/YZhz4psLX/rQ9M83e3z2Wf/HdJbryzte4a3NSuafyp9w/I4U+hx5C2S9g41qlstH7DEWnZaaj83OuEA==",
+ "requires": {}
+ },
+ "csso": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/csso/-/csso-4.2.0.tgz",
+ "integrity": "sha512-wvlcdIbf6pwKEk7vHj8/Bkc0B4ylXZruLvOgs9doS5eOsOpuodOV2zJChSpkp+pRpYQLQMeF04nr3Z68Sta9jA==",
+ "requires": {
+ "css-tree": "^1.1.2"
+ }
+ },
+ "csstype": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz",
+ "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ=="
+ },
+ "debug": {
+ "version": "4.3.4",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.4.tgz",
+ "integrity": "sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==",
+ "requires": {
+ "ms": "2.1.2"
+ }
+ },
+ "decompress-response": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-3.3.0.tgz",
+ "integrity": "sha512-BzRPQuY1ip+qDonAOz42gRm/pg9F768C+npV/4JOsxRC2sq+Rlk+Q4ZCAsOhnIaMrgarILY+RMUIvMmmX1qAEA==",
+ "requires": {
+ "mimic-response": "^1.0.0"
+ }
+ },
+ "deep-extend": {
+ "version": "0.6.0",
+ "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz",
+ "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA=="
+ },
+ "deepmerge": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz",
+ "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A=="
+ },
+ "default-gateway": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/default-gateway/-/default-gateway-6.0.3.tgz",
+ "integrity": "sha512-fwSOJsbbNzZ/CUFpqFBqYfYNLj1NbMPm8MMCIzHjC83iSJRBEGmDUxU+WP661BaBQImeC2yHwXtz+P/O9o+XEg==",
+ "requires": {
+ "execa": "^5.0.0"
+ }
+ },
+ "defer-to-connect": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-1.1.3.tgz",
+ "integrity": "sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ=="
+ },
+ "define-data-property": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz",
+ "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==",
+ "requires": {
+ "es-define-property": "^1.0.0",
+ "es-errors": "^1.3.0",
+ "gopd": "^1.0.1"
+ }
+ },
+ "define-lazy-prop": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/define-lazy-prop/-/define-lazy-prop-2.0.0.tgz",
+ "integrity": "sha512-Ds09qNh8yw3khSjiJjiUInaGX9xlqZDY7JVryGxdxV7NPeuqQfplOpQ66yJFZut3jLa5zOwkXw1g9EI2uKh4Og=="
+ },
+ "define-properties": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.0.tgz",
+ "integrity": "sha512-xvqAVKGfT1+UAvPwKTVw/njhdQ8ZhXK4lI0bCIuCMrp2up9nPnaDftrLtmpTazqd1o+UY4zgzU+avtMbDP+ldA==",
+ "requires": {
+ "has-property-descriptors": "^1.0.0",
+ "object-keys": "^1.1.1"
+ }
+ },
+ "del": {
+ "version": "6.1.1",
+ "resolved": "https://registry.npmjs.org/del/-/del-6.1.1.tgz",
+ "integrity": "sha512-ua8BhapfP0JUJKC/zV9yHHDW/rDoDxP4Zhn3AkA6/xT6gY7jYXJiaeyBZznYVujhZZET+UgcbZiQ7sN3WqcImg==",
+ "requires": {
+ "globby": "^11.0.1",
+ "graceful-fs": "^4.2.4",
+ "is-glob": "^4.0.1",
+ "is-path-cwd": "^2.2.0",
+ "is-path-inside": "^3.0.2",
+ "p-map": "^4.0.0",
+ "rimraf": "^3.0.2",
+ "slash": "^3.0.0"
+ }
+ },
+ "depd": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz",
+ "integrity": "sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw=="
+ },
+ "destroy": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz",
+ "integrity": "sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg=="
+ },
+ "detab": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/detab/-/detab-2.0.4.tgz",
+ "integrity": "sha512-8zdsQA5bIkoRECvCrNKPla84lyoR7DSAyf7p0YgXzBO9PDJx8KntPUay7NS6yp+KdxdVtiE5SpHKtbp2ZQyA9g==",
+ "requires": {
+ "repeat-string": "^1.5.4"
+ }
+ },
+ "detect-node": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz",
+ "integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g=="
+ },
+ "detect-port": {
+ "version": "1.5.1",
+ "resolved": "https://registry.npmjs.org/detect-port/-/detect-port-1.5.1.tgz",
+ "integrity": "sha512-aBzdj76lueB6uUst5iAs7+0H/oOjqI5D16XUWxlWMIMROhcM0rfsNVk93zTngq1dDNpoXRr++Sus7ETAExppAQ==",
+ "requires": {
+ "address": "^1.0.1",
+ "debug": "4"
+ }
+ },
+ "detect-port-alt": {
+ "version": "1.1.6",
+ "resolved": "https://registry.npmjs.org/detect-port-alt/-/detect-port-alt-1.1.6.tgz",
+ "integrity": "sha512-5tQykt+LqfJFBEYaDITx7S7cR7mJ/zQmLXZ2qt5w04ainYZw6tBf9dBunMjVeVOdYVRUzUOE4HkY5J7+uttb5Q==",
+ "requires": {
+ "address": "^1.0.1",
+ "debug": "^2.6.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+ }
+ }
+ },
+ "dir-glob": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz",
+ "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==",
+ "requires": {
+ "path-type": "^4.0.0"
+ }
+ },
+ "dns-equal": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/dns-equal/-/dns-equal-1.0.0.tgz",
+ "integrity": "sha512-z+paD6YUQsk+AbGCEM4PrOXSss5gd66QfcVBFTKR/HpFL9jCqikS94HYwKww6fQyO7IxrIIyUu+g0Ka9tUS2Cg=="
+ },
+ "dns-packet": {
+ "version": "5.6.0",
+ "resolved": "https://registry.npmjs.org/dns-packet/-/dns-packet-5.6.0.tgz",
+ "integrity": "sha512-rza3UH1LwdHh9qyPXp8lkwpjSNk/AMD3dPytUoRoqnypDUhY0xvbdmVhWOfxO68frEfV9BU8V12Ez7ZsHGZpCQ==",
+ "requires": {
+ "@leichtgewicht/ip-codec": "^2.0.1"
+ }
+ },
+ "docusaurus-plugin-includes": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/docusaurus-plugin-includes/-/docusaurus-plugin-includes-1.1.5.tgz",
+ "integrity": "sha512-CqucZ7S820ayVP2s2Yq61lHezx0zMzt+qIPOmzmDv1k6Fru03+yU66mieSBvwN/B0w/g5M9xvjwDfLNU+eNtiQ==",
+ "requires": {
+ "@docusaurus/core": "^2.3.0",
+ "@docusaurus/types": "^2.3.0",
+ "@docusaurus/utils": "^2.3.0",
+ "fs-extra": "^10.0.0",
+ "path": "^0.12.7"
+ },
+ "dependencies": {
+ "fs-extra": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
+ "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
+ "requires": {
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
+ }
+ },
+ "jsonfile": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
+ "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
+ "requires": {
+ "graceful-fs": "^4.1.6",
+ "universalify": "^2.0.0"
+ }
+ },
+ "universalify": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz",
+ "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ=="
+ }
+ }
+ },
+ "dom-converter": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/dom-converter/-/dom-converter-0.2.0.tgz",
+ "integrity": "sha512-gd3ypIPfOMr9h5jIKq8E3sHOTCjeirnl0WK5ZdS1AW0Odt0b1PaWaHdJ4Qk4klv+YB9aJBS7mESXjFoDQPu6DA==",
+ "requires": {
+ "utila": "~0.4"
+ }
+ },
+ "dom-serializer": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-1.4.1.tgz",
+ "integrity": "sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==",
+ "requires": {
+ "domelementtype": "^2.0.1",
+ "domhandler": "^4.2.0",
+ "entities": "^2.0.0"
+ },
+ "dependencies": {
+ "entities": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz",
+ "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A=="
+ }
+ }
+ },
+ "domelementtype": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz",
+ "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw=="
+ },
+ "domhandler": {
+ "version": "4.3.1",
+ "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-4.3.1.tgz",
+ "integrity": "sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==",
+ "requires": {
+ "domelementtype": "^2.2.0"
+ }
+ },
+ "domutils": {
+ "version": "2.8.0",
+ "resolved": "https://registry.npmjs.org/domutils/-/domutils-2.8.0.tgz",
+ "integrity": "sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==",
+ "requires": {
+ "dom-serializer": "^1.0.1",
+ "domelementtype": "^2.2.0",
+ "domhandler": "^4.2.0"
+ }
+ },
+ "dot-case": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/dot-case/-/dot-case-3.0.4.tgz",
+ "integrity": "sha512-Kv5nKlh6yRrdrGvxeJ2e5y2eRUpkUosIW4A2AS38zwSz27zu7ufDwQPi5Jhs3XAlGNetl3bmnGhQsMtkKJnj3w==",
+ "requires": {
+ "no-case": "^3.0.4",
+ "tslib": "^2.0.3"
+ }
+ },
+ "dot-prop": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.3.0.tgz",
+ "integrity": "sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q==",
+ "requires": {
+ "is-obj": "^2.0.0"
+ },
+ "dependencies": {
+ "is-obj": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz",
+ "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w=="
+ }
+ }
+ },
+ "duplexer": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/duplexer/-/duplexer-0.1.2.tgz",
+ "integrity": "sha512-jtD6YG370ZCIi/9GTaJKQxWTZD045+4R4hTk/x1UyoqadyJ9x9CgSi1RlVDQF8U2sxLLSnFkCaMihqljHIWgMg=="
+ },
+ "duplexer3": {
+ "version": "0.1.5",
+ "resolved": "https://registry.npmjs.org/duplexer3/-/duplexer3-0.1.5.tgz",
+ "integrity": "sha512-1A8za6ws41LQgv9HrE/66jyC5yuSjQ3L/KOpFtoBilsAK2iA2wuS5rTt1OCzIvtS2V7nVmedsUU+DGRcjBmOYA=="
+ },
+ "eastasianwidth": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz",
+ "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA=="
+ },
+ "ee-first": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/ee-first/-/ee-first-1.1.1.tgz",
+ "integrity": "sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow=="
+ },
+ "electron-to-chromium": {
+ "version": "1.5.19",
+ "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.19.tgz",
+ "integrity": "sha512-kpLJJi3zxTR1U828P+LIUDZ5ohixyo68/IcYOHLqnbTPr/wdgn4i1ECvmALN9E16JPA6cvCG5UG79gVwVdEK5w=="
+ },
+ "emoji-regex": {
+ "version": "9.2.2",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz",
+ "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg=="
+ },
+ "emojis-list": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz",
+ "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q=="
+ },
+ "emoticon": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/emoticon/-/emoticon-3.2.0.tgz",
+ "integrity": "sha512-SNujglcLTTg+lDAcApPNgEdudaqQFiAbJCqzjNxJkvN9vAwCGi0uu8IUVvx+f16h+V44KCY6Y2yboroc9pilHg=="
+ },
+ "encodeurl": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz",
+ "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w=="
+ },
+ "end-of-stream": {
+ "version": "1.4.4",
+ "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz",
+ "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==",
+ "requires": {
+ "once": "^1.4.0"
+ }
+ },
+ "enhanced-resolve": {
+ "version": "5.17.1",
+ "resolved": "https://registry.npmjs.org/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz",
+ "integrity": "sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg==",
+ "requires": {
+ "graceful-fs": "^4.2.4",
+ "tapable": "^2.2.0"
+ }
+ },
+ "entities": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz",
+ "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw=="
+ },
+ "error-ex": {
+ "version": "1.3.2",
+ "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.2.tgz",
+ "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==",
+ "requires": {
+ "is-arrayish": "^0.2.1"
+ }
+ },
+ "es-define-property": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.0.tgz",
+ "integrity": "sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==",
+ "requires": {
+ "get-intrinsic": "^1.2.4"
+ }
+ },
+ "es-errors": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
+ "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw=="
+ },
+ "es-module-lexer": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.2.1.tgz",
+ "integrity": "sha512-9978wrXM50Y4rTMmW5kXIC09ZdXQZqkE4mxhwkd8VbzsGkXGPgV4zWuqQJgCEzYngdo2dYDa0l8xhX4fkSwJSg=="
+ },
+ "es6-promise": {
+ "version": "4.2.8",
+ "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz",
+ "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w=="
+ },
+ "escalade": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
+ "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA=="
+ },
+ "escape-goat": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz",
+ "integrity": "sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q=="
+ },
+ "escape-html": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz",
+ "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow=="
+ },
+ "escape-string-regexp": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
+ "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA=="
+ },
+ "eslint-scope": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-5.1.1.tgz",
+ "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==",
+ "requires": {
+ "esrecurse": "^4.3.0",
+ "estraverse": "^4.1.1"
+ }
+ },
+ "esprima": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/esprima/-/esprima-4.0.1.tgz",
+ "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A=="
+ },
+ "esrecurse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz",
+ "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==",
+ "requires": {
+ "estraverse": "^5.2.0"
+ },
+ "dependencies": {
+ "estraverse": {
+ "version": "5.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz",
+ "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA=="
+ }
+ }
+ },
+ "estraverse": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-4.3.0.tgz",
+ "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw=="
+ },
+ "esutils": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",
+ "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g=="
+ },
+ "eta": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/eta/-/eta-2.2.0.tgz",
+ "integrity": "sha512-UVQ72Rqjy/ZKQalzV5dCCJP80GrmPrMxh6NlNf+erV6ObL0ZFkhCstWRawS85z3smdr3d2wXPsZEY7rDPfGd2g=="
+ },
+ "etag": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz",
+ "integrity": "sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg=="
+ },
+ "eval": {
+ "version": "0.1.8",
+ "resolved": "https://registry.npmjs.org/eval/-/eval-0.1.8.tgz",
+ "integrity": "sha512-EzV94NYKoO09GLXGjXj9JIlXijVck4ONSr5wiCWDvhsvj5jxSrzTmRU/9C1DyB6uToszLs8aifA6NQ7lEQdvFw==",
+ "requires": {
+ "@types/node": "*",
+ "require-like": ">= 0.1.1"
+ }
+ },
+ "eventemitter3": {
+ "version": "4.0.7",
+ "resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-4.0.7.tgz",
+ "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw=="
+ },
+ "events": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/events/-/events-3.3.0.tgz",
+ "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q=="
+ },
+ "execa": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz",
+ "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==",
+ "requires": {
+ "cross-spawn": "^7.0.3",
+ "get-stream": "^6.0.0",
+ "human-signals": "^2.1.0",
+ "is-stream": "^2.0.0",
+ "merge-stream": "^2.0.0",
+ "npm-run-path": "^4.0.1",
+ "onetime": "^5.1.2",
+ "signal-exit": "^3.0.3",
+ "strip-final-newline": "^2.0.0"
+ },
+ "dependencies": {
+ "get-stream": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-6.0.1.tgz",
+ "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg=="
+ }
+ }
+ },
+ "express": {
+ "version": "4.20.0",
+ "resolved": "https://registry.npmjs.org/express/-/express-4.20.0.tgz",
+ "integrity": "sha512-pLdae7I6QqShF5PnNTCVn4hI91Dx0Grkn2+IAsMTgMIKuQVte2dN9PeGSSAME2FR8anOhVA62QDIUaWVfEXVLw==",
+ "requires": {
+ "accepts": "~1.3.8",
+ "array-flatten": "1.1.1",
+ "body-parser": "1.20.3",
+ "content-disposition": "0.5.4",
+ "content-type": "~1.0.4",
+ "cookie": "0.6.0",
+ "cookie-signature": "1.0.6",
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "encodeurl": "~2.0.0",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "finalhandler": "1.2.0",
+ "fresh": "0.5.2",
+ "http-errors": "2.0.0",
+ "merge-descriptors": "1.0.3",
+ "methods": "~1.1.2",
+ "on-finished": "2.4.1",
+ "parseurl": "~1.3.3",
+ "path-to-regexp": "0.1.10",
+ "proxy-addr": "~2.0.7",
+ "qs": "6.11.0",
+ "range-parser": "~1.2.1",
+ "safe-buffer": "5.2.1",
+ "send": "0.19.0",
+ "serve-static": "1.16.0",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "type-is": "~1.6.18",
+ "utils-merge": "1.0.1",
+ "vary": "~1.1.2"
+ },
+ "dependencies": {
+ "array-flatten": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz",
+ "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg=="
+ },
+ "content-disposition": {
+ "version": "0.5.4",
+ "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz",
+ "integrity": "sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==",
+ "requires": {
+ "safe-buffer": "5.2.1"
+ }
+ },
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "encodeurl": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-2.0.0.tgz",
+ "integrity": "sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg=="
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+ },
+ "path-to-regexp": {
+ "version": "0.1.10",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.10.tgz",
+ "integrity": "sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w=="
+ },
+ "range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="
+ }
+ }
+ },
+ "extend": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
+ "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g=="
+ },
+ "extend-shallow": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/extend-shallow/-/extend-shallow-2.0.1.tgz",
+ "integrity": "sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==",
+ "requires": {
+ "is-extendable": "^0.1.0"
+ }
+ },
+ "fast-deep-equal": {
+ "version": "3.1.3",
+ "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
+ "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q=="
+ },
+ "fast-glob": {
+ "version": "3.2.12",
+ "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.12.tgz",
+ "integrity": "sha512-DVj4CQIYYow0BlaelwK1pHl5n5cRSJfM60UA0zK891sVInoPri2Ekj7+e1CT3/3qxXenpI+nBBmQAcJPJgaj4w==",
+ "requires": {
+ "@nodelib/fs.stat": "^2.0.2",
+ "@nodelib/fs.walk": "^1.2.3",
+ "glob-parent": "^5.1.2",
+ "merge2": "^1.3.0",
+ "micromatch": "^4.0.4"
+ }
+ },
+ "fast-json-stable-stringify": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
+ "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw=="
+ },
+ "fast-url-parser": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/fast-url-parser/-/fast-url-parser-1.1.3.tgz",
+ "integrity": "sha512-5jOCVXADYNuRkKFzNJ0dCCewsZiYo0dz8QNYljkOpFC6r2U4OBmKtvm/Tsuh4w1YYdDqDb31a8TVhBJ2OJKdqQ==",
+ "requires": {
+ "punycode": "^1.3.2"
+ }
+ },
+ "fastq": {
+ "version": "1.15.0",
+ "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz",
+ "integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==",
+ "requires": {
+ "reusify": "^1.0.4"
+ }
+ },
+ "faye-websocket": {
+ "version": "0.11.4",
+ "resolved": "https://registry.npmjs.org/faye-websocket/-/faye-websocket-0.11.4.tgz",
+ "integrity": "sha512-CzbClwlXAuiRQAlUyfqPgvPoNKTckTPGfwZV4ZdAhVcP2lh9KUxJg2b5GkE7XbjKQ3YJnQ9z6D9ntLAlB+tP8g==",
+ "requires": {
+ "websocket-driver": ">=0.5.1"
+ }
+ },
+ "file-loader": {
+ "version": "6.2.0",
+ "resolved": "https://registry.npmjs.org/file-loader/-/file-loader-6.2.0.tgz",
+ "integrity": "sha512-qo3glqyTa61Ytg4u73GultjHGjdRyig3tG6lPtyX/jOEJvHif9uB0/OCI2Kif6ctF3caQTW2G5gym21oAsI4pw==",
+ "requires": {
+ "loader-utils": "^2.0.0",
+ "schema-utils": "^3.0.0"
+ },
+ "dependencies": {
+ "schema-utils": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.2.0.tgz",
+ "integrity": "sha512-0zTyLGyDJYd/MBxG1AhJkKa6fpEBds4OQO2ut0w7OYG+ZGhGea09lijvzsqegYSik88zc7cUtIlnnO+/BvD6gQ==",
+ "requires": {
+ "@types/json-schema": "^7.0.8",
+ "ajv": "^6.12.5",
+ "ajv-keywords": "^3.5.2"
+ }
+ }
+ }
+ },
+ "filesize": {
+ "version": "8.0.7",
+ "resolved": "https://registry.npmjs.org/filesize/-/filesize-8.0.7.tgz",
+ "integrity": "sha512-pjmC+bkIF8XI7fWaH8KxHcZL3DPybs1roSKP4rKDvy20tAWwIObE4+JIseG2byfGKhud5ZnM4YSGKBz7Sh0ndQ=="
+ },
+ "fill-range": {
+ "version": "7.1.1",
+ "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
+ "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
+ "requires": {
+ "to-regex-range": "^5.0.1"
+ }
+ },
+ "finalhandler": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.2.0.tgz",
+ "integrity": "sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==",
+ "requires": {
+ "debug": "2.6.9",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "on-finished": "2.4.1",
+ "parseurl": "~1.3.3",
+ "statuses": "2.0.1",
+ "unpipe": "~1.0.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+ }
+ }
+ },
+ "find-cache-dir": {
+ "version": "3.3.2",
+ "resolved": "https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-3.3.2.tgz",
+ "integrity": "sha512-wXZV5emFEjrridIgED11OoUKLxiYjAcqot/NJdAkOhlJ+vGzwhOAfcG5OX1jP+S0PcjEn8bdMJv+g2jwQ3Onig==",
+ "requires": {
+ "commondir": "^1.0.1",
+ "make-dir": "^3.0.2",
+ "pkg-dir": "^4.1.0"
+ }
+ },
+ "find-up": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-4.1.0.tgz",
+ "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==",
+ "requires": {
+ "locate-path": "^5.0.0",
+ "path-exists": "^4.0.0"
+ }
+ },
+ "follow-redirects": {
+ "version": "1.15.9",
+ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.9.tgz",
+ "integrity": "sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ=="
+ },
+ "fork-ts-checker-webpack-plugin": {
+ "version": "6.5.3",
+ "resolved": "https://registry.npmjs.org/fork-ts-checker-webpack-plugin/-/fork-ts-checker-webpack-plugin-6.5.3.tgz",
+ "integrity": "sha512-SbH/l9ikmMWycd5puHJKTkZJKddF4iRLyW3DeZ08HTI7NGyLS38MXd/KGgeWumQO7YNQbW2u/NtPT2YowbPaGQ==",
+ "requires": {
+ "@babel/code-frame": "^7.8.3",
+ "@types/json-schema": "^7.0.5",
+ "chalk": "^4.1.0",
+ "chokidar": "^3.4.2",
+ "cosmiconfig": "^6.0.0",
+ "deepmerge": "^4.2.2",
+ "fs-extra": "^9.0.0",
+ "glob": "^7.1.6",
+ "memfs": "^3.1.2",
+ "minimatch": "^3.0.4",
+ "schema-utils": "2.7.0",
+ "semver": "^7.3.2",
+ "tapable": "^1.0.0"
+ },
+ "dependencies": {
+ "cosmiconfig": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-6.0.0.tgz",
+ "integrity": "sha512-xb3ZL6+L8b9JLLCx3ZdoZy4+2ECphCMo2PwqgP1tlfVq6M6YReyzBJtvWWtbDSpNr9hn96pkCiZqUcFEc+54Qg==",
+ "requires": {
+ "@types/parse-json": "^4.0.0",
+ "import-fresh": "^3.1.0",
+ "parse-json": "^5.0.0",
+ "path-type": "^4.0.0",
+ "yaml": "^1.7.2"
+ }
+ },
+ "fs-extra": {
+ "version": "9.1.0",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz",
+ "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==",
+ "requires": {
+ "at-least-node": "^1.0.0",
+ "graceful-fs": "^4.2.0",
+ "jsonfile": "^6.0.1",
+ "universalify": "^2.0.0"
+ }
+ },
+ "jsonfile": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz",
+ "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==",
+ "requires": {
+ "graceful-fs": "^4.1.6",
+ "universalify": "^2.0.0"
+ }
+ },
+ "schema-utils": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.0.tgz",
+ "integrity": "sha512-0ilKFI6QQF5nxDZLFn2dMjvc4hjg/Wkg7rHd3jK6/A4a1Hl9VFdQWvgB1UMGoU94pad1P/8N7fMcEnLnSiju8A==",
+ "requires": {
+ "@types/json-schema": "^7.0.4",
+ "ajv": "^6.12.2",
+ "ajv-keywords": "^3.4.1"
+ }
+ },
+ "tapable": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/tapable/-/tapable-1.1.3.tgz",
+ "integrity": "sha512-4WK/bYZmj8xLr+HUCODHGF1ZFzsYffasLUgEiMBY4fgtltdO6B4WJtlSbPaDTLpYTcGVwM2qLnFTICEcNxs3kA=="
+ },
+ "universalify": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz",
+ "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ=="
+ }
+ }
+ },
+ "forwarded": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/forwarded/-/forwarded-0.2.0.tgz",
+ "integrity": "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow=="
+ },
+ "fraction.js": {
+ "version": "4.3.7",
+ "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz",
+ "integrity": "sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew=="
+ },
+ "fresh": {
+ "version": "0.5.2",
+ "resolved": "https://registry.npmjs.org/fresh/-/fresh-0.5.2.tgz",
+ "integrity": "sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q=="
+ },
+ "fs-extra": {
+ "version": "7.0.1",
+ "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-7.0.1.tgz",
+ "integrity": "sha512-YJDaCJZEnBmcbw13fvdAM9AwNOJwOzrE4pqMqBq5nFiEqXUqHwlK4B+3pUw6JNvfSPtX05xFHtYy/1ni01eGCw==",
+ "requires": {
+ "graceful-fs": "^4.1.2",
+ "jsonfile": "^4.0.0",
+ "universalify": "^0.1.0"
+ }
+ },
+ "fs-monkey": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/fs-monkey/-/fs-monkey-1.0.4.tgz",
+ "integrity": "sha512-INM/fWAxMICjttnD0DX1rBvinKskj5G1w+oy/pnm9u/tSlnBrzFonJMcalKJ30P8RRsPzKcCG7Q8l0jx5Fh9YQ=="
+ },
+ "fs.realpath": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
+ "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw=="
+ },
+ "fsevents": {
+ "version": "2.3.2",
+ "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
+ "integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
+ "optional": true
+ },
+ "function-bind": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
+ "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA=="
+ },
+ "gensync": {
+ "version": "1.0.0-beta.2",
+ "resolved": "https://registry.npmjs.org/gensync/-/gensync-1.0.0-beta.2.tgz",
+ "integrity": "sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg=="
+ },
+ "get-intrinsic": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.4.tgz",
+ "integrity": "sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==",
+ "requires": {
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "has-proto": "^1.0.1",
+ "has-symbols": "^1.0.3",
+ "hasown": "^2.0.0"
+ }
+ },
+ "get-own-enumerable-property-symbols": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/get-own-enumerable-property-symbols/-/get-own-enumerable-property-symbols-3.0.2.tgz",
+ "integrity": "sha512-I0UBV/XOz1XkIJHEUDMZAbzCThU/H8DxmSfmdGcKPnVhu2VfFqr34jr9777IyaTYvxjedWhqVIilEDsCdP5G6g=="
+ },
+ "get-stream": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-4.1.0.tgz",
+ "integrity": "sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w==",
+ "requires": {
+ "pump": "^3.0.0"
+ }
+ },
+ "github-slugger": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/github-slugger/-/github-slugger-1.5.0.tgz",
+ "integrity": "sha512-wIh+gKBI9Nshz2o46B0B3f5k/W+WI9ZAv6y5Dn5WJ5SK1t0TnDimB4WE5rmTD05ZAIn8HALCZVmCsvj0w0v0lw=="
+ },
+ "glob": {
+ "version": "7.2.3",
+ "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
+ "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
+ "requires": {
+ "fs.realpath": "^1.0.0",
+ "inflight": "^1.0.4",
+ "inherits": "2",
+ "minimatch": "^3.1.1",
+ "once": "^1.3.0",
+ "path-is-absolute": "^1.0.0"
+ }
+ },
+ "glob-parent": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz",
+ "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==",
+ "requires": {
+ "is-glob": "^4.0.1"
+ }
+ },
+ "glob-to-regexp": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.3.0.tgz",
+ "integrity": "sha512-Iozmtbqv0noj0uDDqoL0zNq0VBEfK2YFoMAZoxJe4cwphvLR+JskfF30QhXHOR4m3KrE6NLRYw+U9MRXvifyig=="
+ },
+ "global-dirs": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-3.0.1.tgz",
+ "integrity": "sha512-NBcGGFbBA9s1VzD41QXDG+3++t9Mn5t1FpLdhESY6oKY4gYTFpX4wO3sqGUa0Srjtbfj3szX0RnemmrVRUdULA==",
+ "requires": {
+ "ini": "2.0.0"
+ },
+ "dependencies": {
+ "ini": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ini/-/ini-2.0.0.tgz",
+ "integrity": "sha512-7PnF4oN3CvZF23ADhA5wRaYEQpJ8qygSkbtTXWBeXWXmEVRXK+1ITciHWwHhsjv1TmW0MgacIv6hEi5pX5NQdA=="
+ }
+ }
+ },
+ "global-modules": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/global-modules/-/global-modules-2.0.0.tgz",
+ "integrity": "sha512-NGbfmJBp9x8IxyJSd1P+otYK8vonoJactOogrVfFRIAEY1ukil8RSKDz2Yo7wh1oihl51l/r6W4epkeKJHqL8A==",
+ "requires": {
+ "global-prefix": "^3.0.0"
+ }
+ },
+ "global-prefix": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/global-prefix/-/global-prefix-3.0.0.tgz",
+ "integrity": "sha512-awConJSVCHVGND6x3tmMaKcQvwXLhjdkmomy2W+Goaui8YPgYgXJZewhg3fWC+DlfqqQuWg8AwqjGTD2nAPVWg==",
+ "requires": {
+ "ini": "^1.3.5",
+ "kind-of": "^6.0.2",
+ "which": "^1.3.1"
+ },
+ "dependencies": {
+ "which": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz",
+ "integrity": "sha512-HxJdYWq1MTIQbJ3nw0cqssHoTNU267KlrDuGZ1WYlxDStUtKUhOaJmh112/TZmHxxUfuJqPXSOm7tDyas0OSIQ==",
+ "requires": {
+ "isexe": "^2.0.0"
+ }
+ }
+ }
+ },
+ "globals": {
+ "version": "11.12.0",
+ "resolved": "https://registry.npmjs.org/globals/-/globals-11.12.0.tgz",
+ "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA=="
+ },
+ "globby": {
+ "version": "11.1.0",
+ "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz",
+ "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==",
+ "requires": {
+ "array-union": "^2.1.0",
+ "dir-glob": "^3.0.1",
+ "fast-glob": "^3.2.9",
+ "ignore": "^5.2.0",
+ "merge2": "^1.4.1",
+ "slash": "^3.0.0"
+ }
+ },
+ "gopd": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz",
+ "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==",
+ "requires": {
+ "get-intrinsic": "^1.1.3"
+ }
+ },
+ "got": {
+ "version": "9.6.0",
+ "resolved": "https://registry.npmjs.org/got/-/got-9.6.0.tgz",
+ "integrity": "sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q==",
+ "requires": {
+ "@sindresorhus/is": "^0.14.0",
+ "@szmarczak/http-timer": "^1.1.2",
+ "cacheable-request": "^6.0.0",
+ "decompress-response": "^3.3.0",
+ "duplexer3": "^0.1.4",
+ "get-stream": "^4.1.0",
+ "lowercase-keys": "^1.0.1",
+ "mimic-response": "^1.0.1",
+ "p-cancelable": "^1.0.0",
+ "to-readable-stream": "^1.0.0",
+ "url-parse-lax": "^3.0.0"
+ }
+ },
+ "graceful-fs": {
+ "version": "4.2.11",
+ "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
+ "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="
+ },
+ "gray-matter": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/gray-matter/-/gray-matter-4.0.3.tgz",
+ "integrity": "sha512-5v6yZd4JK3eMI3FqqCouswVqwugaA9r4dNZB1wwcmrD02QkV5H0y7XBQW8QwQqEaZY1pM9aqORSORhJRdNK44Q==",
+ "requires": {
+ "js-yaml": "^3.13.1",
+ "kind-of": "^6.0.2",
+ "section-matter": "^1.0.0",
+ "strip-bom-string": "^1.0.0"
+ },
+ "dependencies": {
+ "argparse": {
+ "version": "1.0.10",
+ "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz",
+ "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==",
+ "requires": {
+ "sprintf-js": "~1.0.2"
+ }
+ },
+ "js-yaml": {
+ "version": "3.14.1",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-3.14.1.tgz",
+ "integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==",
+ "requires": {
+ "argparse": "^1.0.7",
+ "esprima": "^4.0.0"
+ }
+ }
+ }
+ },
+ "gzip-size": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/gzip-size/-/gzip-size-6.0.0.tgz",
+ "integrity": "sha512-ax7ZYomf6jqPTQ4+XCpUGyXKHk5WweS+e05MBO4/y3WJ5RkmPXNKvX+bx1behVILVwr6JSQvZAku021CHPXG3Q==",
+ "requires": {
+ "duplexer": "^0.1.2"
+ }
+ },
+ "handle-thing": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz",
+ "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg=="
+ },
+ "handlebars": {
+ "version": "4.7.7",
+ "resolved": "https://registry.npmjs.org/handlebars/-/handlebars-4.7.7.tgz",
+ "integrity": "sha512-aAcXm5OAfE/8IXkcZvCepKU3VzW1/39Fb5ZuqMtgI/hT8X2YgoMvBY5dLhq/cpOvw7Lk1nK/UF71aLG/ZnVYRA==",
+ "requires": {
+ "minimist": "^1.2.5",
+ "neo-async": "^2.6.0",
+ "source-map": "^0.6.1",
+ "uglify-js": "^3.1.4",
+ "wordwrap": "^1.0.0"
+ }
+ },
+ "has": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has/-/has-1.0.3.tgz",
+ "integrity": "sha512-f2dvO0VU6Oej7RkWJGrehjbzMAjFp5/VKPp5tTpWIV4JHHZK1/BxbFRtf/siA2SWTe09caDmVtYYzWEIbBS4zw==",
+ "requires": {
+ "function-bind": "^1.1.1"
+ }
+ },
+ "has-flag": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
+ "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ=="
+ },
+ "has-property-descriptors": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz",
+ "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==",
+ "requires": {
+ "es-define-property": "^1.0.0"
+ }
+ },
+ "has-proto": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz",
+ "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg=="
+ },
+ "has-symbols": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz",
+ "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A=="
+ },
+ "has-yarn": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/has-yarn/-/has-yarn-2.1.0.tgz",
+ "integrity": "sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw=="
+ },
+ "hasown": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
+ "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
+ "requires": {
+ "function-bind": "^1.1.2"
+ }
+ },
+ "hast-to-hyperscript": {
+ "version": "9.0.1",
+ "resolved": "https://registry.npmjs.org/hast-to-hyperscript/-/hast-to-hyperscript-9.0.1.tgz",
+ "integrity": "sha512-zQgLKqF+O2F72S1aa4y2ivxzSlko3MAvxkwG8ehGmNiqd98BIN3JM1rAJPmplEyLmGLO2QZYJtIneOSZ2YbJuA==",
+ "requires": {
+ "@types/unist": "^2.0.3",
+ "comma-separated-tokens": "^1.0.0",
+ "property-information": "^5.3.0",
+ "space-separated-tokens": "^1.0.0",
+ "style-to-object": "^0.3.0",
+ "unist-util-is": "^4.0.0",
+ "web-namespaces": "^1.0.0"
+ }
+ },
+ "hast-util-from-parse5": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/hast-util-from-parse5/-/hast-util-from-parse5-6.0.1.tgz",
+ "integrity": "sha512-jeJUWiN5pSxW12Rh01smtVkZgZr33wBokLzKLwinYOUfSzm1Nl/c3GUGebDyOKjdsRgMvoVbV0VpAcpjF4NrJA==",
+ "requires": {
+ "@types/parse5": "^5.0.0",
+ "hastscript": "^6.0.0",
+ "property-information": "^5.0.0",
+ "vfile": "^4.0.0",
+ "vfile-location": "^3.2.0",
+ "web-namespaces": "^1.0.0"
+ }
+ },
+ "hast-util-parse-selector": {
+ "version": "2.2.5",
+ "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-2.2.5.tgz",
+ "integrity": "sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ=="
+ },
+ "hast-util-raw": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/hast-util-raw/-/hast-util-raw-6.0.1.tgz",
+ "integrity": "sha512-ZMuiYA+UF7BXBtsTBNcLBF5HzXzkyE6MLzJnL605LKE8GJylNjGc4jjxazAHUtcwT5/CEt6afRKViYB4X66dig==",
+ "requires": {
+ "@types/hast": "^2.0.0",
+ "hast-util-from-parse5": "^6.0.0",
+ "hast-util-to-parse5": "^6.0.0",
+ "html-void-elements": "^1.0.0",
+ "parse5": "^6.0.0",
+ "unist-util-position": "^3.0.0",
+ "vfile": "^4.0.0",
+ "web-namespaces": "^1.0.0",
+ "xtend": "^4.0.0",
+ "zwitch": "^1.0.0"
+ }
+ },
+ "hast-util-to-parse5": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/hast-util-to-parse5/-/hast-util-to-parse5-6.0.0.tgz",
+ "integrity": "sha512-Lu5m6Lgm/fWuz8eWnrKezHtVY83JeRGaNQ2kn9aJgqaxvVkFCZQBEhgodZUDUvoodgyROHDb3r5IxAEdl6suJQ==",
+ "requires": {
+ "hast-to-hyperscript": "^9.0.0",
+ "property-information": "^5.0.0",
+ "web-namespaces": "^1.0.0",
+ "xtend": "^4.0.0",
+ "zwitch": "^1.0.0"
+ }
+ },
+ "hastscript": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-6.0.0.tgz",
+ "integrity": "sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w==",
+ "requires": {
+ "@types/hast": "^2.0.0",
+ "comma-separated-tokens": "^1.0.0",
+ "hast-util-parse-selector": "^2.0.0",
+ "property-information": "^5.0.0",
+ "space-separated-tokens": "^1.0.0"
+ }
+ },
+ "he": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz",
+ "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw=="
+ },
+ "history": {
+ "version": "4.10.1",
+ "resolved": "https://registry.npmjs.org/history/-/history-4.10.1.tgz",
+ "integrity": "sha512-36nwAD620w12kuzPAsyINPWJqlNbij+hpK1k9XRloDtym8mxzGYl2c17LnV6IAGB2Dmg4tEa7G7DlawS0+qjew==",
+ "requires": {
+ "@babel/runtime": "^7.1.2",
+ "loose-envify": "^1.2.0",
+ "resolve-pathname": "^3.0.0",
+ "tiny-invariant": "^1.0.2",
+ "tiny-warning": "^1.0.0",
+ "value-equal": "^1.0.1"
+ }
+ },
+ "hoist-non-react-statics": {
+ "version": "3.3.2",
+ "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz",
+ "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==",
+ "requires": {
+ "react-is": "^16.7.0"
+ }
+ },
+ "hpack.js": {
+ "version": "2.1.6",
+ "resolved": "https://registry.npmjs.org/hpack.js/-/hpack.js-2.1.6.tgz",
+ "integrity": "sha512-zJxVehUdMGIKsRaNt7apO2Gqp0BdqW5yaiGHXXmbpvxgBYVZnAql+BJb4RO5ad2MgpbZKn5G6nMnegrH1FcNYQ==",
+ "requires": {
+ "inherits": "^2.0.1",
+ "obuf": "^1.0.0",
+ "readable-stream": "^2.0.1",
+ "wbuf": "^1.1.0"
+ },
+ "dependencies": {
+ "isarray": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+ "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ=="
+ },
+ "readable-stream": {
+ "version": "2.3.8",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.8.tgz",
+ "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==",
+ "requires": {
+ "core-util-is": "~1.0.0",
+ "inherits": "~2.0.3",
+ "isarray": "~1.0.0",
+ "process-nextick-args": "~2.0.0",
+ "safe-buffer": "~5.1.1",
+ "string_decoder": "~1.1.1",
+ "util-deprecate": "~1.0.1"
+ }
+ },
+ "safe-buffer": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz",
+ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g=="
+ },
+ "string_decoder": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+ "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
+ "requires": {
+ "safe-buffer": "~5.1.0"
+ }
+ }
+ }
+ },
+ "html-entities": {
+ "version": "2.3.5",
+ "resolved": "https://registry.npmjs.org/html-entities/-/html-entities-2.3.5.tgz",
+ "integrity": "sha512-72TJlcMkYsEJASa/3HnX7VT59htM7iSHbH59NSZbtc+22Ap0Txnlx91sfeB+/A7wNZg7UxtZdhAW4y+/jimrdg=="
+ },
+ "html-minifier-terser": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/html-minifier-terser/-/html-minifier-terser-6.1.0.tgz",
+ "integrity": "sha512-YXxSlJBZTP7RS3tWnQw74ooKa6L9b9i9QYXY21eUEvhZ3u9XLfv6OnFsQq6RxkhHygsaUMvYsZRV5rU/OVNZxw==",
+ "requires": {
+ "camel-case": "^4.1.2",
+ "clean-css": "^5.2.2",
+ "commander": "^8.3.0",
+ "he": "^1.2.0",
+ "param-case": "^3.0.4",
+ "relateurl": "^0.2.7",
+ "terser": "^5.10.0"
+ },
+ "dependencies": {
+ "commander": {
+ "version": "8.3.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-8.3.0.tgz",
+ "integrity": "sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww=="
+ }
+ }
+ },
+ "html-tags": {
+ "version": "3.3.1",
+ "resolved": "https://registry.npmjs.org/html-tags/-/html-tags-3.3.1.tgz",
+ "integrity": "sha512-ztqyC3kLto0e9WbNp0aeP+M3kTt+nbaIveGmUxAtZa+8iFgKLUOD4YKM5j+f3QD89bra7UeumolZHKuOXnTmeQ=="
+ },
+ "html-void-elements": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/html-void-elements/-/html-void-elements-1.0.5.tgz",
+ "integrity": "sha512-uE/TxKuyNIcx44cIWnjr/rfIATDH7ZaOMmstu0CwhFG1Dunhlp4OC6/NMbhiwoq5BpW0ubi303qnEk/PZj614w=="
+ },
+ "html-webpack-plugin": {
+ "version": "5.5.1",
+ "resolved": "https://registry.npmjs.org/html-webpack-plugin/-/html-webpack-plugin-5.5.1.tgz",
+ "integrity": "sha512-cTUzZ1+NqjGEKjmVgZKLMdiFg3m9MdRXkZW2OEe69WYVi5ONLMmlnSZdXzGGMOq0C8jGDrL6EWyEDDUioHO/pA==",
+ "requires": {
+ "@types/html-minifier-terser": "^6.0.0",
+ "html-minifier-terser": "^6.0.2",
+ "lodash": "^4.17.21",
+ "pretty-error": "^4.0.0",
+ "tapable": "^2.0.0"
+ }
+ },
+ "htmlparser2": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-6.1.0.tgz",
+ "integrity": "sha512-gyyPk6rgonLFEDGoeRgQNaEUvdJ4ktTmmUh/h2t7s+M8oPpIPxgNACWa+6ESR57kXstwqPiCut0V8NRpcwgU7A==",
+ "requires": {
+ "domelementtype": "^2.0.1",
+ "domhandler": "^4.0.0",
+ "domutils": "^2.5.2",
+ "entities": "^2.0.0"
+ },
+ "dependencies": {
+ "entities": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/entities/-/entities-2.2.0.tgz",
+ "integrity": "sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A=="
+ }
+ }
+ },
+ "http-cache-semantics": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz",
+ "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ=="
+ },
+ "http-deceiver": {
+ "version": "1.2.7",
+ "resolved": "https://registry.npmjs.org/http-deceiver/-/http-deceiver-1.2.7.tgz",
+ "integrity": "sha512-LmpOGxTfbpgtGVxJrj5k7asXHCgNZp5nLfp+hWc8QQRqtb7fUy6kRY3BO1h9ddF6yIPYUARgxGOwB42DnxIaNw=="
+ },
+ "http-errors": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz",
+ "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==",
+ "requires": {
+ "depd": "2.0.0",
+ "inherits": "2.0.4",
+ "setprototypeof": "1.2.0",
+ "statuses": "2.0.1",
+ "toidentifier": "1.0.1"
+ }
+ },
+ "http-parser-js": {
+ "version": "0.5.8",
+ "resolved": "https://registry.npmjs.org/http-parser-js/-/http-parser-js-0.5.8.tgz",
+ "integrity": "sha512-SGeBX54F94Wgu5RH3X5jsDtf4eHyRogWX1XGT3b4HuW3tQPM4AaBzoUji/4AAJNXCEOWZ5O0DgZmJw1947gD5Q=="
+ },
+ "http-proxy": {
+ "version": "1.18.1",
+ "resolved": "https://registry.npmjs.org/http-proxy/-/http-proxy-1.18.1.tgz",
+ "integrity": "sha512-7mz/721AbnJwIVbnaSv1Cz3Am0ZLT/UBwkC92VlxhXv/k/BBQfM2fXElQNC27BVGr0uwUpplYPQM9LnaBMR5NQ==",
+ "requires": {
+ "eventemitter3": "^4.0.0",
+ "follow-redirects": "^1.0.0",
+ "requires-port": "^1.0.0"
+ }
+ },
+ "http-proxy-middleware": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/http-proxy-middleware/-/http-proxy-middleware-2.0.6.tgz",
+ "integrity": "sha512-ya/UeJ6HVBYxrgYotAZo1KvPWlgB48kUJLDePFeneHsVujFaW5WNj2NgWCAE//B1Dl02BIfYlpNgBy8Kf8Rjmw==",
+ "requires": {
+ "@types/http-proxy": "^1.17.8",
+ "http-proxy": "^1.18.1",
+ "is-glob": "^4.0.1",
+ "is-plain-obj": "^3.0.0",
+ "micromatch": "^4.0.2"
+ },
+ "dependencies": {
+ "is-plain-obj": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-3.0.0.tgz",
+ "integrity": "sha512-gwsOE28k+23GP1B6vFl1oVh/WOzmawBrKwo5Ev6wMKzPkaXaCDIQKzLnvsA42DRlbVTWorkgTKIviAKCWkfUwA=="
+ }
+ }
+ },
+ "human-signals": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-2.1.0.tgz",
+ "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw=="
+ },
+ "iconv-lite": {
+ "version": "0.4.24",
+ "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.4.24.tgz",
+ "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==",
+ "requires": {
+ "safer-buffer": ">= 2.1.2 < 3"
+ }
+ },
+ "icss-utils": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/icss-utils/-/icss-utils-5.1.0.tgz",
+ "integrity": "sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==",
+ "requires": {}
+ },
+ "ignore": {
+ "version": "5.2.4",
+ "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz",
+ "integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ=="
+ },
+ "image-size": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.1.1.tgz",
+ "integrity": "sha512-541xKlUw6jr/6gGuk92F+mYM5zaFAc5ahphvkqvNe2bQ6gVBkd6bfrmVJ2t4KDAfikAYZyIqTnktX3i6/aQDrQ==",
+ "requires": {
+ "queue": "6.0.2"
+ }
+ },
+ "immer": {
+ "version": "9.0.21",
+ "resolved": "https://registry.npmjs.org/immer/-/immer-9.0.21.tgz",
+ "integrity": "sha512-bc4NBHqOqSfRW7POMkHd51LvClaeMXpm8dx0e8oE2GORbq5aRK7Bxl4FyzVLdGtLmvLKL7BTDBG5ACQm4HWjTA=="
+ },
+ "import-fresh": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz",
+ "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==",
+ "requires": {
+ "parent-module": "^1.0.0",
+ "resolve-from": "^4.0.0"
+ }
+ },
+ "import-lazy": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/import-lazy/-/import-lazy-2.1.0.tgz",
+ "integrity": "sha512-m7ZEHgtw69qOGw+jwxXkHlrlIPdTGkyh66zXZ1ajZbxkDBNjSY/LGbmjc7h0s2ELsUDTAhFr55TrPSSqJGPG0A=="
+ },
+ "imurmurhash": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz",
+ "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA=="
+ },
+ "indent-string": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-4.0.0.tgz",
+ "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg=="
+ },
+ "inflight": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
+ "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
+ "requires": {
+ "once": "^1.3.0",
+ "wrappy": "1"
+ }
+ },
+ "inherits": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
+ "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
+ },
+ "ini": {
+ "version": "1.3.8",
+ "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz",
+ "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew=="
+ },
+ "inline-style-parser": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/inline-style-parser/-/inline-style-parser-0.1.1.tgz",
+ "integrity": "sha512-7NXolsK4CAS5+xvdj5OMMbI962hU/wvwoxk+LWR9Ek9bVtyuuYScDN6eS0rUm6TxApFpw7CX1o4uJzcd4AyD3Q=="
+ },
+ "interpret": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/interpret/-/interpret-1.4.0.tgz",
+ "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA=="
+ },
+ "invariant": {
+ "version": "2.2.4",
+ "resolved": "https://registry.npmjs.org/invariant/-/invariant-2.2.4.tgz",
+ "integrity": "sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==",
+ "requires": {
+ "loose-envify": "^1.0.0"
+ }
+ },
+ "ipaddr.js": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-2.1.0.tgz",
+ "integrity": "sha512-LlbxQ7xKzfBusov6UMi4MFpEg0m+mAm9xyNGEduwXMEDuf4WfzB/RZwMVYEd7IKGvh4IUkEXYxtAVu9T3OelJQ=="
+ },
+ "is-absolute": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-absolute/-/is-absolute-1.0.0.tgz",
+ "integrity": "sha512-dOWoqflvcydARa360Gvv18DZ/gRuHKi2NU/wU5X1ZFzdYfH29nkiNZsF3mp4OJ3H4yo9Mx8A/uAGNzpzPN3yBA==",
+ "requires": {
+ "is-relative": "^1.0.0",
+ "is-windows": "^1.0.1"
+ }
+ },
+ "is-alphabetical": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz",
+ "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg=="
+ },
+ "is-alphanumerical": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz",
+ "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==",
+ "requires": {
+ "is-alphabetical": "^1.0.0",
+ "is-decimal": "^1.0.0"
+ }
+ },
+ "is-arrayish": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz",
+ "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg=="
+ },
+ "is-binary-path": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
+ "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==",
+ "requires": {
+ "binary-extensions": "^2.0.0"
+ }
+ },
+ "is-buffer": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/is-buffer/-/is-buffer-2.0.5.tgz",
+ "integrity": "sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ=="
+ },
+ "is-ci": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz",
+ "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==",
+ "requires": {
+ "ci-info": "^2.0.0"
+ },
+ "dependencies": {
+ "ci-info": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz",
+ "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ=="
+ }
+ }
+ },
+ "is-core-module": {
+ "version": "2.12.1",
+ "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.12.1.tgz",
+ "integrity": "sha512-Q4ZuBAe2FUsKtyQJoQHlvP8OvBERxO3jEmy1I7hcRXcJBGGHFh/aJBswbXuS9sgrDH2QUO8ilkwNPHvHMd8clg==",
+ "requires": {
+ "has": "^1.0.3"
+ }
+ },
+ "is-decimal": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz",
+ "integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw=="
+ },
+ "is-docker": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz",
+ "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ=="
+ },
+ "is-extendable": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/is-extendable/-/is-extendable-0.1.1.tgz",
+ "integrity": "sha512-5BMULNob1vgFX6EjQw5izWDxrecWK9AM72rugNr0TFldMOi0fj6Jk+zeKIt0xGj4cEfQIJth4w3OKWOJ4f+AFw=="
+ },
+ "is-extglob": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz",
+ "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ=="
+ },
+ "is-fullwidth-code-point": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
+ "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg=="
+ },
+ "is-glob": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz",
+ "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==",
+ "requires": {
+ "is-extglob": "^2.1.1"
+ }
+ },
+ "is-hexadecimal": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz",
+ "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw=="
+ },
+ "is-installed-globally": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.4.0.tgz",
+ "integrity": "sha512-iwGqO3J21aaSkC7jWnHP/difazwS7SFeIqxv6wEtLU8Y5KlzFTjyqcSIT0d8s4+dDhKytsk9PJZ2BkS5eZwQRQ==",
+ "requires": {
+ "global-dirs": "^3.0.0",
+ "is-path-inside": "^3.0.2"
+ }
+ },
+ "is-npm": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/is-npm/-/is-npm-5.0.0.tgz",
+ "integrity": "sha512-WW/rQLOazUq+ST/bCAVBp/2oMERWLsR7OrKyt052dNDk4DHcDE0/7QSXITlmi+VBcV13DfIbysG3tZJm5RfdBA=="
+ },
+ "is-number": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
+ "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng=="
+ },
+ "is-obj": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz",
+ "integrity": "sha512-l4RyHgRqGN4Y3+9JHVrNqO+tN0rV5My76uW5/nuO4K1b6vw5G8d/cmFjP9tRfEsdhZNt0IFdZuK/c2Vr4Nb+Qg=="
+ },
+ "is-path-cwd": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/is-path-cwd/-/is-path-cwd-2.2.0.tgz",
+ "integrity": "sha512-w942bTcih8fdJPJmQHFzkS76NEP8Kzzvmw92cXsazb8intwLqPibPPdXf4ANdKV3rYMuuQYGIWtvz9JilB3NFQ=="
+ },
+ "is-path-inside": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz",
+ "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ=="
+ },
+ "is-plain-obj": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-plain-obj/-/is-plain-obj-2.1.0.tgz",
+ "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA=="
+ },
+ "is-plain-object": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-2.0.4.tgz",
+ "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==",
+ "requires": {
+ "isobject": "^3.0.1"
+ }
+ },
+ "is-regexp": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-regexp/-/is-regexp-1.0.0.tgz",
+ "integrity": "sha512-7zjFAPO4/gwyQAAgRRmqeEeyIICSdmCqa3tsVHMdBzaXXRiqopZL4Cyghg/XulGWrtABTpbnYYzzIRffLkP4oA=="
+ },
+ "is-relative": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-relative/-/is-relative-1.0.0.tgz",
+ "integrity": "sha512-Kw/ReK0iqwKeu0MITLFuj0jbPAmEiOsIwyIXvvbfa6QfmN9pkD1M+8pdk7Rl/dTKbH34/XBFMbgD4iMJhLQbGA==",
+ "requires": {
+ "is-unc-path": "^1.0.0"
+ }
+ },
+ "is-root": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/is-root/-/is-root-2.1.0.tgz",
+ "integrity": "sha512-AGOriNp96vNBd3HtU+RzFEc75FfR5ymiYv8E553I71SCeXBiMsVDUtdio1OEFvrPyLIQ9tVR5RxXIFe5PUFjMg=="
+ },
+ "is-stream": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.1.tgz",
+ "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg=="
+ },
+ "is-typedarray": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz",
+ "integrity": "sha512-cyA56iCMHAh5CdzjJIa4aohJyeO1YbwLi3Jc35MmRU6poroFjIGZzUzupGiRPOjgHg9TLu43xbpwXk523fMxKA=="
+ },
+ "is-unc-path": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/is-unc-path/-/is-unc-path-1.0.0.tgz",
+ "integrity": "sha512-mrGpVd0fs7WWLfVsStvgF6iEJnbjDFZh9/emhRDcGWTduTfNHd9CHeUwH3gYIjdbwo4On6hunkztwOaAw0yllQ==",
+ "requires": {
+ "unc-path-regex": "^0.1.2"
+ }
+ },
+ "is-whitespace-character": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-whitespace-character/-/is-whitespace-character-1.0.4.tgz",
+ "integrity": "sha512-SDweEzfIZM0SJV0EUga669UTKlmL0Pq8Lno0QDQsPnvECB3IM2aP0gdx5TrU0A01MAPfViaZiI2V1QMZLaKK5w=="
+ },
+ "is-windows": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/is-windows/-/is-windows-1.0.2.tgz",
+ "integrity": "sha512-eXK1UInq2bPmjyX6e3VHIzMLobc4J94i4AWn+Hpq3OU5KkrRC96OAcR3PRJ/pGu6m8TRnBHP9dkXQVsT/COVIA=="
+ },
+ "is-word-character": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/is-word-character/-/is-word-character-1.0.4.tgz",
+ "integrity": "sha512-5SMO8RVennx3nZrqtKwCGyyetPE9VDba5ugvKLaD4KopPG5kR4mQ7tNt/r7feL5yt5h3lpuBbIUmCOG2eSzXHA=="
+ },
+ "is-wsl": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz",
+ "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==",
+ "requires": {
+ "is-docker": "^2.0.0"
+ }
+ },
+ "is-yarn-global": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/is-yarn-global/-/is-yarn-global-0.3.0.tgz",
+ "integrity": "sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw=="
+ },
+ "isarray": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+ "integrity": "sha512-D2S+3GLxWH+uhrNEcoh/fnmYeP8E8/zHl644d/jdA0g2uyXvy3sb0qxotE+ne0LtccHknQzWwZEzhak7oJ0COQ=="
+ },
+ "isexe": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
+ "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="
+ },
+ "isobject": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/isobject/-/isobject-3.0.1.tgz",
+ "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg=="
+ },
+ "jest-util": {
+ "version": "29.5.0",
+ "resolved": "https://registry.npmjs.org/jest-util/-/jest-util-29.5.0.tgz",
+ "integrity": "sha512-RYMgG/MTadOr5t8KdhejfvUU82MxsCu5MF6KuDUHl+NuwzUt+Sm6jJWxTJVrDR1j5M/gJVCPKQEpWXY+yIQ6lQ==",
+ "requires": {
+ "@jest/types": "^29.5.0",
+ "@types/node": "*",
+ "chalk": "^4.0.0",
+ "ci-info": "^3.2.0",
+ "graceful-fs": "^4.2.9",
+ "picomatch": "^2.2.3"
+ }
+ },
+ "jest-worker": {
+ "version": "29.5.0",
+ "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-29.5.0.tgz",
+ "integrity": "sha512-NcrQnevGoSp4b5kg+akIpthoAFHxPBcb5P6mYPY0fUNT+sSvmtu6jlkEle3anczUKIKEbMxFimk9oTP/tpIPgA==",
+ "requires": {
+ "@types/node": "*",
+ "jest-util": "^29.5.0",
+ "merge-stream": "^2.0.0",
+ "supports-color": "^8.0.0"
+ },
+ "dependencies": {
+ "supports-color": {
+ "version": "8.1.1",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz",
+ "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==",
+ "requires": {
+ "has-flag": "^4.0.0"
+ }
+ }
+ }
+ },
+ "jiti": {
+ "version": "1.18.2",
+ "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.18.2.tgz",
+ "integrity": "sha512-QAdOptna2NYiSSpv0O/BwoHBSmz4YhpzJHyi+fnMRTXFjp7B8i/YG5Z8IfusxB1ufjcD2Sre1F3R+nX3fvy7gg=="
+ },
+ "joi": {
+ "version": "17.9.2",
+ "resolved": "https://registry.npmjs.org/joi/-/joi-17.9.2.tgz",
+ "integrity": "sha512-Itk/r+V4Dx0V3c7RLFdRh12IOjySm2/WGPMubBT92cQvRfYZhPM2W0hZlctjj72iES8jsRCwp7S/cRmWBnJ4nw==",
+ "requires": {
+ "@hapi/hoek": "^9.0.0",
+ "@hapi/topo": "^5.0.0",
+ "@sideway/address": "^4.1.3",
+ "@sideway/formula": "^3.0.1",
+ "@sideway/pinpoint": "^2.0.0"
+ }
+ },
+ "js-tokens": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
+ "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ=="
+ },
+ "js-yaml": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz",
+ "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==",
+ "requires": {
+ "argparse": "^2.0.1"
+ }
+ },
+ "jsesc": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-2.5.2.tgz",
+ "integrity": "sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA=="
+ },
+ "json-buffer": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.0.tgz",
+ "integrity": "sha512-CuUqjv0FUZIdXkHPI8MezCnFCdaTAacej1TZYulLoAg1h/PhwkdXFN4V/gzY4g+fMBCOV2xF+rp7t2XD2ns/NQ=="
+ },
+ "json-parse-even-better-errors": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz",
+ "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w=="
+ },
+ "json-schema-traverse": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
+ "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="
+ },
+ "json5": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/json5/-/json5-2.2.3.tgz",
+ "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg=="
+ },
+ "jsonfile": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
+ "integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==",
+ "requires": {
+ "graceful-fs": "^4.1.6"
+ }
+ },
+ "keyv": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/keyv/-/keyv-3.1.0.tgz",
+ "integrity": "sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA==",
+ "requires": {
+ "json-buffer": "3.0.0"
+ }
+ },
+ "kind-of": {
+ "version": "6.0.3",
+ "resolved": "https://registry.npmjs.org/kind-of/-/kind-of-6.0.3.tgz",
+ "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw=="
+ },
+ "kleur": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/kleur/-/kleur-3.0.3.tgz",
+ "integrity": "sha512-eTIzlVOSUR+JxdDFepEYcBMtZ9Qqdef+rnzWdRZuMbOywu5tO2w2N7rqjoANZ5k9vywhL6Br1VRjUIgTQx4E8w=="
+ },
+ "klona": {
+ "version": "2.0.6",
+ "resolved": "https://registry.npmjs.org/klona/-/klona-2.0.6.tgz",
+ "integrity": "sha512-dhG34DXATL5hSxJbIexCft8FChFXtmskoZYnoPWjXQuebWYCNkVeV3KkGegCK9CP1oswI/vQibS2GY7Em/sJJA=="
+ },
+ "latest-version": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/latest-version/-/latest-version-5.1.0.tgz",
+ "integrity": "sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA==",
+ "requires": {
+ "package-json": "^6.3.0"
+ }
+ },
+ "launch-editor": {
+ "version": "2.6.0",
+ "resolved": "https://registry.npmjs.org/launch-editor/-/launch-editor-2.6.0.tgz",
+ "integrity": "sha512-JpDCcQnyAAzZZaZ7vEiSqL690w7dAEyLao+KC96zBplnYbJS7TYNjvM3M7y3dGz+v7aIsJk3hllWuc0kWAjyRQ==",
+ "requires": {
+ "picocolors": "^1.0.0",
+ "shell-quote": "^1.7.3"
+ }
+ },
+ "leven": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz",
+ "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A=="
+ },
+ "lilconfig": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-2.1.0.tgz",
+ "integrity": "sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ=="
+ },
+ "lines-and-columns": {
+ "version": "1.2.4",
+ "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz",
+ "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg=="
+ },
+ "loader-runner": {
+ "version": "4.3.0",
+ "resolved": "https://registry.npmjs.org/loader-runner/-/loader-runner-4.3.0.tgz",
+ "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg=="
+ },
+ "loader-utils": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-2.0.4.tgz",
+ "integrity": "sha512-xXqpXoINfFhgua9xiqD8fPFHgkoq1mmmpE92WlDbm9rNRd/EbRb+Gqf908T2DMfuHjjJlksiK2RbHVOdD/MqSw==",
+ "requires": {
+ "big.js": "^5.2.2",
+ "emojis-list": "^3.0.0",
+ "json5": "^2.1.2"
+ }
+ },
+ "locate-path": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-5.0.0.tgz",
+ "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==",
+ "requires": {
+ "p-locate": "^4.1.0"
+ }
+ },
+ "lodash": {
+ "version": "4.17.21",
+ "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz",
+ "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg=="
+ },
+ "lodash-es": {
+ "version": "4.17.21",
+ "resolved": "https://registry.npmjs.org/lodash-es/-/lodash-es-4.17.21.tgz",
+ "integrity": "sha512-mKnC+QJ9pWVzv+C4/U3rRsHapFfHvQFoFB92e52xeyGMcX6/OlIl78je1u8vePzYZSkkogMPJ2yjxxsb89cxyw=="
+ },
+ "lodash.debounce": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz",
+ "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow=="
+ },
+ "lodash.memoize": {
+ "version": "4.1.2",
+ "resolved": "https://registry.npmjs.org/lodash.memoize/-/lodash.memoize-4.1.2.tgz",
+ "integrity": "sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag=="
+ },
+ "lodash.uniq": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/lodash.uniq/-/lodash.uniq-4.5.0.tgz",
+ "integrity": "sha512-xfBaXQd9ryd9dlSDvnvI0lvxfLJlYAZzXomUYzLKtUeOQvOP5piqAWuGtrhWeqaXK9hhoM/iyJc5AV+XfsX3HQ=="
+ },
+ "loose-envify": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/loose-envify/-/loose-envify-1.4.0.tgz",
+ "integrity": "sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==",
+ "requires": {
+ "js-tokens": "^3.0.0 || ^4.0.0"
+ }
+ },
+ "lower-case": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/lower-case/-/lower-case-2.0.2.tgz",
+ "integrity": "sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==",
+ "requires": {
+ "tslib": "^2.0.3"
+ }
+ },
+ "lowercase-keys": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-1.0.1.tgz",
+ "integrity": "sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA=="
+ },
+ "lru-cache": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-5.1.1.tgz",
+ "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==",
+ "requires": {
+ "yallist": "^3.0.2"
+ }
+ },
+ "make-dir": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.1.0.tgz",
+ "integrity": "sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw==",
+ "requires": {
+ "semver": "^6.0.0"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="
+ }
+ }
+ },
+ "markdown-escapes": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/markdown-escapes/-/markdown-escapes-1.0.4.tgz",
+ "integrity": "sha512-8z4efJYk43E0upd0NbVXwgSTQs6cT3T06etieCMEg7dRbzCbxUCK/GHlX8mhHRDcp+OLlHkPKsvqQTCvsRl2cg=="
+ },
+ "mdast-squeeze-paragraphs": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-squeeze-paragraphs/-/mdast-squeeze-paragraphs-4.0.0.tgz",
+ "integrity": "sha512-zxdPn69hkQ1rm4J+2Cs2j6wDEv7O17TfXTJ33tl/+JPIoEmtV9t2ZzBM5LPHE8QlHsmVD8t3vPKCyY3oH+H8MQ==",
+ "requires": {
+ "unist-util-remove": "^2.0.0"
+ }
+ },
+ "mdast-util-definitions": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-definitions/-/mdast-util-definitions-4.0.0.tgz",
+ "integrity": "sha512-k8AJ6aNnUkB7IE+5azR9h81O5EQ/cTDXtWdMq9Kk5KcEW/8ritU5CeLg/9HhOC++nALHBlaogJ5jz0Ybk3kPMQ==",
+ "requires": {
+ "unist-util-visit": "^2.0.0"
+ }
+ },
+ "mdast-util-to-hast": {
+ "version": "10.0.1",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-10.0.1.tgz",
+ "integrity": "sha512-BW3LM9SEMnjf4HXXVApZMt8gLQWVNXc3jryK0nJu/rOXPOnlkUjmdkDlmxMirpbU9ILncGFIwLH/ubnWBbcdgA==",
+ "requires": {
+ "@types/mdast": "^3.0.0",
+ "@types/unist": "^2.0.0",
+ "mdast-util-definitions": "^4.0.0",
+ "mdurl": "^1.0.0",
+ "unist-builder": "^2.0.0",
+ "unist-util-generated": "^1.0.0",
+ "unist-util-position": "^3.0.0",
+ "unist-util-visit": "^2.0.0"
+ }
+ },
+ "mdast-util-to-string": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz",
+ "integrity": "sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w=="
+ },
+ "mdn-data": {
+ "version": "2.0.14",
+ "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.14.tgz",
+ "integrity": "sha512-dn6wd0uw5GsdswPFfsgMp5NSB0/aDe6fK94YJV/AJDYXL6HVLWBsxeq7js7Ad+mU2K9LAlwpk6kN2D5mwCPVow=="
+ },
+ "mdurl": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz",
+ "integrity": "sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g=="
+ },
+ "media-typer": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz",
+ "integrity": "sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ=="
+ },
+ "memfs": {
+ "version": "3.5.3",
+ "resolved": "https://registry.npmjs.org/memfs/-/memfs-3.5.3.tgz",
+ "integrity": "sha512-UERzLsxzllchadvbPs5aolHh65ISpKpM+ccLbOJ8/vvpBKmAWf+la7dXFy7Mr0ySHbdHrFv5kGFCUHHe6GFEmw==",
+ "requires": {
+ "fs-monkey": "^1.0.4"
+ }
+ },
+ "merge-descriptors": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/merge-descriptors/-/merge-descriptors-1.0.3.tgz",
+ "integrity": "sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ=="
+ },
+ "merge-stream": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz",
+ "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w=="
+ },
+ "merge2": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz",
+ "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg=="
+ },
+ "methods": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/methods/-/methods-1.1.2.tgz",
+ "integrity": "sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w=="
+ },
+ "micromatch": {
+ "version": "4.0.8",
+ "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
+ "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
+ "requires": {
+ "braces": "^3.0.3",
+ "picomatch": "^2.3.1"
+ }
+ },
+ "mime": {
+ "version": "1.6.0",
+ "resolved": "https://registry.npmjs.org/mime/-/mime-1.6.0.tgz",
+ "integrity": "sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg=="
+ },
+ "mime-db": {
+ "version": "1.33.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.33.0.tgz",
+ "integrity": "sha512-BHJ/EKruNIqJf/QahvxwQZXKygOQ256myeN/Ew+THcAa5q+PjyTTMMeNQC4DZw5AwfvelsUrA6B67NKMqXDbzQ=="
+ },
+ "mime-types": {
+ "version": "2.1.18",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.18.tgz",
+ "integrity": "sha512-lc/aahn+t4/SWV/qcmumYjymLsWfN3ELhpmVuUFjgsORruuZPVSwAQryq+HHGvO/SI2KVX26bx+En+zhM8g8hQ==",
+ "requires": {
+ "mime-db": "~1.33.0"
+ }
+ },
+ "mimic-fn": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz",
+ "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg=="
+ },
+ "mimic-response": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz",
+ "integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ=="
+ },
+ "mini-css-extract-plugin": {
+ "version": "2.7.6",
+ "resolved": "https://registry.npmjs.org/mini-css-extract-plugin/-/mini-css-extract-plugin-2.7.6.tgz",
+ "integrity": "sha512-Qk7HcgaPkGG6eD77mLvZS1nmxlao3j+9PkrT9Uc7HAE1id3F41+DdBRYRYkbyfNRGzm8/YWtzhw7nVPmwhqTQw==",
+ "requires": {
+ "schema-utils": "^4.0.0"
+ },
+ "dependencies": {
+ "ajv": {
+ "version": "8.12.0",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
+ "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
+ "requires": {
+ "fast-deep-equal": "^3.1.1",
+ "json-schema-traverse": "^1.0.0",
+ "require-from-string": "^2.0.2",
+ "uri-js": "^4.2.2"
+ }
+ },
+ "ajv-keywords": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz",
+ "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==",
+ "requires": {
+ "fast-deep-equal": "^3.1.3"
+ }
+ },
+ "json-schema-traverse": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
+ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
+ },
+ "schema-utils": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz",
+ "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==",
+ "requires": {
+ "@types/json-schema": "^7.0.9",
+ "ajv": "^8.9.0",
+ "ajv-formats": "^2.1.1",
+ "ajv-keywords": "^5.1.0"
+ }
+ }
+ }
+ },
+ "minimalistic-assert": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/minimalistic-assert/-/minimalistic-assert-1.0.1.tgz",
+ "integrity": "sha512-UtJcAD4yEaGtjPezWuO9wC4nwUnVH/8/Im3yEHQP4b67cXlD/Qr9hdITCU1xDbSEXg2XKNaP8jsReV7vQd00/A=="
+ },
+ "minimatch": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
+ "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "requires": {
+ "brace-expansion": "^1.1.7"
+ }
+ },
+ "minimist": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz",
+ "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA=="
+ },
+ "mrmime": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/mrmime/-/mrmime-1.0.1.tgz",
+ "integrity": "sha512-hzzEagAgDyoU1Q6yg5uI+AorQgdvMCur3FcKf7NhMKWsaYg+RnbTyHRa/9IlLF9rf455MOCtcqqrQQ83pPP7Uw=="
+ },
+ "ms": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz",
+ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w=="
+ },
+ "multicast-dns": {
+ "version": "7.2.5",
+ "resolved": "https://registry.npmjs.org/multicast-dns/-/multicast-dns-7.2.5.tgz",
+ "integrity": "sha512-2eznPJP8z2BFLX50tf0LuODrpINqP1RVIm/CObbTcBRITQgmC/TjcREF1NeTBzIcR5XO/ukWo+YHOjBbFwIupg==",
+ "requires": {
+ "dns-packet": "^5.2.2",
+ "thunky": "^1.0.2"
+ }
+ },
+ "nanoclone": {
+ "version": "0.2.1",
+ "resolved": "https://registry.npmjs.org/nanoclone/-/nanoclone-0.2.1.tgz",
+ "integrity": "sha512-wynEP02LmIbLpcYw8uBKpcfF6dmg2vcpKqxeH5UcoKEYdExslsdUA4ugFauuaeYdTB76ez6gJW8XAZ6CgkXYxA=="
+ },
+ "nanoid": {
+ "version": "3.3.7",
+ "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz",
+ "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g=="
+ },
+ "negotiator": {
+ "version": "0.6.3",
+ "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz",
+ "integrity": "sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg=="
+ },
+ "neo-async": {
+ "version": "2.6.2",
+ "resolved": "https://registry.npmjs.org/neo-async/-/neo-async-2.6.2.tgz",
+ "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw=="
+ },
+ "no-case": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/no-case/-/no-case-3.0.4.tgz",
+ "integrity": "sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==",
+ "requires": {
+ "lower-case": "^2.0.2",
+ "tslib": "^2.0.3"
+ }
+ },
+ "node-emoji": {
+ "version": "1.11.0",
+ "resolved": "https://registry.npmjs.org/node-emoji/-/node-emoji-1.11.0.tgz",
+ "integrity": "sha512-wo2DpQkQp7Sjm2A0cq+sN7EHKO6Sl0ctXeBdFZrL9T9+UywORbufTcTZxom8YqpLQt/FqNMUkOpkZrJVYSKD3A==",
+ "requires": {
+ "lodash": "^4.17.21"
+ }
+ },
+ "node-forge": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/node-forge/-/node-forge-1.3.1.tgz",
+ "integrity": "sha512-dPEtOeMvF9VMcYV/1Wb8CPoVAXtp6MKMlcbAt4ddqmGqUJ6fQZFXkNZNkNlfevtNkGtaSoXf/vNNNSvgrdXwtA=="
+ },
+ "node-releases": {
+ "version": "2.0.18",
+ "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.18.tgz",
+ "integrity": "sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g=="
+ },
+ "normalize-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz",
+ "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA=="
+ },
+ "normalize-range": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/normalize-range/-/normalize-range-0.1.2.tgz",
+ "integrity": "sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA=="
+ },
+ "normalize-url": {
+ "version": "6.1.0",
+ "resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz",
+ "integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A=="
+ },
+ "npm-run-path": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz",
+ "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==",
+ "requires": {
+ "path-key": "^3.0.0"
+ }
+ },
+ "nth-check": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz",
+ "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==",
+ "requires": {
+ "boolbase": "^1.0.0"
+ }
+ },
+ "object-assign": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz",
+ "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg=="
+ },
+ "object-inspect": {
+ "version": "1.13.2",
+ "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.2.tgz",
+ "integrity": "sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g=="
+ },
+ "object-keys": {
+ "version": "1.1.1",
+ "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
+ "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA=="
+ },
+ "object.assign": {
+ "version": "4.1.4",
+ "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.4.tgz",
+ "integrity": "sha512-1mxKf0e58bvyjSCtKYY4sRe9itRk3PJpquJOjeIkz885CczcI4IvJJDLPS72oowuSh+pBxUFROpX+TU++hxhZQ==",
+ "requires": {
+ "call-bind": "^1.0.2",
+ "define-properties": "^1.1.4",
+ "has-symbols": "^1.0.3",
+ "object-keys": "^1.1.1"
+ }
+ },
+ "obuf": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/obuf/-/obuf-1.1.2.tgz",
+ "integrity": "sha512-PX1wu0AmAdPqOL1mWhqmlOd8kOIZQwGZw6rh7uby9fTc5lhaOWFLX3I6R1hrF9k3zUY40e6igsLGkDXK92LJNg=="
+ },
+ "on-finished": {
+ "version": "2.4.1",
+ "resolved": "https://registry.npmjs.org/on-finished/-/on-finished-2.4.1.tgz",
+ "integrity": "sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==",
+ "requires": {
+ "ee-first": "1.1.1"
+ }
+ },
+ "on-headers": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz",
+ "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA=="
+ },
+ "once": {
+ "version": "1.4.0",
+ "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
+ "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
+ "requires": {
+ "wrappy": "1"
+ }
+ },
+ "onetime": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz",
+ "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==",
+ "requires": {
+ "mimic-fn": "^2.1.0"
+ }
+ },
+ "open": {
+ "version": "8.4.2",
+ "resolved": "https://registry.npmjs.org/open/-/open-8.4.2.tgz",
+ "integrity": "sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==",
+ "requires": {
+ "define-lazy-prop": "^2.0.0",
+ "is-docker": "^2.1.1",
+ "is-wsl": "^2.2.0"
+ }
+ },
+ "opener": {
+ "version": "1.5.2",
+ "resolved": "https://registry.npmjs.org/opener/-/opener-1.5.2.tgz",
+ "integrity": "sha512-ur5UIdyw5Y7yEj9wLzhqXiy6GZ3Mwx0yGI+5sMn2r0N0v3cKJvUmFH5yPP+WXh9e0xfyzyJX95D8l088DNFj7A=="
+ },
+ "p-cancelable": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-1.1.0.tgz",
+ "integrity": "sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw=="
+ },
+ "p-limit": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz",
+ "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==",
+ "requires": {
+ "p-try": "^2.0.0"
+ }
+ },
+ "p-locate": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-4.1.0.tgz",
+ "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==",
+ "requires": {
+ "p-limit": "^2.2.0"
+ }
+ },
+ "p-map": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/p-map/-/p-map-4.0.0.tgz",
+ "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==",
+ "requires": {
+ "aggregate-error": "^3.0.0"
+ }
+ },
+ "p-retry": {
+ "version": "4.6.2",
+ "resolved": "https://registry.npmjs.org/p-retry/-/p-retry-4.6.2.tgz",
+ "integrity": "sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==",
+ "requires": {
+ "@types/retry": "0.12.0",
+ "retry": "^0.13.1"
+ }
+ },
+ "p-try": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz",
+ "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ=="
+ },
+ "package-json": {
+ "version": "6.5.0",
+ "resolved": "https://registry.npmjs.org/package-json/-/package-json-6.5.0.tgz",
+ "integrity": "sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ==",
+ "requires": {
+ "got": "^9.6.0",
+ "registry-auth-token": "^4.0.0",
+ "registry-url": "^5.0.0",
+ "semver": "^6.2.0"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="
+ }
+ }
+ },
+ "param-case": {
+ "version": "3.0.4",
+ "resolved": "https://registry.npmjs.org/param-case/-/param-case-3.0.4.tgz",
+ "integrity": "sha512-RXlj7zCYokReqWpOPH9oYivUzLYZ5vAPIfEmCTNViosC78F8F0H9y7T7gG2M39ymgutxF5gcFEsyZQSph9Bp3A==",
+ "requires": {
+ "dot-case": "^3.0.4",
+ "tslib": "^2.0.3"
+ }
+ },
+ "parent-module": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz",
+ "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==",
+ "requires": {
+ "callsites": "^3.0.0"
+ }
+ },
+ "parse-entities": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz",
+ "integrity": "sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==",
+ "requires": {
+ "character-entities": "^1.0.0",
+ "character-entities-legacy": "^1.0.0",
+ "character-reference-invalid": "^1.0.0",
+ "is-alphanumerical": "^1.0.0",
+ "is-decimal": "^1.0.0",
+ "is-hexadecimal": "^1.0.0"
+ }
+ },
+ "parse-json": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz",
+ "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==",
+ "requires": {
+ "@babel/code-frame": "^7.0.0",
+ "error-ex": "^1.3.1",
+ "json-parse-even-better-errors": "^2.3.0",
+ "lines-and-columns": "^1.1.6"
+ }
+ },
+ "parse5": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/parse5/-/parse5-6.0.1.tgz",
+ "integrity": "sha512-Ofn/CTFzRGTTxwpNEs9PP93gXShHcTq255nzRYSKe8AkVpZY7e1fpmTfOyoIvjP5HG7Z2ZM7VS9PPhQGW2pOpw=="
+ },
+ "parseurl": {
+ "version": "1.3.3",
+ "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz",
+ "integrity": "sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ=="
+ },
+ "pascal-case": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/pascal-case/-/pascal-case-3.1.2.tgz",
+ "integrity": "sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==",
+ "requires": {
+ "no-case": "^3.0.4",
+ "tslib": "^2.0.3"
+ }
+ },
+ "path": {
+ "version": "0.12.7",
+ "resolved": "https://registry.npmjs.org/path/-/path-0.12.7.tgz",
+ "integrity": "sha512-aXXC6s+1w7otVF9UletFkFcDsJeO7lSZBPUQhtb5O0xJe8LtYhj/GxldoL09bBj9+ZmE2hNoHqQSFMN5fikh4Q==",
+ "requires": {
+ "process": "^0.11.1",
+ "util": "^0.10.3"
+ }
+ },
+ "path-exists": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz",
+ "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w=="
+ },
+ "path-is-absolute": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
+ "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg=="
+ },
+ "path-is-inside": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/path-is-inside/-/path-is-inside-1.0.2.tgz",
+ "integrity": "sha512-DUWJr3+ULp4zXmol/SZkFf3JGsS9/SIv+Y3Rt93/UjPpDpklB5f1er4O3POIbUuUJ3FXgqte2Q7SrU6zAqwk8w=="
+ },
+ "path-key": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
+ "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q=="
+ },
+ "path-parse": {
+ "version": "1.0.7",
+ "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz",
+ "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw=="
+ },
+ "path-to-regexp": {
+ "version": "1.9.0",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-1.9.0.tgz",
+ "integrity": "sha512-xIp7/apCFJuUHdDLWe8O1HIkb0kQrOMb/0u6FXQjemHn/ii5LrIzU6bdECnsiTF/GjZkMEKg1xdiZwNqDYlZ6g==",
+ "requires": {
+ "isarray": "0.0.1"
+ }
+ },
+ "path-type": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz",
+ "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw=="
+ },
+ "picocolors": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.0.tgz",
+ "integrity": "sha512-TQ92mBOW0l3LeMeyLV6mzy/kWr8lkd/hp3mTg7wYK7zJhuBStmGMBG0BdeDZS/dZx1IukaX6Bk11zcln25o1Aw=="
+ },
+ "picomatch": {
+ "version": "2.3.1",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
+ "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA=="
+ },
+ "pkg-dir": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/pkg-dir/-/pkg-dir-4.2.0.tgz",
+ "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==",
+ "requires": {
+ "find-up": "^4.0.0"
+ }
+ },
+ "pkg-up": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/pkg-up/-/pkg-up-3.1.0.tgz",
+ "integrity": "sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==",
+ "requires": {
+ "find-up": "^3.0.0"
+ },
+ "dependencies": {
+ "find-up": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz",
+ "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==",
+ "requires": {
+ "locate-path": "^3.0.0"
+ }
+ },
+ "locate-path": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz",
+ "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==",
+ "requires": {
+ "p-locate": "^3.0.0",
+ "path-exists": "^3.0.0"
+ }
+ },
+ "p-locate": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz",
+ "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==",
+ "requires": {
+ "p-limit": "^2.0.0"
+ }
+ },
+ "path-exists": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz",
+ "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ=="
+ }
+ }
+ },
+ "postcss": {
+ "version": "8.4.45",
+ "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.45.tgz",
+ "integrity": "sha512-7KTLTdzdZZYscUc65XmjFiB73vBhBfbPztCYdUNvlaso9PrzjzcmjqBPR0lNGkcVlcO4BjiO5rK/qNz+XAen1Q==",
+ "requires": {
+ "nanoid": "^3.3.7",
+ "picocolors": "^1.0.1",
+ "source-map-js": "^1.2.0"
+ }
+ },
+ "postcss-calc": {
+ "version": "8.2.4",
+ "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-8.2.4.tgz",
+ "integrity": "sha512-SmWMSJmB8MRnnULldx0lQIyhSNvuDl9HfrZkaqqE/WHAhToYsAvDq+yAsA/kIyINDszOp3Rh0GFoNuH5Ypsm3Q==",
+ "requires": {
+ "postcss-selector-parser": "^6.0.9",
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-colormin": {
+ "version": "5.3.1",
+ "resolved": "https://registry.npmjs.org/postcss-colormin/-/postcss-colormin-5.3.1.tgz",
+ "integrity": "sha512-UsWQG0AqTFQmpBegeLLc1+c3jIqBNB0zlDGRWR+dQ3pRKJL1oeMzyqmH3o2PIfn9MBdNrVPWhDbT769LxCTLJQ==",
+ "requires": {
+ "browserslist": "^4.21.4",
+ "caniuse-api": "^3.0.0",
+ "colord": "^2.9.1",
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-convert-values": {
+ "version": "5.1.3",
+ "resolved": "https://registry.npmjs.org/postcss-convert-values/-/postcss-convert-values-5.1.3.tgz",
+ "integrity": "sha512-82pC1xkJZtcJEfiLw6UXnXVXScgtBrjlO5CBmuDQc+dlb88ZYheFsjTn40+zBVi3DkfF7iezO0nJUPLcJK3pvA==",
+ "requires": {
+ "browserslist": "^4.21.4",
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-discard-comments": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/postcss-discard-comments/-/postcss-discard-comments-5.1.2.tgz",
+ "integrity": "sha512-+L8208OVbHVF2UQf1iDmRcbdjJkuBF6IS29yBDSiWUIzpYaAhtNl6JYnYm12FnkeCwQqF5LeklOu6rAqgfBZqQ==",
+ "requires": {}
+ },
+ "postcss-discard-duplicates": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-discard-duplicates/-/postcss-discard-duplicates-5.1.0.tgz",
+ "integrity": "sha512-zmX3IoSI2aoenxHV6C7plngHWWhUOV3sP1T8y2ifzxzbtnuhk1EdPwm0S1bIUNaJ2eNbWeGLEwzw8huPD67aQw==",
+ "requires": {}
+ },
+ "postcss-discard-empty": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/postcss-discard-empty/-/postcss-discard-empty-5.1.1.tgz",
+ "integrity": "sha512-zPz4WljiSuLWsI0ir4Mcnr4qQQ5e1Ukc3i7UfE2XcrwKK2LIPIqE5jxMRxO6GbI3cv//ztXDsXwEWT3BHOGh3A==",
+ "requires": {}
+ },
+ "postcss-discard-overridden": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-discard-overridden/-/postcss-discard-overridden-5.1.0.tgz",
+ "integrity": "sha512-21nOL7RqWR1kasIVdKs8HNqQJhFxLsyRfAnUDm4Fe4t4mCWL9OJiHvlHPjcd8zc5Myu89b/7wZDnOSjFgeWRtw==",
+ "requires": {}
+ },
+ "postcss-discard-unused": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-discard-unused/-/postcss-discard-unused-5.1.0.tgz",
+ "integrity": "sha512-KwLWymI9hbwXmJa0dkrzpRbSJEh0vVUd7r8t0yOGPcfKzyJJxFM8kLyC5Ev9avji6nY95pOp1W6HqIrfT+0VGw==",
+ "requires": {
+ "postcss-selector-parser": "^6.0.5"
+ }
+ },
+ "postcss-loader": {
+ "version": "7.3.2",
+ "resolved": "https://registry.npmjs.org/postcss-loader/-/postcss-loader-7.3.2.tgz",
+ "integrity": "sha512-c7qDlXErX6n0VT+LUsW+nwefVtTu3ORtVvK8EXuUIDcxo+b/euYqpuHlJAvePb0Af5e8uMjR/13e0lTuYifaig==",
+ "requires": {
+ "cosmiconfig": "^8.1.3",
+ "jiti": "^1.18.2",
+ "klona": "^2.0.6",
+ "semver": "^7.3.8"
+ },
+ "dependencies": {
+ "cosmiconfig": {
+ "version": "8.2.0",
+ "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-8.2.0.tgz",
+ "integrity": "sha512-3rTMnFJA1tCOPwRxtgF4wd7Ab2qvDbL8jX+3smjIbS4HlZBagTlpERbdN7iAbWlrfxE3M8c27kTwTawQ7st+OQ==",
+ "requires": {
+ "import-fresh": "^3.2.1",
+ "js-yaml": "^4.1.0",
+ "parse-json": "^5.0.0",
+ "path-type": "^4.0.0"
+ }
+ }
+ }
+ },
+ "postcss-merge-idents": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/postcss-merge-idents/-/postcss-merge-idents-5.1.1.tgz",
+ "integrity": "sha512-pCijL1TREiCoog5nQp7wUe+TUonA2tC2sQ54UGeMmryK3UFGIYKqDyjnqd6RcuI4znFn9hWSLNN8xKE/vWcUQw==",
+ "requires": {
+ "cssnano-utils": "^3.1.0",
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-merge-longhand": {
+ "version": "5.1.7",
+ "resolved": "https://registry.npmjs.org/postcss-merge-longhand/-/postcss-merge-longhand-5.1.7.tgz",
+ "integrity": "sha512-YCI9gZB+PLNskrK0BB3/2OzPnGhPkBEwmwhfYk1ilBHYVAZB7/tkTHFBAnCrvBBOmeYyMYw3DMjT55SyxMBzjQ==",
+ "requires": {
+ "postcss-value-parser": "^4.2.0",
+ "stylehacks": "^5.1.1"
+ }
+ },
+ "postcss-merge-rules": {
+ "version": "5.1.4",
+ "resolved": "https://registry.npmjs.org/postcss-merge-rules/-/postcss-merge-rules-5.1.4.tgz",
+ "integrity": "sha512-0R2IuYpgU93y9lhVbO/OylTtKMVcHb67zjWIfCiKR9rWL3GUk1677LAqD/BcHizukdZEjT8Ru3oHRoAYoJy44g==",
+ "requires": {
+ "browserslist": "^4.21.4",
+ "caniuse-api": "^3.0.0",
+ "cssnano-utils": "^3.1.0",
+ "postcss-selector-parser": "^6.0.5"
+ }
+ },
+ "postcss-minify-font-values": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-minify-font-values/-/postcss-minify-font-values-5.1.0.tgz",
+ "integrity": "sha512-el3mYTgx13ZAPPirSVsHqFzl+BBBDrXvbySvPGFnQcTI4iNslrPaFq4muTkLZmKlGk4gyFAYUBMH30+HurREyA==",
+ "requires": {
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-minify-gradients": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/postcss-minify-gradients/-/postcss-minify-gradients-5.1.1.tgz",
+ "integrity": "sha512-VGvXMTpCEo4qHTNSa9A0a3D+dxGFZCYwR6Jokk+/3oB6flu2/PnPXAh2x7x52EkY5xlIHLm+Le8tJxe/7TNhzw==",
+ "requires": {
+ "colord": "^2.9.1",
+ "cssnano-utils": "^3.1.0",
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-minify-params": {
+ "version": "5.1.4",
+ "resolved": "https://registry.npmjs.org/postcss-minify-params/-/postcss-minify-params-5.1.4.tgz",
+ "integrity": "sha512-+mePA3MgdmVmv6g+30rn57USjOGSAyuxUmkfiWpzalZ8aiBkdPYjXWtHuwJGm1v5Ojy0Z0LaSYhHaLJQB0P8Jw==",
+ "requires": {
+ "browserslist": "^4.21.4",
+ "cssnano-utils": "^3.1.0",
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-minify-selectors": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/postcss-minify-selectors/-/postcss-minify-selectors-5.2.1.tgz",
+ "integrity": "sha512-nPJu7OjZJTsVUmPdm2TcaiohIwxP+v8ha9NehQ2ye9szv4orirRU3SDdtUmKH+10nzn0bAyOXZ0UEr7OpvLehg==",
+ "requires": {
+ "postcss-selector-parser": "^6.0.5"
+ }
+ },
+ "postcss-modules-extract-imports": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/postcss-modules-extract-imports/-/postcss-modules-extract-imports-3.0.0.tgz",
+ "integrity": "sha512-bdHleFnP3kZ4NYDhuGlVK+CMrQ/pqUm8bx/oGL93K6gVwiclvX5x0n76fYMKuIGKzlABOy13zsvqjb0f92TEXw==",
+ "requires": {}
+ },
+ "postcss-modules-local-by-default": {
+ "version": "4.0.3",
+ "resolved": "https://registry.npmjs.org/postcss-modules-local-by-default/-/postcss-modules-local-by-default-4.0.3.tgz",
+ "integrity": "sha512-2/u2zraspoACtrbFRnTijMiQtb4GW4BvatjaG/bCjYQo8kLTdevCUlwuBHx2sCnSyrI3x3qj4ZK1j5LQBgzmwA==",
+ "requires": {
+ "icss-utils": "^5.0.0",
+ "postcss-selector-parser": "^6.0.2",
+ "postcss-value-parser": "^4.1.0"
+ }
+ },
+ "postcss-modules-scope": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-3.0.0.tgz",
+ "integrity": "sha512-hncihwFA2yPath8oZ15PZqvWGkWf+XUfQgUGamS4LqoP1anQLOsOJw0vr7J7IwLpoY9fatA2qiGUGmuZL0Iqlg==",
+ "requires": {
+ "postcss-selector-parser": "^6.0.4"
+ }
+ },
+ "postcss-modules-values": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/postcss-modules-values/-/postcss-modules-values-4.0.0.tgz",
+ "integrity": "sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==",
+ "requires": {
+ "icss-utils": "^5.0.0"
+ }
+ },
+ "postcss-normalize-charset": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-charset/-/postcss-normalize-charset-5.1.0.tgz",
+ "integrity": "sha512-mSgUJ+pd/ldRGVx26p2wz9dNZ7ji6Pn8VWBajMXFf8jk7vUoSrZ2lt/wZR7DtlZYKesmZI680qjr2CeFF2fbUg==",
+ "requires": {}
+ },
+ "postcss-normalize-display-values": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-display-values/-/postcss-normalize-display-values-5.1.0.tgz",
+ "integrity": "sha512-WP4KIM4o2dazQXWmFaqMmcvsKmhdINFblgSeRgn8BJ6vxaMyaJkwAzpPpuvSIoG/rmX3M+IrRZEz2H0glrQNEA==",
+ "requires": {
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-normalize-positions": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-positions/-/postcss-normalize-positions-5.1.1.tgz",
+ "integrity": "sha512-6UpCb0G4eofTCQLFVuI3EVNZzBNPiIKcA1AKVka+31fTVySphr3VUgAIULBhxZkKgwLImhzMR2Bw1ORK+37INg==",
+ "requires": {
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-normalize-repeat-style": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-repeat-style/-/postcss-normalize-repeat-style-5.1.1.tgz",
+ "integrity": "sha512-mFpLspGWkQtBcWIRFLmewo8aC3ImN2i/J3v8YCFUwDnPu3Xz4rLohDO26lGjwNsQxB3YF0KKRwspGzE2JEuS0g==",
+ "requires": {
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-normalize-string": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-string/-/postcss-normalize-string-5.1.0.tgz",
+ "integrity": "sha512-oYiIJOf4T9T1N4i+abeIc7Vgm/xPCGih4bZz5Nm0/ARVJ7K6xrDlLwvwqOydvyL3RHNf8qZk6vo3aatiw/go3w==",
+ "requires": {
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-normalize-timing-functions": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-timing-functions/-/postcss-normalize-timing-functions-5.1.0.tgz",
+ "integrity": "sha512-DOEkzJ4SAXv5xkHl0Wa9cZLF3WCBhF3o1SKVxKQAa+0pYKlueTpCgvkFAHfk+Y64ezX9+nITGrDZeVGgITJXjg==",
+ "requires": {
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-normalize-unicode": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-unicode/-/postcss-normalize-unicode-5.1.1.tgz",
+ "integrity": "sha512-qnCL5jzkNUmKVhZoENp1mJiGNPcsJCs1aaRmURmeJGES23Z/ajaln+EPTD+rBeNkSryI+2WTdW+lwcVdOikrpA==",
+ "requires": {
+ "browserslist": "^4.21.4",
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-normalize-url": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-url/-/postcss-normalize-url-5.1.0.tgz",
+ "integrity": "sha512-5upGeDO+PVthOxSmds43ZeMeZfKH+/DKgGRD7TElkkyS46JXAUhMzIKiCa7BabPeIy3AQcTkXwVVN7DbqsiCew==",
+ "requires": {
+ "normalize-url": "^6.0.1",
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-normalize-whitespace": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/postcss-normalize-whitespace/-/postcss-normalize-whitespace-5.1.1.tgz",
+ "integrity": "sha512-83ZJ4t3NUDETIHTa3uEg6asWjSBYL5EdkVB0sDncx9ERzOKBVJIUeDO9RyA9Zwtig8El1d79HBp0JEi8wvGQnA==",
+ "requires": {
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-ordered-values": {
+ "version": "5.1.3",
+ "resolved": "https://registry.npmjs.org/postcss-ordered-values/-/postcss-ordered-values-5.1.3.tgz",
+ "integrity": "sha512-9UO79VUhPwEkzbb3RNpqqghc6lcYej1aveQteWY+4POIwlqkYE21HKWaLDF6lWNuqCobEAyTovVhtI32Rbv2RQ==",
+ "requires": {
+ "cssnano-utils": "^3.1.0",
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-reduce-idents": {
+ "version": "5.2.0",
+ "resolved": "https://registry.npmjs.org/postcss-reduce-idents/-/postcss-reduce-idents-5.2.0.tgz",
+ "integrity": "sha512-BTrLjICoSB6gxbc58D5mdBK8OhXRDqud/zodYfdSi52qvDHdMwk+9kB9xsM8yJThH/sZU5A6QVSmMmaN001gIg==",
+ "requires": {
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-reduce-initial": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/postcss-reduce-initial/-/postcss-reduce-initial-5.1.2.tgz",
+ "integrity": "sha512-dE/y2XRaqAi6OvjzD22pjTUQ8eOfc6m/natGHgKFBK9DxFmIm69YmaRVQrGgFlEfc1HePIurY0TmDeROK05rIg==",
+ "requires": {
+ "browserslist": "^4.21.4",
+ "caniuse-api": "^3.0.0"
+ }
+ },
+ "postcss-reduce-transforms": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-reduce-transforms/-/postcss-reduce-transforms-5.1.0.tgz",
+ "integrity": "sha512-2fbdbmgir5AvpW9RLtdONx1QoYG2/EtqpNQbFASDlixBbAYuTcJ0dECwlqNqH7VbaUnEnh8SrxOe2sRIn24XyQ==",
+ "requires": {
+ "postcss-value-parser": "^4.2.0"
+ }
+ },
+ "postcss-selector-parser": {
+ "version": "6.0.13",
+ "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.0.13.tgz",
+ "integrity": "sha512-EaV1Gl4mUEV4ddhDnv/xtj7sxwrwxdetHdWUGnT4VJQf+4d05v6lHYZr8N573k5Z0BViss7BDhfWtKS3+sfAqQ==",
+ "requires": {
+ "cssesc": "^3.0.0",
+ "util-deprecate": "^1.0.2"
+ }
+ },
+ "postcss-sort-media-queries": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/postcss-sort-media-queries/-/postcss-sort-media-queries-4.4.1.tgz",
+ "integrity": "sha512-QDESFzDDGKgpiIh4GYXsSy6sek2yAwQx1JASl5AxBtU1Lq2JfKBljIPNdil989NcSKRQX1ToiaKphImtBuhXWw==",
+ "requires": {
+ "sort-css-media-queries": "2.1.0"
+ }
+ },
+ "postcss-svgo": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-svgo/-/postcss-svgo-5.1.0.tgz",
+ "integrity": "sha512-D75KsH1zm5ZrHyxPakAxJWtkyXew5qwS70v56exwvw542d9CRtTo78K0WeFxZB4G7JXKKMbEZtZayTGdIky/eA==",
+ "requires": {
+ "postcss-value-parser": "^4.2.0",
+ "svgo": "^2.7.0"
+ }
+ },
+ "postcss-unique-selectors": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/postcss-unique-selectors/-/postcss-unique-selectors-5.1.1.tgz",
+ "integrity": "sha512-5JiODlELrz8L2HwxfPnhOWZYWDxVHWL83ufOv84NrcgipI7TaeRsatAhK4Tr2/ZiYldpK/wBvw5BD3qfaK96GA==",
+ "requires": {
+ "postcss-selector-parser": "^6.0.5"
+ }
+ },
+ "postcss-value-parser": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz",
+ "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ=="
+ },
+ "postcss-zindex": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/postcss-zindex/-/postcss-zindex-5.1.0.tgz",
+ "integrity": "sha512-fgFMf0OtVSBR1va1JNHYgMxYk73yhn/qb4uQDq1DLGYolz8gHCyr/sesEuGUaYs58E3ZJRcpoGuPVoB7Meiq9A==",
+ "requires": {}
+ },
+ "prepend-http": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/prepend-http/-/prepend-http-2.0.0.tgz",
+ "integrity": "sha512-ravE6m9Atw9Z/jjttRUZ+clIXogdghyZAuWJ3qEzjT+jI/dL1ifAqhZeC5VHzQp1MSt1+jxKkFNemj/iO7tVUA=="
+ },
+ "pretty-error": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/pretty-error/-/pretty-error-4.0.0.tgz",
+ "integrity": "sha512-AoJ5YMAcXKYxKhuJGdcvse+Voc6v1RgnsR3nWcYU7q4t6z0Q6T86sv5Zq8VIRbOWWFpvdGE83LtdSMNd+6Y0xw==",
+ "requires": {
+ "lodash": "^4.17.20",
+ "renderkid": "^3.0.0"
+ }
+ },
+ "pretty-time": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/pretty-time/-/pretty-time-1.1.0.tgz",
+ "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA=="
+ },
+ "process": {
+ "version": "0.11.10",
+ "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz",
+ "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A=="
+ },
+ "process-nextick-args": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz",
+ "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag=="
+ },
+ "prompts": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/prompts/-/prompts-2.4.2.tgz",
+ "integrity": "sha512-NxNv/kLguCA7p3jE8oL2aEBsrJWgAakBpgmgK6lpPWV+WuOmY6r2/zbAVnP+T8bQlA0nzHXSJSJW0Hq7ylaD2Q==",
+ "requires": {
+ "kleur": "^3.0.3",
+ "sisteransi": "^1.0.5"
+ }
+ },
+ "prop-types": {
+ "version": "15.8.1",
+ "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz",
+ "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==",
+ "requires": {
+ "loose-envify": "^1.4.0",
+ "object-assign": "^4.1.1",
+ "react-is": "^16.13.1"
+ }
+ },
+ "property-expr": {
+ "version": "2.0.5",
+ "resolved": "https://registry.npmjs.org/property-expr/-/property-expr-2.0.5.tgz",
+ "integrity": "sha512-IJUkICM5dP5znhCckHSv30Q4b5/JA5enCtkRHYaOVOAocnH/1BQEYTC5NMfT3AVl/iXKdr3aqQbQn9DxyWknwA=="
+ },
+ "property-information": {
+ "version": "5.6.0",
+ "resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz",
+ "integrity": "sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==",
+ "requires": {
+ "xtend": "^4.0.0"
+ }
+ },
+ "proxy-addr": {
+ "version": "2.0.7",
+ "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz",
+ "integrity": "sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==",
+ "requires": {
+ "forwarded": "0.2.0",
+ "ipaddr.js": "1.9.1"
+ },
+ "dependencies": {
+ "ipaddr.js": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz",
+ "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g=="
+ }
+ }
+ },
+ "pump": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz",
+ "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==",
+ "requires": {
+ "end-of-stream": "^1.1.0",
+ "once": "^1.3.1"
+ }
+ },
+ "punycode": {
+ "version": "1.4.1",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz",
+ "integrity": "sha512-jmYNElW7yvO7TV33CjSmvSiE2yco3bV2czu/OzDKdMNVZQWfxCblURLhf+47syQRBntjfLdd/H0egrzIG+oaFQ=="
+ },
+ "pupa": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.1.1.tgz",
+ "integrity": "sha512-l1jNAspIBSFqbT+y+5FosojNpVpF94nlI+wDUpqP9enwOTfHx9f0gh5nB96vl+6yTpsJsypeNrwfzPrKuHB41A==",
+ "requires": {
+ "escape-goat": "^2.0.0"
+ }
+ },
+ "qs": {
+ "version": "6.11.0",
+ "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.0.tgz",
+ "integrity": "sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q==",
+ "requires": {
+ "side-channel": "^1.0.4"
+ }
+ },
+ "queue": {
+ "version": "6.0.2",
+ "resolved": "https://registry.npmjs.org/queue/-/queue-6.0.2.tgz",
+ "integrity": "sha512-iHZWu+q3IdFZFX36ro/lKBkSvfkztY5Y7HMiPlOUjhupPcG2JMfst2KKEpu5XndviX/3UhFbRngUPNKtgvtZiA==",
+ "requires": {
+ "inherits": "~2.0.3"
+ }
+ },
+ "queue-microtask": {
+ "version": "1.2.3",
+ "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
+ "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A=="
+ },
+ "randombytes": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz",
+ "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==",
+ "requires": {
+ "safe-buffer": "^5.1.0"
+ }
+ },
+ "range-parser": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.0.tgz",
+ "integrity": "sha512-kA5WQoNVo4t9lNx2kQNFCxKeBl5IbbSNBl1M/tLkw9WCn+hxNBAW5Qh8gdhs63CJnhjJ2zQWFoqPJP2sK1AV5A=="
+ },
+ "raw-body": {
+ "version": "2.5.2",
+ "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz",
+ "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==",
+ "requires": {
+ "bytes": "3.1.2",
+ "http-errors": "2.0.0",
+ "iconv-lite": "0.4.24",
+ "unpipe": "1.0.0"
+ },
+ "dependencies": {
+ "bytes": {
+ "version": "3.1.2",
+ "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz",
+ "integrity": "sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg=="
+ }
+ }
+ },
+ "rc": {
+ "version": "1.2.8",
+ "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz",
+ "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==",
+ "requires": {
+ "deep-extend": "^0.6.0",
+ "ini": "~1.3.0",
+ "minimist": "^1.2.0",
+ "strip-json-comments": "~2.0.1"
+ }
+ },
+ "react": {
+ "version": "17.0.2",
+ "resolved": "https://registry.npmjs.org/react/-/react-17.0.2.tgz",
+ "integrity": "sha512-gnhPt75i/dq/z3/6q/0asP78D0u592D5L1pd7M8P+dck6Fu/jJeL6iVVK23fptSUZj8Vjf++7wXA8UNclGQcbA==",
+ "peer": true,
+ "requires": {
+ "loose-envify": "^1.1.0",
+ "object-assign": "^4.1.1"
+ }
+ },
+ "react-dev-utils": {
+ "version": "12.0.1",
+ "resolved": "https://registry.npmjs.org/react-dev-utils/-/react-dev-utils-12.0.1.tgz",
+ "integrity": "sha512-84Ivxmr17KjUupyqzFode6xKhjwuEJDROWKJy/BthkL7Wn6NJ8h4WE6k/exAv6ImS+0oZLRRW5j/aINMHyeGeQ==",
+ "requires": {
+ "@babel/code-frame": "^7.16.0",
+ "address": "^1.1.2",
+ "browserslist": "^4.18.1",
+ "chalk": "^4.1.2",
+ "cross-spawn": "^7.0.3",
+ "detect-port-alt": "^1.1.6",
+ "escape-string-regexp": "^4.0.0",
+ "filesize": "^8.0.6",
+ "find-up": "^5.0.0",
+ "fork-ts-checker-webpack-plugin": "^6.5.0",
+ "global-modules": "^2.0.0",
+ "globby": "^11.0.4",
+ "gzip-size": "^6.0.0",
+ "immer": "^9.0.7",
+ "is-root": "^2.1.0",
+ "loader-utils": "^3.2.0",
+ "open": "^8.4.0",
+ "pkg-up": "^3.1.0",
+ "prompts": "^2.4.2",
+ "react-error-overlay": "^6.0.11",
+ "recursive-readdir": "^2.2.2",
+ "shell-quote": "^1.7.3",
+ "strip-ansi": "^6.0.1",
+ "text-table": "^0.2.0"
+ },
+ "dependencies": {
+ "find-up": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz",
+ "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==",
+ "requires": {
+ "locate-path": "^6.0.0",
+ "path-exists": "^4.0.0"
+ }
+ },
+ "loader-utils": {
+ "version": "3.2.1",
+ "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-3.2.1.tgz",
+ "integrity": "sha512-ZvFw1KWS3GVyYBYb7qkmRM/WwL2TQQBxgCK62rlvm4WpVQ23Nb4tYjApUlfjrEGvOs7KHEsmyUn75OHZrJMWPw=="
+ },
+ "locate-path": {
+ "version": "6.0.0",
+ "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz",
+ "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==",
+ "requires": {
+ "p-locate": "^5.0.0"
+ }
+ },
+ "p-limit": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz",
+ "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==",
+ "requires": {
+ "yocto-queue": "^0.1.0"
+ }
+ },
+ "p-locate": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz",
+ "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==",
+ "requires": {
+ "p-limit": "^3.0.2"
+ }
+ }
+ }
+ },
+ "react-dom": {
+ "version": "17.0.2",
+ "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-17.0.2.tgz",
+ "integrity": "sha512-s4h96KtLDUQlsENhMn1ar8t2bEa+q/YAtj8pPPdIjPDGBDIVNsrD9aXNWqspUe6AzKCIG0C1HZZLqLV7qpOBGA==",
+ "peer": true,
+ "requires": {
+ "loose-envify": "^1.1.0",
+ "object-assign": "^4.1.1",
+ "scheduler": "^0.20.2"
+ }
+ },
+ "react-error-overlay": {
+ "version": "6.0.11",
+ "resolved": "https://registry.npmjs.org/react-error-overlay/-/react-error-overlay-6.0.11.tgz",
+ "integrity": "sha512-/6UZ2qgEyH2aqzYZgQPxEnz33NJ2gNsnHA2o5+o4wW9bLM/JYQitNP9xPhsXwC08hMMovfGe/8retsdDsczPRg=="
+ },
+ "react-fast-compare": {
+ "version": "3.2.2",
+ "resolved": "https://registry.npmjs.org/react-fast-compare/-/react-fast-compare-3.2.2.tgz",
+ "integrity": "sha512-nsO+KSNgo1SbJqJEYRE9ERzo7YtYbou/OqjSQKxV7jcKox7+usiUVZOAC+XnDOABXggQTno0Y1CpVnuWEc1boQ=="
+ },
+ "react-helmet-async": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/react-helmet-async/-/react-helmet-async-1.3.0.tgz",
+ "integrity": "sha512-9jZ57/dAn9t3q6hneQS0wukqC2ENOBgMNVEhb/ZG9ZSxUetzVIw4iAmEU38IaVg3QGYauQPhSeUTuIUtFglWpg==",
+ "requires": {
+ "@babel/runtime": "^7.12.5",
+ "invariant": "^2.2.4",
+ "prop-types": "^15.7.2",
+ "react-fast-compare": "^3.2.0",
+ "shallowequal": "^1.1.0"
+ }
+ },
+ "react-is": {
+ "version": "16.13.1",
+ "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz",
+ "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ=="
+ },
+ "react-loadable": {
+ "version": "npm:@docusaurus/react-loadable@5.5.2",
+ "resolved": "https://registry.npmjs.org/@docusaurus/react-loadable/-/react-loadable-5.5.2.tgz",
+ "integrity": "sha512-A3dYjdBGuy0IGT+wyLIGIKLRE+sAk1iNk0f1HjNDysO7u8lhL4N3VEm+FAubmJbAztn94F7MxBTPmnixbiyFdQ==",
+ "requires": {
+ "@types/react": "*",
+ "prop-types": "^15.6.2"
+ }
+ },
+ "react-loadable-ssr-addon-v5-slorber": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/react-loadable-ssr-addon-v5-slorber/-/react-loadable-ssr-addon-v5-slorber-1.0.1.tgz",
+ "integrity": "sha512-lq3Lyw1lGku8zUEJPDxsNm1AfYHBrO9Y1+olAYwpUJ2IGFBskM0DMKok97A6LWUpHm+o7IvQBOWu9MLenp9Z+A==",
+ "requires": {
+ "@babel/runtime": "^7.10.3"
+ }
+ },
+ "react-router": {
+ "version": "5.3.4",
+ "resolved": "https://registry.npmjs.org/react-router/-/react-router-5.3.4.tgz",
+ "integrity": "sha512-Ys9K+ppnJah3QuaRiLxk+jDWOR1MekYQrlytiXxC1RyfbdsZkS5pvKAzCCr031xHixZwpnsYNT5xysdFHQaYsA==",
+ "requires": {
+ "@babel/runtime": "^7.12.13",
+ "history": "^4.9.0",
+ "hoist-non-react-statics": "^3.1.0",
+ "loose-envify": "^1.3.1",
+ "path-to-regexp": "^1.7.0",
+ "prop-types": "^15.6.2",
+ "react-is": "^16.6.0",
+ "tiny-invariant": "^1.0.2",
+ "tiny-warning": "^1.0.0"
+ }
+ },
+ "react-router-config": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/react-router-config/-/react-router-config-5.1.1.tgz",
+ "integrity": "sha512-DuanZjaD8mQp1ppHjgnnUnyOlqYXZVjnov/JzFhjLEwd3Z4dYjMSnqrEzzGThH47vpCOqPPwJM2FtthLeJ8Pbg==",
+ "requires": {
+ "@babel/runtime": "^7.1.2"
+ }
+ },
+ "react-router-dom": {
+ "version": "5.3.4",
+ "resolved": "https://registry.npmjs.org/react-router-dom/-/react-router-dom-5.3.4.tgz",
+ "integrity": "sha512-m4EqFMHv/Ih4kpcBCONHbkT68KoAeHN4p3lAGoNryfHi0dMy0kCzEZakiKRsvg5wHZ/JLrLW8o8KomWiz/qbYQ==",
+ "requires": {
+ "@babel/runtime": "^7.12.13",
+ "history": "^4.9.0",
+ "loose-envify": "^1.3.1",
+ "prop-types": "^15.6.2",
+ "react-router": "5.3.4",
+ "tiny-invariant": "^1.0.2",
+ "tiny-warning": "^1.0.0"
+ }
+ },
+ "readable-stream": {
+ "version": "3.6.2",
+ "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz",
+ "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==",
+ "requires": {
+ "inherits": "^2.0.3",
+ "string_decoder": "^1.1.1",
+ "util-deprecate": "^1.0.1"
+ }
+ },
+ "readdir-enhanced": {
+ "version": "1.5.2",
+ "resolved": "https://registry.npmjs.org/readdir-enhanced/-/readdir-enhanced-1.5.2.tgz",
+ "integrity": "sha512-oncAoS9LLjy/+DeZfSAdZBI/iFJGcPCOp44RPFI6FIMHuxt5CC5P0cUZ9mET+EZB9ONhcEvAids/lVRkj0sTHw==",
+ "requires": {
+ "call-me-maybe": "^1.0.1",
+ "es6-promise": "^4.1.0",
+ "glob-to-regexp": "^0.3.0"
+ }
+ },
+ "readdirp": {
+ "version": "3.6.0",
+ "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz",
+ "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==",
+ "requires": {
+ "picomatch": "^2.2.1"
+ }
+ },
+ "rechoir": {
+ "version": "0.6.2",
+ "resolved": "https://registry.npmjs.org/rechoir/-/rechoir-0.6.2.tgz",
+ "integrity": "sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==",
+ "requires": {
+ "resolve": "^1.1.6"
+ }
+ },
+ "recursive-readdir": {
+ "version": "2.2.3",
+ "resolved": "https://registry.npmjs.org/recursive-readdir/-/recursive-readdir-2.2.3.tgz",
+ "integrity": "sha512-8HrF5ZsXk5FAH9dgsx3BlUer73nIhuj+9OrQwEbLTPOBzGkL1lsFCR01am+v+0m2Cmbs1nP12hLDl5FA7EszKA==",
+ "requires": {
+ "minimatch": "^3.0.5"
+ }
+ },
+ "regenerate": {
+ "version": "1.4.2",
+ "resolved": "https://registry.npmjs.org/regenerate/-/regenerate-1.4.2.tgz",
+ "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A=="
+ },
+ "regenerate-unicode-properties": {
+ "version": "10.1.0",
+ "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-10.1.0.tgz",
+ "integrity": "sha512-d1VudCLoIGitcU/hEg2QqvyGZQmdC0Lf8BqdOMXGFSvJP4bNV1+XqbPQeHHLD51Jh4QJJ225dlIFvY4Ly6MXmQ==",
+ "requires": {
+ "regenerate": "^1.4.2"
+ }
+ },
+ "regenerator-runtime": {
+ "version": "0.13.11",
+ "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz",
+ "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg=="
+ },
+ "regenerator-transform": {
+ "version": "0.15.1",
+ "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.15.1.tgz",
+ "integrity": "sha512-knzmNAcuyxV+gQCufkYcvOqX/qIIfHLv0u5x79kRxuGojfYVky1f15TzZEu2Avte8QGepvUNTnLskf8E6X6Vyg==",
+ "requires": {
+ "@babel/runtime": "^7.8.4"
+ }
+ },
+ "regexpu-core": {
+ "version": "5.3.2",
+ "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-5.3.2.tgz",
+ "integrity": "sha512-RAM5FlZz+Lhmo7db9L298p2vHP5ZywrVXmVXpmAD9GuL5MPH6t9ROw1iA/wfHkQ76Qe7AaPF0nGuim96/IrQMQ==",
+ "requires": {
+ "@babel/regjsgen": "^0.8.0",
+ "regenerate": "^1.4.2",
+ "regenerate-unicode-properties": "^10.1.0",
+ "regjsparser": "^0.9.1",
+ "unicode-match-property-ecmascript": "^2.0.0",
+ "unicode-match-property-value-ecmascript": "^2.1.0"
+ }
+ },
+ "registry-auth-token": {
+ "version": "4.2.2",
+ "resolved": "https://registry.npmjs.org/registry-auth-token/-/registry-auth-token-4.2.2.tgz",
+ "integrity": "sha512-PC5ZysNb42zpFME6D/XlIgtNGdTl8bBOCw90xQLVMpzuuubJKYDWFAEuUNc+Cn8Z8724tg2SDhDRrkVEsqfDMg==",
+ "requires": {
+ "rc": "1.2.8"
+ }
+ },
+ "registry-url": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/registry-url/-/registry-url-5.1.0.tgz",
+ "integrity": "sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw==",
+ "requires": {
+ "rc": "^1.2.8"
+ }
+ },
+ "regjsparser": {
+ "version": "0.9.1",
+ "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.9.1.tgz",
+ "integrity": "sha512-dQUtn90WanSNl+7mQKcXAgZxvUe7Z0SqXlgzv0za4LwiUhyzBC58yQO3liFoUgu8GiJVInAhJjkj1N0EtQ5nkQ==",
+ "requires": {
+ "jsesc": "~0.5.0"
+ },
+ "dependencies": {
+ "jsesc": {
+ "version": "0.5.0",
+ "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-0.5.0.tgz",
+ "integrity": "sha512-uZz5UnB7u4T9LvwmFqXii7pZSouaRPorGs5who1Ip7VO0wxanFvBL7GkM6dTHlgX+jhBApRetaWpnDabOeTcnA=="
+ }
+ }
+ },
+ "relateurl": {
+ "version": "0.2.7",
+ "resolved": "https://registry.npmjs.org/relateurl/-/relateurl-0.2.7.tgz",
+ "integrity": "sha512-G08Dxvm4iDN3MLM0EsP62EDV9IuhXPR6blNz6Utcp7zyV3tr4HVNINt6MpaRWbxoOHT3Q7YN2P+jaHX8vUbgog=="
+ },
+ "remark-emoji": {
+ "version": "2.2.0",
+ "resolved": "https://registry.npmjs.org/remark-emoji/-/remark-emoji-2.2.0.tgz",
+ "integrity": "sha512-P3cj9s5ggsUvWw5fS2uzCHJMGuXYRb0NnZqYlNecewXt8QBU9n5vW3DUUKOhepS8F9CwdMx9B8a3i7pqFWAI5w==",
+ "requires": {
+ "emoticon": "^3.2.0",
+ "node-emoji": "^1.10.0",
+ "unist-util-visit": "^2.0.3"
+ }
+ },
+ "remark-footnotes": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/remark-footnotes/-/remark-footnotes-2.0.0.tgz",
+ "integrity": "sha512-3Clt8ZMH75Ayjp9q4CorNeyjwIxHFcTkaektplKGl2A1jNGEUey8cKL0ZC5vJwfcD5GFGsNLImLG/NGzWIzoMQ=="
+ },
+ "remark-mdx": {
+ "version": "1.6.22",
+ "resolved": "https://registry.npmjs.org/remark-mdx/-/remark-mdx-1.6.22.tgz",
+ "integrity": "sha512-phMHBJgeV76uyFkH4rvzCftLfKCr2RZuF+/gmVcaKrpsihyzmhXjA0BEMDaPTXG5y8qZOKPVo83NAOX01LPnOQ==",
+ "requires": {
+ "@babel/core": "7.12.9",
+ "@babel/helper-plugin-utils": "7.10.4",
+ "@babel/plugin-proposal-object-rest-spread": "7.12.1",
+ "@babel/plugin-syntax-jsx": "7.12.1",
+ "@mdx-js/util": "1.6.22",
+ "is-alphabetical": "1.0.4",
+ "remark-parse": "8.0.3",
+ "unified": "9.2.0"
+ },
+ "dependencies": {
+ "@babel/core": {
+ "version": "7.12.9",
+ "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.12.9.tgz",
+ "integrity": "sha512-gTXYh3M5wb7FRXQy+FErKFAv90BnlOuNn1QkCK2lREoPAjrQCO49+HVSrFoe5uakFAF5eenS75KbO2vQiLrTMQ==",
+ "requires": {
+ "@babel/code-frame": "^7.10.4",
+ "@babel/generator": "^7.12.5",
+ "@babel/helper-module-transforms": "^7.12.1",
+ "@babel/helpers": "^7.12.5",
+ "@babel/parser": "^7.12.7",
+ "@babel/template": "^7.12.7",
+ "@babel/traverse": "^7.12.9",
+ "@babel/types": "^7.12.7",
+ "convert-source-map": "^1.7.0",
+ "debug": "^4.1.0",
+ "gensync": "^1.0.0-beta.1",
+ "json5": "^2.1.2",
+ "lodash": "^4.17.19",
+ "resolve": "^1.3.2",
+ "semver": "^5.4.1",
+ "source-map": "^0.5.0"
+ }
+ },
+ "@babel/helper-plugin-utils": {
+ "version": "7.10.4",
+ "resolved": "https://registry.npmjs.org/@babel/helper-plugin-utils/-/helper-plugin-utils-7.10.4.tgz",
+ "integrity": "sha512-O4KCvQA6lLiMU9l2eawBPMf1xPP8xPfB3iEQw150hOVTqj/rfXz0ThTb4HEzqQfs2Bmo5Ay8BzxfzVtBrr9dVg=="
+ },
+ "@babel/plugin-syntax-jsx": {
+ "version": "7.12.1",
+ "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-jsx/-/plugin-syntax-jsx-7.12.1.tgz",
+ "integrity": "sha512-1yRi7yAtB0ETgxdY9ti/p2TivUxJkTdhu/ZbF9MshVGqOx1TdB3b7xCXs49Fupgg50N45KcAsRP/ZqWjs9SRjg==",
+ "requires": {
+ "@babel/helper-plugin-utils": "^7.10.4"
+ }
+ },
+ "semver": {
+ "version": "5.7.2",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz",
+ "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g=="
+ },
+ "source-map": {
+ "version": "0.5.7",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz",
+ "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ=="
+ },
+ "unified": {
+ "version": "9.2.0",
+ "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.0.tgz",
+ "integrity": "sha512-vx2Z0vY+a3YoTj8+pttM3tiJHCwY5UFbYdiWrwBEbHmK8pvsPj2rtAX2BFfgXen8T39CJWblWRDT4L5WGXtDdg==",
+ "requires": {
+ "bail": "^1.0.0",
+ "extend": "^3.0.0",
+ "is-buffer": "^2.0.0",
+ "is-plain-obj": "^2.0.0",
+ "trough": "^1.0.0",
+ "vfile": "^4.0.0"
+ }
+ }
+ }
+ },
+ "remark-parse": {
+ "version": "8.0.3",
+ "resolved": "https://registry.npmjs.org/remark-parse/-/remark-parse-8.0.3.tgz",
+ "integrity": "sha512-E1K9+QLGgggHxCQtLt++uXltxEprmWzNfg+MxpfHsZlrddKzZ/hZyWHDbK3/Ap8HJQqYJRXP+jHczdL6q6i85Q==",
+ "requires": {
+ "ccount": "^1.0.0",
+ "collapse-white-space": "^1.0.2",
+ "is-alphabetical": "^1.0.0",
+ "is-decimal": "^1.0.0",
+ "is-whitespace-character": "^1.0.0",
+ "is-word-character": "^1.0.0",
+ "markdown-escapes": "^1.0.0",
+ "parse-entities": "^2.0.0",
+ "repeat-string": "^1.5.4",
+ "state-toggle": "^1.0.0",
+ "trim": "0.0.1",
+ "trim-trailing-lines": "^1.0.0",
+ "unherit": "^1.0.4",
+ "unist-util-remove-position": "^2.0.0",
+ "vfile-location": "^3.0.0",
+ "xtend": "^4.0.1"
+ }
+ },
+ "remark-squeeze-paragraphs": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/remark-squeeze-paragraphs/-/remark-squeeze-paragraphs-4.0.0.tgz",
+ "integrity": "sha512-8qRqmL9F4nuLPIgl92XUuxI3pFxize+F1H0e/W3llTk0UsjJaj01+RrirkMw7P21RKe4X6goQhYRSvNWX+70Rw==",
+ "requires": {
+ "mdast-squeeze-paragraphs": "^4.0.0"
+ }
+ },
+ "renderkid": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/renderkid/-/renderkid-3.0.0.tgz",
+ "integrity": "sha512-q/7VIQA8lmM1hF+jn+sFSPWGlMkSAeNYcPLmDQx2zzuiDfaLrOmumR8iaUKlenFgh0XRPIUeSPlH3A+AW3Z5pg==",
+ "requires": {
+ "css-select": "^4.1.3",
+ "dom-converter": "^0.2.0",
+ "htmlparser2": "^6.1.0",
+ "lodash": "^4.17.21",
+ "strip-ansi": "^6.0.1"
+ }
+ },
+ "repeat-string": {
+ "version": "1.6.1",
+ "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz",
+ "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w=="
+ },
+ "require-from-string": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz",
+ "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw=="
+ },
+ "require-like": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/require-like/-/require-like-0.1.2.tgz",
+ "integrity": "sha512-oyrU88skkMtDdauHDuKVrgR+zuItqr6/c//FXzvmxRGMexSDc6hNvJInGW3LL46n+8b50RykrvwSUIIQH2LQ5A=="
+ },
+ "requires-port": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz",
+ "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ=="
+ },
+ "resolve": {
+ "version": "1.22.2",
+ "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.2.tgz",
+ "integrity": "sha512-Sb+mjNHOULsBv818T40qSPeRiuWLyaGMa5ewydRLFimneixmVy2zdivRl+AF6jaYPC8ERxGDmFSiqui6SfPd+g==",
+ "requires": {
+ "is-core-module": "^2.11.0",
+ "path-parse": "^1.0.7",
+ "supports-preserve-symlinks-flag": "^1.0.0"
+ }
+ },
+ "resolve-from": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz",
+ "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g=="
+ },
+ "resolve-pathname": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/resolve-pathname/-/resolve-pathname-3.0.0.tgz",
+ "integrity": "sha512-C7rARubxI8bXFNB/hqcp/4iUeIXJhJZvFPFPiSPRnhU5UPxzMFIl+2E6yY6c4k9giDJAhtV+enfA+G89N6Csng=="
+ },
+ "responselike": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/responselike/-/responselike-1.0.2.tgz",
+ "integrity": "sha512-/Fpe5guzJk1gPqdJLJR5u7eG/gNY4nImjbRDaVWVMRhne55TCmj2i9Q+54PBRfatRC8v/rIiv9BN0pMd9OV5EQ==",
+ "requires": {
+ "lowercase-keys": "^1.0.0"
+ }
+ },
+ "retry": {
+ "version": "0.13.1",
+ "resolved": "https://registry.npmjs.org/retry/-/retry-0.13.1.tgz",
+ "integrity": "sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg=="
+ },
+ "reusify": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz",
+ "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw=="
+ },
+ "rimraf": {
+ "version": "3.0.2",
+ "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
+ "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
+ "requires": {
+ "glob": "^7.1.3"
+ }
+ },
+ "rtl-detect": {
+ "version": "1.0.4",
+ "resolved": "https://registry.npmjs.org/rtl-detect/-/rtl-detect-1.0.4.tgz",
+ "integrity": "sha512-EBR4I2VDSSYr7PkBmFy04uhycIpDKp+21p/jARYXlCSjQksTBQcJ0HFUPOO79EPPH5JS6VAhiIQbycf0O3JAxQ=="
+ },
+ "run-parallel": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz",
+ "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==",
+ "requires": {
+ "queue-microtask": "^1.2.2"
+ }
+ },
+ "rxjs": {
+ "version": "7.8.1",
+ "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-7.8.1.tgz",
+ "integrity": "sha512-AA3TVj+0A2iuIoQkWEK/tqFjBq2j+6PO6Y0zJcvzLAFhEFIO3HL0vls9hWLncZbAAbK0mar7oZ4V079I/qPMxg==",
+ "requires": {
+ "tslib": "^2.1.0"
+ }
+ },
+ "safe-buffer": {
+ "version": "5.2.1",
+ "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz",
+ "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ=="
+ },
+ "safer-buffer": {
+ "version": "2.1.2",
+ "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
+ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
+ },
+ "scheduler": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.20.2.tgz",
+ "integrity": "sha512-2eWfGgAqqWFGqtdMmcL5zCMK1U8KlXv8SQFGglL3CEtd0aDVDWgeF/YoCmvln55m5zSk3J/20hTaSBeSObsQDQ==",
+ "peer": true,
+ "requires": {
+ "loose-envify": "^1.1.0",
+ "object-assign": "^4.1.1"
+ }
+ },
+ "schema-utils": {
+ "version": "2.7.1",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-2.7.1.tgz",
+ "integrity": "sha512-SHiNtMOUGWBQJwzISiVYKu82GiV4QYGePp3odlY1tuKO7gPtphAT5R/py0fA6xtbgLL/RvtJZnU9b8s0F1q0Xg==",
+ "requires": {
+ "@types/json-schema": "^7.0.5",
+ "ajv": "^6.12.4",
+ "ajv-keywords": "^3.5.2"
+ }
+ },
+ "section-matter": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/section-matter/-/section-matter-1.0.0.tgz",
+ "integrity": "sha512-vfD3pmTzGpufjScBh50YHKzEu2lxBWhVEHsNGoEXmCmn2hKGfeNLYMzCJpe8cD7gqX7TJluOVpBkAequ6dgMmA==",
+ "requires": {
+ "extend-shallow": "^2.0.1",
+ "kind-of": "^6.0.0"
+ }
+ },
+ "select-hose": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/select-hose/-/select-hose-2.0.0.tgz",
+ "integrity": "sha512-mEugaLK+YfkijB4fx0e6kImuJdCIt2LxCRcbEYPqRGCs4F2ogyfZU5IAZRdjCP8JPq2AtdNoC/Dux63d9Kiryg=="
+ },
+ "selfsigned": {
+ "version": "2.1.1",
+ "resolved": "https://registry.npmjs.org/selfsigned/-/selfsigned-2.1.1.tgz",
+ "integrity": "sha512-GSL3aowiF7wa/WtSFwnUrludWFoNhftq8bUkH9pkzjpN2XSPOAYEgg6e0sS9s0rZwgJzJiQRPU18A6clnoW5wQ==",
+ "requires": {
+ "node-forge": "^1"
+ }
+ },
+ "semver": {
+ "version": "7.6.3",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz",
+ "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A=="
+ },
+ "semver-diff": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/semver-diff/-/semver-diff-3.1.1.tgz",
+ "integrity": "sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg==",
+ "requires": {
+ "semver": "^6.3.0"
+ },
+ "dependencies": {
+ "semver": {
+ "version": "6.3.1",
+ "resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
+ "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA=="
+ }
+ }
+ },
+ "send": {
+ "version": "0.19.0",
+ "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz",
+ "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==",
+ "requires": {
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "fresh": "0.5.2",
+ "http-errors": "2.0.0",
+ "mime": "1.6.0",
+ "ms": "2.1.3",
+ "on-finished": "2.4.1",
+ "range-parser": "~1.2.1",
+ "statuses": "2.0.1"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "requires": {
+ "ms": "2.0.0"
+ },
+ "dependencies": {
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+ }
+ }
+ },
+ "ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
+ },
+ "range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="
+ }
+ }
+ },
+ "serialize-javascript": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/serialize-javascript/-/serialize-javascript-6.0.1.tgz",
+ "integrity": "sha512-owoXEFjWRllis8/M1Q+Cw5k8ZH40e3zhp/ovX+Xr/vi1qj6QesbyXXViFbpNvWvPNAD62SutwEXavefrLJWj7w==",
+ "requires": {
+ "randombytes": "^2.1.0"
+ }
+ },
+ "serve-handler": {
+ "version": "6.1.5",
+ "resolved": "https://registry.npmjs.org/serve-handler/-/serve-handler-6.1.5.tgz",
+ "integrity": "sha512-ijPFle6Hwe8zfmBxJdE+5fta53fdIY0lHISJvuikXB3VYFafRjMRpOffSPvCYsbKyBA7pvy9oYr/BT1O3EArlg==",
+ "requires": {
+ "bytes": "3.0.0",
+ "content-disposition": "0.5.2",
+ "fast-url-parser": "1.1.3",
+ "mime-types": "2.1.18",
+ "minimatch": "3.1.2",
+ "path-is-inside": "1.0.2",
+ "path-to-regexp": "2.2.1",
+ "range-parser": "1.2.0"
+ },
+ "dependencies": {
+ "path-to-regexp": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-2.2.1.tgz",
+ "integrity": "sha512-gu9bD6Ta5bwGrrU8muHzVOBFFREpp2iRkVfhBJahwJ6p6Xw20SjT0MxLnwkjOibQmGSYhiUnf2FLe7k+jcFmGQ=="
+ }
+ }
+ },
+ "serve-index": {
+ "version": "1.9.1",
+ "resolved": "https://registry.npmjs.org/serve-index/-/serve-index-1.9.1.tgz",
+ "integrity": "sha512-pXHfKNP4qujrtteMrSBb0rc8HJ9Ms/GrXwcUtUtD5s4ewDJI8bT3Cz2zTVRMKtri49pLx2e0Ya8ziP5Ya2pZZw==",
+ "requires": {
+ "accepts": "~1.3.4",
+ "batch": "0.6.1",
+ "debug": "2.6.9",
+ "escape-html": "~1.0.3",
+ "http-errors": "~1.6.2",
+ "mime-types": "~2.1.17",
+ "parseurl": "~1.3.2"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "requires": {
+ "ms": "2.0.0"
+ }
+ },
+ "depd": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/depd/-/depd-1.1.2.tgz",
+ "integrity": "sha512-7emPTl6Dpo6JRXOXjLRxck+FlLRX5847cLKEn00PLAgc3g2hTZZgr+e4c2v6QpSmLeFP3n5yUo7ft6avBK/5jQ=="
+ },
+ "http-errors": {
+ "version": "1.6.3",
+ "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-1.6.3.tgz",
+ "integrity": "sha512-lks+lVC8dgGyh97jxvxeYTWQFvh4uw4yC12gVl63Cg30sjPX4wuGcdkICVXDAESr6OJGjqGA8Iz5mkeN6zlD7A==",
+ "requires": {
+ "depd": "~1.1.2",
+ "inherits": "2.0.3",
+ "setprototypeof": "1.1.0",
+ "statuses": ">= 1.4.0 < 2"
+ }
+ },
+ "inherits": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+ "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw=="
+ },
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+ },
+ "setprototypeof": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.1.0.tgz",
+ "integrity": "sha512-BvE/TwpZX4FXExxOxZyRGQQv651MSwmWKZGqvmPcRIjDqWub67kTKuIMx43cZZrS/cBBzwBcNDWoFxt2XEFIpQ=="
+ },
+ "statuses": {
+ "version": "1.5.0",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-1.5.0.tgz",
+ "integrity": "sha512-OpZ3zP+jT1PI7I8nemJX4AKmAX070ZkYPVWV/AaKTJl+tXCTGyVdC1a4SL8RUQYEwk/f34ZX8UTykN68FwrqAA=="
+ }
+ }
+ },
+ "serve-static": {
+ "version": "1.16.0",
+ "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.0.tgz",
+ "integrity": "sha512-pDLK8zwl2eKaYrs8mrPZBJua4hMplRWJ1tIFksVC3FtBEBnl8dxgeHtsaMS8DhS9i4fLObaon6ABoc4/hQGdPA==",
+ "requires": {
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "parseurl": "~1.3.3",
+ "send": "0.18.0"
+ },
+ "dependencies": {
+ "debug": {
+ "version": "2.6.9",
+ "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz",
+ "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==",
+ "requires": {
+ "ms": "2.0.0"
+ },
+ "dependencies": {
+ "ms": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
+ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
+ }
+ }
+ },
+ "ms": {
+ "version": "2.1.3",
+ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
+ "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
+ },
+ "range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="
+ },
+ "send": {
+ "version": "0.18.0",
+ "resolved": "https://registry.npmjs.org/send/-/send-0.18.0.tgz",
+ "integrity": "sha512-qqWzuOjSFOuqPjFe4NOsMLafToQQwBSOEpS+FwEt3A2V3vKubTquT3vmLTQpFgMXp8AlFWFuP1qKaJZOtPpVXg==",
+ "requires": {
+ "debug": "2.6.9",
+ "depd": "2.0.0",
+ "destroy": "1.2.0",
+ "encodeurl": "~1.0.2",
+ "escape-html": "~1.0.3",
+ "etag": "~1.8.1",
+ "fresh": "0.5.2",
+ "http-errors": "2.0.0",
+ "mime": "1.6.0",
+ "ms": "2.1.3",
+ "on-finished": "2.4.1",
+ "range-parser": "~1.2.1",
+ "statuses": "2.0.1"
+ }
+ }
+ }
+ },
+ "set-function-length": {
+ "version": "1.2.2",
+ "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz",
+ "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==",
+ "requires": {
+ "define-data-property": "^1.1.4",
+ "es-errors": "^1.3.0",
+ "function-bind": "^1.1.2",
+ "get-intrinsic": "^1.2.4",
+ "gopd": "^1.0.1",
+ "has-property-descriptors": "^1.0.2"
+ }
+ },
+ "setprototypeof": {
+ "version": "1.2.0",
+ "resolved": "https://registry.npmjs.org/setprototypeof/-/setprototypeof-1.2.0.tgz",
+ "integrity": "sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw=="
+ },
+ "shallow-clone": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/shallow-clone/-/shallow-clone-3.0.1.tgz",
+ "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==",
+ "requires": {
+ "kind-of": "^6.0.2"
+ }
+ },
+ "shallowequal": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/shallowequal/-/shallowequal-1.1.0.tgz",
+ "integrity": "sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ=="
+ },
+ "shebang-command": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
+ "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
+ "requires": {
+ "shebang-regex": "^3.0.0"
+ }
+ },
+ "shebang-regex": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
+ "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A=="
+ },
+ "shell-quote": {
+ "version": "1.8.1",
+ "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.1.tgz",
+ "integrity": "sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA=="
+ },
+ "shelljs": {
+ "version": "0.8.5",
+ "resolved": "https://registry.npmjs.org/shelljs/-/shelljs-0.8.5.tgz",
+ "integrity": "sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==",
+ "requires": {
+ "glob": "^7.0.0",
+ "interpret": "^1.0.0",
+ "rechoir": "^0.6.2"
+ }
+ },
+ "side-channel": {
+ "version": "1.0.6",
+ "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.6.tgz",
+ "integrity": "sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==",
+ "requires": {
+ "call-bind": "^1.0.7",
+ "es-errors": "^1.3.0",
+ "get-intrinsic": "^1.2.4",
+ "object-inspect": "^1.13.1"
+ }
+ },
+ "signal-exit": {
+ "version": "3.0.7",
+ "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
+ "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ=="
+ },
+ "sirv": {
+ "version": "1.0.19",
+ "resolved": "https://registry.npmjs.org/sirv/-/sirv-1.0.19.tgz",
+ "integrity": "sha512-JuLThK3TnZG1TAKDwNIqNq6QA2afLOCcm+iE8D1Kj3GA40pSPsxQjjJl0J8X3tsR7T+CP1GavpzLwYkgVLWrZQ==",
+ "requires": {
+ "@polka/url": "^1.0.0-next.20",
+ "mrmime": "^1.0.0",
+ "totalist": "^1.0.0"
+ }
+ },
+ "sisteransi": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/sisteransi/-/sisteransi-1.0.5.tgz",
+ "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg=="
+ },
+ "slash": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz",
+ "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q=="
+ },
+ "sockjs": {
+ "version": "0.3.24",
+ "resolved": "https://registry.npmjs.org/sockjs/-/sockjs-0.3.24.tgz",
+ "integrity": "sha512-GJgLTZ7vYb/JtPSSZ10hsOYIvEYsjbNU+zPdIHcUaWVNUEPivzxku31865sSSud0Da0W4lEeOPlmw93zLQchuQ==",
+ "requires": {
+ "faye-websocket": "^0.11.3",
+ "uuid": "^8.3.2",
+ "websocket-driver": "^0.7.4"
+ }
+ },
+ "sort-css-media-queries": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/sort-css-media-queries/-/sort-css-media-queries-2.1.0.tgz",
+ "integrity": "sha512-IeWvo8NkNiY2vVYdPa27MCQiR0MN0M80johAYFVxWWXQ44KU84WNxjslwBHmc/7ZL2ccwkM7/e6S5aiKZXm7jA=="
+ },
+ "source-map": {
+ "version": "0.6.1",
+ "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.6.1.tgz",
+ "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g=="
+ },
+ "source-map-js": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz",
+ "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA=="
+ },
+ "source-map-support": {
+ "version": "0.5.21",
+ "resolved": "https://registry.npmjs.org/source-map-support/-/source-map-support-0.5.21.tgz",
+ "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==",
+ "requires": {
+ "buffer-from": "^1.0.0",
+ "source-map": "^0.6.0"
+ }
+ },
+ "space-separated-tokens": {
+ "version": "1.1.5",
+ "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz",
+ "integrity": "sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA=="
+ },
+ "spdy": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz",
+ "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==",
+ "requires": {
+ "debug": "^4.1.0",
+ "handle-thing": "^2.0.0",
+ "http-deceiver": "^1.2.7",
+ "select-hose": "^2.0.0",
+ "spdy-transport": "^3.0.0"
+ }
+ },
+ "spdy-transport": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/spdy-transport/-/spdy-transport-3.0.0.tgz",
+ "integrity": "sha512-hsLVFE5SjA6TCisWeJXFKniGGOpBgMLmerfO2aCyCU5s7nJ/rpAepqmFifv/GCbSbueEeAJJnmSQ2rKC/g8Fcw==",
+ "requires": {
+ "debug": "^4.1.0",
+ "detect-node": "^2.0.4",
+ "hpack.js": "^2.1.6",
+ "obuf": "^1.1.2",
+ "readable-stream": "^3.0.6",
+ "wbuf": "^1.7.3"
+ }
+ },
+ "sprintf-js": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
+ "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g=="
+ },
+ "stable": {
+ "version": "0.1.8",
+ "resolved": "https://registry.npmjs.org/stable/-/stable-0.1.8.tgz",
+ "integrity": "sha512-ji9qxRnOVfcuLDySj9qzhGSEFVobyt1kIOSkj1qZzYLzq7Tos/oUUWvotUPQLlrsidqsK6tBH89Bc9kL5zHA6w=="
+ },
+ "state-toggle": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/state-toggle/-/state-toggle-1.0.3.tgz",
+ "integrity": "sha512-d/5Z4/2iiCnHw6Xzghyhb+GcmF89bxwgXG60wjIiZaxnymbyOmI8Hk4VqHXiVVp6u2ysaskFfXg3ekCj4WNftQ=="
+ },
+ "statuses": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz",
+ "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ=="
+ },
+ "std-env": {
+ "version": "3.3.3",
+ "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.3.3.tgz",
+ "integrity": "sha512-Rz6yejtVyWnVjC1RFvNmYL10kgjC49EOghxWn0RFqlCHGFpQx+Xe7yW3I4ceK1SGrWIGMjD5Kbue8W/udkbMJg=="
+ },
+ "string_decoder": {
+ "version": "1.3.0",
+ "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz",
+ "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==",
+ "requires": {
+ "safe-buffer": "~5.2.0"
+ }
+ },
+ "string-width": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz",
+ "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==",
+ "requires": {
+ "eastasianwidth": "^0.2.0",
+ "emoji-regex": "^9.2.2",
+ "strip-ansi": "^7.0.1"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz",
+ "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA=="
+ },
+ "strip-ansi": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
+ "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
+ "requires": {
+ "ansi-regex": "^6.0.1"
+ }
+ }
+ }
+ },
+ "stringify-object": {
+ "version": "3.3.0",
+ "resolved": "https://registry.npmjs.org/stringify-object/-/stringify-object-3.3.0.tgz",
+ "integrity": "sha512-rHqiFh1elqCQ9WPLIC8I0Q/g/wj5J1eMkyoiD6eoQApWHP0FtlK7rqnhmabL5VUY9JQCcqwwvlOaSuutekgyrw==",
+ "requires": {
+ "get-own-enumerable-property-symbols": "^3.0.0",
+ "is-obj": "^1.0.1",
+ "is-regexp": "^1.0.0"
+ }
+ },
+ "strip-ansi": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz",
+ "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==",
+ "requires": {
+ "ansi-regex": "^5.0.1"
+ }
+ },
+ "strip-bom-string": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/strip-bom-string/-/strip-bom-string-1.0.0.tgz",
+ "integrity": "sha512-uCC2VHvQRYu+lMh4My/sFNmF2klFymLX1wHJeXnbEJERpV/ZsVuonzerjfrGpIGF7LBVa1O7i9kjiWvJiFck8g=="
+ },
+ "strip-final-newline": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz",
+ "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA=="
+ },
+ "strip-json-comments": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz",
+ "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ=="
+ },
+ "style-to-object": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/style-to-object/-/style-to-object-0.3.0.tgz",
+ "integrity": "sha512-CzFnRRXhzWIdItT3OmF8SQfWyahHhjq3HwcMNCNLn+N7klOOqPjMeG/4JSu77D7ypZdGvSzvkrbyeTMizz2VrA==",
+ "requires": {
+ "inline-style-parser": "0.1.1"
+ }
+ },
+ "stylehacks": {
+ "version": "5.1.1",
+ "resolved": "https://registry.npmjs.org/stylehacks/-/stylehacks-5.1.1.tgz",
+ "integrity": "sha512-sBpcd5Hx7G6seo7b1LkpttvTz7ikD0LlH5RmdcBNb6fFR0Fl7LQwHDFr300q4cwUqi+IYrFGmsIHieMBfnN/Bw==",
+ "requires": {
+ "browserslist": "^4.21.4",
+ "postcss-selector-parser": "^6.0.4"
+ }
+ },
+ "supports-color": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz",
+ "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==",
+ "requires": {
+ "has-flag": "^4.0.0"
+ }
+ },
+ "supports-preserve-symlinks-flag": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz",
+ "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w=="
+ },
+ "svg-parser": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/svg-parser/-/svg-parser-2.0.4.tgz",
+ "integrity": "sha512-e4hG1hRwoOdRb37cIMSgzNsxyzKfayW6VOflrwvR+/bzrkyxY/31WkbgnQpgtrNp1SdpJvpUAGTa/ZoiPNDuRQ=="
+ },
+ "svgo": {
+ "version": "2.8.0",
+ "resolved": "https://registry.npmjs.org/svgo/-/svgo-2.8.0.tgz",
+ "integrity": "sha512-+N/Q9kV1+F+UeWYoSiULYo4xYSDQlTgb+ayMobAXPwMnLvop7oxKMo9OzIrX5x3eS4L4f2UHhc9axXwY8DpChg==",
+ "requires": {
+ "@trysound/sax": "0.2.0",
+ "commander": "^7.2.0",
+ "css-select": "^4.1.3",
+ "css-tree": "^1.1.3",
+ "csso": "^4.2.0",
+ "picocolors": "^1.0.0",
+ "stable": "^0.1.8"
+ },
+ "dependencies": {
+ "commander": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz",
+ "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw=="
+ }
+ }
+ },
+ "sync-directory": {
+ "version": "5.1.9",
+ "resolved": "https://registry.npmjs.org/sync-directory/-/sync-directory-5.1.9.tgz",
+ "integrity": "sha512-0942RssO+NrIjDcaNiXUH/NQoAamURT1zpzN/uB8fgyetDM8NtPPOQNax3+BuNUfw/2JcaEXrAz567DokNq0lw==",
+ "requires": {
+ "chokidar": "^3.3.1",
+ "commander": "^6.2.0",
+ "fs-extra": "^7.0.1",
+ "is-absolute": "^1.0.0",
+ "readdir-enhanced": "^1.5.2"
+ },
+ "dependencies": {
+ "commander": {
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-6.2.1.tgz",
+ "integrity": "sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA=="
+ }
+ }
+ },
+ "tapable": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/tapable/-/tapable-2.2.1.tgz",
+ "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ=="
+ },
+ "terser": {
+ "version": "5.32.0",
+ "resolved": "https://registry.npmjs.org/terser/-/terser-5.32.0.tgz",
+ "integrity": "sha512-v3Gtw3IzpBJ0ugkxEX8U0W6+TnPKRRCWGh1jC/iM/e3Ki5+qvO1L1EAZ56bZasc64aXHwRHNIQEzm6//i5cemQ==",
+ "requires": {
+ "@jridgewell/source-map": "^0.3.3",
+ "acorn": "^8.8.2",
+ "commander": "^2.20.0",
+ "source-map-support": "~0.5.20"
+ },
+ "dependencies": {
+ "commander": {
+ "version": "2.20.3",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-2.20.3.tgz",
+ "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ=="
+ }
+ }
+ },
+ "terser-webpack-plugin": {
+ "version": "5.3.10",
+ "resolved": "https://registry.npmjs.org/terser-webpack-plugin/-/terser-webpack-plugin-5.3.10.tgz",
+ "integrity": "sha512-BKFPWlPDndPs+NGGCr1U59t0XScL5317Y0UReNrHaw9/FwhPENlq6bfgs+4yPfyP51vqC1bQ4rp1EfXW5ZSH9w==",
+ "requires": {
+ "@jridgewell/trace-mapping": "^0.3.20",
+ "jest-worker": "^27.4.5",
+ "schema-utils": "^3.1.1",
+ "serialize-javascript": "^6.0.1",
+ "terser": "^5.26.0"
+ },
+ "dependencies": {
+ "jest-worker": {
+ "version": "27.5.1",
+ "resolved": "https://registry.npmjs.org/jest-worker/-/jest-worker-27.5.1.tgz",
+ "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==",
+ "requires": {
+ "@types/node": "*",
+ "merge-stream": "^2.0.0",
+ "supports-color": "^8.0.0"
+ }
+ },
+ "schema-utils": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.2.0.tgz",
+ "integrity": "sha512-0zTyLGyDJYd/MBxG1AhJkKa6fpEBds4OQO2ut0w7OYG+ZGhGea09lijvzsqegYSik88zc7cUtIlnnO+/BvD6gQ==",
+ "requires": {
+ "@types/json-schema": "^7.0.8",
+ "ajv": "^6.12.5",
+ "ajv-keywords": "^3.5.2"
+ }
+ },
+ "supports-color": {
+ "version": "8.1.1",
+ "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-8.1.1.tgz",
+ "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==",
+ "requires": {
+ "has-flag": "^4.0.0"
+ }
+ }
+ }
+ },
+ "text-table": {
+ "version": "0.2.0",
+ "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz",
+ "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw=="
+ },
+ "thunky": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/thunky/-/thunky-1.1.0.tgz",
+ "integrity": "sha512-eHY7nBftgThBqOyHGVN+l8gF0BucP09fMo0oO/Lb0w1OF80dJv+lDVpXG60WMQvkcxAkNybKsrEIE3ZtKGmPrA=="
+ },
+ "tiny-invariant": {
+ "version": "1.3.1",
+ "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.1.tgz",
+ "integrity": "sha512-AD5ih2NlSssTCwsMznbvwMZpJ1cbhkGd2uueNxzv2jDlEeZdU04JQfRnggJQ8DrcVBGjAsCKwFBbDlVNtEMlzw=="
+ },
+ "tiny-warning": {
+ "version": "1.0.3",
+ "resolved": "https://registry.npmjs.org/tiny-warning/-/tiny-warning-1.0.3.tgz",
+ "integrity": "sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA=="
+ },
+ "to-fast-properties": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/to-fast-properties/-/to-fast-properties-2.0.0.tgz",
+ "integrity": "sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog=="
+ },
+ "to-readable-stream": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/to-readable-stream/-/to-readable-stream-1.0.0.tgz",
+ "integrity": "sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q=="
+ },
+ "to-regex-range": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
+ "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
+ "requires": {
+ "is-number": "^7.0.0"
+ }
+ },
+ "toidentifier": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz",
+ "integrity": "sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA=="
+ },
+ "toposort": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/toposort/-/toposort-2.0.2.tgz",
+ "integrity": "sha512-0a5EOkAUp8D4moMi2W8ZF8jcga7BgZd91O/yabJCFY8az+XSzeGyTKs0Aoo897iV1Nj6guFq8orWDS96z91oGg=="
+ },
+ "totalist": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/totalist/-/totalist-1.1.0.tgz",
+ "integrity": "sha512-gduQwd1rOdDMGxFG1gEvhV88Oirdo2p+KjoYFU7k2g+i7n6AFFbDQ5kMPUsW0pNbfQsB/cwXvT1i4Bue0s9g5g=="
+ },
+ "trim": {
+ "version": "0.0.1",
+ "resolved": "https://registry.npmjs.org/trim/-/trim-0.0.1.tgz",
+ "integrity": "sha512-YzQV+TZg4AxpKxaTHK3c3D+kRDCGVEE7LemdlQZoQXn0iennk10RsIoY6ikzAqJTc9Xjl9C1/waHom/J86ziAQ=="
+ },
+ "trim-trailing-lines": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/trim-trailing-lines/-/trim-trailing-lines-1.1.4.tgz",
+ "integrity": "sha512-rjUWSqnfTNrjbB9NQWfPMH/xRK1deHeGsHoVfpxJ++XeYXE0d6B1En37AHfw3jtfTU7dzMzZL2jjpe8Qb5gLIQ=="
+ },
+ "trough": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/trough/-/trough-1.0.5.tgz",
+ "integrity": "sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA=="
+ },
+ "tslib": {
+ "version": "2.5.3",
+ "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.5.3.tgz",
+ "integrity": "sha512-mSxlJJwl3BMEQCUNnxXBU9jP4JBktcEGhURcPR6VQVlnP0FdDEsIaz0C35dXNGLyRfrATNofF0F5p2KPxQgB+w=="
+ },
+ "type-fest": {
+ "version": "2.19.0",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-2.19.0.tgz",
+ "integrity": "sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA=="
+ },
+ "type-is": {
+ "version": "1.6.18",
+ "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz",
+ "integrity": "sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==",
+ "requires": {
+ "media-typer": "0.3.0",
+ "mime-types": "~2.1.24"
+ },
+ "dependencies": {
+ "mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="
+ },
+ "mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "requires": {
+ "mime-db": "1.52.0"
+ }
+ }
+ }
+ },
+ "typedarray-to-buffer": {
+ "version": "3.1.5",
+ "resolved": "https://registry.npmjs.org/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz",
+ "integrity": "sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q==",
+ "requires": {
+ "is-typedarray": "^1.0.0"
+ }
+ },
+ "typescript": {
+ "version": "5.1.3",
+ "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.1.3.tgz",
+ "integrity": "sha512-XH627E9vkeqhlZFQuL+UsyAXEnibT0kWR2FWONlr4sTjvxyJYnyefgrkyECLzM5NenmKzRAy2rR/OlYLA1HkZw==",
+ "peer": true
+ },
+ "uglify-js": {
+ "version": "3.17.4",
+ "resolved": "https://registry.npmjs.org/uglify-js/-/uglify-js-3.17.4.tgz",
+ "integrity": "sha512-T9q82TJI9e/C1TAxYvfb16xO120tMVFZrGA3f9/P4424DNu6ypK103y0GPFVa17yotwSyZW5iYXgjYHkGrJW/g==",
+ "optional": true
+ },
+ "unc-path-regex": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/unc-path-regex/-/unc-path-regex-0.1.2.tgz",
+ "integrity": "sha512-eXL4nmJT7oCpkZsHZUOJo8hcX3GbsiDOa0Qu9F646fi8dT3XuSVopVqAcEiVzSKKH7UoDti23wNX3qGFxcW5Qg=="
+ },
+ "unherit": {
+ "version": "1.1.3",
+ "resolved": "https://registry.npmjs.org/unherit/-/unherit-1.1.3.tgz",
+ "integrity": "sha512-Ft16BJcnapDKp0+J/rqFC3Rrk6Y/Ng4nzsC028k2jdDII/rdZ7Wd3pPT/6+vIIxRagwRc9K0IUX0Ra4fKvw+WQ==",
+ "requires": {
+ "inherits": "^2.0.0",
+ "xtend": "^4.0.0"
+ }
+ },
+ "unicode-canonical-property-names-ecmascript": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.0.tgz",
+ "integrity": "sha512-yY5PpDlfVIU5+y/BSCxAJRBIS1Zc2dDG3Ujq+sR0U+JjUevW2JhocOF+soROYDSaAezOzOKuyyixhD6mBknSmQ=="
+ },
+ "unicode-match-property-ecmascript": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz",
+ "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==",
+ "requires": {
+ "unicode-canonical-property-names-ecmascript": "^2.0.0",
+ "unicode-property-aliases-ecmascript": "^2.0.0"
+ }
+ },
+ "unicode-match-property-value-ecmascript": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.1.0.tgz",
+ "integrity": "sha512-qxkjQt6qjg/mYscYMC0XKRn3Rh0wFPlfxB0xkt9CfyTvpX1Ra0+rAmdX2QyAobptSEvuy4RtpPRui6XkV+8wjA=="
+ },
+ "unicode-property-aliases-ecmascript": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz",
+ "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w=="
+ },
+ "unified": {
+ "version": "9.2.2",
+ "resolved": "https://registry.npmjs.org/unified/-/unified-9.2.2.tgz",
+ "integrity": "sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==",
+ "requires": {
+ "bail": "^1.0.0",
+ "extend": "^3.0.0",
+ "is-buffer": "^2.0.0",
+ "is-plain-obj": "^2.0.0",
+ "trough": "^1.0.0",
+ "vfile": "^4.0.0"
+ }
+ },
+ "unique-string": {
+ "version": "2.0.0",
+ "resolved": "https://registry.npmjs.org/unique-string/-/unique-string-2.0.0.tgz",
+ "integrity": "sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg==",
+ "requires": {
+ "crypto-random-string": "^2.0.0"
+ }
+ },
+ "unist-builder": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/unist-builder/-/unist-builder-2.0.3.tgz",
+ "integrity": "sha512-f98yt5pnlMWlzP539tPc4grGMsFaQQlP/vM396b00jngsiINumNmsY8rkXjfoi1c6QaM8nQ3vaGDuoKWbe/1Uw=="
+ },
+ "unist-util-generated": {
+ "version": "1.1.6",
+ "resolved": "https://registry.npmjs.org/unist-util-generated/-/unist-util-generated-1.1.6.tgz",
+ "integrity": "sha512-cln2Mm1/CZzN5ttGK7vkoGw+RZ8VcUH6BtGbq98DDtRGquAAOXig1mrBQYelOwMXYS8rK+vZDyyojSjp7JX+Lg=="
+ },
+ "unist-util-is": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/unist-util-is/-/unist-util-is-4.1.0.tgz",
+ "integrity": "sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg=="
+ },
+ "unist-util-position": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/unist-util-position/-/unist-util-position-3.1.0.tgz",
+ "integrity": "sha512-w+PkwCbYSFw8vpgWD0v7zRCl1FpY3fjDSQ3/N/wNd9Ffa4gPi8+4keqt99N3XW6F99t/mUzp2xAhNmfKWp95QA=="
+ },
+ "unist-util-remove": {
+ "version": "2.1.0",
+ "resolved": "https://registry.npmjs.org/unist-util-remove/-/unist-util-remove-2.1.0.tgz",
+ "integrity": "sha512-J8NYPyBm4baYLdCbjmf1bhPu45Cr1MWTm77qd9istEkzWpnN6O9tMsEbB2JhNnBCqGENRqEWomQ+He6au0B27Q==",
+ "requires": {
+ "unist-util-is": "^4.0.0"
+ }
+ },
+ "unist-util-remove-position": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/unist-util-remove-position/-/unist-util-remove-position-2.0.1.tgz",
+ "integrity": "sha512-fDZsLYIe2uT+oGFnuZmy73K6ZxOPG/Qcm+w7jbEjaFcJgbQ6cqjs/eSPzXhsmGpAsWPkqZM9pYjww5QTn3LHMA==",
+ "requires": {
+ "unist-util-visit": "^2.0.0"
+ }
+ },
+ "unist-util-stringify-position": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz",
+ "integrity": "sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==",
+ "requires": {
+ "@types/unist": "^2.0.2"
+ }
+ },
+ "unist-util-visit": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/unist-util-visit/-/unist-util-visit-2.0.3.tgz",
+ "integrity": "sha512-iJ4/RczbJMkD0712mGktuGpm/U4By4FfDonL7N/9tATGIF4imikjOuagyMY53tnZq3NP6BcmlrHhEKAfGWjh7Q==",
+ "requires": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^4.0.0",
+ "unist-util-visit-parents": "^3.0.0"
+ }
+ },
+ "unist-util-visit-parents": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz",
+ "integrity": "sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==",
+ "requires": {
+ "@types/unist": "^2.0.0",
+ "unist-util-is": "^4.0.0"
+ }
+ },
+ "universalify": {
+ "version": "0.1.2",
+ "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
+ "integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg=="
+ },
+ "unpipe": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/unpipe/-/unpipe-1.0.0.tgz",
+ "integrity": "sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ=="
+ },
+ "update-browserslist-db": {
+ "version": "1.1.0",
+ "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz",
+ "integrity": "sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ==",
+ "requires": {
+ "escalade": "^3.1.2",
+ "picocolors": "^1.0.1"
+ }
+ },
+ "update-notifier": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-5.1.0.tgz",
+ "integrity": "sha512-ItnICHbeMh9GqUy31hFPrD1kcuZ3rpxDZbf4KUDavXwS0bW5m7SLbDQpGX3UYr072cbrF5hFUs3r5tUsPwjfHw==",
+ "requires": {
+ "boxen": "^5.0.0",
+ "chalk": "^4.1.0",
+ "configstore": "^5.0.1",
+ "has-yarn": "^2.1.0",
+ "import-lazy": "^2.1.0",
+ "is-ci": "^2.0.0",
+ "is-installed-globally": "^0.4.0",
+ "is-npm": "^5.0.0",
+ "is-yarn-global": "^0.3.0",
+ "latest-version": "^5.1.0",
+ "pupa": "^2.1.1",
+ "semver": "^7.3.4",
+ "semver-diff": "^3.1.1",
+ "xdg-basedir": "^4.0.0"
+ },
+ "dependencies": {
+ "boxen": {
+ "version": "5.1.2",
+ "resolved": "https://registry.npmjs.org/boxen/-/boxen-5.1.2.tgz",
+ "integrity": "sha512-9gYgQKXx+1nP8mP7CzFyaUARhg7D3n1dF/FnErWmu9l6JvGpNUN278h0aSb+QjoiKSWG+iZ3uHrcqk0qrY9RQQ==",
+ "requires": {
+ "ansi-align": "^3.0.0",
+ "camelcase": "^6.2.0",
+ "chalk": "^4.1.0",
+ "cli-boxes": "^2.2.1",
+ "string-width": "^4.2.2",
+ "type-fest": "^0.20.2",
+ "widest-line": "^3.1.0",
+ "wrap-ansi": "^7.0.0"
+ }
+ },
+ "cli-boxes": {
+ "version": "2.2.1",
+ "resolved": "https://registry.npmjs.org/cli-boxes/-/cli-boxes-2.2.1.tgz",
+ "integrity": "sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw=="
+ },
+ "emoji-regex": {
+ "version": "8.0.0",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz",
+ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="
+ },
+ "string-width": {
+ "version": "4.2.3",
+ "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz",
+ "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==",
+ "requires": {
+ "emoji-regex": "^8.0.0",
+ "is-fullwidth-code-point": "^3.0.0",
+ "strip-ansi": "^6.0.1"
+ }
+ },
+ "type-fest": {
+ "version": "0.20.2",
+ "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz",
+ "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ=="
+ },
+ "widest-line": {
+ "version": "3.1.0",
+ "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-3.1.0.tgz",
+ "integrity": "sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg==",
+ "requires": {
+ "string-width": "^4.0.0"
+ }
+ },
+ "wrap-ansi": {
+ "version": "7.0.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
+ "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==",
+ "requires": {
+ "ansi-styles": "^4.0.0",
+ "string-width": "^4.1.0",
+ "strip-ansi": "^6.0.0"
+ }
+ }
+ }
+ },
+ "uri-js": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
+ "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
+ "requires": {
+ "punycode": "^2.1.0"
+ },
+ "dependencies": {
+ "punycode": {
+ "version": "2.3.0",
+ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz",
+ "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA=="
+ }
+ }
+ },
+ "url-loader": {
+ "version": "4.1.1",
+ "resolved": "https://registry.npmjs.org/url-loader/-/url-loader-4.1.1.tgz",
+ "integrity": "sha512-3BTV812+AVHHOJQO8O5MkWgZ5aosP7GnROJwvzLS9hWDj00lZ6Z0wNak423Lp9PBZN05N+Jk/N5Si8jRAlGyWA==",
+ "requires": {
+ "loader-utils": "^2.0.0",
+ "mime-types": "^2.1.27",
+ "schema-utils": "^3.0.0"
+ },
+ "dependencies": {
+ "mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="
+ },
+ "mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "requires": {
+ "mime-db": "1.52.0"
+ }
+ },
+ "schema-utils": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.2.0.tgz",
+ "integrity": "sha512-0zTyLGyDJYd/MBxG1AhJkKa6fpEBds4OQO2ut0w7OYG+ZGhGea09lijvzsqegYSik88zc7cUtIlnnO+/BvD6gQ==",
+ "requires": {
+ "@types/json-schema": "^7.0.8",
+ "ajv": "^6.12.5",
+ "ajv-keywords": "^3.5.2"
+ }
+ }
+ }
+ },
+ "url-parse-lax": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/url-parse-lax/-/url-parse-lax-3.0.0.tgz",
+ "integrity": "sha512-NjFKA0DidqPa5ciFcSrXnAltTtzz84ogy+NebPvfEgAck0+TNg4UJ4IN+fB7zRZfbgUf0syOo9MDxFkDSMuFaQ==",
+ "requires": {
+ "prepend-http": "^2.0.0"
+ }
+ },
+ "util": {
+ "version": "0.10.4",
+ "resolved": "https://registry.npmjs.org/util/-/util-0.10.4.tgz",
+ "integrity": "sha512-0Pm9hTQ3se5ll1XihRic3FDIku70C+iHUdT/W926rSgHV5QgXsYbKZN8MSC3tJtSkhuROzvsQjAaFENRXr+19A==",
+ "requires": {
+ "inherits": "2.0.3"
+ },
+ "dependencies": {
+ "inherits": {
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz",
+ "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw=="
+ }
+ }
+ },
+ "util-deprecate": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz",
+ "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw=="
+ },
+ "utila": {
+ "version": "0.4.0",
+ "resolved": "https://registry.npmjs.org/utila/-/utila-0.4.0.tgz",
+ "integrity": "sha512-Z0DbgELS9/L/75wZbro8xAnT50pBVFQZ+hUEueGDU5FN51YSCYM+jdxsfCiHjwNP/4LCDD0i/graKpeBnOXKRA=="
+ },
+ "utility-types": {
+ "version": "3.10.0",
+ "resolved": "https://registry.npmjs.org/utility-types/-/utility-types-3.10.0.tgz",
+ "integrity": "sha512-O11mqxmi7wMKCo6HKFt5AhO4BwY3VV68YU07tgxfz8zJTIxr4BpsezN49Ffwy9j3ZpwwJp4fkRwjRzq3uWE6Rg=="
+ },
+ "utils-merge": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
+ "integrity": "sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA=="
+ },
+ "uuid": {
+ "version": "8.3.2",
+ "resolved": "https://registry.npmjs.org/uuid/-/uuid-8.3.2.tgz",
+ "integrity": "sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg=="
+ },
+ "value-equal": {
+ "version": "1.0.1",
+ "resolved": "https://registry.npmjs.org/value-equal/-/value-equal-1.0.1.tgz",
+ "integrity": "sha512-NOJ6JZCAWr0zlxZt+xqCHNTEKOsrks2HQd4MqhP1qy4z1SkbEP467eNx6TgDKXMvUOb+OENfJCZwM+16n7fRfw=="
+ },
+ "vary": {
+ "version": "1.1.2",
+ "resolved": "https://registry.npmjs.org/vary/-/vary-1.1.2.tgz",
+ "integrity": "sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg=="
+ },
+ "vfile": {
+ "version": "4.2.1",
+ "resolved": "https://registry.npmjs.org/vfile/-/vfile-4.2.1.tgz",
+ "integrity": "sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA==",
+ "requires": {
+ "@types/unist": "^2.0.0",
+ "is-buffer": "^2.0.0",
+ "unist-util-stringify-position": "^2.0.0",
+ "vfile-message": "^2.0.0"
+ }
+ },
+ "vfile-location": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/vfile-location/-/vfile-location-3.2.0.tgz",
+ "integrity": "sha512-aLEIZKv/oxuCDZ8lkJGhuhztf/BW4M+iHdCwglA/eWc+vtuRFJj8EtgceYFX4LRjOhCAAiNHsKGssC6onJ+jbA=="
+ },
+ "vfile-message": {
+ "version": "2.0.4",
+ "resolved": "https://registry.npmjs.org/vfile-message/-/vfile-message-2.0.4.tgz",
+ "integrity": "sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==",
+ "requires": {
+ "@types/unist": "^2.0.0",
+ "unist-util-stringify-position": "^2.0.0"
+ }
+ },
+ "wait-on": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/wait-on/-/wait-on-6.0.1.tgz",
+ "integrity": "sha512-zht+KASY3usTY5u2LgaNqn/Cd8MukxLGjdcZxT2ns5QzDmTFc4XoWBgC+C/na+sMRZTuVygQoMYwdcVjHnYIVw==",
+ "requires": {
+ "axios": "^0.25.0",
+ "joi": "^17.6.0",
+ "lodash": "^4.17.21",
+ "minimist": "^1.2.5",
+ "rxjs": "^7.5.4"
+ }
+ },
+ "watchpack": {
+ "version": "2.4.2",
+ "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-2.4.2.tgz",
+ "integrity": "sha512-TnbFSbcOCcDgjZ4piURLCbJ3nJhznVh9kw6F6iokjiFPl8ONxe9A6nMDVXDiNbrSfLILs6vB07F7wLBrwPYzJw==",
+ "requires": {
+ "glob-to-regexp": "^0.4.1",
+ "graceful-fs": "^4.1.2"
+ },
+ "dependencies": {
+ "glob-to-regexp": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz",
+ "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw=="
+ }
+ }
+ },
+ "wbuf": {
+ "version": "1.7.3",
+ "resolved": "https://registry.npmjs.org/wbuf/-/wbuf-1.7.3.tgz",
+ "integrity": "sha512-O84QOnr0icsbFGLS0O3bI5FswxzRr8/gHwWkDlQFskhSPryQXvrTMxjxGP4+iWYoauLoBvfDpkrOauZ+0iZpDA==",
+ "requires": {
+ "minimalistic-assert": "^1.0.0"
+ }
+ },
+ "web-namespaces": {
+ "version": "1.1.4",
+ "resolved": "https://registry.npmjs.org/web-namespaces/-/web-namespaces-1.1.4.tgz",
+ "integrity": "sha512-wYxSGajtmoP4WxfejAPIr4l0fVh+jeMXZb08wNc0tMg6xsfZXj3cECqIK0G7ZAqUq0PP8WlMDtaOGVBTAWztNw=="
+ },
+ "webpack": {
+ "version": "5.94.0",
+ "resolved": "https://registry.npmjs.org/webpack/-/webpack-5.94.0.tgz",
+ "integrity": "sha512-KcsGn50VT+06JH/iunZJedYGUJS5FGjow8wb9c0v5n1Om8O1g4L6LjtfxwlXIATopoQu+vOXXa7gYisWxCoPyg==",
+ "requires": {
+ "@types/estree": "^1.0.5",
+ "@webassemblyjs/ast": "^1.12.1",
+ "@webassemblyjs/wasm-edit": "^1.12.1",
+ "@webassemblyjs/wasm-parser": "^1.12.1",
+ "acorn": "^8.7.1",
+ "acorn-import-attributes": "^1.9.5",
+ "browserslist": "^4.21.10",
+ "chrome-trace-event": "^1.0.2",
+ "enhanced-resolve": "^5.17.1",
+ "es-module-lexer": "^1.2.1",
+ "eslint-scope": "5.1.1",
+ "events": "^3.2.0",
+ "glob-to-regexp": "^0.4.1",
+ "graceful-fs": "^4.2.11",
+ "json-parse-even-better-errors": "^2.3.1",
+ "loader-runner": "^4.2.0",
+ "mime-types": "^2.1.27",
+ "neo-async": "^2.6.2",
+ "schema-utils": "^3.2.0",
+ "tapable": "^2.1.1",
+ "terser-webpack-plugin": "^5.3.10",
+ "watchpack": "^2.4.1",
+ "webpack-sources": "^3.2.3"
+ },
+ "dependencies": {
+ "glob-to-regexp": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz",
+ "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw=="
+ },
+ "mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="
+ },
+ "mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "requires": {
+ "mime-db": "1.52.0"
+ }
+ },
+ "schema-utils": {
+ "version": "3.2.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-3.2.0.tgz",
+ "integrity": "sha512-0zTyLGyDJYd/MBxG1AhJkKa6fpEBds4OQO2ut0w7OYG+ZGhGea09lijvzsqegYSik88zc7cUtIlnnO+/BvD6gQ==",
+ "requires": {
+ "@types/json-schema": "^7.0.8",
+ "ajv": "^6.12.5",
+ "ajv-keywords": "^3.5.2"
+ }
+ }
+ }
+ },
+ "webpack-bundle-analyzer": {
+ "version": "4.9.0",
+ "resolved": "https://registry.npmjs.org/webpack-bundle-analyzer/-/webpack-bundle-analyzer-4.9.0.tgz",
+ "integrity": "sha512-+bXGmO1LyiNx0i9enBu3H8mv42sj/BJWhZNFwjz92tVnBa9J3JMGo2an2IXlEleoDOPn/Hofl5hr/xCpObUDtw==",
+ "requires": {
+ "@discoveryjs/json-ext": "0.5.7",
+ "acorn": "^8.0.4",
+ "acorn-walk": "^8.0.0",
+ "chalk": "^4.1.0",
+ "commander": "^7.2.0",
+ "gzip-size": "^6.0.0",
+ "lodash": "^4.17.20",
+ "opener": "^1.5.2",
+ "sirv": "^1.0.7",
+ "ws": "^7.3.1"
+ },
+ "dependencies": {
+ "commander": {
+ "version": "7.2.0",
+ "resolved": "https://registry.npmjs.org/commander/-/commander-7.2.0.tgz",
+ "integrity": "sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw=="
+ }
+ }
+ },
+ "webpack-dev-middleware": {
+ "version": "5.3.4",
+ "resolved": "https://registry.npmjs.org/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz",
+ "integrity": "sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q==",
+ "requires": {
+ "colorette": "^2.0.10",
+ "memfs": "^3.4.3",
+ "mime-types": "^2.1.31",
+ "range-parser": "^1.2.1",
+ "schema-utils": "^4.0.0"
+ },
+ "dependencies": {
+ "ajv": {
+ "version": "8.12.0",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
+ "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
+ "requires": {
+ "fast-deep-equal": "^3.1.1",
+ "json-schema-traverse": "^1.0.0",
+ "require-from-string": "^2.0.2",
+ "uri-js": "^4.2.2"
+ }
+ },
+ "ajv-keywords": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz",
+ "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==",
+ "requires": {
+ "fast-deep-equal": "^3.1.3"
+ }
+ },
+ "json-schema-traverse": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
+ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
+ },
+ "mime-db": {
+ "version": "1.52.0",
+ "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz",
+ "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="
+ },
+ "mime-types": {
+ "version": "2.1.35",
+ "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz",
+ "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==",
+ "requires": {
+ "mime-db": "1.52.0"
+ }
+ },
+ "range-parser": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/range-parser/-/range-parser-1.2.1.tgz",
+ "integrity": "sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg=="
+ },
+ "schema-utils": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz",
+ "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==",
+ "requires": {
+ "@types/json-schema": "^7.0.9",
+ "ajv": "^8.9.0",
+ "ajv-formats": "^2.1.1",
+ "ajv-keywords": "^5.1.0"
+ }
+ }
+ }
+ },
+ "webpack-dev-server": {
+ "version": "4.15.0",
+ "resolved": "https://registry.npmjs.org/webpack-dev-server/-/webpack-dev-server-4.15.0.tgz",
+ "integrity": "sha512-HmNB5QeSl1KpulTBQ8UT4FPrByYyaLxpJoQ0+s7EvUrMc16m0ZS1sgb1XGqzmgCPk0c9y+aaXxn11tbLzuM7NQ==",
+ "requires": {
+ "@types/bonjour": "^3.5.9",
+ "@types/connect-history-api-fallback": "^1.3.5",
+ "@types/express": "^4.17.13",
+ "@types/serve-index": "^1.9.1",
+ "@types/serve-static": "^1.13.10",
+ "@types/sockjs": "^0.3.33",
+ "@types/ws": "^8.5.1",
+ "ansi-html-community": "^0.0.8",
+ "bonjour-service": "^1.0.11",
+ "chokidar": "^3.5.3",
+ "colorette": "^2.0.10",
+ "compression": "^1.7.4",
+ "connect-history-api-fallback": "^2.0.0",
+ "default-gateway": "^6.0.3",
+ "express": "^4.17.3",
+ "graceful-fs": "^4.2.6",
+ "html-entities": "^2.3.2",
+ "http-proxy-middleware": "^2.0.3",
+ "ipaddr.js": "^2.0.1",
+ "launch-editor": "^2.6.0",
+ "open": "^8.0.9",
+ "p-retry": "^4.5.0",
+ "rimraf": "^3.0.2",
+ "schema-utils": "^4.0.0",
+ "selfsigned": "^2.1.1",
+ "serve-index": "^1.9.1",
+ "sockjs": "^0.3.24",
+ "spdy": "^4.0.2",
+ "webpack-dev-middleware": "^5.3.1",
+ "ws": "^8.13.0"
+ },
+ "dependencies": {
+ "ajv": {
+ "version": "8.12.0",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-8.12.0.tgz",
+ "integrity": "sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==",
+ "requires": {
+ "fast-deep-equal": "^3.1.1",
+ "json-schema-traverse": "^1.0.0",
+ "require-from-string": "^2.0.2",
+ "uri-js": "^4.2.2"
+ }
+ },
+ "ajv-keywords": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/ajv-keywords/-/ajv-keywords-5.1.0.tgz",
+ "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==",
+ "requires": {
+ "fast-deep-equal": "^3.1.3"
+ }
+ },
+ "json-schema-traverse": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz",
+ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="
+ },
+ "schema-utils": {
+ "version": "4.1.0",
+ "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-4.1.0.tgz",
+ "integrity": "sha512-Jw+GZVbP5IggB2WAn6UHI02LBwGmsIeYN/lNbSMZyDziQ7jmtAUrqKqDja+W89YHVs+KL/3IkIMltAklqB1vAw==",
+ "requires": {
+ "@types/json-schema": "^7.0.9",
+ "ajv": "^8.9.0",
+ "ajv-formats": "^2.1.1",
+ "ajv-keywords": "^5.1.0"
+ }
+ },
+ "ws": {
+ "version": "8.18.0",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-8.18.0.tgz",
+ "integrity": "sha512-8VbfWfHLbbwu3+N6OKsOMpBdT4kXPDDB9cJk2bJ6mh9ucxdlnNvH1e+roYkKmN9Nxw2yjz7VzeO9oOz2zJ04Pw==",
+ "requires": {}
+ }
+ }
+ },
+ "webpack-merge": {
+ "version": "5.9.0",
+ "resolved": "https://registry.npmjs.org/webpack-merge/-/webpack-merge-5.9.0.tgz",
+ "integrity": "sha512-6NbRQw4+Sy50vYNTw7EyOn41OZItPiXB8GNv3INSoe3PSFaHJEz3SHTrYVaRm2LilNGnFUzh0FAwqPEmU/CwDg==",
+ "requires": {
+ "clone-deep": "^4.0.1",
+ "wildcard": "^2.0.0"
+ }
+ },
+ "webpack-sources": {
+ "version": "3.2.3",
+ "resolved": "https://registry.npmjs.org/webpack-sources/-/webpack-sources-3.2.3.tgz",
+ "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w=="
+ },
+ "webpackbar": {
+ "version": "5.0.2",
+ "resolved": "https://registry.npmjs.org/webpackbar/-/webpackbar-5.0.2.tgz",
+ "integrity": "sha512-BmFJo7veBDgQzfWXl/wwYXr/VFus0614qZ8i9znqcl9fnEdiVkdbi0TedLQ6xAK92HZHDJ0QmyQ0fmuZPAgCYQ==",
+ "requires": {
+ "chalk": "^4.1.0",
+ "consola": "^2.15.3",
+ "pretty-time": "^1.1.0",
+ "std-env": "^3.0.1"
+ }
+ },
+ "websocket-driver": {
+ "version": "0.7.4",
+ "resolved": "https://registry.npmjs.org/websocket-driver/-/websocket-driver-0.7.4.tgz",
+ "integrity": "sha512-b17KeDIQVjvb0ssuSDF2cYXSg2iztliJ4B9WdsuB6J952qCPKmnVq4DyW5motImXHDC1cBT/1UezrJVsKw5zjg==",
+ "requires": {
+ "http-parser-js": ">=0.5.1",
+ "safe-buffer": ">=5.1.0",
+ "websocket-extensions": ">=0.1.1"
+ }
+ },
+ "websocket-extensions": {
+ "version": "0.1.4",
+ "resolved": "https://registry.npmjs.org/websocket-extensions/-/websocket-extensions-0.1.4.tgz",
+ "integrity": "sha512-OqedPIGOfsDlo31UNwYbCFMSaO9m9G/0faIHj5/dZFDMFqPTcx6UwqyOy3COEaEOg/9VsGIpdqn62W5KhoKSpg=="
+ },
+ "which": {
+ "version": "2.0.2",
+ "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
+ "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
+ "requires": {
+ "isexe": "^2.0.0"
+ }
+ },
+ "widest-line": {
+ "version": "4.0.1",
+ "resolved": "https://registry.npmjs.org/widest-line/-/widest-line-4.0.1.tgz",
+ "integrity": "sha512-o0cyEG0e8GPzT4iGHphIOh0cJOV8fivsXxddQasHPHfoZf1ZexrfeA21w2NaEN1RHE+fXlfISmOE8R9N3u3Qig==",
+ "requires": {
+ "string-width": "^5.0.1"
+ }
+ },
+ "wildcard": {
+ "version": "2.0.1",
+ "resolved": "https://registry.npmjs.org/wildcard/-/wildcard-2.0.1.tgz",
+ "integrity": "sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ=="
+ },
+ "wordwrap": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/wordwrap/-/wordwrap-1.0.0.tgz",
+ "integrity": "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q=="
+ },
+ "wrap-ansi": {
+ "version": "8.1.0",
+ "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz",
+ "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==",
+ "requires": {
+ "ansi-styles": "^6.1.0",
+ "string-width": "^5.0.1",
+ "strip-ansi": "^7.0.1"
+ },
+ "dependencies": {
+ "ansi-regex": {
+ "version": "6.0.1",
+ "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.0.1.tgz",
+ "integrity": "sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA=="
+ },
+ "ansi-styles": {
+ "version": "6.2.1",
+ "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz",
+ "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug=="
+ },
+ "strip-ansi": {
+ "version": "7.1.0",
+ "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.0.tgz",
+ "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==",
+ "requires": {
+ "ansi-regex": "^6.0.1"
+ }
+ }
+ }
+ },
+ "wrappy": {
+ "version": "1.0.2",
+ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
+ "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="
+ },
+ "write-file-atomic": {
+ "version": "3.0.3",
+ "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz",
+ "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==",
+ "requires": {
+ "imurmurhash": "^0.1.4",
+ "is-typedarray": "^1.0.0",
+ "signal-exit": "^3.0.2",
+ "typedarray-to-buffer": "^3.1.5"
+ }
+ },
+ "ws": {
+ "version": "7.5.10",
+ "resolved": "https://registry.npmjs.org/ws/-/ws-7.5.10.tgz",
+ "integrity": "sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==",
+ "requires": {}
+ },
+ "xdg-basedir": {
+ "version": "4.0.0",
+ "resolved": "https://registry.npmjs.org/xdg-basedir/-/xdg-basedir-4.0.0.tgz",
+ "integrity": "sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q=="
+ },
+ "xtend": {
+ "version": "4.0.2",
+ "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz",
+ "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ=="
+ },
+ "yallist": {
+ "version": "3.1.1",
+ "resolved": "https://registry.npmjs.org/yallist/-/yallist-3.1.1.tgz",
+ "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g=="
+ },
+ "yaml": {
+ "version": "1.10.2",
+ "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz",
+ "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg=="
+ },
+ "yocto-queue": {
+ "version": "0.1.0",
+ "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz",
+ "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q=="
+ },
+ "yup": {
+ "version": "0.32.11",
+ "resolved": "https://registry.npmjs.org/yup/-/yup-0.32.11.tgz",
+ "integrity": "sha512-Z2Fe1bn+eLstG8DRR6FTavGD+MeAwyfmouhHsIUgaADz8jvFKbO/fXc2trJKZg+5EBjh4gGm3iU/t3onKlXHIg==",
+ "requires": {
+ "@babel/runtime": "^7.15.4",
+ "@types/lodash": "^4.14.175",
+ "lodash": "^4.17.21",
+ "lodash-es": "^4.17.21",
+ "nanoclone": "^0.2.1",
+ "property-expr": "^2.0.4",
+ "toposort": "^2.0.2"
+ }
+ },
+ "zwitch": {
+ "version": "1.0.5",
+ "resolved": "https://registry.npmjs.org/zwitch/-/zwitch-1.0.5.tgz",
+ "integrity": "sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw=="
+ }
+ }
+}
diff --git a/documentation/package.json b/documentation/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..0ad8738bed36b4bae7089d22a762d6247dfdd133
--- /dev/null
+++ b/documentation/package.json
@@ -0,0 +1,14 @@
+{
+ "name": "h2o-llmstudio-docs",
+ "version": "0.0.0",
+ "scripts": {
+ "start": "makersaurus start",
+ "build": "makersaurus build",
+ "serve": "makersaurus serve",
+ "deploy": "makersaurus deploy"
+ },
+ "dependencies": {
+ "@h2oai/makersaurus": "^0.8.4",
+ "docusaurus-plugin-includes": "^1.1.4"
+ }
+}
diff --git a/documentation/sidebars.js b/documentation/sidebars.js
new file mode 100644
index 0000000000000000000000000000000000000000..41afe8a555bc40f4d08acfdd55de39accd8bda73
--- /dev/null
+++ b/documentation/sidebars.js
@@ -0,0 +1,56 @@
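+// Docusaurus sidebar definition: plain strings are doc IDs, objects define
+// nested categories.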
+module.exports = {
+ defaultSidebar: [
+ "index",
+ {
+ "Get started": [
+ "get-started/what-is-h2o-llm-studio",
+ "get-started/set-up-llm-studio",
+ "get-started/llm-studio-performance",
+ "get-started/llm-studio-flow",
+ "get-started/core-features",
+ "get-started/videos",
+ ],
+ },
+ {
+ type: "category",
+ label: "Tutorials",
+ items: [
+ "tutorials/tutorial-1a/tutorial-1a",
+ "tutorials/tutorial-2a/tutorial-2a",
+ ],
+ },
+ "concepts",
+ {
+ type: "category",
+ label: "Guide",
+ items: [
+ {
+ type: "category",
+ label: "Datasets",
+ items: [
+ "guide/datasets/data-connectors-format",
+ "guide/datasets/import-dataset",
+ "guide/datasets/view-dataset",
+ "guide/datasets/merge-datasets",
+ ],
+ },
+ {
+ type: "category",
+ label: "Experiments",
+ items: [
+ "guide/experiments/supported-problem-types",
+ "guide/experiments/experiment-settings",
+ "guide/experiments/create-an-experiment",
+ "guide/experiments/view-an-experiment",
+ "guide/experiments/compare-experiments",
+ "guide/experiments/export-trained-model",
+ "guide/experiments/import-to-h2ogpt",
+ "guide/experiments/evaluate-model-using-llm",
+ ],
+ },
+ ],
+ },
+ "faqs",
+ ],
+};
+
diff --git a/examples/example_oasst2.yaml b/examples/example_oasst2.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0840e0baeb2f324326f897a536973fcd4ce7e902
--- /dev/null
+++ b/examples/example_oasst2.yaml
@@ -0,0 +1,105 @@
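+# Example experiment configuration: causal language modeling on the OASST2
+# dataset with a Danube2 1.8B backbone and LoRA fine-tuning.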
+architecture:
+ backbone_dtype: int4
+ gradient_checkpointing: true
+ intermediate_dropout: 0.0
+ pretrained: true
+ pretrained_weights: ''
+augmentation:
+ random_parent_probability: 0.0
+ skip_parent_probability: 0.0
+ token_mask_probability: 0.0
+dataset:
+ add_eos_token_to_answer: true
+ add_eos_token_to_prompt: true
+ add_eos_token_to_system: true
+ answer_column: output
+ chatbot_author: H2O.ai
+ chatbot_name: h2oGPT
+ data_sample: 0.01
+ data_sample_choice:
+ - Train
+ - Validation
+ limit_chained_samples: false
+ mask_prompt_labels: true
+ only_last_answer: false
+ parent_id_column: None
+ personalize: false
+ prompt_column:
+ - instruction
+ system_column: None
+ text_answer_separator: <|answer|>
+ text_prompt_start: <|prompt|>
+ text_system_start: <|system|>
+ train_dataframe: examples/data_oasst2/train_full.csv
+ validation_dataframe: None
+ validation_size: 0.01
+ validation_strategy: automatic
+environment:
+ compile_model: false
+ deepspeed_allgather_bucket_size: 1000000
+ deepspeed_method: ZeRO2
+ deepspeed_reduce_bucket_size: 1000000
+ deepspeed_stage3_param_persistence_threshold: 1000000
+ deepspeed_stage3_prefetch_bucket_size: 1000000
+ find_unused_parameters: false
+ gpus:
+ - '0'
+ huggingface_branch: main
+ mixed_precision: true
+ mixed_precision_dtype: bfloat16
+ number_of_workers: 8
+ seed: -1
+ trust_remote_code: true
+ use_deepspeed: false
+experiment_name: example_oasst2
+llm_backbone: h2oai/h2o-danube2-1.8b-base
+logging:
+ logger: None
+ neptune_project: test_org/test_project
+output_directory: examples/output_oasst2
+prediction:
+ batch_size_inference: 0
+ do_sample: false
+ max_length_inference: 256
+ max_time: 0.0
+ metric: Perplexity
+ metric_gpt_model: gpt-3.5-turbo-0301
+ metric_gpt_template: general
+ min_length_inference: 1
+ num_beams: 1
+ num_history: 4
+ repetition_penalty: 1.2
+ stop_tokens: ''
+ temperature: 0.0
+ top_k: 0
+ top_p: 1.0
+problem_type: text_causal_language_modeling
+tokenizer:
+ add_prompt_answer_tokens: false
+ max_length: 512
+ padding_quantile: 1.0
+ tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
+training:
+ batch_size: 2
+ differential_learning_rate: 1.0e-05
+ differential_learning_rate_layers: []
+ drop_last_batch: true
+ epochs: 1
+ evaluate_before_training: false
+ evaluation_epochs: 1.0
+ grad_accumulation: 1
+ gradient_clip: 0.0
+ learning_rate: 0.0001
+ lora: true
+ use_dora: false
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_r: 4
+ lora_target_modules: ''
+ loss_function: TokenAveragedCrossEntropy
+ optimizer: AdamW
+ save_checkpoint: "last"
+ schedule: Cosine
+ train_validation_data: false
+ warmup_epochs: 0.0
+ weight_decay: 0.0
diff --git a/llm_studio/__init__.py b/llm_studio/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llm_studio/app.py b/llm_studio/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..59227f4a0f82ab1a5231bb73e79ab416865da7be
--- /dev/null
+++ b/llm_studio/app.py
@@ -0,0 +1,43 @@
+import logging
+import os
+
+from llm_studio.app_utils.sections.chat_update import is_app_blocked_while_streaming
+from llm_studio.src.utils.logging_utils import initialize_logging
+
+os.environ["MKL_THREADING_LAYER"] = "GNU"
+
+from h2o_wave import Q, app, copy_expando, main, ui # noqa: F401
+
+from llm_studio.app_utils.handlers import handle
+from llm_studio.app_utils.initializers import initialize_app, initialize_client
+from llm_studio.app_utils.sections.common import heap_redact, interface
+
+logger = logging.getLogger(__name__)
+
+
+def on_startup() -> None:
+ initialize_logging()
+ logger.info("Starting H2O LLM STUDIO")
+
+
+@app("/", on_startup=on_startup)
+async def serve(q: Q) -> None:
+ """Serving function."""
+
+ # Chat is still being streamed but the user clicked another button.
+ # Wait until streaming has completed.
+ if await is_app_blocked_while_streaming(q):
+ return
+
+ await initialize_app(q)
+
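+ # Copy submitted form values from q.args into the per-client state.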
+ copy_expando(q.args, q.client)
+
+ await initialize_client(q)
+ await handle(q)
+
+ if not q.args["experiment/display/chat/chatbot"]:
+ await interface(q)
+
+ await heap_redact(q)
+ await q.page.save()
diff --git a/llm_studio/app_utils/__init__.py b/llm_studio/app_utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llm_studio/app_utils/cards.py b/llm_studio/app_utils/cards.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5d47dd8fc3abdfc33d9aed8b4f042ff5195aac5
--- /dev/null
+++ b/llm_studio/app_utils/cards.py
@@ -0,0 +1,333 @@
+from typing import List, Optional
+
+from h2o_wave import ui
+
+
+def header_zone() -> ui.Zone:
+ """Returns the header zone"""
+
+ zone = ui.zone(
+ "header",
+ size="80px",
+ )
+
+ return zone
+
+
+def navigation_zone() -> ui.Zone:
+ """Returns the navigation zone"""
+
+ zone = ui.zone(
+ "navigation",
+ size="max(13%, 180px)",
+ zones=[
+ ui.zone(name="nav", size="100%"),
+ ],
+ )
+
+ return zone
+
+
+def card_zones(mode: Optional[str] = "full") -> List[ui.Zone]:
+ """Specifies for certain modes the layout zones
+
+ Args:
+ mode: layout mode for which to build the zones
+
+ Returns:
+ List of zones
+
+ """
+
+ if mode in ["full", "experiment_start"]:
+ zones = [
+ header_zone(),
+ ui.zone(
+ "body",
+ size="1",
+ direction=ui.ZoneDirection.ROW,
+ zones=[
+ navigation_zone(),
+ ui.zone(
+ "content_all",
+ direction=ui.ZoneDirection.COLUMN,
+ size="min(calc(100% - 180px), 87%)",
+ zones=[
+ ui.zone(
+ "content",
+ size="calc(100vh - 160px)",
+ direction=ui.ZoneDirection.ROW,
+ ),
+ ui.zone("footer", size="80px"),
+ ],
+ ),
+ ],
+ ),
+ ]
+ elif mode == "error":
+ zones = [
+ header_zone(),
+ ui.zone(
+ "body",
+ size="1",
+ direction=ui.ZoneDirection.ROW,
+ zones=[
+ navigation_zone(),
+ ui.zone(
+ "content_all",
+ direction=ui.ZoneDirection.COLUMN,
+ size="min(calc(100% - 180px), 87%)",
+ zones=[
+ ui.zone(
+ "content",
+ size="calc(100vh - 80px)",
+ direction=ui.ZoneDirection.ROW,
+ ),
+ ],
+ ),
+ ],
+ ),
+ ]
+
+ elif mode == "home":
+ zones = [
+ header_zone(),
+ ui.zone(
+ "body",
+ size="calc(100vh - 80px)",
+ direction=ui.ZoneDirection.ROW,
+ zones=[
+ navigation_zone(),
+ ui.zone(
+ "content_all",
+ direction=ui.ZoneDirection.COLUMN,
+ size="min(calc(100% - 180px), 87%)",
+ zones=[
+ ui.zone(
+ "content",
+ size="370px",
+ direction=ui.ZoneDirection.ROW,
+ ),
+ ui.zone(
+ "expander",
+ size="0",
+ direction=ui.ZoneDirection.ROW,
+ ),
+ ui.zone(
+ "tables",
+ size="0",
+ direction=ui.ZoneDirection.ROW,
+ zones=[
+ ui.zone(name="datasets", size="40%"),
+ ui.zone(name="experiments", size="60%"),
+ ],
+ ),
+ ],
+ ),
+ ],
+ ),
+ ]
+
+ elif mode in [
+ "experiment/display/charts",
+ "experiment/compare/charts",
+ ]:
+ zones = [
+ header_zone(),
+ ui.zone(
+ "body",
+ size="1",
+ direction=ui.ZoneDirection.ROW,
+ zones=[
+ navigation_zone(),
+ ui.zone(
+ "content_all",
+ direction=ui.ZoneDirection.COLUMN,
+ size="min(calc(100% - 180px), 87%)",
+ zones=[
+ ui.zone("nav2", size="62px"),
+ ui.zone(
+ "first_row",
+ size="max(calc((100vh - 222px)*0.5), 300px)",
+ direction=ui.ZoneDirection.ROW,
+ zones=[
+ ui.zone("top_left", size="50%"),
+ ui.zone("top_right", size="50%"),
+ ],
+ ),
+ ui.zone(
+ "second_row",
+ size="max(calc((100vh - 222px)*0.5), 300px)",
+ direction=ui.ZoneDirection.ROW,
+ zones=[
+ ui.zone("bottom_left", size="50%"),
+ ui.zone("bottom_right", size="50%"),
+ ],
+ ),
+ ui.zone("footer", size="80px"),
+ ],
+ ),
+ ],
+ ),
+ ]
+
+ elif mode in [
+ "experiment/display/chat",
+ ]:
+ zones = [
+ header_zone(),
+ ui.zone(
+ "body",
+ size="1",
+ direction=ui.ZoneDirection.ROW,
+ zones=[
+ navigation_zone(),
+ ui.zone(
+ "content_all",
+ direction=ui.ZoneDirection.COLUMN,
+ size="min(calc(100% - 180px), 87%)",
+ zones=[
+ ui.zone("nav2", size="62px"),
+ ui.zone(
+ "first",
+ size="calc((100vh - 222px)*0.65)",
+ direction=ui.ZoneDirection.ROW,
+ ),
+ ui.zone(
+ "second",
+ size="calc((100vh - 222px)*0.35)",
+ direction=ui.ZoneDirection.ROW,
+ ),
+ ui.zone("footer", size="80px"),
+ ],
+ ),
+ ],
+ ),
+ ]
+ elif mode in ["experiment/display/summary"]:
+ zones = [
+ header_zone(),
+ ui.zone(
+ "body",
+ size="1",
+ direction=ui.ZoneDirection.ROW,
+ zones=[
+ navigation_zone(),
+ ui.zone(
+ "content_all",
+ direction=ui.ZoneDirection.COLUMN,
+ size="min(calc(100% - 180px), 87%)",
+ zones=[
+ ui.zone("nav2", size="62px"),
+ ui.zone(
+ "first",
+ size="235px",
+ direction=ui.ZoneDirection.ROW,
+ ),
+ ui.zone(
+ "second",
+ size="235px",
+ direction=ui.ZoneDirection.ROW,
+ ),
+ ui.zone(
+ "third",
+ size="max(calc(100vh - 692px), 400px)",
+ direction=ui.ZoneDirection.ROW,
+ ),
+ ui.zone("footer", size="80px"),
+ ],
+ ),
+ ],
+ ),
+ ]
+ elif mode in ["dataset/display/statistics"]:
+ zones = [
+ header_zone(),
+ ui.zone(
+ "body",
+ size="1",
+ direction=ui.ZoneDirection.ROW,
+ zones=[
+ navigation_zone(),
+ ui.zone(
+ "content_all",
+ direction=ui.ZoneDirection.COLUMN,
+ size="min(calc(100% - 180px), 87%)",
+ zones=[
+ ui.zone("nav2", size="62px"),
+ ui.zone(
+ "first",
+ size="max(calc(0.33*(100vh - 222px)), 400px)",
+ direction=ui.ZoneDirection.ROW,
+ ),
+ ui.zone(
+ "second",
+ size="max(calc(0.33*(100vh - 222px)), 400px)",
+ direction=ui.ZoneDirection.ROW,
+ ),
+ ui.zone(
+ "third",
+ size="max(calc(0.34*(100vh - 222px)), 200px)",
+ direction=ui.ZoneDirection.ROW,
+ ),
+ ui.zone("footer", size="80px"),
+ ],
+ ),
+ ],
+ ),
+ ]
+ elif mode in [
+ "experiment/compare/config",
+ "experiment/display/train_data_insights",
+ "experiment/display/validation_prediction_insights",
+ "experiment/display/config",
+ "experiment/display/logs",
+ "dataset/display/data",
+ "dataset/display/visualization",
+ "dataset/display/summary",
+ ]:
+ zones = [
+ header_zone(),
+ ui.zone(
+ "body",
+ size="1",
+ direction=ui.ZoneDirection.ROW,
+ zones=[
+ navigation_zone(),
+ ui.zone(
+ "content_all",
+ direction=ui.ZoneDirection.COLUMN,
+ size="min(calc(100% - 180px), 87%)",
+ zones=[
+ ui.zone("nav2", size="62px"),
+ ui.zone(
+ "first",
+ size="calc(100vh - 222px)",
+ direction=ui.ZoneDirection.ROW,
+ ),
+ ui.zone("footer", size="80px"),
+ ],
+ ),
+ ],
+ ),
+ ]
+ else:
+ raise ValueError(f"Unknown mode: {mode}")
+ return zones
+
+
+def card_wait(msg: str, box: str) -> ui.FormCard:
+ """Return a form card for displaying waiting status
+
+ Args:
+ msg: message to display
+ box: box for card
+
+ Returns:
+ Form card
+
+ """
+
+ card = ui.form_card(box=box, items=[ui.progress(label=msg)])
+
+ return card
diff --git a/llm_studio/app_utils/config.py b/llm_studio/app_utils/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..e37c817fa5271eb4b73a114ff4cb3c63c8ad2388
--- /dev/null
+++ b/llm_studio/app_utils/config.py
@@ -0,0 +1,185 @@
+import os
+import socket
+from types import SimpleNamespace
+
+import toml
+from huggingface_hub.constants import _is_true
+
+toml_root_dir = os.path.abspath(
+ os.path.join(os.path.dirname(os.path.abspath(__file__)), "../..")
+)
+app_toml_filename = os.path.join(toml_root_dir, "pyproject.toml")
+
+toml_loaded = toml.load(app_toml_filename)
+
+version = toml_loaded["project"]["version"]
+
+
+def get_size(x):
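+ # Parses a human-readable size string such as "2GB" or "512MB" into bytes;
+ # unparsable values fall back to 2**31 (2 GiB).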
+ try:
+ if x.endswith("TB"):
+ return float(x.replace("TB", "")) * (2**40)
+ if x.endswith("GB"):
+ return float(x.replace("GB", "")) * (2**30)
+ if x.endswith("MB"):
+ return float(x.replace("MB", "")) * (2**20)
+ if x.endswith("KB"):
+ return float(x.replace("KB", "")) * (2**10)
+ if x.endswith("B"):
+ return float(x.replace("B", ""))
+ return 2**31
+ except Exception:
+ return 2**31
+
+
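+# Determine the machine's primary outbound IP by "connecting" a UDP socket to
+# a public address (no packets are sent for UDP connect); fall back to
+# localhost on error.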
+try:
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ s.connect(("8.8.8.8", 80))
+ host = s.getsockname()[0]
+ s.close()
+except OSError:
+ host = "localhost"
+
+port = "10101"
+url = f"http://{host}:{port}/"
+
+
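+# The default model lists below can be overridden with comma-separated
+# environment variables.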
+if os.getenv("H2O_LLM_STUDIO_DEFAULT_LM_MODELS"):
+ default_causal_language_models = [
+ mdl.strip() for mdl in os.getenv("H2O_LLM_STUDIO_DEFAULT_LM_MODELS").split(",")
+ ]
+else:
+ default_causal_language_models = [
+ "h2oai/h2o-danube3-500m-base",
+ "h2oai/h2o-danube3-500m-chat",
+ "h2oai/h2o-danube3-4b-base",
+ "h2oai/h2o-danube3-4b-chat",
+ "h2oai/h2o-danube2-1.8b-base",
+ "h2oai/h2o-danube2-1.8b-chat",
+ "meta-llama/Llama-3.2-1B-Instruct",
+ "meta-llama/Llama-3.2-3B-Instruct",
+ "meta-llama/Meta-Llama-3.1-8B-Instruct",
+ "meta-llama/Meta-Llama-3.1-70B-Instruct",
+ "mistralai/Mistral-7B-v0.3",
+ "mistralai/Mistral-7B-Instruct-v0.2",
+ "google/gemma-2-2b-it",
+ "google/gemma-2-9b-it",
+ "microsoft/Phi-3-mini-4k-instruct",
+ "microsoft/Phi-3-medium-4k-instruct",
+ "Qwen/Qwen2-7B-Instruct",
+ "Qwen/Qwen2-72B-Instruct",
+ ]
+
+if os.getenv("H2O_LLM_STUDIO_DEFAULT_S2S_MODELS"):
+ default_sequence_to_sequence_models = [
+ mdl.strip() for mdl in os.getenv("H2O_LLM_STUDIO_DEFAULT_S2S_MODELS").split(",")
+ ]
+else:
+ default_sequence_to_sequence_models = [
+ "t5-small",
+ "t5-base",
+ "t5-large",
+ "google/flan-t5-small",
+ "google/flan-t5-base",
+ "google/flan-t5-large",
+ "google/flan-ul2",
+ ]
+
+default_cfg = {
+ "url": url,
+ "name": "H2O LLM Studio",
+ "version": version,
+ "github": "https://github.com/h2oai/h2o-llmstudio",
+ "min_experiment_disk_space": get_size(
+ os.getenv("MIN_DISK_SPACE_FOR_EXPERIMENTS", "2GB")
+ ),
+ "allowed_file_extensions": os.getenv(
+ "ALLOWED_FILE_EXTENSIONS", ".zip,.csv,.pq,.parquet"
+ ).split(","),
+ "llm_studio_workdir": f"{os.getenv('H2O_LLM_STUDIO_WORKDIR', os.getcwd())}",
+ "heap_mode": os.getenv("H2O_LLM_STUDIO_ENABLE_HEAP", "False") == "True",
+ "data_folder": "data/",
+ "output_folder": "output/",
+ "cfg_file": "text_causal_language_modeling_config",
+ "start_page": "home",
+ "problem_types": [
+ "text_causal_language_modeling_config",
+ "text_causal_classification_modeling_config",
+ "text_causal_regression_modeling_config",
+ "text_sequence_to_sequence_modeling_config",
+ "text_dpo_modeling_config",
+ ],
+ "default_causal_language_models": default_causal_language_models,
+ "default_sequence_to_sequence_models": default_sequence_to_sequence_models,
+ "problem_categories": ["text"],
+ "dataset_keys": [
+ "train_dataframe",
+ "validation_dataframe",
+ "system_column",
+ "prompt_column",
+ "rejected_prompt_column",
+ "answer_column",
+ "rejected_answer_column",
+ "parent_id_column",
+ "id_column",
+ ],
+ "dataset_trigger_keys": [
+ "train_dataframe",
+ "validation_dataframe",
+ "parent_id_column",
+ ],
+ "dataset_extra_keys": [
+ "validation_strategy",
+ "data_sample",
+ "data_sample_choice",
+ ],
+ "dataset_folder_keys": [
+ "train_dataframe",
+ "validation_dataframe",
+ ],
+ "user_settings": {
+ "credential_saver": ".env File",
+ "default_aws_bucket_name": f"{os.getenv('AWS_BUCKET', 'bucket_name')}",
+ "default_aws_access_key": os.getenv("AWS_ACCESS_KEY_ID", ""),
+ "default_aws_secret_key": os.getenv("AWS_SECRET_ACCESS_KEY", ""),
+ "default_azure_conn_string": "",
+ "default_azure_container": "",
+ "default_kaggle_username": "",
+ "default_kaggle_secret_key": "",
+ "set_max_epochs": 50,
+ "set_max_batch_size": 256,
+ "set_max_num_classes": 100,
+ "set_max_max_length": 16384,
+ "set_max_gradient_clip": 10,
+ "set_max_lora_r": 256,
+ "set_max_lora_alpha": 256,
+ "gpu_used_for_download": "cuda:0",
+ "gpu_used_for_chat": 1,
+ "default_number_of_workers": 8,
+ "default_logger": "None",
+ "default_neptune_project": os.getenv("NEPTUNE_PROJECT", ""),
+ "default_neptune_api_token": os.getenv("NEPTUNE_API_TOKEN", ""),
+ "default_wandb_api_token": os.getenv("WANDB_API_KEY", ""),
+ "default_wandb_project": os.getenv("WANDB_PROJECT", ""),
+ "default_wandb_entity": os.getenv("WANDB_ENTITY", ""),
+ "default_huggingface_api_token": os.getenv("HF_TOKEN", ""),
+ "default_hf_hub_enable_hf_transfer": _is_true(
+ os.getenv("HF_HUB_ENABLE_HF_TRANSFER", "1")
+ ),
+ "default_openai_azure": os.getenv("OPENAI_API_TYPE", "open_ai") == "azure",
+ "default_openai_api_token": os.getenv("OPENAI_API_KEY", ""),
+ "default_openai_api_base": os.getenv(
+ "OPENAI_API_BASE", "https://example-endpoint.openai.azure.com"
+ ),
+ "default_openai_api_deployment_id": os.getenv(
+ "OPENAI_API_DEPLOYMENT_ID", "deployment-name"
+ ),
+ "default_openai_api_version": os.getenv("OPENAI_API_VERSION", "2023-05-15"),
+ "default_gpt_eval_max": os.getenv("GPT_EVAL_MAX", 100),
+ "default_safe_serialization": True,
+ "delete_dialogs": True,
+ "chart_plot_max_points": 1000,
+ },
+}
+
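+# Expose the settings via attribute access (default_cfg.name) rather than
+# dictionary lookups throughout the app.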
+default_cfg = SimpleNamespace(**default_cfg)
diff --git a/llm_studio/app_utils/db.py b/llm_studio/app_utils/db.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb41e9622b5bdebef21b53ca376d0ea5493c96c8
--- /dev/null
+++ b/llm_studio/app_utils/db.py
@@ -0,0 +1,159 @@
+from typing import Optional
+
+import pandas as pd
+from pandas.core.frame import DataFrame
+from sqlalchemy import Integer, String, create_engine
+from sqlalchemy.orm import DeclarativeBase, Mapped, Session, mapped_column
+
+
+class Base(DeclarativeBase):
+ pass
+
+
+class Dataset(Base):
+ """Dataset table"""
+
+ __tablename__ = "datasets"
+
+ id: Mapped[int] = mapped_column("id", Integer, autoincrement=True, primary_key=True)
+ name: Mapped[str] = mapped_column("name", String, unique=True)
+ path: Mapped[str] = mapped_column("path", String)
+ config_file: Mapped[str] = mapped_column("config_file", String)
+ train_rows: Mapped[int] = mapped_column("train_rows", Integer)
+ validation_rows: Mapped[Optional[int]] = mapped_column(
+ "validation_rows", Integer, nullable=True
+ )
+
+
+class Experiment(Base):
+ """Experiment table"""
+
+ __tablename__ = "experiments"
+
+ id: Mapped[int] = mapped_column("id", Integer, primary_key=True)
+ name: Mapped[str] = mapped_column("name", String)
+ mode: Mapped[str] = mapped_column("mode", String)
+ dataset: Mapped[str] = mapped_column("dataset", String)
+ config_file: Mapped[str] = mapped_column("config_file", String)
+ path: Mapped[str] = mapped_column("path", String)
+ seed: Mapped[int] = mapped_column("seed", Integer)
+ process_id: Mapped[int] = mapped_column("process_id", Integer)
+ gpu_list: Mapped[str] = mapped_column("gpu_list", String)
+
+
+class Database:
+ """Class for managing database."""
+
+ def __init__(self, path_db: str) -> None:
+ """Initialize database
+
+ Args:
+ path_db: path to sqlite database file
+ """
+
+ self.__engine__ = create_engine(f"sqlite:///{path_db}")
+ Base.metadata.create_all(self.__engine__)
+ self._session = Session(self.__engine__)
+
+ def add_dataset(self, dataset: Dataset) -> None:
+ """Add a dataset to the table
+
+ Args:
+ dataset: dataset to add
+ """
+ self._session.add(dataset)
+ self._session.commit()
+
+ def delete_dataset(self, id: int) -> None:
+ """Delete a dataset from the table
+
+ Args:
+ id: dataset id to delete
+ """
+
+        dataset = self._session.get(Dataset, int(id))
+ self._session.delete(dataset)
+ self._session.commit()
+
+ def get_dataset(self, id: int) -> Dataset:
+ """Return dataset given an id
+
+ Args:
+ id: dataset id to return
+
+ Returns:
+ Dataset with given id
+ """
+
+        return self._session.get(Dataset, int(id))
+
+ def get_datasets_df(self) -> DataFrame:
+ """Return dataframe containing all datasets
+
+ Returns:
+ All datasets
+ """
+
+ datasets = pd.read_sql(self._session.query(Dataset).statement, self.__engine__)
+ return datasets.sort_values("id", ascending=False)
+
+ def add_experiment(self, experiment: Experiment) -> None:
+ """Add an experiment to the table
+
+ Args:
+ experiment: experiment to add
+ """
+
+ self._session.add(experiment)
+ self._session.commit()
+
+ def delete_experiment(self, id: int) -> None:
+ """Delete an experiment from the table
+
+ Args:
+ id: experiment id to delete
+ """
+
+        experiment = self._session.get(Experiment, int(id))
+ self._session.delete(experiment)
+ self._session.commit()
+
+ def get_experiment(self, id: int) -> Experiment:
+ """Return experiment given an id
+
+ Args:
+ id: experiment id to return
+
+ Returns:
+ Experiment with given id
+ """
+
+        return self._session.get(Experiment, int(id))
+
+ def get_experiments_df(self) -> DataFrame:
+ """Return dataframe containing all experiments
+
+ Returns:
+ All experiments
+ """
+
+ experiments = pd.read_sql(
+ self._session.query(Experiment).statement, self.__engine__
+ )
+ return experiments.sort_values("id", ascending=False)
+
+ def rename_experiment(self, id: int, new_name: str, new_path: str) -> None:
+ """Rename experiment given id and new name
+
+ Args:
+ id: experiment id
+            new_name: new name
+            new_path: new experiment path on disk
+        """
+
+ experiment = self.get_experiment(id)
+ experiment.name = new_name
+ experiment.path = new_path
+ self._session.commit()
+
+ def update(self) -> None:
+ self._session.commit()
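+
+
+# Minimal usage sketch (hypothetical path and values):
+#   db = Database("/tmp/llm_studio.db")
+#   db.add_dataset(
+#       Dataset(name="demo", path="/tmp/demo", config_file="cfg.yaml", train_rows=10)
+#   )
+#   print(db.get_datasets_df())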
diff --git a/llm_studio/app_utils/default_datasets.py b/llm_studio/app_utils/default_datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..7201ea95290c316e91ab26855a36b9674e605483
--- /dev/null
+++ b/llm_studio/app_utils/default_datasets.py
@@ -0,0 +1,94 @@
+import os
+
+import pandas as pd
+from datasets import load_dataset
+
+
+def prepare_default_dataset_causal_language_modeling(path: str) -> pd.DataFrame:
+ if os.environ.get("H2O_LLM_STUDIO_DEMO_DATASETS") is None:
+ ds = load_dataset("OpenAssistant/oasst2")
+ train = ds["train"].to_pandas()
+ val = ds["validation"].to_pandas()
+ df = pd.concat([train, val], axis=0).reset_index(drop=True)
+ else:
+ df = pd.read_parquet(
+ os.path.join(
+ os.environ.get("H2O_LLM_STUDIO_DEMO_DATASETS"),
+ "causal_language_modeling.pq",
+ )
+ )
+
+ df_assistant = df[(df.role == "assistant")].copy()
+ df_prompter = df[(df.role == "prompter")].copy()
+ df_prompter = df_prompter.set_index("message_id")
+ df_assistant["output"] = df_assistant["text"].values
+
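+    # Pair each assistant reply with its parent prompter message so that every
+    # row becomes an (instruction, output) example; the grandparent id is kept
+    # to allow chaining multi-turn conversations.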
+ inputs = []
+ parent_ids = []
+ for _, row in df_assistant.iterrows():
+        parent = df_prompter.loc[row.parent_id]
+        inputs.append(parent.text)
+        parent_ids.append(parent.parent_id)
+
+ df_assistant["instruction"] = inputs
+ df_assistant["parent_id"] = parent_ids
+
+ df_assistant = df_assistant[
+ ["instruction", "output", "message_id", "parent_id", "lang", "rank"]
+ ].rename(columns={"message_id": "id"})
+
+ df_assistant[(df_assistant["rank"] == 0.0) & (df_assistant["lang"] == "en")][
+ ["instruction", "output", "id", "parent_id"]
+ ].to_parquet(os.path.join(path, "train_full.pq"), index=False)
+
+ df_assistant[df_assistant["lang"] == "en"][
+ ["instruction", "output", "id", "parent_id"]
+ ].to_parquet(os.path.join(path, "train_full_allrank.pq"), index=False)
+
+ df_assistant[df_assistant["rank"] == 0.0][
+ ["instruction", "output", "id", "parent_id"]
+ ].to_parquet(os.path.join(path, "train_full_multilang.pq"), index=False)
+
+ df_assistant[["instruction", "output", "id", "parent_id"]].to_parquet(
+ os.path.join(path, "train_full_multilang_allrank.pq"), index=False
+ )
+
+ return df_assistant[(df_assistant["rank"] == 0.0) & (df_assistant["lang"] == "en")]
+
+
+def prepare_default_dataset_dpo_modeling() -> pd.DataFrame:
+ if os.environ.get("H2O_LLM_STUDIO_DEMO_DATASETS") is None:
+ df = load_dataset("Intel/orca_dpo_pairs")["train"].to_pandas()
+ else:
+ df = pd.read_parquet(
+ os.path.join(
+ os.environ.get("H2O_LLM_STUDIO_DEMO_DATASETS"), "dpo_modeling.pq"
+ )
+ )
+ return df
+
+
+def prepare_default_dataset_classification_modeling() -> pd.DataFrame:
+ if os.environ.get("H2O_LLM_STUDIO_DEMO_DATASETS") is None:
+ df = load_dataset("stanfordnlp/imdb")["train"].to_pandas()
+ else:
+ df = pd.read_parquet(
+ os.path.join(
+ os.environ.get("H2O_LLM_STUDIO_DEMO_DATASETS"),
+ "classification_modeling.pq",
+ )
+ )
+ return df
+
+
+def prepare_default_dataset_regression_modeling() -> pd.DataFrame:
+ if os.environ.get("H2O_LLM_STUDIO_DEMO_DATASETS") is None:
+ df = load_dataset("nvidia/HelpSteer2")["train"].to_pandas()
+ else:
+ df = pd.read_parquet(
+ os.path.join(
+ os.environ.get("H2O_LLM_STUDIO_DEMO_DATASETS"),
+ "regression_modeling.pq",
+ )
+ )
+ return df
diff --git a/llm_studio/app_utils/handlers.py b/llm_studio/app_utils/handlers.py
new file mode 100644
index 0000000000000000000000000000000000000000..3931f97fdd02c6b216d1383c76be17e0c3797915
--- /dev/null
+++ b/llm_studio/app_utils/handlers.py
@@ -0,0 +1,432 @@
+import gc
+import logging
+from typing import List
+
+import torch
+from h2o_wave import Q
+
+from llm_studio.app_utils.sections.chat import chat_tab
+from llm_studio.app_utils.sections.chat_update import chat_copy, chat_update
+from llm_studio.app_utils.sections.common import delete_dialog
+from llm_studio.app_utils.sections.dataset import (
+ dataset_delete_current_datasets,
+ dataset_delete_single,
+ dataset_display,
+ dataset_edit,
+ dataset_import,
+ dataset_import_uploaded_file,
+ dataset_list,
+ dataset_list_delete,
+ dataset_merge,
+ dataset_newexperiment,
+)
+from llm_studio.app_utils.sections.experiment import (
+ experiment_delete,
+ experiment_display,
+ experiment_download_adapter,
+ experiment_download_logs,
+ experiment_download_model,
+ experiment_download_predictions,
+ experiment_input_type_error,
+ experiment_list,
+ experiment_push_to_huggingface_dialog,
+ experiment_rename_ui_workflow,
+ experiment_run,
+ experiment_start,
+ experiment_stop,
+)
+from llm_studio.app_utils.sections.home import home
+from llm_studio.app_utils.sections.project import (
+ current_experiment_compare,
+ current_experiment_list_compare,
+ current_experiment_list_delete,
+ current_experiment_list_stop,
+ experiment_rename_action_workflow,
+ list_current_experiments,
+)
+from llm_studio.app_utils.sections.settings import settings
+from llm_studio.app_utils.setting_utils import (
+ load_default_user_settings,
+ load_user_settings_and_secrets,
+ save_user_settings_and_secrets,
+)
+from llm_studio.app_utils.utils import add_model_type
+from llm_studio.app_utils.wave_utils import report_error, wave_utils_handle_error
+
+logger = logging.getLogger(__name__)
+
+
+async def handle(q: Q) -> None:
+ """Handles all requests in application and calls according functions."""
+
+ # logger.info(f"args: {q.args}")
+ # logger.info(f"events: {q.events}")
+
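+    # Unload any cached chat model unless this request belongs to an ongoing
+    # chat session, freeing GPU memory between unrelated page interactions.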
+    if q.args.__wave_submission_name__ not in (
+        "experiment/display/chat/chatbot",
+        "experiment/display/chat/clear_history",
+        "experiment/display/chat/copy_chat",
+    ):
+ if "experiment/display/chat/cfg" in q.client:
+ del q.client["experiment/display/chat/cfg"]
+ if "experiment/display/chat/model" in q.client:
+ del q.client["experiment/display/chat/model"]
+ if "experiment/display/chat/tokenizer" in q.client:
+ del q.client["experiment/display/chat/tokenizer"]
+ torch.cuda.empty_cache()
+ gc.collect()
+
+ try:
+ if q.args.__wave_submission_name__ == "home":
+ await home(q)
+ elif q.args.__wave_submission_name__ == "settings":
+ await settings(q)
+ elif q.args.__wave_submission_name__ == "save_settings":
+ logger.info("Saving user settings")
+ await save_user_settings_and_secrets(q)
+ await settings(q)
+ elif q.args.__wave_submission_name__ == "load_settings":
+ load_user_settings_and_secrets(q)
+ await settings(q)
+ elif q.args.__wave_submission_name__ == "restore_default_settings":
+ load_default_user_settings(q)
+ await settings(q)
+
+ elif q.args.__wave_submission_name__ == "report_error":
+ await report_error(q)
+
+ elif q.args.__wave_submission_name__ == "dataset/import":
+ await dataset_import(q, step=1)
+ elif q.args.__wave_submission_name__ == "dataset/list":
+ await dataset_list(q)
+ elif q.args.__wave_submission_name__ == "dataset/list/delete/abort":
+ q.page["dataset/list"].items[0].table.multiple = False
+ await dataset_list(q, reset=True)
+ elif q.args.__wave_submission_name__ == "dataset/list/abort":
+ q.page["dataset/list"].items[0].table.multiple = False
+ await dataset_list(q, reset=True)
+ elif q.args.__wave_submission_name__ == "dataset/list/delete":
+ await dataset_list_delete(q)
+ elif q.args.__wave_submission_name__ == "dataset/delete/single":
+ dataset_id = q.client["dataset/delete/single/id"]
+ dataset_id = q.client["dataset/list/df_datasets"]["id"].iloc[dataset_id]
+ await dataset_delete_single(q, int(dataset_id))
+ elif q.args.__wave_submission_name__ == "dataset/delete/dialog/single":
+ dataset_id = int(q.args["dataset/delete/dialog/single"])
+ q.client["dataset/delete/single/id"] = dataset_id
+ name = q.client["dataset/list/df_datasets"]["name"].iloc[dataset_id]
+
+ if q.client["delete_dialogs"]:
+ await delete_dialog(q, [name], "dataset/delete/single", "dataset")
+ else:
+ dataset_id = q.client["dataset/list/df_datasets"]["id"].iloc[dataset_id]
+ await dataset_delete_single(q, int(dataset_id))
+
+ elif q.args["dataset/delete/dialog"]:
+ names = list(
+ q.client["dataset/list/df_datasets"]["name"].iloc[
+ list(map(int, q.client["dataset/list/table"]))
+ ]
+ )
+
+ if not names:
+ return
+
+ if q.client["delete_dialogs"]:
+ await delete_dialog(q, names, "dataset/delete", "dataset")
+ else:
+ await dataset_delete_current_datasets(q)
+
+ elif q.args.__wave_submission_name__ == "dataset/delete":
+ await dataset_delete_current_datasets(q)
+ elif q.args.__wave_submission_name__ == "dataset/edit":
+ if q.client["dataset/list/df_datasets"] is not None:
+ dataset_id = int(q.args["dataset/edit"])
+ dataset_id = q.client["dataset/list/df_datasets"]["id"].iloc[dataset_id]
+ await dataset_edit(q, int(dataset_id))
+ elif q.args.__wave_submission_name__ == "dataset/newexperiment":
+ if q.client["dataset/list/df_datasets"] is not None:
+ dataset_id = int(q.args["dataset/newexperiment"])
+ dataset_id = q.client["dataset/list/df_datasets"]["id"].iloc[dataset_id]
+ await dataset_newexperiment(q, int(dataset_id))
+ elif q.args.__wave_submission_name__ == "dataset/newexperiment/from_current":
+ idx = q.client["dataset/display/id"]
+ dataset_id = q.client["dataset/list/df_datasets"]["id"].iloc[idx]
+ await dataset_newexperiment(q, dataset_id)
+
+ elif q.args.__wave_submission_name__ == "dataset/list/table":
+ q.client["dataset/display/id"] = int(q.args["dataset/list/table"][0])
+ await dataset_display(q)
+
+ elif q.args.__wave_submission_name__ == "dataset/display/visualization":
+ await dataset_display(q)
+ elif q.args.__wave_submission_name__ == "dataset/display/data":
+ await dataset_display(q)
+ elif q.args.__wave_submission_name__ == "dataset/display/statistics":
+ await dataset_display(q)
+ elif q.args["dataset/display/summary"]:
+ await dataset_display(q)
+
+ elif (
+ q.args.__wave_submission_name__ == "experiment/start/run"
+ or q.args.__wave_submission_name__ == "experiment/start/error/proceed"
+ or q.args.__wave_submission_name__ == "experiment/start/gridsearch/proceed"
+ ):
+ # error check for custom entered values in combo boxes (grid search)
+ error = experiment_input_type_error(q, pre="experiment/start")
+ if error:
+ await experiment_start(q)
+ q.client["notification_bar"] = (
+ f"Input type mismatch found in parameter **{error}**."
+ )
+ else:
+ # add model type to cfg file name here
+ q.client["experiment/start/cfg_file"] = add_model_type(
+ q.client["experiment/start/cfg_file"],
+ q.client["experiment/start/cfg_sub"],
+ )
+ q.client.delete_cards.add("experiment/start")
+ await experiment_run(q)
+
+ elif (
+ q.args.__wave_submission_name__ == "experiment/start_experiment"
+ or q.args.__wave_submission_name__ == "experiment/list/new"
+ or q.args.__wave_submission_name__ == "experiment/list/new_gridsearch"
+ ):
+ if q.client["experiment/list/df_experiments"] is not None:
+ if q.args.__wave_submission_name__ == "experiment/list/new_gridsearch":
+ selected_idx = int(q.args["experiment/list/new_gridsearch"])
+ elif q.args.__wave_submission_name__ == "experiment/list/new":
+ selected_idx = int(q.args["experiment/list/new"])
+
+ experiment_id = q.client["experiment/list/df_experiments"]["id"].iloc[
+ selected_idx
+ ]
+
+ q.client["experiment/start/cfg_category"] = "experiment"
+ q.client["experiment/start/cfg_file"] = "experiment"
+ q.client["experiment/start/cfg_experiment"] = str(experiment_id)
+
+ await experiment_start(q)
+ elif (
+ q.args.__wave_submission_name__ == "experiment/start"
+ or q.args.__wave_submission_name__ == "experiment/start/grid_search"
+ ):
+ q.client["experiment/start/cfg_category"] = None
+ q.client["experiment/start/cfg_file"] = None
+ datasets_df = q.client.app_db.get_datasets_df()
+ if datasets_df.shape[0] == 0:
+ info = "Import dataset before you create an experiment. "
+ await dataset_import(q, step=1, info=info)
+ else:
+ await experiment_start(q)
+
+ elif q.args.__wave_submission_name__ == "experiment/display/download_logs":
+ await experiment_download_logs(q)
+ elif (
+ q.args.__wave_submission_name__ == "experiment/display/download_predictions"
+ ):
+ await experiment_download_predictions(q)
+
+ elif q.args.__wave_submission_name__ == "experiment/list":
+ await experiment_list(q)
+ elif q.args.__wave_submission_name__ == "experiment/list/current":
+ await list_current_experiments(q)
+ elif q.args.__wave_submission_name__ == "experiment/list/current/noreset":
+ await list_current_experiments(q, reset=False)
+ elif q.args.__wave_submission_name__ == "experiment/list/refresh":
+ await experiment_list(q)
+ elif q.args.__wave_submission_name__ == "experiment/list/abort":
+ await list_current_experiments(q)
+ elif q.args.__wave_submission_name__ == "experiment/list/stop":
+ await current_experiment_list_stop(q)
+ elif q.args.__wave_submission_name__ == "experiment/list/delete":
+ await current_experiment_list_delete(q)
+ elif q.args.__wave_submission_name__ == "experiment/list/rename":
+ await experiment_rename_ui_workflow(q)
+ elif q.args.__wave_submission_name__ == "experiment/list/compare":
+ await current_experiment_list_compare(q)
+ elif (
+ q.args.__wave_submission_name__ == "experiment/stop"
+ or q.args.__wave_submission_name__ == "experiment/list/stop/table"
+ ):
+ if q.args["experiment/list/stop/table"]:
+ idx = int(q.args["experiment/list/stop/table"])
+ selected_id = q.client["experiment/list/df_experiments"]["id"].iloc[idx]
+ experiment_ids = [selected_id]
+ else:
+ selected_idxs = q.client["experiment/list/table"]
+ experiment_ids = list(
+ q.client["experiment/list/df_experiments"]["id"].iloc[
+ list(map(int, selected_idxs))
+ ]
+ )
+
+ await experiment_stop(q, experiment_ids)
+ await list_current_experiments(q)
+ elif q.args.__wave_submission_name__ == "experiment/list/delete/table/dialog":
+ idx = int(q.args["experiment/list/delete/table/dialog"])
+ names = [q.client["experiment/list/df_experiments"]["name"].iloc[idx]]
+ selected_id = q.client["experiment/list/df_experiments"]["id"].iloc[idx]
+ q.client["experiment/delete/single/id"] = selected_id
+ if q.client["delete_dialogs"]:
+ await delete_dialog(
+ q, names, "experiment/list/delete/table", "experiment"
+ )
+ else:
+ await experiment_delete_all_artifacts(q, [selected_id])
+
+ elif q.args.__wave_submission_name__ == "experiment/delete/dialog":
+ selected_idxs = q.client["experiment/list/table"]
+ exp_df = q.client["experiment/list/df_experiments"]
+ names = list(exp_df["name"].iloc[list(map(int, selected_idxs))])
+
+ if not names:
+ return
+
+ if q.client["delete_dialogs"]:
+ await delete_dialog(q, names, "experiment/delete", "experiment")
+ else:
+ experiment_ids = list(exp_df["id"].iloc[list(map(int, selected_idxs))])
+ await experiment_delete_all_artifacts(q, experiment_ids)
+
+ elif (
+ q.args.__wave_submission_name__ == "experiment/delete"
+ or q.args.__wave_submission_name__ == "experiment/list/delete/table"
+ ):
+ if q.args["experiment/list/delete/table"]:
+ selected_id = q.client["experiment/delete/single/id"]
+ experiment_ids = [selected_id]
+ else:
+ selected_idxs = q.client["experiment/list/table"]
+ exp_df = q.client["experiment/list/df_experiments"]
+ experiment_ids = list(exp_df["id"].iloc[list(map(int, selected_idxs))])
+
+ await experiment_delete_all_artifacts(q, experiment_ids)
+
+ elif q.args.__wave_submission_name__ == "experiment/rename/action":
+ await experiment_rename_action_workflow(q)
+
+ elif q.args.__wave_submission_name__ == "experiment/compare":
+ await current_experiment_compare(q)
+ elif q.args.__wave_submission_name__ == "experiment/compare/charts":
+ await current_experiment_compare(q)
+ elif q.args.__wave_submission_name__ == "experiment/compare/config":
+ await current_experiment_compare(q)
+ elif q.args.__wave_submission_name__ == "experiment/compare/diff_toggle":
+ q.client["experiment/compare/diff_toggle"] = q.args[
+ "experiment/compare/diff_toggle"
+ ]
+ await current_experiment_compare(q)
+
+ elif q.args.__wave_submission_name__ == "experiment/list/table":
+ q.client["experiment/display/id"] = int(q.args["experiment/list/table"][0])
+ q.client["experiment/display/logs_path"] = None
+ q.client["experiment/display/preds_path"] = None
+ q.client["experiment/display/tab"] = None
+ await experiment_display(q)
+
+ elif q.args.__wave_submission_name__ == "experiment/display/refresh":
+ await experiment_display(q)
+
+ elif q.args.__wave_submission_name__ == "experiment/display/charts":
+ await experiment_display(q)
+ elif q.args.__wave_submission_name__ == "experiment/display/summary":
+ await experiment_display(q)
+ elif (
+ q.args.__wave_submission_name__ == "experiment/display/train_data_insights"
+ ):
+ await experiment_display(q)
+ elif (
+ q.args.__wave_submission_name__
+ == "experiment/display/validation_prediction_insights"
+ ):
+ await experiment_display(q)
+ elif (
+ q.args.__wave_submission_name__ == "experiment/display/push_to_huggingface"
+ ):
+ await experiment_push_to_huggingface_dialog(q)
+ elif q.args.__wave_submission_name__ == "experiment/display/download_model":
+ await experiment_download_model(q)
+ elif q.args.__wave_submission_name__ == "experiment/display/download_adapter":
+ await experiment_download_adapter(q)
+ elif (
+ q.args.__wave_submission_name__
+ == "experiment/display/push_to_huggingface_submit"
+ ):
+ await experiment_push_to_huggingface_dialog(q)
+
+ elif q.args.__wave_submission_name__ == "experiment/display/config":
+ await experiment_display(q)
+ elif q.args.__wave_submission_name__ == "experiment/display/logs":
+ await experiment_display(q)
+ elif q.args.__wave_submission_name__ == "experiment/display/chat":
+ await experiment_display(q)
+
+ elif q.args.__wave_submission_name__ == "experiment/display/chat/chatbot":
+ await chat_update(q)
+ elif q.args.__wave_submission_name__ == "experiment/display/chat/clear_history":
+ await chat_tab(q, load_model=False)
+ elif q.args.__wave_submission_name__ == "experiment/display/chat/copy_chat":
+ await chat_copy(q)
+ elif q.args.__wave_submission_name__ == "dataset/import/local_upload":
+ await dataset_import_uploaded_file(q)
+ elif q.args.__wave_submission_name__ == "dataset/import/local_path_list":
+ await dataset_import(q, step=1)
+ elif q.args.__wave_submission_name__ == "dataset/import/2":
+ await dataset_import(q, step=2)
+ elif q.args.__wave_submission_name__ == "dataset/import/3":
+ await dataset_import(q, step=3)
+ elif q.args.__wave_submission_name__ == "dataset/import/cfg/parent_id_column":
+ await dataset_import(q, step=31, edit=True)
+ elif q.args.__wave_submission_name__ == "dataset/import/3/edit":
+ await dataset_import(q, step=3, edit=True)
+ elif q.args.__wave_submission_name__ == "dataset/import/4":
+ await dataset_import(q, step=4)
+ elif q.args.__wave_submission_name__ == "dataset/import/4/edit":
+ await dataset_import(q, step=4, edit=True)
+ elif q.args.__wave_submission_name__ == "dataset/import/6":
+ await dataset_import(q, step=6)
+ elif (
+ q.args.__wave_submission_name__ == "dataset/import/source"
+ and not q.args["dataset/list"]
+ ):
+ await dataset_import(q, step=1)
+ elif q.args.__wave_submission_name__ == "dataset/merge":
+ await dataset_merge(q, step=1)
+ elif q.args.__wave_submission_name__ == "dataset/merge/action":
+ await dataset_merge(q, step=2)
+
+ elif q.args.__wave_submission_name__ == "dataset/import/cfg_file":
+ await dataset_import(q, step=3)
+
+ # leave at the end of dataset import routing,
+ # would also be triggered if user clicks on
+ # a continue button in the dataset import wizard
+ elif q.args.__wave_submission_name__ == "dataset/import/cfg/train_dataframe":
+ await dataset_import(q, step=3)
+
+ elif q.args.__wave_submission_name__ == "experiment/start/cfg_file":
+ q.client["experiment/start/cfg_file"] = q.args["experiment/start/cfg_file"]
+ await experiment_start(q)
+ elif q.args.__wave_submission_name__ == "experiment/start/dataset":
+ await experiment_start(q)
+
+ elif (
+ q.client["nav/active"] == "experiment/start"
+ or q.client["nav/active"] == "experiment/start/grid_search"
+ ):
+ await experiment_start(q)
+
+ except Exception as unknown_exception:
+ logger.error("Unknown exception", exc_info=True)
+ await wave_utils_handle_error(
+ q,
+ error=unknown_exception,
+ )
+
+
+async def experiment_delete_all_artifacts(q: Q, experiment_ids: List[int]) -> None:
+ await experiment_stop(q, experiment_ids)
+ await experiment_delete(q, experiment_ids)
+ await list_current_experiments(q)
diff --git a/llm_studio/app_utils/hugging_face_utils.py b/llm_studio/app_utils/hugging_face_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..de3545e5c7f9339dbfc35f5ca7085c60c1d4ccfa
--- /dev/null
+++ b/llm_studio/app_utils/hugging_face_utils.py
@@ -0,0 +1,299 @@
+import os
+from typing import Optional
+
+import accelerate
+import einops
+import huggingface_hub
+import torch
+import transformers
+
+from llm_studio.app_utils.sections.chat import load_cfg_model_tokenizer
+from llm_studio.app_utils.utils import hf_repo_friendly_name, save_hf_yaml, set_env
+from llm_studio.src.utils.config_utils import (
+ GENERATION_PROBLEM_TYPES,
+ NON_GENERATION_PROBLEM_TYPES,
+)
+from llm_studio.src.utils.modeling_utils import check_disk_space
+
+
+def get_model_card(cfg, model, repo_id) -> huggingface_hub.ModelCard:
+ """
+    Define the Model Card for the exported model.
+
+ It is possible to change the language, the library name, and the tags.
+ These values will appear in the Model Card tab of Hugging Face.
+
+ Parameters:
+ cfg : Configuration parameters for the model card.
+ model : The model for which the model card is being generated.
+ repo_id : The ID of the target Hugging Face repository.
+
+ Returns:
+ huggingface_hub.ModelCard: The Model Card containing model information.
+ """
+ card_data = huggingface_hub.ModelCardData(
+ language="en",
+ library_name="transformers",
+ tags=["gpt", "llm", "large language model", "h2o-llmstudio"],
+ )
+ cfg_kwargs = dict(
+ text_prompt_start=cfg.dataset.text_prompt_start,
+ text_answer_separator=cfg.dataset.text_answer_separator,
+ trust_remote_code=cfg.environment.trust_remote_code,
+ end_of_sentence=(
+ cfg.tokenizer._tokenizer_eos_token
+ if cfg.dataset.add_eos_token_to_prompt
+ else ""
+ ),
+ )
+ if cfg.problem_type not in NON_GENERATION_PROBLEM_TYPES:
+ cfg_kwargs.update(
+ dict(
+ min_new_tokens=cfg.prediction.min_length_inference,
+ max_new_tokens=cfg.prediction.max_length_inference,
+ do_sample=cfg.prediction.do_sample,
+ num_beams=cfg.prediction.num_beams,
+ temperature=cfg.prediction.temperature,
+ repetition_penalty=cfg.prediction.repetition_penalty,
+ )
+ )
+ if cfg.dataset.system_column != "None":
+ cfg_kwargs[
+ "sample_messages"
+ ] = """[
+ {
+ "role": "system",
+ "content": "You are a friendly and polite chatbot.",
+ },
+ {"role": "user", "content": "Hi, how are you?"},
+ {"role": "assistant", "content": "I'm doing great, how about you?"},
+ {"role": "user", "content": "Why is drinking water so healthy?"},
+]"""
+ else:
+ cfg_kwargs[
+ "sample_messages"
+ ] = """[
+ {"role": "user", "content": "Hi, how are you?"},
+ {"role": "assistant", "content": "I'm doing great, how about you?"},
+ {"role": "user", "content": "Why is drinking water so healthy?"},
+]"""
+
+ card = huggingface_hub.ModelCard.from_template(
+ card_data,
+ template_path=os.path.join("model_cards", cfg.environment._model_card_template),
+ base_model=cfg.llm_backbone, # will be replaced in template if it exists
+ repo_id=repo_id,
+ model_architecture=model.backbone.__repr__(),
+ config=cfg.__repr__(),
+ transformers_version=transformers.__version__,
+ einops_version=einops.__version__,
+ accelerate_version=accelerate.__version__,
+ torch_version=torch.__version__.split("+")[0],
+ **cfg_kwargs,
+ )
+ return card
+
+
+def get_chat_template(cfg):
+
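+    # Build a Jinja2 chat template by substituting the chat_template_for_*
+    # placeholders below, depending on whether the dataset defines a system role
+    # and which EOS tokens are enabled in the config.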
+ chat_template = """
+{% for message in messages %}
+chat_template_for_checking_system_role
+chat_template_for_checking_alternating_roles
+{% if message['role'] == 'user' %}
+{{ 'text_prompt_start' + message['content'].strip() + eos_token_prompt }}
+chat_template_for_system
+{% elif message['role'] == 'assistant' %}
+{{ 'text_answer_separator' + message['content'].strip() + eos_token_answer }}
+{% endif %}
+{% endfor %}
+{% if add_generation_prompt %}{{ 'text_answer_separator' }}{% endif %}"""
+
+ if cfg.dataset.system_column != "None":
+ # If system role is supported
+ chat_template = chat_template.replace(
+ "chat_template_for_checking_system_role", ""
+ )
+ chat_template = chat_template.replace(
+ "chat_template_for_checking_alternating_roles",
+ """
+{% if loop.index0 != 0 and message['role'] == 'system' %}
+{{ raise_exception('Conversation roles must alternate system(optional)/user/assistant/user/assistant/...') }}""" # noqa
+ + """
+{% elif messages[0]['role'] == 'system' and ((message['role'] == 'user' and (loop.index0 % 2 == 0)) or (message['role'] == 'assistant' and (loop.index0 % 2 == 1))) %}""" # noqa
+ + """
+{{ raise_exception('Conversation roles must alternate system(optional)/user/assistant/user/assistant/...') }}""" # noqa
+ + """
+{% elif messages[0]['role'] != 'system' and ((message['role'] == 'user' and (loop.index0 % 2 != 0)) or (message['role'] == 'assistant' and (loop.index0 % 2 != 1))) %}""" # noqa
+ + """
+{{ raise_exception('Conversation roles must alternate system(optional)/user/assistant/user/assistant/...') }}""" # noqa
+ + """
+{% endif %}""",
+ )
+ chat_template = chat_template.replace(
+ "chat_template_for_system",
+ """
+{% elif message['role'] == 'system' %}
+{{ 'text_system_start' + message['content'].strip() + eos_token_system }}""",
+ )
+ if cfg.dataset.add_eos_token_to_system:
+ chat_template = chat_template.replace("eos_token_system", "eos_token")
+ else:
+ chat_template = chat_template.replace("+ eos_token_system", "")
+ else:
+ # If system role is NOT supported
+ chat_template = chat_template.replace(
+ "chat_template_for_checking_system_role",
+ """
+{% if message['role'] == 'system' %}
+{{ raise_exception('System role not supported') }}
+{% endif %}""",
+ )
+ chat_template = chat_template.replace(
+ "chat_template_for_checking_alternating_roles",
+ """
+{% if ((message['role'] == 'user') != (loop.index0 % 2 == 0)) or ((message['role'] == 'assistant') != (loop.index0 % 2 == 1)) %}""" # noqa
+ + """
+{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}""" # noqa
+ + """
+{% endif %}""",
+ )
+ chat_template = chat_template.replace("chat_template_for_system", "")
+
+ if cfg.dataset.add_eos_token_to_prompt:
+ chat_template = chat_template.replace("eos_token_prompt", "eos_token")
+ else:
+ chat_template = chat_template.replace("+ eos_token_prompt", "")
+ if cfg.dataset.add_eos_token_to_answer:
+ chat_template = chat_template.replace("eos_token_answer", "eos_token")
+ else:
+ chat_template = chat_template.replace("+ eos_token_answer", "")
+
+ chat_template = (
+ chat_template.replace("\n", "")
+ .replace("text_system_start", cfg.dataset.text_system_start)
+ .replace("text_prompt_start", cfg.dataset.text_prompt_start)
+ .replace("text_answer_separator", cfg.dataset.text_answer_separator)
+ )
+
+ return chat_template
+
+
+def publish_model_to_hugging_face(
+ path_to_experiment: str,
+ model_name: str,
+    user_id: Optional[str] = None,
+    api_key: Optional[str] = None,
+ device: str = "cuda:0",
+ safe_serialization: bool = True,
+ hf_transfer: bool = False,
+) -> None:
+ """
+    Publish the model to Hugging Face.
+
+ Parameters:
+        path_to_experiment: The file path of the fine-tuned model's files.
+        model_name: The name of the model to be published on Hugging Face.
+        user_id: The Hugging Face user ID.
+        api_key: The Hugging Face API key.
+        device: The target device for running the model: 'cpu', 'cpu_shard',
+            or 'cuda:N' for a specific GPU index.
+        safe_serialization: A flag indicating whether safe serialization should
+            be used.
+        hf_transfer: A flag indicating whether to use hf_transfer
+            (https://pypi.org/project/hf-transfer/) for model upload.
+
+ Returns:
+ None. The model is published to the specified Hugging Face repository.
+ """
+
+ # Check if the 'device' value is valid, raise an exception if not
+ if device == "cpu" or device == "cpu_shard":
+ pass # 'cpu' is a valid value
+ elif device.startswith("cuda:") and device[5:].isdigit():
+ pass # 'cuda:integer' format is valid
+ else:
+ raise ValueError(
+ "Invalid device value. Use 'cpu', 'cpu_shard' or 'cuda:INTEGER'."
+ )
+
+ # update the environment variable for hf_transfer
+ if hf_transfer:
+ os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
+ else:
+ os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "false"
+
+ with set_env(HF_TOKEN=api_key):
+ cfg, model, tokenizer = load_cfg_model_tokenizer(
+ path_to_experiment,
+ merge=True,
+ device=device,
+ )
+
+ check_disk_space(model.backbone, "./")
+
+    # Log in with the provided API key; otherwise rely on an existing login
+ if api_key:
+ huggingface_hub.login(api_key)
+
+ # If 'user_id' argument is blank, fetch 'user_id' from the logged-in user
+ if user_id == "":
+ user_id = huggingface_hub.whoami()["name"]
+
+ repo_id = f"{user_id}/{hf_repo_friendly_name(model_name)}"
+
+ # push tokenizer to hub
+ if cfg.problem_type in GENERATION_PROBLEM_TYPES:
+ tokenizer.chat_template = get_chat_template(cfg)
+ tokenizer.push_to_hub(repo_id=repo_id, private=True)
+
+ # push model card to hub
+ card = get_model_card(cfg, model, repo_id)
+ card.push_to_hub(
+ repo_id=repo_id, repo_type="model", commit_message="Upload model card"
+ )
+
+ api = huggingface_hub.HfApi()
+
+ # push classification head to hub
+ if os.path.isfile(f"{path_to_experiment}/classification_head.pth"):
+ api.upload_file(
+ path_or_fileobj=f"{path_to_experiment}/classification_head.pth",
+ path_in_repo="classification_head.pth",
+ repo_id=repo_id,
+ repo_type="model",
+ commit_message="Upload classification_head.pth",
+ )
+ # push regression head to hub
+ if os.path.isfile(f"{path_to_experiment}/regression_head.pth"):
+ api.upload_file(
+ path_or_fileobj=f"{path_to_experiment}/regression_head.pth",
+ path_in_repo="regression_head.pth",
+ repo_id=repo_id,
+ repo_type="model",
+ commit_message="Upload regression_head.pth",
+ )
+
+ # push config to hub
+ api.upload_file(
+ path_or_fileobj=os.path.join(path_to_experiment, "cfg.yaml"),
+ path_in_repo="cfg.yaml",
+ repo_id=repo_id,
+ repo_type="model",
+ commit_message="Upload cfg.yaml",
+ )
+
+ # push model to hub
+ model.backbone.push_to_hub(
+ repo_id=repo_id,
+ private=True,
+ commit_message="Upload model",
+ safe_serialization=safe_serialization,
+ )
+
+ # Storing HF attributes
+ output_directory = cfg.output_directory
+ save_hf_yaml(
+ path=f"{output_directory.rstrip('/')}/hf.yaml",
+ account_name=user_id,
+ model_name=model_name,
+ repo_id=repo_id,
+ )
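+
+
+# Hedged usage sketch (all values hypothetical):
+#   publish_model_to_hugging_face(
+#       path_to_experiment="output/user/my-experiment",
+#       model_name="my-finetuned-model",
+#       user_id="my-hf-user",
+#       api_key=os.environ["HF_TOKEN"],
+#       device="cuda:0",
+#   )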
diff --git a/llm_studio/app_utils/initializers.py b/llm_studio/app_utils/initializers.py
new file mode 100644
index 0000000000000000000000000000000000000000..0853dc77c58c6fb0a4463073e5d92534f5590212
--- /dev/null
+++ b/llm_studio/app_utils/initializers.py
@@ -0,0 +1,260 @@
+import logging
+import os
+import shutil
+from tempfile import NamedTemporaryFile
+
+from bokeh.resources import Resources as BokehResources
+from h2o_wave import Q, ui
+
+from llm_studio.app_utils.config import default_cfg
+from llm_studio.app_utils.db import Database, Dataset
+from llm_studio.app_utils.default_datasets import (
+ prepare_default_dataset_causal_language_modeling,
+ prepare_default_dataset_classification_modeling,
+ prepare_default_dataset_dpo_modeling,
+ prepare_default_dataset_regression_modeling,
+)
+from llm_studio.app_utils.sections.common import interface
+from llm_studio.app_utils.setting_utils import load_user_settings_and_secrets
+from llm_studio.app_utils.utils import (
+ get_data_dir,
+ get_database_dir,
+ get_download_dir,
+ get_output_dir,
+ get_user_db_path,
+ get_user_name,
+)
+from llm_studio.src.utils.config_utils import load_config_py, save_config_yaml
+
+logger = logging.getLogger(__name__)
+
+
+async def import_default_data(q: Q):
+ """Imports default data"""
+
+ try:
+ if q.client.app_db.get_dataset(1) is None:
+ logger.info("Downloading default dataset...")
+ q.page["meta"].dialog = ui.dialog(
+ title="Creating default datasets",
+ blocking=True,
+ items=[ui.progress(label="Please be patient...")],
+ )
+ await q.page.save()
+
+ dataset = prepare_oasst(q)
+ q.client.app_db.add_dataset(dataset)
+ dataset = prepare_dpo(q)
+ q.client.app_db.add_dataset(dataset)
+ dataset = prepare_imdb(q)
+ q.client.app_db.add_dataset(dataset)
+ dataset = prepare_helpsteer(q)
+ q.client.app_db.add_dataset(dataset)
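+            # The demo datasets use fixed ids 1-4, so the get_dataset(1) check
+            # above ensures this import runs only once per user database.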
+
+ except Exception as e:
+ q.client.app_db._session.rollback()
+ logger.warning(f"Could not download default dataset: {e}")
+
+
+def prepare_oasst(q: Q) -> Dataset:
+ path = f"{get_data_dir(q)}/oasst"
+ if os.path.exists(path):
+ shutil.rmtree(path)
+ os.makedirs(path, exist_ok=True)
+ df = prepare_default_dataset_causal_language_modeling(path)
+ cfg = load_config_py(
+ config_path=os.path.join("llm_studio/python_configs", default_cfg.cfg_file),
+ config_name="ConfigProblemBase",
+ )
+ cfg.dataset.train_dataframe = os.path.join(path, "train_full.pq")
+ cfg.dataset.prompt_column = ("instruction",)
+ cfg.dataset.answer_column = "output"
+ cfg.dataset.parent_id_column = "None"
+ cfg_path = os.path.join(path, f"{default_cfg.cfg_file}.yaml")
+ save_config_yaml(cfg_path, cfg)
+ dataset = Dataset(
+ id=1,
+ name="oasst",
+ path=path,
+ config_file=cfg_path,
+ train_rows=df.shape[0],
+ )
+ return dataset
+
+
+def prepare_dpo(q: Q) -> Dataset:
+ path = f"{get_data_dir(q)}/dpo"
+ if os.path.exists(path):
+ shutil.rmtree(path)
+ os.makedirs(path, exist_ok=True)
+ train_df = prepare_default_dataset_dpo_modeling()
+ train_df.to_parquet(os.path.join(path, "train.pq"), index=False)
+
+ from llm_studio.python_configs.text_dpo_modeling_config import ConfigDPODataset
+ from llm_studio.python_configs.text_dpo_modeling_config import (
+ ConfigProblemBase as ConfigProblemBaseDPO,
+ )
+
+ cfg: ConfigProblemBaseDPO = ConfigProblemBaseDPO(
+ dataset=ConfigDPODataset(
+ train_dataframe=os.path.join(path, "train.pq"),
+ system_column="system",
+ prompt_column=("question",),
+ answer_column="chosen",
+ rejected_answer_column="rejected",
+ ),
+ )
+
+ cfg_path = os.path.join(path, "text_dpo_modeling_config.yaml")
+ save_config_yaml(cfg_path, cfg)
+ dataset = Dataset(
+ id=2,
+ name="dpo",
+ path=path,
+ config_file=cfg_path,
+ train_rows=train_df.shape[0],
+ )
+ return dataset
+
+
+def prepare_imdb(q: Q) -> Dataset:
+ path = f"{get_data_dir(q)}/imdb"
+ if os.path.exists(path):
+ shutil.rmtree(path)
+ os.makedirs(path, exist_ok=True)
+ train_df = prepare_default_dataset_classification_modeling()
+ train_df.to_parquet(os.path.join(path, "train.pq"), index=False)
+
+ from llm_studio.python_configs.text_causal_classification_modeling_config import (
+ ConfigNLPCausalClassificationDataset,
+ )
+ from llm_studio.python_configs.text_causal_classification_modeling_config import (
+ ConfigProblemBase as ConfigProblemBaseClassification,
+ )
+
+ cfg: ConfigProblemBaseClassification = ConfigProblemBaseClassification(
+ dataset=ConfigNLPCausalClassificationDataset(
+ train_dataframe=os.path.join(path, "train.pq"),
+ prompt_column=("text",),
+ answer_column=("label",),
+ ),
+ )
+
+ cfg_path = os.path.join(path, "text_causal_classification_modeling_config.yaml")
+ save_config_yaml(cfg_path, cfg)
+ dataset = Dataset(
+ id=3,
+ name="imdb",
+ path=path,
+ config_file=cfg_path,
+ train_rows=train_df.shape[0],
+ )
+ return dataset
+
+
+def prepare_helpsteer(q: Q) -> Dataset:
+ path = f"{get_data_dir(q)}/helpsteer"
+ if os.path.exists(path):
+ shutil.rmtree(path)
+ os.makedirs(path, exist_ok=True)
+ train_df = prepare_default_dataset_regression_modeling()
+ train_df.to_parquet(os.path.join(path, "train.pq"), index=False)
+
+ from llm_studio.python_configs.text_causal_regression_modeling_config import (
+ ConfigNLPCausalRegressionDataset,
+ )
+ from llm_studio.python_configs.text_causal_regression_modeling_config import (
+ ConfigProblemBase as ConfigProblemBaseRegression,
+ )
+
+ cfg: ConfigProblemBaseRegression = ConfigProblemBaseRegression(
+ dataset=ConfigNLPCausalRegressionDataset(
+ train_dataframe=os.path.join(path, "train.pq"),
+ prompt_column=("prompt", "response"),
+ answer_column=(
+ "helpfulness",
+ "correctness",
+ "coherence",
+ "complexity",
+ "verbosity",
+ ),
+ ),
+ )
+
+ cfg_path = os.path.join(path, "text_causal_regression_modeling_config.yaml")
+ save_config_yaml(cfg_path, cfg)
+ dataset = Dataset(
+ id=4,
+ name="helpsteer",
+ path=path,
+ config_file=cfg_path,
+ train_rows=train_df.shape[0],
+ )
+ return dataset
+
+
+async def initialize_client(q: Q) -> None:
+ """Initialize the client."""
+
+ if not q.client.client_initialized:
+ logger.info("Initializing client ...")
+ q.client.delete_cards = set()
+ q.client.delete_cards.add("init_app")
+
+ os.makedirs(get_data_dir(q), exist_ok=True)
+ os.makedirs(get_database_dir(q), exist_ok=True)
+ os.makedirs(get_output_dir(q), exist_ok=True)
+ os.makedirs(get_download_dir(q), exist_ok=True)
+
+ db_path = get_user_db_path(q)
+
+ q.client.app_db = Database(db_path)
+
+ logger.info(f"User name: {get_user_name(q)}")
+
+ q.client.client_initialized = True
+
+ q.client["mode_curr"] = "full"
+ load_user_settings_and_secrets(q)
+ await interface(q)
+
+ await import_default_data(q)
+ q.args.__wave_submission_name__ = default_cfg.start_page
+ logger.info("Initializing client ... done")
+
+ return
+
+
+async def initialize_app(q: Q) -> None:
+ """
+ Initialize the app.
+
+ This function is called once when the app is started and stores values in q.app.
+ """
+
+ if not q.app.initialized:
+ logger.info("Initializing app ...")
+
+ icons_pth = "llm_studio/app_utils/static/"
+ (q.app["icon_path"],) = await q.site.upload([f"{icons_pth}/icon_300.svg"])
+
+ script_sources = []
+
+ with NamedTemporaryFile(mode="w", suffix=".min.js") as f:
+ # write all Bokeh scripts to one file to make sure
+ # they are loaded sequentially
+ for js_raw in BokehResources(mode="inline").js_raw:
+ f.write(js_raw)
+ f.write("\n")
+
+ (url,) = await q.site.upload([f.name])
+ script_sources.append(url)
+
+ q.app["script_sources"] = script_sources
+ q.app["initialized"] = True
+ q.app.version = default_cfg.version
+ q.app.name = default_cfg.name
+ q.app.heap_mode = default_cfg.heap_mode
+
+ logger.info("Initializing app ... done")
diff --git a/llm_studio/app_utils/sections/__init__.py b/llm_studio/app_utils/sections/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llm_studio/app_utils/sections/chat.py b/llm_studio/app_utils/sections/chat.py
new file mode 100644
index 0000000000000000000000000000000000000000..97205d3f0d3b8ace5b18754a5812b8064e738d7b
--- /dev/null
+++ b/llm_studio/app_utils/sections/chat.py
@@ -0,0 +1,277 @@
+import gc
+import logging
+import os
+
+import numpy as np
+import torch
+from accelerate import dispatch_model, infer_auto_device_map
+from accelerate.utils import get_balanced_memory
+from h2o_wave import Q
+from h2o_wave import data as chat_data
+from h2o_wave import ui
+
+from llm_studio.app_utils.utils import get_experiments, get_ui_elements_for_cfg, set_env
+from llm_studio.python_configs.base import DefaultConfigProblemBase
+from llm_studio.src.datasets.text_utils import get_texts, get_tokenizer
+from llm_studio.src.utils.config_utils import (
+ NON_GENERATION_PROBLEM_TYPES,
+ load_config_yaml,
+)
+from llm_studio.src.utils.export_utils import get_prediction_dataframe
+from llm_studio.src.utils.modeling_utils import load_checkpoint
+
+logger = logging.getLogger(__name__)
+
+
+async def chat_tab(q: Q, load_model: bool = True):
+ if not await should_start_chat(q):
+ return
+
+ if load_model:
+ q.page["experiment/display/chat"] = ui.form_card(
+ box="first",
+ items=[ui.progress(label="Loading the model...")],
+ )
+
+ q.client["experiment/display/chat/messages"] = []
+ q.client.delete_cards.add("experiment/display/chat")
+
+ q.page["experiment/display/chat/settings"] = ui.form_card(
+ box="second",
+ items=[
+ ui.expander(
+ name="chat_settings",
+ label="Chat Settings",
+ items=[ui.progress(label="Loading model configuration...")],
+ expanded=True,
+ )
+ ],
+ )
+ q.client.delete_cards.add("experiment/display/chat/settings")
+
+ await q.page.save()
+    logger.info(f"GPU memory allocated: {torch.cuda.memory_allocated()}")
+
+ if load_model:
+ with set_env(HF_TOKEN=q.client["default_huggingface_api_token"]):
+ gpu_id = q.client["gpu_used_for_chat"] - 1
+ cfg, model, tokenizer = load_cfg_model_tokenizer(
+ q.client["experiment/display/experiment_path"], device=f"cuda:{gpu_id}"
+ )
+ q.client["experiment/display/chat/cfg"] = cfg
+ q.client["experiment/display/chat/model"] = model
+ q.client["experiment/display/chat/tokenizer"] = tokenizer
+ initial_message = "Model successfully loaded, how can I help you?"
+
+ else:
+ cfg = q.client["experiment/display/chat/cfg"]
+ assert q.client["experiment/display/chat/model"] is not None
+ assert q.client["experiment/display/chat/tokenizer"] is not None
+ initial_message = "Chat History cleaned. How can I help you?"
+
+ # Load validation dataframe and texts
+ validation_dataframe = get_prediction_dataframe(cfg.output_directory)
+ if cfg.dataset.parent_id_column != "None":
+ # sample and parent ids can have any dtype, such as str, int, float, etc.
+ # id column can be int, while parent_id column can be float
+ # (as some values are NaN) so we cast id to the same dtype
+ sample_ids = (
+ validation_dataframe["id"]
+ .astype(validation_dataframe[cfg.dataset.parent_id_column].dtype)
+ .tolist()
+ )
+ parent_ids = validation_dataframe[cfg.dataset.parent_id_column].tolist()
+
+ sample_ids_set = set(sample_ids)
+        is_seed_prompt = [idx not in sample_ids_set for idx in parent_ids]
+ validation_dataframe["is_seed_prompt"] = is_seed_prompt
+
+ validation_dataframe = validation_dataframe.loc[
+ validation_dataframe["is_seed_prompt"]
+ ]
+ validation_texts = get_texts(validation_dataframe, cfg)
+
+    # Hide fields that should not be visible in the UI
+ cfg.prediction._visibility["metric"] = -1
+ cfg.prediction._visibility["batch_size_inference"] = -1
+ cfg.prediction._visibility["min_length_inference"] = -1
+ cfg.prediction._visibility["stop_tokens"] = -1
+
+    logger.info(f"GPU memory allocated: {torch.cuda.memory_allocated()}")
+ q.page["experiment/display/chat"] = ui.chatbot_card(
+ box="first",
+ data=chat_data(fields="content from_user", t="list"), # type: ignore
+ name="experiment/display/chat/chatbot",
+ events=["stop", "suggestion"],
+ suggestions=[
+ ui.chat_suggestion(
+ "Write a poem about H2O LLM Studio",
+ label="Write a poem",
+ caption="about H2O LLM Studio",
+ icon="Edit",
+ ),
+ ui.chat_suggestion(
+ "Plan a trip to Europe",
+ label="Plan a trip",
+ caption="to Europe",
+ icon="Airplane",
+ ),
+ ui.chat_suggestion(
+ "Give me ideas for a new project",
+ label="Give me ideas",
+ caption="for a new project",
+ icon="Lightbulb",
+ ),
+ ui.chat_suggestion(
+ np.random.choice(validation_texts),
+ label="Random sample from validation set",
+ icon="Chat",
+ ),
+ ],
+ )
+ q.page["experiment/display/chat"].data += [initial_message, False]
+
+ option_items = get_ui_elements_for_cfg(
+ cfg=q.client["experiment/display/chat/cfg"].prediction,
+ q=q,
+ pre="chat/cfg_predictions",
+ )
+ q.page["experiment/display/chat/settings"] = ui.form_card(
+ box="second",
+ items=[
+ ui.buttons(
+ [
+ ui.button(
+ name="experiment/display/chat/clear_history",
+ label="Clear History",
+ primary=True,
+ ),
+ ui.button(
+ name="experiment/display/chat/copy_chat",
+ label="Copy to clipboard",
+ primary=True,
+ ),
+ ]
+ ),
+ ui.expander(
+ name="chat_settings",
+ label="Chat Settings",
+ items=option_items,
+ expanded=True,
+ ),
+ ],
+ )
+
+
+async def should_start_chat(q: Q) -> bool:
+ cfg: DefaultConfigProblemBase = load_config_yaml(
+ os.path.join(q.client["experiment/display/experiment_path"], "cfg.yaml")
+ )
+
+ if cfg.problem_type in NON_GENERATION_PROBLEM_TYPES:
+ q.page["experiment/display/chat"] = ui.form_card(
+ box="first",
+ items=[
+ ui.text(
+ "Chatbot is not available for this problem type. "
+ "Please select a text generation problem."
+ )
+ ],
+ title="",
+ )
+ q.client.delete_cards.add("experiment/display/chat")
+ return False
+
+ # gpu id in UI is offset by 1 to be in sync with experiment UI
+ gpu_id = q.client["gpu_used_for_chat"] - 1
+ if gpu_is_blocked(q, gpu_id):
+ q.page["experiment/display/chat"] = ui.form_card(
+ box="first",
+ items=[
+ ui.text(
+ f"""Chatbot is not available when GPU{q.client["gpu_used_for_chat"]}
+ is blocked by another experiment.
+ You can change "Gpu used for Chat" in the settings tab
+ to use another GPU for the chatbot. """
+ )
+ ],
+ title="",
+ )
+ q.client.delete_cards.add("experiment/display/chat")
+ return False
+ return True
+
+
+def gpu_is_blocked(q, gpu_id):
+ experiments = get_experiments(q=q)
+ running_experiments = experiments[experiments.status.isin(["running"])]
+    gpu_blocked = any(
+        str(gpu_id) in gpu_list
+        for gpu_list in running_experiments["gpu_list"]
+        .apply(lambda x: x.split(","))
+        .to_list()
+    )
+ return gpu_blocked
+
+
+def load_cfg_model_tokenizer(
+ experiment_path: str, merge: bool = False, device: str = "cuda:0"
+):
+ """Loads the model, tokenizer and configuration from the experiment path."""
+ cfg = load_config_yaml(os.path.join(experiment_path, "cfg.yaml"))
+ cfg.architecture.pretrained = False
+ cfg.architecture.gradient_checkpointing = False
+ cfg.environment._device = device.replace("_shard", "")
+ cfg.environment._local_rank = 0
+ cfg.prediction._visibility["num_history"] = 1
+
+ tokenizer = get_tokenizer(cfg)
+
+ gc.collect()
+ torch.cuda.empty_cache()
+
+ if (
+ merge
+ and cfg.training.lora
+ and cfg.architecture.backbone_dtype in ("int4", "int8")
+ ):
+ # Force to float16 for merging LORA weights.
+ # TODO: Could be configurable in the future to allow bfloat16.
+ logger.info("Loading backbone in float16 for merging LORA weights.")
+ cfg.architecture.backbone_dtype = "float16"
+ cfg.architecture.pretrained = True
+
+ # if "cpu" in device:
+ # cfg.architecture.backbone_dtype = "float32"
+
+ with torch.device(cfg.environment._device):
+ model = cfg.architecture.model_class(cfg)
+ cfg.architecture.pretrained_weights = os.path.join(
+ experiment_path, "checkpoint.pth"
+ )
+ load_checkpoint(cfg, model, strict=False)
+
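+    # "cpu_shard" spreads the model across all available devices (CPU and GPUs)
+    # using accelerate's balanced memory map.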
+ if device == "cpu_shard":
+ max_memory = get_balanced_memory(
+ model,
+ )
+ device_map = infer_auto_device_map(model, max_memory=max_memory)
+ model = dispatch_model(
+ model,
+ device_map=device_map,
+ )
+
+ if merge and cfg.training.lora:
+        # Merge the LoRA layers into the base model. This is needed if one
+        # wants to use the base model as a standalone model.
+        logger.info("Merging LoRA layers with base model.")
+ model.backbone = model.backbone.merge_and_unload()
+
+ model = model.eval()
+ model.backbone.use_cache = True
+
+ return cfg, model, tokenizer
diff --git a/llm_studio/app_utils/sections/chat_update.py b/llm_studio/app_utils/sections/chat_update.py
new file mode 100644
index 0000000000000000000000000000000000000000..137739922d9c6b6ca693e85674e76cb5b2a71dd0
--- /dev/null
+++ b/llm_studio/app_utils/sections/chat_update.py
@@ -0,0 +1,317 @@
+import asyncio
+import gc
+import logging
+import os
+import threading
+from typing import Any, Callable, Dict, List, Optional
+
+import numpy as np
+import torch
+from h2o_wave import Q, ui
+from transformers import AutoTokenizer, TextStreamer
+
+from llm_studio.app_utils.utils import parse_ui_elements
+from llm_studio.src.models.text_causal_language_modeling_model import Model
+from llm_studio.src.utils.modeling_utils import (
+ EnvVariableStoppingCriteria,
+ get_torch_dtype,
+ set_generation_config,
+)
+
+logger = logging.getLogger(__name__)
+
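+# Wave's chatbot card stores each message as [content, from_user]; these flags
+# mark which side authored a message.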
+USER = True
+BOT = False
+
+
+@torch.inference_mode(mode=True)
+async def chat_update(q: Q) -> None:
+ """
+ Update the chatbot with the new message.
+ """
+ q.client["experiment/display/chat/finished"] = False
+ try:
+ await update_chat_window(q)
+ finally:
+ q.client["experiment/display/chat/finished"] = True
+ # Hide the "Stop generating" button
+ q.page["experiment/display/chat"].generating = False
+
+
+async def update_chat_window(q):
+ cfg_prediction = parse_ui_elements(
+ cfg=q.client["experiment/display/chat/cfg"].prediction,
+ q=q,
+ pre="chat/cfg_predictions/cfg/",
+ )
+ q.client["experiment/display/chat/cfg"].prediction = cfg_prediction
+ # Update generation config
+ q.client["experiment/display/chat/model"].backbone = set_generation_config(
+ q.client["experiment/display/chat/model"].backbone, cfg_prediction
+ )
+
+ # could also invoke cfg.check() here, but leave it explicit as cfg.check()
+ # may raise other issues not related to the chatbot
+ if cfg_prediction.do_sample and cfg_prediction.temperature == 0.0:
+ q.page["meta"].dialog = ui.dialog(
+ title="Invalid Text Generation configuration.",
+ name="chatbot_invalid_settings",
+ items=[
+ ui.text(
+ "Do Sample enabled and Temperature = 0 are mutually exclusive. "
+ "Please increase Temperature or disable sampling."
+ ),
+ ],
+ closable=True,
+ )
+ await q.page.save()
+ return
+
+ # populate chat window with user message
+ logger.info(f"Using chatbot config: {cfg_prediction}")
+ if q.events["experiment/display/chat/chatbot"]:
+ prompt = q.events["experiment/display/chat/chatbot"]["suggestion"]
+ else:
+ prompt = q.client["experiment/display/chat/chatbot"]
+ message = [prompt, USER]
+ q.client["experiment/display/chat/messages"].append(message)
+ q.page["experiment/display/chat"].data += message
+ q.page["experiment/display/chat"].data += ["", BOT]
+ await q.page.save()
+
+ predicted_text = await answer_chat(q)
+
+ # populate chat window with bot message
+ logger.info(f"Predicted Answer: {predicted_text}")
+ message = [predicted_text, BOT]
+ q.client["experiment/display/chat/messages"].append(message)
+ q.page["experiment/display/chat"].data[-1] = message
+
+
+async def chat_copy(q: Q) -> None:
+ chat_messages = [
+ f"{'USER' if t[1] == USER else 'ASSISTANT'}: {t[0]}"
+ for t in q.client["experiment/display/chat/messages"]
+ ]
+ chat_to_copy = "\n".join(chat_messages)
+
+ q.page["meta"].script = ui.inline_script(
+ f"navigator.clipboard.writeText(`{chat_to_copy}`);"
+ )
+ await q.page.save()
+
+
+async def answer_chat(q: Q) -> str:
+ cfg = q.client["experiment/display/chat/cfg"]
+ model: Model = q.client["experiment/display/chat/model"]
+ tokenizer = q.client["experiment/display/chat/tokenizer"]
+
+ full_prompt = ""
+ if len(q.client["experiment/display/chat/messages"]):
+ for prev_message in q.client["experiment/display/chat/messages"][
+ -(cfg.prediction.num_history + 1) :
+ ]:
+ if prev_message[1] is USER:
+ prev_message = cfg.dataset.dataset_class.parse_prompt(
+ cfg, prev_message[0]
+ )
+ else:
+ prev_message = prev_message[0]
+ if cfg.dataset.add_eos_token_to_answer:
+ prev_message += cfg.tokenizer._tokenizer_eos_token
+
+ full_prompt += prev_message
+ logger.info(f"Full prompt: {full_prompt}")
+
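+ # The "left" argument requests left-side truncation: if the prompt exceeds
+ # max_length, the oldest turns are cut first and the most recent are kept.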
+ inputs = cfg.dataset.dataset_class.encode(
+ tokenizer, full_prompt, cfg.tokenizer.max_length, "left"
+ )
+ inputs["prompt_input_ids"] = (
+ inputs.pop("input_ids").unsqueeze(0).to(cfg.environment._device)
+ )
+ inputs["prompt_attention_mask"] = (
+ inputs.pop("attention_mask").unsqueeze(0).to(cfg.environment._device)
+ )
+
+ def text_cleaner(text: str) -> str:
+ return cfg.dataset.dataset_class.clean_output(
+ output={"predicted_text": np.array([text])}, cfg=cfg
+ )["predicted_text"][0]
+
+ if cfg.prediction.num_beams == 1:
+ streamer = WaveChatStreamer(tokenizer=tokenizer, q=q, text_cleaner=text_cleaner)
+ # Need to start generation in a separate thread, otherwise streaming is blocked
+ thread = threading.Thread(
+ target=generate,
+ kwargs=dict(model=model, inputs=inputs, cfg=cfg, streamer=streamer),
+ )
+ q.client["currently_chat_streaming"] = True
+ # Show the "Stop generating" button
+ q.page["experiment/display/chat"].generating = True
+ # Hide suggestions
+ q.page["experiment/display/chat"].suggestions = None
+
+ try:
+ thread.start()
+ max_wait_time_in_seconds = 60 * 3
+ for current_wait_time in range(max_wait_time_in_seconds):
+ thread_is_dead = not thread.is_alive()
+ takes_too_much_time = current_wait_time == max_wait_time_in_seconds - 1
+ streaming_finished = streamer.finished
+
+ if streaming_finished or takes_too_much_time or thread_is_dead:
+ if takes_too_much_time:
+ # This is more of a safety measure
+ # to ensure the app becomes responsive again eventually.
+ logger.warning(
+ "Chat generation took too much time. "
+ "Stopping chat generation."
+ )
+ if thread_is_dead: # some error occurred during streaming
+ logger.warning(
+ "Chat generation thread is not alive anymore. "
+ "Please check logs!"
+ )
+ if streaming_finished:
+ logger.info("Chat Stream has been completed")
+
+ predicted_text = streamer.answer
+ break
+ await q.sleep(1) # 1 second, see max_wait_time_in_seconds
+ finally:
+ del q.client["currently_chat_streaming"]
+ if thread.is_alive():
+ thread.join()
+ else:
+ # ValueError: `streamer` cannot be used with beam search (yet!).
+ # Make sure that `num_beams` is set to 1.
+ logger.info("Not streaming output, as it cannot be used with beam search.")
+ q.page["experiment/display/chat"].data[-1] = ["...", BOT]
+ await q.page.save()
+ predicted_answer_ids = generate(model, inputs, cfg)[0]
+ predicted_text = tokenizer.decode(
+ predicted_answer_ids, skip_special_tokens=True
+ )
+ predicted_text = text_cleaner(predicted_text)
+
+ del inputs
+ gc.collect()
+ torch.cuda.empty_cache()
+ return predicted_text
+
+
+class WaveChatStreamer(TextStreamer):
+ """
+ Utility class that updates the chatbot card in a streaming fashion.
+ """
+
+ def __init__(
+ self,
+ tokenizer: AutoTokenizer,
+ q: Q,
+ text_cleaner: Optional[Callable] = None,
+ **decode_kwargs,
+ ):
+ super().__init__(tokenizer, skip_prompt=True, **decode_kwargs)
+ self.text_cleaner: Optional[Callable] = text_cleaner
+ self.words_predicted_answer: List[str] = []
+ self.q: Q = q
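+ # Generation runs in a separate worker thread, so keep a handle to the
+ # event loop in order to schedule chat-card updates from on_finalized_text.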
+ self.loop = asyncio.get_event_loop()
+ self.finished = False
+
+ def on_finalized_text(self, text: str, stream_end: bool = False):
+ self.words_predicted_answer += [text]
+ self.loop.create_task(self.update_chat_page())
+
+ async def update_chat_page(self):
+ self.q.page["experiment/display/chat"].data[-1] = [self.answer, BOT]
+ await self.q.page.save()
+
+ @property
+ def answer(self):
+ """
+ Create the answer by joining all generated words from scratch on each call.
+ This way, self.text_cleaner does not need to be idempotent.
+ """
+ answer = "".join(self.words_predicted_answer)
+ if answer.endswith(self.tokenizer.eos_token):
+ # text generation is stopped
+ answer = answer.replace(self.tokenizer.eos_token, "")
+ if self.text_cleaner:
+ answer = self.text_cleaner(answer)
+ return answer
+
+ def end(self):
+ super().end()
+ self.finished = True
+
+
+def generate(
+ model: Model, inputs: Dict, cfg: Any, streamer: Optional[TextStreamer] = None
+):
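+ # Run generation under autocast so compute happens in the configured
+ # mixed-precision dtype (e.g. bfloat16) independent of the stored weights.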
+ with torch.cuda.amp.autocast(
+ dtype=get_torch_dtype(cfg.environment.mixed_precision_dtype)
+ ):
+ output = model.generate(batch=inputs, cfg=cfg, streamer=streamer).detach().cpu()
+ return output
+
+
+async def show_chat_is_running_dialog(q):
+ q.page["meta"].dialog = ui.dialog(
+ title="Text Generation is streaming.",
+ name="chatbot_running_dialog",
+ items=[
+ ui.text("Please wait till the text generation has stopped."),
+ ],
+ closable=True,
+ )
+ await q.page.save()
+
+
+async def show_stream_is_aborted_dialog(q):
+ q.page["meta"].dialog = ui.dialog(
+ title="Text Generation will be stopped.",
+ name="chatbot_stopping_dialog",
+ items=[
+ ui.text("Please wait"),
+ ],
+ closable=False,
+ )
+ await q.page.save()
+
+
+async def is_app_blocked_while_streaming(q: Q) -> bool:
+ """
+ Check whether the app is blocked by the current answer generation.
+ """
+ if (
+ q.events["experiment/display/chat/chatbot"] is not None
+ and q.events["experiment/display/chat/chatbot"]["stop"]
+ and q.client["currently_chat_streaming"]
+ ):
+ # Cancel the streaming task.
+ try:
+ # User clicks abort button while the chat is currently streaming
+ logger.info("Stopping Chat Stream")
+ os.environ[EnvVariableStoppingCriteria.stop_streaming_env] = "True"
+ await show_stream_is_aborted_dialog(q)
+ await q.page.save()
+
+ for _ in range(20): # don't wait longer than 10 seconds
+ await q.sleep(0.5)
+ if q.client["currently_chat_streaming"] is None:
+ q.page["meta"].dialog = None
+ await q.page.save()
+ return True
+ else:
+ logger.warning("Could not terminate stream")
+ return True
+ finally:
+ if EnvVariableStoppingCriteria.stop_streaming_env in os.environ:
+ del os.environ[EnvVariableStoppingCriteria.stop_streaming_env]
+ # Hide the "Stop generating" button.
+ q.page["experiment/display/chat"].generating = False
+
+ elif q.client["experiment/display/chat/finished"] is False:
+ await show_chat_is_running_dialog(q)
+ return True
+ return False
diff --git a/llm_studio/app_utils/sections/common.py b/llm_studio/app_utils/sections/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..283dfe86bd9ba6e4b1cbdd1db02373e80fb0f93e
--- /dev/null
+++ b/llm_studio/app_utils/sections/common.py
@@ -0,0 +1,269 @@
+import hashlib
+import logging
+from typing import List
+
+from h2o_wave import Q, ui
+
+from llm_studio.app_utils.cards import card_zones
+from llm_studio.app_utils.config import default_cfg
+
+logger = logging.getLogger(__name__)
+
+
+async def meta(q: Q) -> None:
+ if q.client["keep_meta"]: # Do not reset meta, keep current dialog opened
+ q.client["keep_meta"] = False
+ return
+
+ zones = card_zones(mode=q.client["mode_curr"])
+
+ if q.client["notification_bar"]:
+ notification_bar = ui.notification_bar(
+ type="warning",
+ timeout=20,
+ text=q.client["notification_bar"],
+ position="top-right",
+ )
+ else:
+ notification_bar = None
+
+ # TODO remove `stylesheet` when wave makes message bars smaller
+ q.page["meta"] = ui.meta_card(
+ box="",
+ title="H2O LLM Studio",
+ layouts=[
+ ui.layout(breakpoint="0px", width="100%", zones=zones),
+ ui.layout(breakpoint="1920px", width="1920px", zones=zones),
+ ],
+ scripts=[
+ ui.script(source, asynchronous=True) for source in q.app["script_sources"]
+ ],
+ stylesheet=ui.inline_stylesheet(
+ """
+ .ms-MessageBar {
+ padding-top: 3px;
+ padding-bottom: 3px;
+ min-height: 18px;
+ }
+ div[data-test="nav_bar"] .ms-Nav-groupContent {
+ margin-bottom: 0;
+ }
+
+ div[data-test="experiment/display/deployment/top_right"],
+ div[data-test="experiment/display/deployment/top_right"]
+ div[data-visible="true"]:last-child > div > div {
+ display: flex;
+ }
+
+ div[data-test="experiment/display/deployment/top_right"]
+ div[data-visible="true"]:last-child,
+ div[data-test="experiment/display/deployment/top_right"]
+ div[data-visible="true"]:last-child > div {
+ display: flex;
+ flex-grow: 1;
+ }
+
+ div[data-test="experiment/display/deployment/top_right"]
+ div[data-visible="true"]:last-child > div > div > div {
+ display: flex;
+ flex-grow: 1;
+ flex-direction: column;
+ }
+
+ div[data-test="experiment/display/deployment/top_right"]
+ div[data-visible="true"]:last-child > div > div > div > div {
+ flex-grow: 1;
+ }
+ """
+ ),
+ script=None,
+ notification_bar=notification_bar,
+ )
+
+ q.page["meta"].theme = "h2o-dark"
+
+
+def heap_analytics(
+ userid, user_properties=None, event_properties=None
+) -> ui.InlineScript:
+ script = (
+ "window.heap=window.heap||[],heap.load=function(e,t)"
+ "{window.heap.appid=e,window.heap."
+ 'config=t=t||{};var r=document.createElement("script");'
+ 'r.type="text/javascript",'
+ 'r.async=!0,r.src="https://cdn.heapanalytics.com/js/heap-"+e+".js";'
+ 'var a=document.getElementsByTagName("script")[0];'
+ "a.parentNode.insertBefore(r,a);"
+ "for(var n=function(e){return function(){heap.push([e]."
+ "concat(Array.prototype.slice.call(arguments,0)))}},"
+ 'p=["addEventProperties","addUserProperties","clearEventProperties","identify",'
+ '"resetIdentity","removeEventProperty","setEventProperties","track",'
+ '"unsetEventProperty"],o=0;o None:
+ """Display interface cards."""
+
+ await meta(q)
+
+ navigation_pages = ["Home", "Settings"]
+
+ if q.client["init_interface"] is None:
+ # to avoid flickering
+ q.page["header"] = ui.header_card(
+ box="header",
+ title=default_cfg.name,
+ image=q.app["icon_path"],
+ subtitle=f"v{default_cfg.version}",
+ )
+
+ if q.app.heap_mode:
+ logger.info("Heap on")
+ q.page["meta"].script = heap_analytics(
+ userid=q.auth.subject,
+ event_properties=(
+ f"{{version: '{q.app.version}'" + f", product: '{q.app.name}'}}"
+ ),
+ )
+ # execute the heap inline script once in the initialization
+ await q.page.save()
+ else:
+ logger.info("Heap off")
+
+ q.page["nav_bar"] = ui.nav_card(
+ box="nav",
+ items=[
+ ui.nav_group(
+ "Navigation",
+ items=[
+ ui.nav_item(page.lower(), page) for page in navigation_pages
+ ],
+ ),
+ ui.nav_group(
+ "Datasets",
+ items=[
+ ui.nav_item(name="dataset/import", label="Import dataset"),
+ ui.nav_item(name="dataset/list", label="View datasets"),
+ ],
+ ),
+ ui.nav_group(
+ "Experiments",
+ items=[
+ ui.nav_item(name="experiment/start", label="Create experiment"),
+ ui.nav_item(
+ name="experiment/start/grid_search",
+ label="Create grid search",
+ ),
+ ui.nav_item(name="experiment/list", label="View experiments"),
+ ],
+ ),
+ ],
+ value=(
+ default_cfg.start_page
+ if q.client["nav/active"] is None
+ else q.client["nav/active"]
+ ),
+ )
+ else:
+ # Only update menu properties to prevent from flickering
+ q.page["nav_bar"].value = (
+ default_cfg.start_page
+ if q.client["nav/active"] is None
+ else q.client["nav/active"]
+ )
+
+ q.client["init_interface"] = True
+
+
+async def clean_dashboard(q: Q, mode: str = "full", exclude: List[str] = []):
+ """Drop cards from Q page."""
+
+ logger.info(q.client.delete_cards)
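+ # q.client.delete_cards acts as a registry of dynamically created cards;
+ # everything registered via q.client.delete_cards.add(...) is dropped here.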
+ for card_name in q.client.delete_cards:
+ if card_name not in exclude:
+ del q.page[card_name]
+
+ q.page["meta"].layouts[0].zones = card_zones(mode=mode)
+ q.client["mode_curr"] = mode
+ q.client["notification_bar"] = None
+
+
+async def delete_dialog(q: Q, names: List[str], action, entity):
+ title = "Do you really want to delete "
+ n_datasets = len(names)
+
+ if n_datasets == 1:
+ title = f"{title}{entity} {names[0]}?"
+ else:
+ title = f"{title}{n_datasets} {entity}s?"
+
+ q.page["meta"].dialog = ui.dialog(
+ f"Delete {entity}",
+ items=[
+ ui.text(title),
+ ui.markup(" "),
+ ui.buttons(
+ [
+ ui.button(name=action, label="Delete", primary=True),
+ ui.button(name="abort", label="Abort", primary=False),
+ ],
+ justify="end",
+ ),
+ ],
+ )
+ q.client["keep_meta"] = True
+
+
+async def info_dialog(q: Q, title: str, message: str):
+ q.page["meta"].dialog = ui.dialog(
+ title,
+ items=[
+ ui.text(message),
+ ui.markup(" "),
+ ui.buttons(
+ [
+ ui.button(name="abort", label="Continue", primary=False),
+ ],
+ justify="end",
+ ),
+ ],
+ blocking=True,
+ )
+ q.client["keep_meta"] = True
+
+
+async def heap_redact(q: Q) -> None:
+ if q.app.heap_mode:
+ # Send the page to the browser, so the following js can be applied
+ await q.page.save()
+
+ # replace dataset names with ****
+ q.page["meta"].script = ui.inline_script(
+ """
+document.querySelectorAll('div[data-automation-key="name"]').forEach(a => {
+ a.setAttribute('data-heap-redact-text', '')
+})
+
+document.querySelector('div[data-test="datasets_table"] \
+.ms-ScrollablePane--contentContainer').addEventListener('scroll', () => {
+ window.setTimeout(() => {{
+ document.querySelectorAll('div[data-automation-key="name"]').forEach(a => {
+ a.setAttribute('data-heap-redact-text', '')
+ })
+ }}, 100)
+})
+ """
+ )
diff --git a/llm_studio/app_utils/sections/dataset.py b/llm_studio/app_utils/sections/dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c7a57d44a811b2281cbe1c3223936db11db94e4
--- /dev/null
+++ b/llm_studio/app_utils/sections/dataset.py
@@ -0,0 +1,1477 @@
+import functools
+import hashlib
+import logging
+import os
+import re
+import shutil
+import textwrap
+import time
+import traceback
+from typing import List, Optional
+
+import pandas as pd
+from h2o_wave import Q, ui
+from h2o_wave.types import FormCard, ImageCard, MarkupCard, StatListItem, Tab
+
+from llm_studio.app_utils.config import default_cfg
+from llm_studio.app_utils.db import Dataset
+from llm_studio.app_utils.sections.common import clean_dashboard
+from llm_studio.app_utils.sections.experiment import experiment_start
+from llm_studio.app_utils.sections.histogram_card import histogram_card
+from llm_studio.app_utils.utils import (
+ add_model_type,
+ azure_download,
+ azure_file_options,
+ check_valid_upload_content,
+ clean_error,
+ dir_file_table,
+ get_data_dir,
+ get_dataset_elements,
+ get_datasets,
+ get_experiments_status,
+ get_frame_stats,
+ get_model_types,
+ get_problem_types,
+ get_unique_dataset_name,
+ h2o_drive_download,
+ h2o_drive_file_options,
+ huggingface_download,
+ kaggle_download,
+ local_download,
+ make_label,
+ parse_ui_elements,
+ remove_temp_files,
+ s3_download,
+ s3_file_options,
+)
+from llm_studio.app_utils.wave_utils import busy_dialog, ui_table_from_df
+from llm_studio.src.datasets.conversation_chain_handler import get_conversation_chains
+from llm_studio.src.tooltips import tooltips
+from llm_studio.src.utils.config_utils import (
+ load_config_py,
+ load_config_yaml,
+ save_config_yaml,
+)
+from llm_studio.src.utils.data_utils import (
+ get_fill_columns,
+ read_dataframe,
+ read_dataframe_drop_missing_labels,
+ sanity_check,
+)
+from llm_studio.src.utils.plot_utils import PlotData
+
+logger = logging.getLogger(__name__)
+
+
+def file_extension_is_compatible(q):
+ cfg = q.client["dataset/import/cfg"]
+ allowed_extensions = cfg.dataset._allowed_file_extensions
+
+ is_correct_extension = []
+ for mode in ["train", "validation"]:
+ dataset_name = q.client[f"dataset/import/cfg/{mode}_dataframe"]
+
+ if dataset_name is None or dataset_name == "None":
+ continue
+ is_correct_extension.append(dataset_name.endswith(allowed_extensions))
+ return all(is_correct_extension)
+
+
+async def dataset_import(
+ q: Q,
+ step: int,
+ edit: Optional[bool] = False,
+ error: Optional[str] = "",
+ warning: Optional[str] = "",
+ info: Optional[str] = "",
+ allow_merge: bool = True,
+) -> None:
+ """Display dataset import cards.
+
+ Args:
+ q: Q
+ step: current step of wizard
+ edit: whether in edit mode
+ error: optional error message
+ warning: optional warning message
+ info: optional info message
+ allow_merge: whether to allow merging dataset when importing
+ """
+
+ await clean_dashboard(q, mode="full")
+ q.client["nav/active"] = "dataset/import"
+ if step == 1: # select import data source
+ q.page["dataset/import"] = ui.form_card(box="content", items=[])
+ q.client.delete_cards.add("dataset/import")
+
+ if q.client["dataset/import/source"] is None:
+ q.client["dataset/import/source"] = "Upload"
+
+ import_choices = [
+ ui.choice("Upload", "Upload"),
+ ui.choice("Local", "Local"),
+ ui.choice("S3", "AWS S3"),
+ ui.choice("Azure", "Azure Datalake"),
+ ui.choice("H2O-Drive", "H2O-Drive"),
+ ui.choice("Kaggle", "Kaggle"),
+ ui.choice("Huggingface", "Hugging Face"),
+ ]
+
+ items = [
+ ui.text_l("Import dataset"),
+ ui.dropdown(
+ name="dataset/import/source",
+ label="Source",
+ value=(
+ "Upload"
+ if q.client["dataset/import/source"] is None
+ else q.client["dataset/import/source"]
+ ),
+ choices=import_choices,
+ trigger=True,
+ tooltip="Source of dataset import",
+ ),
+ ]
+
+ if q.client["dataset/import/source"] == "S3":
+ if q.client["dataset/import/s3_bucket"] is None:
+ q.client["dataset/import/s3_bucket"] = q.client[
+ "default_aws_bucket_name"
+ ]
+ if q.client["dataset/import/s3_access_key"] is None:
+ q.client["dataset/import/s3_access_key"] = q.client[
+ "default_aws_access_key"
+ ]
+ if q.client["dataset/import/s3_secret_key"] is None:
+ q.client["dataset/import/s3_secret_key"] = q.client[
+ "default_aws_secret_key"
+ ]
+
+ files: List[str] | Exception = s3_file_options(
+ q.client["dataset/import/s3_bucket"],
+ q.client["dataset/import/s3_access_key"],
+ q.client["dataset/import/s3_secret_key"],
+ )
+
+ # Handle errors in S3 connection and display them nicely below
+ if isinstance(files, Exception):
+ warning = str(files)
+ files = []
+
+ if len(files) == 0:
+ ui_filename = ui.textbox(
+ name="dataset/import/s3_filename",
+ label="File name",
+ value="",
+ required=True,
+ tooltip="File name to be imported",
+ )
+ else:
+ default_file = files[0]
+ ui_filename = ui.dropdown(
+ name="dataset/import/s3_filename",
+ label="File name",
+ value=default_file,
+ choices=[ui.choice(x, x.split("/")[-1]) for x in files],
+ required=True,
+ tooltip="File name to be imported",
+ )
+
+ items += [
+ ui.textbox(
+ name="dataset/import/s3_bucket",
+ label="S3 bucket name",
+ value=q.client["dataset/import/s3_bucket"],
+ trigger=True,
+ required=True,
+ tooltip="S3 bucket name including relative paths",
+ ),
+ ui.textbox(
+ name="dataset/import/s3_access_key",
+ label="AWS access key",
+ value=q.client["dataset/import/s3_access_key"],
+ trigger=True,
+ required=True,
+ password=True,
+ tooltip="Optional AWS access key; empty for anonymous access.",
+ ),
+ ui.textbox(
+ name="dataset/import/s3_secret_key",
+ label="AWS secret key",
+ value=q.client["dataset/import/s3_secret_key"],
+ trigger=True,
+ required=True,
+ password=True,
+ tooltip="Optional AWS secret key; empty for anonymous access.",
+ ),
+ ui_filename,
+ ]
+
+ elif q.client["dataset/import/source"] == "Azure":
+ if q.client["dataset/import/azure_conn_string"] is None:
+ q.client["dataset/import/azure_conn_string"] = q.client[
+ "default_azure_conn_string"
+ ]
+ if q.client["dataset/import/azure_container"] is None:
+ q.client["dataset/import/azure_container"] = q.client[
+ "default_azure_container"
+ ]
+
+ files = azure_file_options(
+ q.client["dataset/import/azure_conn_string"],
+ q.client["dataset/import/azure_container"],
+ )
+
+ if not files:
+ ui_filename = ui.textbox(
+ name="dataset/import/azure_filename",
+ label="File name",
+ value="",
+ required=True,
+ tooltip="File name to be imported",
+ )
+ else:
+ default_file = files[0]
+ ui_filename = ui.dropdown(
+ name="dataset/import/azure_filename",
+ label="File name",
+ value=default_file,
+ choices=[ui.choice(x, x.split("/")[-1]) for x in files],
+ required=True,
+ tooltip="File name to be imported",
+ )
+
+ items += [
+ ui.textbox(
+ name="dataset/import/azure_conn_string",
+ label="Datalake connection string",
+ value=q.client["dataset/import/azure_conn_string"],
+ trigger=True,
+ required=True,
+ password=True,
+ tooltip="Azure connection string to connect to Datalake storage",
+ ),
+ ui.textbox(
+ name="dataset/import/azure_container",
+ label="Datalake container name",
+ value=q.client["dataset/import/azure_container"],
+ trigger=True,
+ required=True,
+ tooltip="Azure Datalake container name including relative paths",
+ ),
+ ui_filename,
+ ]
+
+ elif q.client["dataset/import/source"] == "Upload":
+ items += [
+ ui.file_upload(
+ name="dataset/import/local_upload",
+ label="Upload!",
+ multiple=False,
+ file_extensions=default_cfg.allowed_file_extensions,
+ )
+ ]
+
+ elif q.client["dataset/import/source"] == "Local":
+ current_path = (
+ q.client["dataset/import/local_path_current"]
+ if q.client["dataset/import/local_path_current"] is not None
+ else os.path.expanduser("~")
+ )
+
+ if q.args.__wave_submission_name__ == "dataset/import/local_path_list":
+ idx = int(q.args["dataset/import/local_path_list"][0])
+ options = q.client["dataset/import/local_path_list_last"]
+ new_path = os.path.abspath(os.path.join(current_path, options[idx]))
+ if os.path.exists(new_path):
+ current_path = new_path
+
+ results_df = dir_file_table(current_path)
+ files_list = results_df[current_path].tolist()
+ q.client["dataset/import/local_path_list_last"] = files_list
+ q.client["dataset/import/local_path_current"] = current_path
+
+ items += [
+ ui.textbox(
+ name="dataset/import/local_path",
+ label="File location",
+ value=current_path,
+ required=True,
+ tooltip="Location of file to be imported",
+ ),
+ ui_table_from_df(
+ q=q,
+ df=results_df,
+ name="dataset/import/local_path_list",
+ sortables=[],
+ searchables=[],
+ min_widths={current_path: "400"},
+ link_col=current_path,
+ height="calc(65vh)",
+ ),
+ ]
+
+ elif q.client["dataset/import/source"] == "H2O-Drive":
+
+ files = await h2o_drive_file_options(q)
+
+ # Handle errors in h2o_drive connection and display them nicely below
+ if isinstance(files, Exception):
+ warning = str(files)
+ files = []
+
+ if len(files) == 0:
+ ui_filename = ui.textbox(
+ name="dataset/import/h2o_drive_filename",
+ label="File name",
+ value="No files found",
+ required=True,
+ disabled=True,
+ tooltip="File name to be imported",
+ )
+ else:
+ default_file = files[0]
+ ui_filename = ui.dropdown(
+ name="dataset/import/h2o_drive_filename",
+ label="File name",
+ value=default_file,
+ choices=[ui.choice(x, x.split("/")[-1]) for x in files],
+ required=True,
+ tooltip="File name to be imported",
+ )
+
+ items += [ui_filename]
+
+ elif q.client["dataset/import/source"] == "Kaggle":
+ if q.client["dataset/import/kaggle_access_key"] is None:
+ q.client["dataset/import/kaggle_access_key"] = q.client[
+ "default_kaggle_username"
+ ]
+ if q.client["dataset/import/kaggle_secret_key"] is None:
+ q.client["dataset/import/kaggle_secret_key"] = q.client[
+ "default_kaggle_secret_key"
+ ]
+
+ items += [
+ ui.textbox(
+ name="dataset/import/kaggle_command",
+ label="Kaggle API command",
+ placeholder="kaggle competitions download -c dataset",
+ required=True,
+ tooltip="Kaggle API command to be executed",
+ ),
+ ui.textbox(
+ name="dataset/import/kaggle_access_key",
+ label="Kaggle username",
+ value=q.client["dataset/import/kaggle_access_key"],
+ required=True,
+ password=False,
+ tooltip="Kaggle username for API authentication",
+ ),
+ ui.textbox(
+ name="dataset/import/kaggle_secret_key",
+ label="Kaggle secret key",
+ value=q.client["dataset/import/kaggle_secret_key"],
+ required=True,
+ password=True,
+ tooltip="Kaggle secret key for API authentication",
+ ),
+ ]
+ elif q.client["dataset/import/source"] == "Huggingface":
+
+ if q.client["dataset/import/huggingface_split"] is None:
+ q.client["dataset/import/huggingface_split"] = "train"
+ if q.client["dataset/import/huggingface_api_token"] is None:
+ q.client["dataset/import/huggingface_api_token"] = q.client[
+ "default_huggingface_api_token"
+ ]
+
+ items += [
+ ui.textbox(
+ name="dataset/import/huggingface_dataset",
+ label="Hugging Face dataset",
+ value=q.client["dataset/import/huggingface_dataset"],
+ required=True,
+ tooltip="Name of the Hugging Face dataset",
+ ),
+ ui.textbox(
+ name="dataset/import/huggingface_split",
+ label="Split",
+ value=q.client["dataset/import/huggingface_split"],
+ required=True,
+ password=False,
+ tooltip="Split of the dataset",
+ ),
+ ui.textbox(
+ name="dataset/import/huggingface_api_token",
+ label="Hugging Face API token",
+ value=q.client["dataset/import/huggingface_api_token"],
+ required=False,
+ password=True,
+ tooltip="Optional Hugging Face API token",
+ ),
+ ]
+
+ allowed_types = ", ".join(default_cfg.allowed_file_extensions)
+ allowed_types = " or".join(allowed_types.rsplit(",", 1))
+ items += [
+ ui.message_bar(type="info", text=info + f"Must be a {allowed_types} file."),
+ ui.message_bar(type="error", text=error),
+ ui.message_bar(type="warning", text=warning),
+ ]
+
+ q.page["dataset/import"].items = items
+
+ buttons = [ui.button(name="dataset/list", label="Abort")]
+ if q.client["dataset/import/source"] != "Upload":
+ buttons.insert(
+ 0, ui.button(name="dataset/import/2", label="Continue", primary=True)
+ )
+
+ q.page["dataset/import/footer"] = ui.form_card(
+ box="footer", items=[ui.inline(items=buttons, justify="start")]
+ )
+ q.client.delete_cards.add("dataset/import/footer")
+
+ q.client["dataset/import/id"] = None
+ q.client["dataset/import/cfg_file"] = None
+
+ elif step == 2: # download / import data from source
+ q.page["dataset/import/footer"] = ui.form_card(box="footer", items=[])
+ try:
+ if not q.args["dataset/import/cfg_file"] and not edit:
+ if q.client["dataset/import/source"] == "S3":
+ (
+ q.client["dataset/import/path"],
+ q.client["dataset/import/name"],
+ ) = await s3_download(
+ q,
+ q.client["dataset/import/s3_bucket"],
+ q.client["dataset/import/s3_filename"],
+ q.client["dataset/import/s3_access_key"],
+ q.client["dataset/import/s3_secret_key"],
+ )
+ elif q.client["dataset/import/source"] == "Azure":
+ (
+ q.client["dataset/import/path"],
+ q.client["dataset/import/name"],
+ ) = await azure_download(
+ q,
+ q.client["dataset/import/azure_conn_string"],
+ q.client["dataset/import/azure_container"],
+ q.client["dataset/import/azure_filename"],
+ )
+ elif q.client["dataset/import/source"] in ("Upload", "Local"):
+ (
+ q.client["dataset/import/path"],
+ q.client["dataset/import/name"],
+ ) = await local_download(q, q.client["dataset/import/local_path"])
+ elif q.client["dataset/import/source"] == "H2O-Drive":
+ (
+ q.client["dataset/import/path"],
+ q.client["dataset/import/name"],
+ ) = await h2o_drive_download(
+ q, q.client["dataset/import/h2o_drive_filename"]
+ )
+ elif q.client["dataset/import/source"] == "Kaggle":
+ (
+ q.client["dataset/import/path"],
+ q.client["dataset/import/name"],
+ ) = await kaggle_download(
+ q,
+ q.client["dataset/import/kaggle_command"],
+ q.client["dataset/import/kaggle_access_key"],
+ q.client["dataset/import/kaggle_secret_key"],
+ )
+ elif q.client["dataset/import/source"] == "Huggingface":
+ (
+ q.client["dataset/import/path"],
+ q.client["dataset/import/name"],
+ ) = await huggingface_download(
+ q,
+ q.client["dataset/import/huggingface_dataset"],
+ q.client["dataset/import/huggingface_split"],
+ )
+
+ # store if in edit mode
+ q.client["dataset/import/edit"] = edit
+
+ # clear dataset triggers from client
+ for trigger_key in default_cfg.dataset_trigger_keys:
+ if q.client[f"dataset/import/cfg/{trigger_key}"]:
+ del q.client[f"dataset/import/cfg/{trigger_key}"]
+
+ await dataset_import(
+ q,
+ step=3,
+ edit=edit,
+ error=error,
+ warning=warning,
+ allow_merge=allow_merge,
+ )
+ except Exception:
+ logger.error("Dataset error:", exc_info=True)
+ error = (
+ "Dataset import failed. Please make sure all required "
+ "fields are filled correctly."
+ )
+ await clean_dashboard(q, mode="full")
+ await dataset_import(q, step=1, error=str(error))
+
+ elif step == 3: # set dataset configuration
+ q.page["dataset/import/footer"] = ui.form_card(box="footer", items=[])
+ try:
+ if not q.args["dataset/import/cfg_file"] and not edit:
+ q.client["dataset/import/name"] = get_unique_dataset_name(
+ q, q.client["dataset/import/name"]
+ )
+ q.page["dataset/import"] = ui.form_card(box="content", items=[])
+ q.client.delete_cards.add("dataset/import")
+
+ wizard = q.page["dataset/import"]
+
+ title = "Configure dataset"
+
+ items = [
+ ui.text_l(title),
+ ui.textbox(
+ name="dataset/import/name",
+ label="Dataset name",
+ value=q.client["dataset/import/name"],
+ required=True,
+ tooltip=tooltips["experiments_dataset_name"],
+ ),
+ ]
+
+ choices_problem_types = [
+ ui.choice(name, label) for name, label in get_problem_types()
+ ]
+
+ if q.client["dataset/import/cfg_file"] is None:
+ max_substring_len = 0
+ for c in choices_problem_types:
+ problem_type_name = c.name.replace("_config", "")
+ if problem_type_name in q.client["dataset/import/name"]:
+ if len(problem_type_name) > max_substring_len:
+ q.client["dataset/import/cfg_file"] = c.name
+ q.client["dataset/import/cfg_category"] = c.name.split("_")[
+ 0
+ ]
+ max_substring_len = len(problem_type_name)
+ if q.client["dataset/import/cfg_file"] is None:
+ q.client["dataset/import/cfg_file"] = default_cfg.cfg_file
+ q.client["dataset/import/cfg_category"] = q.client[ # type: ignore
+ "dataset/import/cfg_file"
+ ].split("_")[0]
+
+ # set default value of problem type if no match to category
+ if (
+ q.client["dataset/import/cfg_category"]
+ not in q.client["dataset/import/cfg_file"]
+ ):
+ q.client["dataset/import/cfg_file"] = get_problem_types(
+ category=q.client["dataset/import/cfg_category"]
+ )[0][0]
+
+ items += [
+ ui.dropdown(
+ name="dataset/import/cfg_file",
+ label="Problem Type",
+ required=True,
+ choices=choices_problem_types,
+ value=q.client["dataset/import/cfg_file"],
+ trigger=True,
+ tooltip=tooltips["experiments_problem_type"],
+ )
+ ]
+ model_types = get_model_types(q.client["dataset/import/cfg_file"])
+ if len(model_types) > 0:
+ # add model type to cfg file name here
+ q.client["dataset/import/cfg_file"] = add_model_type(
+ q.client["dataset/import/cfg_file"], model_types[0][0]
+ )
+ if not edit:
+ q.client["dataset/import/cfg"] = load_config_py(
+ config_path=(
+ f"llm_studio/python_configs/"
+ f"{q.client['dataset/import/cfg_file']}"
+ ),
+ config_name="ConfigProblemBase",
+ )
+
+ option_items = get_dataset_elements(cfg=q.client["dataset/import/cfg"], q=q)
+ items.extend(option_items)
+ items.append(ui.message_bar(type="error", text=error))
+ items.append(ui.message_bar(type="warning", text=warning))
+ if file_extension_is_compatible(q):
+ ui_nav_name = "dataset/import/4/edit" if edit else "dataset/import/4"
+ buttons = [
+ ui.button(name=ui_nav_name, label="Continue", primary=True),
+ ui.button(name="dataset/list", label="Abort"),
+ ]
+ if allow_merge:
+ datasets_df = q.client.app_db.get_datasets_df()
+ if datasets_df.shape[0]:
+ label = "Merge With Existing Dataset"
+ buttons.insert(1, ui.button(name="dataset/merge", label=label))
+ else:
+ problem_type = make_label(
+ re.sub("_config.*", "", q.client["dataset/import/cfg_file"])
+ )
+ items += [
+ ui.text(
+ " The chosen file extensions is not "
+ f"compatible with {problem_type}. "
+ )
+ ]
+ buttons = [
+ ui.button(name="dataset/list", label="Abort"),
+ ]
+ q.page["dataset/import/footer"] = ui.form_card(
+ box="footer", items=[ui.inline(items=buttons, justify="start")]
+ )
+
+ wizard.items = items
+
+ q.client.delete_cards.add("dataset/import/footer")
+
+ except Exception as exception:
+ logger.error("Dataset error:", exc_info=True)
+ error = clean_error(str(exception))
+ await clean_dashboard(q, mode="full")
+ await dataset_import(q, step=1, error=str(error))
+
+ elif step == 31: # activities after change in Parent ID columns
+ logger.info("Step 31")
+ cfg = q.client["dataset/import/cfg"]
+ cfg = parse_ui_elements(
+ cfg=cfg, q=q, limit=default_cfg.dataset_keys, pre="dataset/import/cfg/"
+ )
+ q.client["dataset/import/cfg"] = cfg
+ await dataset_import(q, 3, edit=True)
+ elif step == 4: # verify if dataset does not exist already
+ dataset_name = q.client["dataset/import/name"]
+ original_name = q.client["dataset/import/original_name"] # used in edit mode
+ valid_dataset_name = get_unique_dataset_name(q, dataset_name)
+ if valid_dataset_name != dataset_name and not (
+ q.client["dataset/import/edit"] and dataset_name == original_name
+ ):
+ err = f"Dataset {dataset_name} already exists"
+ q.client["dataset/import/name"] = valid_dataset_name
+ await dataset_import(q, 3, edit=edit, error=err)
+ else:
+ await dataset_import(q, 5, edit=edit)
+
+ elif step == 5: # visualize dataset
+ header = "Sample Data Visualization "
+ valid_visualization = False
+ continue_visible = True
+ try:
+ cfg = q.client["dataset/import/cfg"]
+ cfg = parse_ui_elements(
+ cfg=cfg, q=q, limit=default_cfg.dataset_keys, pre="dataset/import/cfg/"
+ )
+
+ q.client["dataset/import/cfg"] = cfg
+
+ await busy_dialog(
+ q=q,
+ title="Performing sanity checks on the data",
+ text="Please be patient...",
+ )
+ # add one-second delay for datasets where sanity check is instant
+ # to avoid flickering dialog
+ time.sleep(1)
+ sanity_check(cfg)
+
+ plot = cfg.logging.plots_class.plot_data(cfg)
+ text = (
+ "Data Validity Check. Click Continue if the input "
+ "data and labels appear correctly."
+ )
+ if plot.encoding == "image":
+ plot_item = ui.image(title="", type="png", image=plot.data)
+ elif plot.encoding == "html":
+ plot_item = ui.markup(content=plot.data)
+ elif plot.encoding == "df":
+ df = pd.read_parquet(plot.data)
+ df = df.iloc[:2000]
+ min_widths = {"Content": "800"}
+ plot_item = ui_table_from_df(
+ q=q,
+ df=df,
+ name="experiment/display/table",
+ markdown_cells=list(df.select_dtypes(include=["object"]).columns),
+ searchables=list(df.columns),
+ downloadable=False,
+ resettable=False,
+ min_widths=min_widths,
+ height="calc(100vh - 267px)",
+ max_char_length=5_000,
+ cell_overflow="tooltip",
+ )
+ else:
+ raise ValueError(f"Unknown plot encoding `{plot.encoding}`")
+
+ items = [ui.markup(content=header), ui.message_bar(text=text), plot_item]
+ valid_visualization = True
+
+ except AssertionError as exception:
+ logger.error(f"Error while validating data: {exception}", exc_info=True)
+ # Wrap the exception text to limit the line length to 100 characters
+ wrapped_exception_lines = textwrap.fill(
+ str(exception), width=100
+ ).splitlines()
+
+ # Join the wrapped exception lines with an extra newline to separate each
+ wrapped_exception = "\n".join(wrapped_exception_lines)
+ text = (
+ "# Error while validating data\n"
+ "Please review the error message below \n"
+ "\n"
+ "**Details of the Validation Error**:\n"
+ "\n"
+ f"{wrapped_exception}"
+ "\n"
+ )
+
+ items = [
+ ui.markup(content=header),
+ ui.message_bar(text=text, type="error"),
+ ui.expander(
+ name="expander",
+ label="Expand Error Traceback",
+ items=[ui.markup(f"{traceback.format_exc()} ")],
+ ),
+ ]
+ continue_visible = False
+ except Exception as exception:
+ logger.error(
+ f"Error while plotting data preview: {exception}", exc_info=True
+ )
+ text = (
+ "Error occurred while visualizing the data. Please go back and verify "
+ "whether the problem type and other settings were set properly."
+ )
+ items = [
+ ui.markup(content=header),
+ ui.message_bar(text=text, type="error"),
+ ui.expander(
+ name="expander",
+ label="Expand Error Traceback",
+ items=[ui.markup(f"{traceback.format_exc()} ")],
+ ),
+ ]
+ continue_visible = False
+
+ buttons = [
+ ui.button(
+ name="dataset/import/6",
+ label="Continue",
+ primary=valid_visualization,
+ visible=continue_visible,
+ ),
+ ui.button(
+ name="dataset/import/3/edit",
+ label="Back",
+ primary=not valid_visualization,
+ ),
+ ui.button(name="dataset/list", label="Abort"),
+ ]
+
+ q.page["dataset/import"] = ui.form_card(box="content", items=items)
+ q.client.delete_cards.add("dataset/import")
+
+ q.page["dataset/import/footer"] = ui.form_card(
+ box="footer", items=[ui.inline(items=buttons, justify="start")]
+ )
+ q.client.delete_cards.add("dataset/import/footer")
+
+ elif step == 6: # create dataset
+ if q.client["dataset/import/name"] == "":
+ await clean_dashboard(q, mode="full")
+ await dataset_import(q, step=2, error="Please enter all required fields!")
+
+ else:
+ folder_name = q.client["dataset/import/path"].split("/")[-1]
+ new_folder = q.client["dataset/import/name"]
+ act_path = q.client["dataset/import/path"]
+ new_path = new_folder.join(act_path.rsplit(folder_name, 1))
+
+ try:
+ shutil.move(q.client["dataset/import/path"], new_path)
+
+ cfg = q.client["dataset/import/cfg"]
+
+ # remap old path to new path
+ for k in default_cfg.dataset_folder_keys:
+ old_path = getattr(cfg.dataset, k, None)
+ if old_path is not None:
+ setattr(
+ cfg.dataset,
+ k,
+ old_path.replace(q.client["dataset/import/path"], new_path),
+ )
+
+ # change the default validation strategy if validation df set
+ if cfg.dataset.validation_dataframe != "None":
+ cfg.dataset.validation_strategy = "custom"
+ cfg_path = f"{new_path}/{q.client['dataset/import/cfg_file']}.yaml"
+ save_config_yaml(cfg_path, cfg)
+
+ train_rows = None
+ if os.path.exists(cfg.dataset.train_dataframe):
+ train_rows = read_dataframe_drop_missing_labels(
+ cfg.dataset.train_dataframe, cfg
+ ).shape[0]
+ validation_rows = None
+ if os.path.exists(cfg.dataset.validation_dataframe):
+ validation_rows = read_dataframe_drop_missing_labels(
+ cfg.dataset.validation_dataframe, cfg
+ ).shape[0]
+
+ dataset = Dataset(
+ id=q.client["dataset/import/id"],
+ name=q.client["dataset/import/name"],
+ path=new_path,
+ config_file=cfg_path,
+ train_rows=train_rows,
+ validation_rows=validation_rows,
+ )
+ if q.client["dataset/import/id"] is not None:
+ q.client.app_db.delete_dataset(dataset.id)
+ q.client.app_db.add_dataset(dataset)
+ await dataset_list(q)
+
+ except Exception as exception:
+ logger.error("Dataset error:", exc_info=True)
+ q.client.app_db._session.rollback()
+ error = clean_error(str(exception))
+ await clean_dashboard(q, mode="full")
+ await dataset_import(q, step=2, error=str(error))
+
+
+async def dataset_merge(q: Q, step, error=""):
+ if step == 1: # Select which dataset to merge
+ await clean_dashboard(q, mode="full")
+ q.client["nav/active"] = "dataset/merge"
+
+ q.page["dataset/merge"] = ui.form_card(box="content", items=[])
+ q.client.delete_cards.add("dataset/merge")
+
+ datasets_df = q.client.app_db.get_datasets_df()
+ import_choices = [
+ ui.choice(x["path"], x["name"]) for idx, x in datasets_df.iterrows()
+ ]
+
+ items = [
+ ui.text_l("Merge current dataset with an existing dataset"),
+ ui.dropdown(
+ name="dataset/merge/target",
+ label="Dataset",
+ value=datasets_df.iloc[0]["path"],
+ choices=import_choices,
+ trigger=False,
+ tooltip="Source of dataset import",
+ ),
+ ]
+
+ if error:
+ items.append(ui.message_bar(type="error", text=error))
+
+ q.page["dataset/merge"].items = items
+
+ buttons = [
+ ui.button(name="dataset/merge/action", label="Merge", primary=True),
+ ui.button(name="dataset/import/3", label="Back", primary=False),
+ ui.button(name="dataset/list", label="Abort"),
+ ]
+
+ q.page["dataset/import/footer"] = ui.form_card(
+ box="footer", items=[ui.inline(items=buttons, justify="start")]
+ )
+ q.client.delete_cards.add("dataset/import/footer")
+
+ elif step == 2: # copy file to dataset and go to edit dataset
+ current_dir = q.client["dataset/import/path"]
+ target_dir = q.args["dataset/merge/target"]
+
+ if current_dir == target_dir:
+ await dataset_merge(q, step=1, error="Cannot merge dataset with itself")
+ return
+
+ datasets_df = q.client.app_db.get_datasets_df().set_index("path")
+ has_dataset_entry = current_dir in datasets_df.index
+
+ if has_dataset_entry:
+ experiment_df = q.client.app_db.get_experiments_df()
+ source_id = int(datasets_df.loc[current_dir, "id"])
+ has_experiment = any(experiment_df["dataset"].astype(int) == source_id)
+ else:
+ source_id = None
+ has_experiment = False
+
+ current_files = os.listdir(current_dir)
+ current_files = [x for x in current_files if not x.endswith(".yaml")]
+ target_files = os.listdir(target_dir)
+ overlapping_files = list(set(current_files).intersection(set(target_files)))
+ rename_map = {}
+
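+ # Resolve filename collisions by appending a numeric suffix, e.g.
+ # "train.csv" becomes "train_1.csv" (then "train_2.csv", ...) until free.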
+ for file in overlapping_files:
+ tmp_str = file.split(".")
+ if len(tmp_str) == 1:
+ file_name, extension = file, ""
+ else:
+ file_name, extension = ".".join(tmp_str[:-1]), f".{tmp_str[-1]}"
+
+ cnt = 1
+ while f"{file_name}_{cnt}{extension}" in target_files:
+ cnt += 1
+
+ rename_map[file] = f"{file_name}_{cnt}{extension}"
+ target_files.append(rename_map[file])
+
+ if len(overlapping_files):
+ warning = (
+ f"Renamed {', '.join(rename_map.keys())} to "
+ f"{', '.join(rename_map.values())} due to duplicated entries."
+ )
+ else:
+ warning = ""
+
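+ # Files of datasets still referenced by experiments are copied so the
+ # experiment's source data stays intact; otherwise the files are moved.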
+ for file in current_files:
+ new_file = rename_map.get(file, file)
+ src = os.path.join(current_dir, file)
+ dst = os.path.join(target_dir, new_file)
+
+ if has_experiment:
+ if os.path.isdir(src):
+ shutil.copytree(src, dst)
+ else:
+ shutil.copy(src, dst)
+ else:
+ shutil.move(src, dst)
+
+ if not has_experiment:
+ shutil.rmtree(current_dir)
+ if has_dataset_entry:
+ q.client.app_db.delete_dataset(source_id)
+
+ dataset_id = int(datasets_df.loc[target_dir, "id"])
+ await dataset_edit(q, dataset_id, warning=warning, allow_merge=False)
+
+
+async def dataset_list_table(
+ q: Q,
+ show_experiment_datasets: bool = True,
+) -> None:
+ """Pepare dataset list form card
+
+ Args:
+ q: Q
+ show_experiment_datasets: whether to also show datasets linked to experiments
+ """
+
+ q.client["dataset/list/df_datasets"] = get_datasets(
+ q=q,
+ show_experiment_datasets=show_experiment_datasets,
+ )
+
+ df_viz = q.client["dataset/list/df_datasets"].copy()
+
+ columns_to_drop = [
+ "id",
+ "path",
+ "config_file",
+ "validation dataframe",
+ ]
+
+ df_viz = df_viz.drop(columns=columns_to_drop, errors="ignore")
+ if "problem type" in df_viz.columns:
+ df_viz["problem type"] = df_viz["problem type"].str.replace("Text ", "")
+
+ widths = {
+ "name": "200",
+ "problem type": "210",
+ "train dataframe": "190",
+ "train rows": "120",
+ "validation rows": "130",
+ "labels": "120",
+ "actions": "5",
+ }
+
+ actions_dict = {
+ "dataset/newexperiment": "New experiment",
+ "dataset/edit": "Edit dataset",
+ "dataset/delete/dialog/single": "Delete dataset",
+ }
+
+ q.page["dataset/list"] = ui.form_card(
+ box="content",
+ items=[
+ ui_table_from_df(
+ q=q,
+ df=df_viz,
+ name="dataset/list/table",
+ sortables=["train rows", "validation rows"],
+ filterables=["name", "problem type"],
+ searchables=[],
+ min_widths=widths,
+ link_col="name",
+ height="calc(100vh - 267px)",
+ actions=actions_dict,
+ ),
+ ui.message_bar(type="info", text=""),
+ ],
+ )
+ q.client.delete_cards.add("dataset/list")
+
+
+async def dataset_list(q: Q, reset: bool = True) -> None:
+ """Display all datasets."""
+ q.client["nav/active"] = "dataset/list"
+
+ if reset:
+ await clean_dashboard(q, mode="full")
+ await dataset_list_table(q)
+
+ q.page["dataset/display/footer"] = ui.form_card(
+ box="footer",
+ items=[
+ ui.inline(
+ items=[
+ ui.button(
+ name="dataset/import", label="Import dataset", primary=True
+ ),
+ ui.button(
+ name="dataset/list/delete",
+ label="Delete datasets",
+ primary=False,
+ ),
+ ],
+ justify="start",
+ )
+ ],
+ )
+ q.client.delete_cards.add("dataset/display/footer")
+ remove_temp_files(q)
+
+ await q.page.save()
+
+
+async def dataset_newexperiment(q: Q, dataset_id: int):
+ """Start a new experiment from given dataset."""
+
+ dataset = q.client.app_db.get_dataset(dataset_id)
+
+ q.client["experiment/start/cfg_file"] = dataset.config_file.split("/")[-1].replace(
+ ".yaml", ""
+ )
+ q.client["experiment/start/cfg_category"] = q.client[
+ "experiment/start/cfg_file"
+ ].split("_")[0]
+ q.client["experiment/start/dataset"] = str(dataset_id)
+
+ await experiment_start(q)
+
+
+async def dataset_edit(
+ q: Q, dataset_id: int, error: str = "", warning: str = "", allow_merge: bool = True
+):
+ """Edit selected dataset.
+
+ Args:
+ q: Q
+ dataset_id: dataset id to edit
+ error: optional error message
+ warning: optional warning message
+ allow_merge: whether to allow merging dataset when editing
+ """
+
+ dataset = q.client.app_db.get_dataset(dataset_id)
+
+ experiments_df = q.client.app_db.get_experiments_df()
+ experiments_df = experiments_df[experiments_df["dataset"] == str(dataset_id)]
+ statuses, _ = get_experiments_status(experiments_df)
+ num_invalid = len([stat for stat in statuses if stat in ["running", "queued"]])
+
+ if num_invalid:
+ info = "s" if num_invalid > 1 else ""
+ info_str = (
+ f"Dataset {dataset.name} is linked to {num_invalid} "
+ f"running or queued experiment{info}. Wait for them to finish or stop them "
+ "first before editing the dataset."
+ )
+ q.page["dataset/list"].items[1].message_bar.text = info_str
+ return
+
+ q.client["dataset/import/id"] = dataset_id
+
+ q.client["dataset/import/cfg_file"] = dataset.config_file.split("/")[-1].replace(
+ ".yaml", ""
+ )
+ q.client["dataset/import/cfg_category"] = q.client["dataset/import/cfg_file"].split(
+ "_"
+ )[0]
+ q.client["dataset/import/path"] = dataset.path
+ q.client["dataset/import/name"] = dataset.name
+ q.client["dataset/import/original_name"] = dataset.name
+ q.client["dataset/import/cfg"] = load_config_yaml(dataset.config_file)
+
+ if allow_merge and experiments_df.shape[0]:
+ allow_merge = False
+
+ await dataset_import(
+ q=q, step=2, edit=True, error=error, warning=warning, allow_merge=allow_merge
+ )
+
+
+async def dataset_list_delete(q: Q):
+ """Allow to select multiple datasets for deletion."""
+
+ await dataset_list_table(q, show_experiment_datasets=False)
+
+ q.page["dataset/list"].items[0].table.multiple = True
+
+ info_str = "Only datasets not linked to experiments can be deleted."
+
+ q.page["dataset/list"].items[1].message_bar.text = info_str
+
+ q.page["dataset/display/footer"].items = [
+ ui.inline(
+ items=[
+ ui.button(
+ name="dataset/delete/dialog", label="Delete datasets", primary=True
+ ),
+ ui.button(name="dataset/list/delete/abort", label="Abort"),
+ ]
+ )
+ ]
+
+
+async def dataset_delete(q: Q, dataset_ids: List[int]):
+ """Delete selected datasets.
+
+ Args:
+ q: Q
+ dataset_ids: list of dataset ids to delete
+ """
+
+ for dataset_id in dataset_ids:
+ dataset = q.client.app_db.get_dataset(dataset_id)
+ q.client.app_db.delete_dataset(dataset.id)
+
+ try:
+ shutil.rmtree(dataset.path)
+ except OSError:
+ pass
+
+
+async def dataset_delete_single(q: Q, dataset_id: int):
+ dataset = q.client.app_db.get_dataset(dataset_id)
+
+ experiments_df = q.client.app_db.get_experiments_df()
+ num_experiments = sum(experiments_df["dataset"] == str(dataset_id))
+ if num_experiments:
+ info = "s" if num_experiments > 1 else ""
+ info_str = (
+ f"Dataset {dataset.name} is linked to {num_experiments} "
+ f"experiment{info}. Only datasets not linked to experiments can be deleted."
+ )
+ await dataset_list(q)
+ q.page["dataset/list"].items[1].message_bar.text = info_str
+ else:
+ await dataset_delete(q, [dataset_id])
+ await dataset_list(q)
+
+
+async def dataset_display(q: Q) -> None:
+ """Display a selected dataset."""
+
+ dataset_id = q.client["dataset/list/df_datasets"]["id"].iloc[
+ q.client["dataset/display/id"]
+ ]
+ dataset: Dataset = q.client.app_db.get_dataset(dataset_id)
+ config_filename = dataset.config_file
+ cfg = load_config_yaml(config_filename)
+ dataset_filename = cfg.dataset.train_dataframe
+
+ if (
+ q.client["dataset/display/tab"] is None
+ or q.args["dataset/display/data"] is not None
+ ):
+ q.client["dataset/display/tab"] = "dataset/display/data"
+
+ if q.args["dataset/display/visualization"] is not None:
+ q.client["dataset/display/tab"] = "dataset/display/visualization"
+
+ if q.args["dataset/display/statistics"] is not None:
+ q.client["dataset/display/tab"] = "dataset/display/statistics"
+
+ if q.args["dataset/display/summary"] is not None:
+ q.client["dataset/display/tab"] = "dataset/display/summary"
+
+ await clean_dashboard(q, mode=q.client["dataset/display/tab"])
+
+ items: List[Tab] = [
+ ui.tab(name="dataset/display/data", label="Sample Train Data"),
+ ui.tab(
+ name="dataset/display/visualization", label="Sample Train Visualization"
+ ),
+ ui.tab(name="dataset/display/statistics", label="Train Data Statistics"),
+ ui.tab(name="dataset/display/summary", label="Summary"),
+ ]
+
+ q.page["dataset/display/tab"] = ui.tab_card(
+ box="nav2",
+ link=True,
+ items=items,
+ value=q.client["dataset/display/tab"],
+ )
+ q.client.delete_cards.add("dataset/display/tab")
+
+ if q.client["dataset/display/tab"] == "dataset/display/data":
+ await show_data_tab(q=q, cfg=cfg, filename=dataset_filename)
+
+ elif q.client["dataset/display/tab"] == "dataset/display/visualization":
+ await show_visualization_tab(q, cfg)
+
+ elif q.client["dataset/display/tab"] == "dataset/display/statistics":
+ await show_statistics_tab(
+ q, dataset_filename=dataset_filename, config_filename=config_filename
+ )
+
+ elif q.client["dataset/display/tab"] == "dataset/display/summary":
+ await show_summary_tab(q, dataset_id)
+
+ q.page["dataset/display/footer"] = ui.form_card(
+ box="footer",
+ items=[
+ ui.inline(
+ items=[
+ ui.button(
+ name="dataset/newexperiment/from_current",
+ label="Create experiment",
+ primary=False,
+ disabled=False,
+ tooltip=None,
+ ),
+ ui.button(name="dataset/list", label="Back", primary=False),
+ ],
+ justify="start",
+ )
+ ],
+ )
+ q.client.delete_cards.add("dataset/display/footer")
+
+
+async def show_data_tab(q: Q, cfg, filename: str):
+ fill_columns = get_fill_columns(cfg)
+ df = read_dataframe(filename, n_rows=200, fill_columns=fill_columns)
+ q.page["dataset/display/data"] = ui.form_card(
+ box="first",
+ items=[
+ ui_table_from_df(
+ q=q,
+ df=df,
+ name="dataset/display/data/table",
+ sortables=list(df.columns),
+ markdown_cells=None, # render all cells as raw text
+ height="calc(100vh - 267px)",
+ cell_overflow="wrap",
+ )
+ ],
+ )
+ q.client.delete_cards.add("dataset/display/data")
+
+
+async def show_visualization_tab(q: Q, cfg):
+ try:
+ plot = cfg.logging.plots_class.plot_data(cfg)
+ except Exception as error:
+ logger.error(f"Error while plotting data preview: {error}", exc_info=True)
+ plot = PlotData("Error while plotting data. ", encoding="html")
+ card: ImageCard | MarkupCard | FormCard
+ if plot.encoding == "image":
+ card = ui.image_card(box="first", title="", type="png", image=plot.data)
+ elif plot.encoding == "html":
+ card = ui.markup_card(box="first", title="", content=plot.data)
+ elif plot.encoding == "df":
+ df = pd.read_parquet(plot.data)
+ df = df.iloc[:2000]
+ min_widths = {"Content": "800"}
+ card = ui.form_card(
+ box="first",
+ items=[
+ ui_table_from_df(
+ q=q,
+ df=df,
+ name="dataset/display/visualization/table",
+ markdown_cells=list(df.select_dtypes(include=["object"]).columns),
+ searchables=list(df.columns),
+ downloadable=True,
+ resettable=True,
+ min_widths=min_widths,
+ height="calc(100vh - 267px)",
+ max_char_length=50_000,
+ cell_overflow="tooltip",
+ )
+ ],
+ )
+
+ else:
+ raise ValueError(f"Unknown plot encoding `{plot.encoding}`")
+ q.page["dataset/display/visualization"] = card
+ q.client.delete_cards.add("dataset/display/visualization")
+
+
+async def show_summary_tab(q: Q, dataset_id):
+ dataset_df = get_datasets(q)
+ dataset_df = dataset_df[dataset_df.id == dataset_id]
+ stat_list_items: List[StatListItem] = []
+ for col in dataset_df.columns:
+ if col in ["id", "config_file", "path", "process_id", "status"]:
+ continue
+ v = dataset_df[col].values[0]
+ t: StatListItem = ui.stat_list_item(label=make_label(col), value=str(v))
+
+ stat_list_items.append(t)
+ q.page["dataset/display/summary"] = ui.stat_list_card(
+ box="first", items=stat_list_items, title=""
+ )
+ q.client.delete_cards.add("dataset/display/summary")
+
+
+async def show_statistics_tab(q: Q, dataset_filename, config_filename):
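+ # Hash the config file so the LRU cache below is invalidated whenever the
+ # dataset configuration changes on disk.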
+ with open(config_filename, "rb") as config_file:
+ cfg_hash = hashlib.md5(config_file.read()).hexdigest()
+ stats_dict = compute_dataset_statistics(dataset_filename, config_filename, cfg_hash)
+
+ for chat_type in ["prompts", "answers"]:
+ q.page[f"dataset/display/statistics/{chat_type}_histogram"] = histogram_card(
+ x=stats_dict[chat_type],
+ x_axis_description=f"text_length_{chat_type.capitalize()}",
+ title=f"Text Length Distribution for {chat_type.capitalize()}"
+ f" (split by whitespace)",
+ histogram_box="first",
+ )
+ q.client.delete_cards.add(f"dataset/display/statistics/{chat_type}_histogram")
+
+ q.page["dataset/display/statistics/full_conversation_histogram"] = histogram_card(
+ x=stats_dict["complete_conversations"],
+ x_axis_description="text_length_complete_conversations",
+ title="Text Length Distribution for complete "
+ "conversations (split by whitespace)",
+ histogram_box="second",
+ )
+ q.client.delete_cards.add("dataset/display/statistics/full_conversation_histogram")
+
+ if len(set(stats_dict["number_of_prompts"])) > 1:
+ q.page["dataset/display/statistics/parent_id_length_histogram"] = (
+ histogram_card(
+ x=stats_dict["number_of_prompts"],
+ x_axis_description="number_of_prompts",
+ title="Distribution of number of prompt-answer turns per conversation.",
+ histogram_box="second",
+ )
+ )
+ q.client.delete_cards.add(
+ "dataset/display/statistics/parent_id_length_histogram"
+ )
+
+ df_stats = stats_dict["df_stats"]
+ if df_stats is None:
+ component_items = [
+ ui.text(
+ "Dataset does not contain numerical or text features. "
+ "No statistics available."
+ )
+ ]
+ else:
+ if df_stats.shape[1] > 5: # mixed text and numeric
+ widths = {col: "77" for col in df_stats}
+ else: # only text features
+ widths = None
+ component_items = [
+ ui_table_from_df(
+ q=q,
+ df=df_stats,
+ name="dataset/display/statistics/table",
+ sortables=list(df_stats.columns),
+ min_widths=widths,
+ height="265px",
+ )
+ ]
+ q.page["dataset/display/statistics"] = ui.form_card(
+ box="third",
+ items=component_items,
+ )
+ q.client.delete_cards.add("dataset/display/statistics")
+
+
+@functools.lru_cache()
+def compute_dataset_statistics(dataset_path: str, cfg_path: str, cfg_hash: str) -> dict:
+ """
+ Compute various statistics for a dataset.
+ - text length distribution for prompts and answers
+ - text length distribution for complete conversations
+ - distribution of number of prompt-answer turns per conversation
+ - statistics for non text features
+
+ We use LRU caching to avoid recomputing the statistics for the same dataset.
+ Thus, cfg_hash is used as a function argument to identify the dataset.
+ """
+ df_train = read_dataframe(dataset_path)
+ cfg = load_config_yaml(cfg_path)
+ conversations = get_conversation_chains(
+ df=df_train, cfg=cfg, limit_chained_samples=True
+ )
+ stats_dict = {}
+ for chat_type in ["prompts", "answers"]:
+ text_lengths = [
+ [len(text.split(" ")) for text in conversation[chat_type]]
+ for conversation in conversations
+ ]
+ text_lengths = [item for sublist in text_lengths for item in sublist]
+ stats_dict[chat_type] = text_lengths
+ input_texts = []
+ for conversation in conversations:
+ input_text = conversation["systems"][0]
+ prompts = conversation["prompts"]
+ answers = conversation["answers"]
+ for prompt, answer in zip(prompts, answers):
+ input_text += prompt + answer
+ input_texts += [input_text]
+ stats_dict["complete_conversations"] = [
+ len(text.split(" ")) for text in input_texts
+ ]
+ stats_dict["number_of_prompts"] = [
+ len(conversation["prompts"]) for conversation in conversations
+ ]
+ stats_dict["df_stats"] = get_frame_stats(df_train)
+ return stats_dict
+
+
+async def dataset_import_uploaded_file(q: Q) -> None:
+ local_path = await q.site.download(
+ q.args["dataset/import/local_upload"][0],
+ f"{get_data_dir(q)}/"
+ f'{q.args["dataset/import/local_upload"][0].split("/")[-1]}',
+ )
+ await q.site.unload(q.args["dataset/import/local_upload"][0])
+ valid, error = check_valid_upload_content(local_path)
+ if valid:
+ q.args["dataset/import/local_path"] = local_path
+ q.client["dataset/import/local_path"] = q.args["dataset/import/local_path"]
+ await dataset_import(q, step=2)
+ else:
+ await dataset_import(q, step=1, error=error)
+
+
+async def dataset_delete_current_datasets(q: Q) -> None:
+ dataset_ids = list(
+ q.client["dataset/list/df_datasets"]["id"].iloc[
+ list(map(int, q.client["dataset/list/table"]))
+ ]
+ )
+ await dataset_delete(q, dataset_ids)
+ await dataset_list(q)
diff --git a/llm_studio/app_utils/sections/experiment.py b/llm_studio/app_utils/sections/experiment.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e5178fef4ea1e929a718ea1c01e4197dda6cdd6
--- /dev/null
+++ b/llm_studio/app_utils/sections/experiment.py
@@ -0,0 +1,2134 @@
+import asyncio
+import glob
+import itertools
+import logging
+import os
+import random
+import shutil
+import time
+import traceback
+import zipfile
+from pathlib import Path
+from typing import Callable, List, Optional, Set, Union
+
+import accelerate
+import einops
+import huggingface_hub
+import numpy as np
+import pandas as pd
+import torch
+import transformers
+import yaml
+from h2o_wave import Q, data, ui
+from sqlitedict import SqliteDict
+
+from llm_studio.app_utils.config import default_cfg
+from llm_studio.app_utils.hugging_face_utils import (
+ get_chat_template,
+ get_model_card,
+ publish_model_to_hugging_face,
+)
+from llm_studio.app_utils.sections.chat import chat_tab, load_cfg_model_tokenizer
+from llm_studio.app_utils.sections.common import clean_dashboard
+from llm_studio.app_utils.utils import (
+ GridCheckError,
+ add_model_type,
+ filter_grid_search_combination,
+ flatten_dict,
+ get_cfg_list_items,
+ get_data_dir,
+ get_download_link,
+ get_experiment_status,
+ get_experiments,
+ get_grid_search,
+ get_model_types,
+ get_problem_categories,
+ get_problem_types,
+ get_ui_elements_for_cfg,
+ get_unique_name,
+ hf_repo_friendly_name,
+ parse_ui_elements,
+ remove_model_type,
+ set_env,
+ set_grid_to_cfg,
+ start_experiment,
+)
+from llm_studio.app_utils.wave_utils import busy_dialog, ui_table_from_df, wave_theme
+from llm_studio.python_configs.cfg_checks import check_config_for_errors
+from llm_studio.src.datasets.text_utils import get_tokenizer
+from llm_studio.src.tooltips import tooltips
+from llm_studio.src.utils.config_utils import (
+ GENERATION_PROBLEM_TYPES,
+ NON_GENERATION_PROBLEM_TYPES,
+ load_config_py,
+ load_config_yaml,
+ save_config_yaml,
+)
+from llm_studio.src.utils.exceptions import LLMResourceException
+from llm_studio.src.utils.export_utils import (
+ check_available_space,
+ get_adapter_model_path,
+ get_artifact_path_path,
+ get_logs_path,
+ get_model_path,
+ get_predictions_path,
+ save_logs,
+ save_prediction_outputs,
+)
+from llm_studio.src.utils.logging_utils import write_flag
+from llm_studio.src.utils.modeling_utils import unwrap_model
+from llm_studio.src.utils.plot_utils import PLOT_ENCODINGS
+from llm_studio.src.utils.utils import add_file_to_zip, kill_child_processes_and_current
+
+logger = logging.getLogger(__name__)
+
+
+def trace_calls(func):
+ """
+ Trace calls to the function by printing the function name and the stack trace.
+ """
+
+ async def async_wrapper(*args, **kwargs):
+ logger.debug(f"Async function {func.__name__} called from:")
+ logger.debug("".join(traceback.format_stack(limit=2)))
+ return await func(*args, **kwargs)
+
+ def sync_wrapper(*args, **kwargs):
+ logger.debug(f"Function {func.__name__} called from:")
+ logger.debug("".join(traceback.format_stack(limit=2)))
+ return func(*args, **kwargs)
+
+ # Check if the function is asynchronous
+ if asyncio.iscoroutinefunction(func):
+ return async_wrapper
+ else:
+ return sync_wrapper
+
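+# Example (illustrative): any handler decorated with @trace_calls logs its
+# call site at DEBUG level before executing, e.g.:
+#
+#     @trace_calls
+#     async def my_handler(q: Q) -> None:
+#         ...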
+
+@trace_calls
+async def experiment_start(q: Q) -> None:
+ """Display experiment start cards."""
+
+ await clean_dashboard(q, mode="experiment_start", exclude=["experiment/start"])
+
+ show_update_warnings = True
+ is_create_experiment = False
+    # reset certain configs if a new experiment session is started
+    if q.args.__wave_submission_name__ in (
+        "experiment/start",
+        "experiment/start/grid_search",
+        "experiment/start_experiment",
+        "dataset/newexperiment",
+        "dataset/newexperiment/from_current",
+        "experiment/list/new",
+        "experiment/list/new_gridsearch",
+    ):
+ q.client["experiment/start/cfg_experiment_prev"] = None
+ q.client["experiment/start/cfg_file_prev"] = None
+ q.client["experiment/start/prev_dataset"] = None
+ q.client["experiment/start/cfg_sub"] = None
+ show_update_warnings = False
+ is_create_experiment = True
+    if q.args.__wave_submission_name__ in (
+        "experiment/start",
+        "experiment/start_experiment",
+        "dataset/newexperiment",
+        "dataset/newexperiment/from_current",
+        "experiment/list/new",
+    ):
+        q.client["experiment/start/grid_search"] = None
+    elif q.args.__wave_submission_name__ in (
+        "experiment/start/grid_search",
+        "experiment/list/new_gridsearch",
+    ):
+ q.client["experiment/start/grid_search"] = True
+
+ # set active navigation
+ if q.client["experiment/start/grid_search"]:
+ q.client["nav/active"] = "experiment/start/grid_search"
+ else:
+ q.client["nav/active"] = "experiment/start"
+
+ # get all the datasets available
+ df_datasets = q.client.app_db.get_datasets_df()
+ # Hide inference only datasets
+ df_datasets = df_datasets.loc[df_datasets["train_rows"].notna()]
+ if (
+ not q.client["experiment/start/dataset"]
+ or q.client["experiment/start/dataset"] not in df_datasets.id.astype(str).values
+ ):
+ if len(df_datasets) >= 1:
+ q.client["experiment/start/dataset"] = str(df_datasets["id"].iloc[-1])
+ else:
+ q.client["experiment/start/dataset"] = "1"
+
+ warning_message = "Experiment settings might be updated after changing {}"
+
+ items = [
+ ui.separator(name="general_expander", label="General settings"),
+ ui.dropdown(
+ name="experiment/start/dataset",
+ label="Dataset",
+ required=True,
+ value=q.client["experiment/start/dataset"],
+ choices=[
+ ui.choice(str(row["id"]), str(row["name"]))
+ for _, row in df_datasets.iterrows()
+ ],
+ trigger=True,
+ tooltip=tooltips["experiments_dataset"],
+ ),
+ ]
+
+ if (
+ show_update_warnings
+ and q.client["experiment/start/dataset_prev"]
+ != q.client["experiment/start/dataset"]
+ ):
+ items += [
+ ui.message_bar(type="warning", text=warning_message.format("Dataset"))
+ ]
+ show_update_warnings = False
+
+ if (
+ q.client["experiment/start/cfg_file"] is None
+ or q.client["experiment/start/dataset_prev"]
+ != q.client["experiment/start/dataset"]
+ ) and q.client["experiment/start/cfg_category"] != "experiment":
+ dataset = q.client.app_db.get_dataset(q.client["experiment/start/dataset"])
+ if dataset is not None:
+ problem_type = dataset.config_file.replace(dataset.path + "/", "").replace(
+ ".yaml", ""
+ )
+ else:
+ problem_type = default_cfg.cfg_file
+ q.client["experiment/start/cfg_file"] = problem_type
+ q.client["experiment/start/cfg_category"] = problem_type.split("_")[0]
+
+ if q.client["experiment/start/cfg_category"] == "experiment":
+ q.client["experiment/start/cfg_file"] = "experiment"
+
+ # get all experiments
+ df_experiments = get_experiments(q)
+
+ # get all problem category choices
+ choices_problem_categories = [
+ ui.choice(name, label) for name, label in get_problem_categories()
+ ]
+
+ if len(df_experiments["id"]) > 0:
+ choices_problem_categories += [ui.choice("experiment", "From Experiment")]
+
+ # set default value of problem type if no match to category
+ if (
+ q.client["experiment/start/cfg_category"]
+ not in q.client["experiment/start/cfg_file"]
+ ):
+ if q.client["experiment/start/cfg_category"] != "experiment":
+ q.client["experiment/start/cfg_file"] = get_problem_types(
+ category=q.client["experiment/start/cfg_category"]
+ )[0][0]
+
+ # get all problem type choices
+ choices_problem_types = [
+ ui.choice(name, label)
+ for name, label in get_problem_types(
+ category=q.client["experiment/start/cfg_category"]
+ )
+ ]
+
+ # remove model type if present in cfg file name here
+ q.client["experiment/start/cfg_file"] = remove_model_type(
+ q.client["experiment/start/cfg_file"]
+ )
+
+ if len(df_experiments["id"]) > 0:
+ if q.client["experiment/start/cfg_experiment"] is None:
+ q.client["experiment/start/cfg_experiment"] = str(
+ df_experiments["id"].iloc[0]
+ )
+ # Default pretrained from the previous experiment to False
+ if (
+ q.client["experiment/start/cfg_experiment_pretrained"] is None
+ or is_create_experiment
+ ):
+ q.client["experiment/start/cfg_experiment_pretrained"] = False
+
+ if q.client["experiment/start/cfg_category"] != "experiment":
+ items += [
+ ui.dropdown(
+ name="experiment/start/cfg_file",
+ label="Problem Type",
+ required=True,
+ choices=choices_problem_types,
+ value=q.client["experiment/start/cfg_file"],
+ trigger=True,
+ tooltip=tooltips["experiments_problem_type"],
+ )
+ ]
+
+ model_types = get_model_types(q.client["experiment/start/cfg_file"])
+ if len(model_types) > 0:
+ choices = [ui.choice(name, label) for name, label in model_types]
+ if q.client["experiment/start/cfg_sub"] in [None, ""]:
+ q.client["experiment/start/cfg_sub"] = model_types[0][0]
+ items += [
+ ui.dropdown(
+ name="experiment/start/cfg_sub",
+ label="Model Type",
+ required=True,
+ choices=choices,
+ value=q.client["experiment/start/cfg_sub"],
+ trigger=True,
+ )
+ ]
+ else:
+ q.client["experiment/start/cfg_sub"] = ""
+
+ # add model type to cfg file name here
+ q.client["experiment/start/cfg_file"] = add_model_type(
+ q.client["experiment/start/cfg_file"], q.client["experiment/start/cfg_sub"]
+ )
+
+ if (
+ show_update_warnings
+ and q.client["experiment/start/cfg_file_prev"]
+ != q.client["experiment/start/cfg_file"]
+ and q.client["experiment/start/cfg_category"] != "experiment"
+ ):
+ items += [
+ ui.message_bar(type="warning", text=warning_message.format("Problem Type"))
+ ]
+ show_update_warnings = False
+
+ if q.client["experiment/start/cfg_category"] == "experiment":
+ if (
+ show_update_warnings
+ and q.client["experiment/start/cfg_experiment_prev"]
+ != q.client["experiment/start/cfg_experiment"]
+ ):
+ items += [
+ ui.message_bar(
+ type="warning", text=warning_message.format("previous Experiment")
+ )
+ ]
+
+ # Show pretrained weights toggle only for successfully finished experiments
+ if (
+ df_experiments.loc[
+ df_experiments.id == int(q.client["experiment/start/cfg_experiment"]),
+ "status",
+ ].values[0]
+ == "finished"
+ ):
+ items += [
+ ui.toggle(
+ name="experiment/start/cfg_experiment_pretrained",
+ label="Use previous experiment weights",
+ value=q.client["experiment/start/cfg_experiment_pretrained"],
+ trigger=True,
+ )
+ ]
+
+ # only show yaml option, when not starting from another experiment
+ if q.client["experiment/start/cfg_category"] != "experiment":
+ items += [
+ ui.toggle(
+ name="experiment/start/from_yaml",
+ label="Import config from YAML",
+ value=False,
+ trigger=True,
+ tooltip=tooltips["experiments_import_config_from_yaml"],
+ )
+ ]
+
+ if q.args["experiment/start/from_yaml"]:
+ items += [
+ ui.file_upload(
+ name="experiment/upload_yaml",
+ label="Upload!",
+ multiple=False,
+ file_extensions=["yaml"],
+ )
+ ]
+
+ if q.args["experiment/upload_yaml"] is not None:
+ # reset previous, so the UI will be reloaded
+ q.client["experiment/start/cfg_file_prev"] = None
+ await config_import_uploaded_file(q)
+
+ logger.info(
+ f"PREV {q.client['experiment/start/cfg_file_prev']} "
+ f"{q.client['experiment/start/cfg_file']} "
+ f"{q.client['experiment/start/dataset_prev']} "
+ f"{q.client['experiment/start/dataset']} "
+ f"{q.client['experiment/start/cfg_experiment_prev']} "
+ f"{q.client['experiment/start/cfg_experiment']} "
+ )
+
+ if q.client["experiment/start/cfg_category"] == "experiment":
+ logger.info("Starting from experiment")
+
+ # reset previous config file
+ q.client["experiment/start/cfg_file_prev"] = None
+
+ q.client["experiment/start/experiment"] = q.client.app_db.get_experiment(
+ q.client["experiment/start/cfg_experiment"]
+ )
+
+ parent_path = os.path.dirname(q.client["experiment/start/experiment"].path)
+ parent_config = load_config_yaml(f"{parent_path}/cfg.yaml")
+
+ q.client["experiment/start/cfg"] = parent_config
+
+ # set pretrained weights
+ if q.client["experiment/start/cfg_experiment_pretrained"]:
+ prev_weights = os.path.join(
+ q.client["experiment/start/experiment"].path,
+ "checkpoint.pth",
+ )
+ if os.path.exists(prev_weights):
+ q.client["experiment/start/cfg"].architecture.pretrained_weights = (
+ prev_weights
+ )
+ q.client["experiment/start/cfg"].architecture._visibility[
+ "pretrained_weights"
+ ] = -1
+
+ experiments_df = q.client.app_db.get_experiments_df()
+ output_dir = os.path.abspath(
+ os.path.join(q.client["experiment/start/cfg"].output_directory, "..")
+ )
+ q.client["experiment/start/cfg"].experiment_name = get_unique_name(
+ q.client["experiment/start/cfg"].experiment_name,
+ experiments_df["name"].values,
+ lambda x: os.path.exists(os.path.join(output_dir, x)),
+ )
+
+ # Configuration flags:
+ # from_dataset -- take the values from the dataset config
+ # from_cfg -- take the values from the configuration file
+ # from_default -- take the values from the default settings
+ # from_dataset_args -- take the values from the dataset's q.args
+ # Otherwise -- take the values from the q.args (user input)
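+    # Example: switching the dataset dropdown sets from_dataset and from_cfg,
+    # so defaults are re-derived from the new dataset below; otherwise
+    # from_dataset_args keeps the values currently entered in the UI.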
+
+ # pick default values from config
+ if (
+ q.client["experiment/start/cfg_experiment_prev"]
+ != q.client["experiment/start/cfg_experiment"]
+ ):
+ q.client["experiment/start/cfg_mode/from_dataset"] = False
+ q.client["experiment/start/cfg_mode/from_cfg"] = True
+ q.client["experiment/start/cfg_mode/from_dataset_args"] = False
+
+ q.client["experiment/start/dataset"] = str(
+ q.client["experiment/start/experiment"].dataset
+ )
+
+ items[1].dropdown.value = q.client["experiment/start/dataset"]
+ # pick default values from config or dataset
+ elif (
+ q.client["experiment/start/dataset_prev"]
+ != q.client["experiment/start/dataset"]
+ ):
+ q.client["experiment/start/cfg_mode/from_dataset"] = True
+ q.client["experiment/start/cfg_mode/from_cfg"] = True
+ q.client["experiment/start/cfg_mode/from_dataset_args"] = False
+ # pick default values from args
+ else:
+ q.client["experiment/start/cfg_mode/from_dataset"] = False
+ q.client["experiment/start/cfg_mode/from_cfg"] = False
+ q.client["experiment/start/cfg_mode/from_dataset_args"] = True
+
+ q.client["experiment/start/cfg_mode/from_default"] = False
+ q.client["experiment/start/cfg_experiment_prev"] = q.client[
+ "experiment/start/cfg_experiment"
+ ]
+
+ else:
+ logger.info("Starting from CFG")
+
+ # reset previous experiment
+ q.client["experiment/start/cfg_experiment_prev"] = None
+
+ # pick default values from dataset or config
+ if (
+ q.client["experiment/start/cfg_file_prev"]
+ != q.client["experiment/start/cfg_file"]
+ ) or (
+ q.client["experiment/start/dataset_prev"]
+ != q.client["experiment/start/dataset"]
+ ):
+ q.client["experiment/start/cfg_mode/from_dataset"] = True
+ q.client["experiment/start/cfg_mode/from_cfg"] = True
+ q.client["experiment/start/cfg_mode/from_default"] = True
+ q.client["experiment/start/cfg_mode/from_dataset_args"] = False
+ # pick default values from args
+ else:
+ q.client["experiment/start/cfg_mode/from_dataset"] = False
+ q.client["experiment/start/cfg_mode/from_cfg"] = False
+ q.client["experiment/start/cfg_mode/from_default"] = False
+ q.client["experiment/start/cfg_mode/from_dataset_args"] = True
+
+ q.client["experiment/start/cfg_file_prev"] = q.client[
+ "experiment/start/cfg_file"
+ ]
+
+ config_path = (
+ f"llm_studio/python_configs/{q.client['experiment/start/cfg_file']}"
+ )
+
+ q.client["experiment/start/cfg"] = load_config_py(
+ config_path=config_path, config_name="ConfigProblemBase"
+ )
+
+ q.client["experiment/start/dataset_prev"] = q.client["experiment/start/dataset"]
+ logger.info(f"From dataset {q.client['experiment/start/cfg_mode/from_dataset']}")
+ logger.info(f"From cfg {q.client['experiment/start/cfg_mode/from_cfg']}")
+ logger.info(f"From default {q.client['experiment/start/cfg_mode/from_default']}")
+ logger.info(f"Config file: {q.client['experiment/start/cfg_file']}")
+
+ option_items = get_ui_elements_for_cfg(cfg=q.client["experiment/start/cfg"], q=q)
+ items.extend(option_items)
+
+ if q.client["experiment/start/cfg_mode/from_cfg"]:
+ q.page["experiment/start"] = ui.form_card(box="content", items=items)
+ else:
+ q.page["experiment/start"].items = items
+
+ q.client.delete_cards.add("experiment/start")
+
+ q.page["experiment/start/footer"] = ui.form_card(
+ box="footer",
+ items=[
+ ui.inline(
+ items=[
+ ui.button(
+ name="experiment/start/run",
+ label=(
+ "Run grid search"
+ if q.client["experiment/start/grid_search"]
+ else "Run experiment"
+ ),
+ primary=True,
+ )
+ ],
+ justify="start",
+ )
+ ],
+ )
+ q.client.delete_cards.add("experiment/start/footer")
+
+
+def experiment_input_type_error(
+ q: Q, pre: str = "experiment/start"
+) -> Union[bool, GridCheckError]:
+ """Error check for custom entered values in combo boxes (grid search)
+
+ Returns:
+ GridCheckError if errors found and False if no errors found
+ """
+ cfg = q.client[f"{pre}/cfg"]
+ cfg = parse_ui_elements(cfg=cfg, q=q, pre=f"{pre}/cfg/")
+
+ try:
+ get_grid_search(cfg=cfg, q=q, pre=pre)
+ except GridCheckError as e:
+ return e
+ return False
+
+
+async def experiment_run(q: Q):
+ """Start an experiment.
+
+ Args:
+ q: Q
+ """
+ # import here to avoid circular imports
+ from llm_studio.app_utils.sections.project import list_current_experiments
+
+ pre = "experiment/start"
+
+ logger.info("Starting experiment")
+ logger.info(f"{pre}/cfg_file")
+ logger.info(f"CFG: {q.client[f'{pre}/cfg_file']}")
+
+ if q.client[f"{pre}/cfg_category"] == "experiment":
+ q.client[f"{pre}/cfg_file"] = q.client[f"{pre}/experiment"].config_file
+
+ cfg = q.client[f"{pre}/cfg"]
+ cfg = parse_ui_elements(cfg=cfg, q=q, pre=f"{pre}/cfg/")
+ cfg.experiment_name = cfg.experiment_name.replace("/", "-")
+
+ grid_search = get_grid_search(cfg=cfg, q=q, pre=pre)
+
+ if len(grid_search) == 0:
+ errors = check_config_for_errors(cfg)
+ if errors["title"] and not q.args["experiment/start/error/proceed"]:
+ title = (
+ errors["title"][0]
+ if len(errors["title"]) == 1
+ else "The following configuration mismatches were found:"
+ )
+ error_text = [ui.text(message) for message in errors["message"]]
+ q.page["meta"].dialog = ui.dialog(
+ title=title,
+ name="experiment/start/error/dialog",
+ items=error_text
+ + [
+ ui.buttons(
+ [
+ ui.button(
+ name="experiment/start/error/ok",
+ label="Ok",
+ primary=True,
+ ),
+ ui.button(
+ name="experiment/start/error/proceed",
+ label="I want to proceed anyhow",
+ primary=False,
+ ),
+ ]
+ )
+ ],
+ closable=True,
+ )
+ q.client["keep_meta"] = True
+ else:
+ start_experiment(cfg=cfg, q=q, pre=pre)
+ await list_current_experiments(q)
+ else:
+ exp_name = cfg.experiment_name
+
+ all_grid_hyperparams = sorted(grid_search)
+ combinations = itertools.product(
+ *(grid_search[name] for name in all_grid_hyperparams)
+ )
+        combinations = [dict(zip(all_grid_hyperparams, x)) for x in combinations]
+
+ random.shuffle(combinations)
+
+ q.page["meta"].dialog = ui.dialog(
+ title="Start grid search",
+ name="experiment/start/gridsearch/dialog",
+ items=[
+ ui.text(
+ "Your selected grid of hyperparameters results in "
+ f"{len(combinations)} individual experiments. "
+ "Do you want to proceed?"
+ )
+ ]
+ + [
+ ui.buttons(
+ [
+ ui.button(
+ name="experiment/start/gridsearch/proceed",
+ label=(
+ f"Start grid search of {len(combinations)} experiments"
+ ),
+ primary=True,
+ ),
+ ui.button(
+ name="experiment/start/gridsearch/cancel",
+ label="Cancel",
+ primary=False,
+ ),
+ ]
+ )
+ ],
+ closable=True,
+ )
+ q.client["keep_meta"] = True
+
+ if q.args["experiment/start/gridsearch/proceed"]:
+ all_grid_names = []
+ for exp_idx, combo in enumerate(combinations):
+ filtered_combo = filter_grid_search_combination(
+ grid=combo.copy(), cfg=cfg
+ )
+
+ grid_name = "_".join(
+ [
+ f"{hyp}_{filtered_combo[hyp]}"
+ for hyp in sorted(filtered_combo.keys())
+ if len(grid_search[hyp]) > 1
+ ]
+ )
+ if grid_name in all_grid_names:
+ continue
+ else:
+ all_grid_names.append(grid_name)
+
+ cfg = set_grid_to_cfg(cfg=cfg, grid=combo)
+
+ if grid_name != "":
+ cfg.experiment_name = exp_name + f"_{grid_name}"
+ cfg.experiment_name = cfg.experiment_name.replace("/", "-")
+
+ start_experiment(cfg=cfg, q=q, pre=pre)
+ await list_current_experiments(q)
+
+ # Remove the dialog
+ q.client["keep_meta"] = False
+
+
+def get_experiment_table(q, df_viz, height="calc(100vh - 245px)", actions=None):
+ col_remove = [
+ "id",
+ "path",
+ "mode",
+ "seed",
+ "process_id",
+ "gpu_list",
+ "loss",
+ "eta",
+ "epoch",
+ ]
+
+ for col in col_remove:
+ if col in df_viz:
+ del df_viz[col]
+
+ df_viz = df_viz.rename(
+ columns={"config_file": "problem type"},
+ )
+ df_viz["problem type"] = df_viz["problem type"].str.replace("Text ", "")
+
+ if actions == "experiment":
+ actions_dict = {
+ "experiment/list/new": "New experiment",
+ "experiment/list/new_gridsearch": "New grid search",
+ "experiment/list/rename": "Rename experiment",
+ "experiment/list/stop/table": "Stop experiment",
+ "experiment/list/delete/table/dialog": "Delete experiment",
+ }
+ else:
+ actions_dict = {}
+
+ min_widths = {
+ "name": "350",
+ "dataset": "150",
+ "problem type": "195",
+ "metric": "75",
+ "val metric": "102",
+ "progress": "85",
+ "status": "90",
+ "info": "115",
+ "actions": "5",
+ }
+
+ return ui_table_from_df(
+ q=q,
+ df=df_viz,
+ name="experiment/list/table",
+ sortables=["val metric"],
+ filterables=["name", "dataset", "problem type", "metric", "status"],
+ searchables=["name", "dataset"],
+ numerics=["val metric"],
+ tags=["status"],
+ progresses=["progress"],
+ min_widths=min_widths,
+ link_col="name",
+ height=height,
+ actions=actions_dict,
+ )
+
+
+async def experiment_list(
+ q: Q,
+ reset: bool = True,
+ allowed_statuses: Optional[List[str]] = None,
+ actions: bool = True,
+) -> None:
+ """List all experiments."""
+
+ q.client["nav/active"] = "experiment/list"
+
+ if reset:
+ await clean_dashboard(q, mode="full")
+
+ q.client["experiment/list/df_experiments"] = get_experiments(
+ q,
+ status=allowed_statuses,
+ )
+
+ df_viz = q.client["experiment/list/df_experiments"].copy()
+
+ table = get_experiment_table(
+ q,
+ df_viz,
+ actions="experiment" if actions else None,
+ )
+
+ message_bar = get_experiment_list_message_bar(q)
+
+ items = [table, message_bar]
+
+ q.page["experiment/list"] = ui.form_card(box="content", items=items)
+ q.client.delete_cards.add("experiment/list")
+
+ buttons = [
+ ui.button(name="experiment/list/refresh", label="Refresh", primary=True),
+ ui.button(
+ name="experiment/list/compare",
+ label="Compare experiments",
+ primary=False,
+ ),
+ ui.button(name="experiment/list/stop", label="Stop experiments", primary=False),
+ ui.button(
+ name="experiment/list/delete", label="Delete experiments", primary=False
+ ),
+ ]
+
+ q.page["dataset/display/footer"] = ui.form_card(
+ box="footer", items=[ui.inline(items=buttons, justify="start")]
+ )
+ q.client.delete_cards.add("dataset/display/footer")
+
+
+def get_table_and_message_item_indices(q):
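+    """Return the item positions of the experiments table and the message bar
+    within the "experiment/list" form card (see experiment_list)."""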
+ table_item_idx, message_item_idx = 0, 1
+ return table_item_idx, message_item_idx
+
+
+async def experiment_compare(q: Q, selected_rows: list):
+ if q.client["experiment/compare/tab"] is None:
+ q.client["experiment/compare/tab"] = "experiment/compare/charts"
+ if q.args["experiment/compare/charts"] is not None:
+ q.client["experiment/compare/tab"] = "experiment/compare/charts"
+ if q.args["experiment/compare/config"] is not None:
+ q.client["experiment/compare/tab"] = "experiment/compare/config"
+
+ experiment_ids = [
+ q.client["experiment/list/df_experiments"]["id"].iloc[int(idx)]
+ for idx in selected_rows
+ ]
+
+ await clean_dashboard(q, mode=q.client["experiment/compare/tab"])
+ tabs = [
+ ui.tab(name="experiment/compare/charts", label="Charts"),
+ ui.tab(name="experiment/compare/config", label="Config"),
+ ]
+ q.page["experiment/compare/tab"] = ui.tab_card(
+ box="nav2", link=True, items=tabs, value=q.client["experiment/compare/tab"]
+ )
+ q.client.delete_cards.add("experiment/compare/tab")
+
+ if q.client["experiment/compare/tab"] == "experiment/compare/charts":
+ charts = []
+ experiment_names = []
+
+ for experiment_id in experiment_ids:
+ experiment = q.client.app_db.get_experiment(experiment_id)
+ experiment_path = experiment.path
+ charts.append(load_charts(experiment_path))
+ current_name = f" {experiment.name}"
+ experiment_names.append(current_name)
+
+ await charts_tab(q, charts, experiment_names)
+
+ elif q.client["experiment/compare/tab"] == "experiment/compare/config":
+ if q.client["experiment/compare/diff_toggle"] is None:
+ q.client["experiment/compare/diff_toggle"] = False
+
+ settings = pd.DataFrame()
+ for experiment_id in experiment_ids:
+ experiment = q.client.app_db.get_experiment(experiment_id)
+ experiment_path = experiment.path
+ experiment_cfg = load_config_yaml(os.path.join(experiment_path, "cfg.yaml"))
+ items = get_cfg_list_items(experiment_cfg)
+ act_df = pd.Series({item.label: item.value for item in items})
+ settings[experiment.name] = act_df
+
+ settings.index.name = "setting"
+
+ if q.client["experiment/compare/diff_toggle"]:
+ val_counts = settings.T.nunique()
+ drop_idx = val_counts[val_counts == 1].index.values
+ settings = settings.drop(drop_idx)
+
+ items = [
+ ui.toggle(
+ name="experiment/compare/diff_toggle",
+ label="Show differences only",
+ value=q.client["experiment/compare/diff_toggle"],
+ trigger=True,
+ ),
+ ui_table_from_df(
+ q=q,
+ df=settings.reset_index(),
+ name="experiment/compare/summary/table",
+ link_col="setting",
+ height="calc(100vh - 315px)",
+ ),
+ ]
+
+ q.page["experiment/compare/config"] = ui.form_card(box="first", items=items)
+ q.client.delete_cards.add("experiment/compare/config")
+
+ buttons = [
+ ui.button(name="experiment/compare", label="Refresh", primary=True),
+ ui.button(name="experiment/list/current", label="Back", primary=False),
+ ]
+ q.page["experiment/compare/footer"] = ui.form_card(
+ box="footer", items=[ui.inline(items=buttons, justify="start")]
+ )
+ q.client.delete_cards.add("experiment/compare/footer")
+
+
+async def experiment_rename_form(q: Q, error: str = "") -> None:
+ experiment = q.client.app_db.get_experiment(q.client["experiment/rename/id"])
+
+ experiment_name = experiment.name
+ items = [
+ ui.textbox(
+ name="experiment/rename/name",
+ label=f"New name for {experiment_name}",
+ value=experiment_name,
+ required=True,
+ )
+ ]
+
+ if error:
+ items.append(ui.message_bar(type="error", text=error))
+
+ q.page["experiment/list"].items = items
+
+ buttons = [
+ ui.button(name="experiment/rename/action", label="Rename", primary=True),
+ ui.button(name="experiment/list/current", label="Abort", primary=False),
+ ]
+ q.page["dataset/display/footer"] = ui.form_card(
+ box="footer", items=[ui.inline(items=buttons, justify="start")]
+ )
+ q.client.delete_cards.add("dataset/display/footer")
+
+
+async def experiment_rename_ui_workflow(q: Q):
+ selected_row = q.args["experiment/list/rename"]
+ rename_id = q.client["experiment/list/df_experiments"]["id"].iloc[int(selected_row)]
+ q.client["experiment/rename/id"] = rename_id
+ await experiment_rename_form(q)
+
+
+async def experiment_rename_action(q, experiment, new_name):
+ """Rename experiment with `current_id` id in DB to `new_name`"""
+
+ old_name = experiment.name
+ old_path = experiment.path
+ new_path = old_path.replace(old_name, new_name)
+
+    old_exp_path = old_path
+    exp_path = new_path
+
+    if old_path != new_path:
+        logger.info(f"Renaming {old_exp_path} to {exp_path}")
+        shutil.move(os.path.abspath(old_exp_path), os.path.abspath(exp_path))
+
+    # update the chart paths stored in the experiment's charts database
+ with SqliteDict(os.path.join(new_path, "charts.db")) as charts:
+ for k1 in PLOT_ENCODINGS:
+ if k1 == "df":
+ # this is required to properly overwrite it
+ df = charts[k1].copy()
+ for k2, v2 in df.items():
+ logger.info(
+ f"Renaming charts {v2} to {v2.replace(old_name, new_name)}"
+ )
+ df[k2] = v2.replace(old_name, new_name)
+ charts[k1] = df
+ charts.commit()
+
+ for config_file in ["cfg.yaml"]:
+ config_path = os.path.join(exp_path, config_file)
+ if os.path.exists(config_path):
+ experiment_cfg = load_config_yaml(config_path)
+ experiment_cfg.experiment_name = new_name
+ experiment_cfg.output_directory = new_path
+ save_config_yaml(config_path, experiment_cfg)
+
+ rename_files = ["preds"]
+ for file in rename_files:
+ old_file = get_artifact_path_path(old_name, exp_path, file)
+ new_file = get_artifact_path_path(new_name, exp_path, file)
+ if os.path.exists(old_file):
+ logger.info(f"Renaming {old_file} to {new_file}")
+ shutil.move(os.path.abspath(old_file), os.path.abspath(new_file))
+
+ delete_files = ["logs"] # will be generated on demand with updates
+ for file in delete_files:
+ file = get_artifact_path_path(old_name, exp_path, file)
+ if os.path.exists(file):
+ logger.info(f"Deleting {file}")
+ os.remove(file)
+
+ q.client.app_db.rename_experiment(experiment.id, new_name, new_path)
+
+
+async def experiment_delete(q: Q, experiment_ids: List[int]) -> None:
+ """Delete selected experiments.
+
+ Args:
+ q: Q
+ experiment_ids: list of experiment ids to delete
+ """
+
+ for experiment_id in experiment_ids:
+ experiment = q.client.app_db.get_experiment(experiment_id)
+ q.client.app_db.delete_experiment(experiment.id)
+ try:
+ shutil.rmtree(f"{experiment.path}")
+ except FileNotFoundError:
+ logger.warning(f"Experiment path {experiment.path} not found for deletion.")
+
+
+async def experiment_stop(q: Q, experiment_ids: List[int]) -> None:
+ """Stop selected experiments.
+
+ Args:
+ q: Q
+ experiment_ids: list of experiment ids to stop
+ """
+
+ for experiment_id in experiment_ids:
+ experiment = q.client.app_db.get_experiment(experiment_id)
+
+ try:
+ ret = kill_child_processes_and_current(int(experiment.process_id))
+ if ret:
+ flag_path = os.path.join(experiment.path, "flags.json")
+ write_flag(flag_path, "status", "stopped")
+        except Exception as e:
+            logger.error(f"Error while stopping the experiment: {e}")
+
+
+def load_charts(experiment_path):
+ try:
+ with SqliteDict(os.path.join(experiment_path, "charts.db")) as charts:
+ charts = dict(charts)
+    except Exception:
+        charts = {}
+        logger.warning(
+            "Charts are not available yet; they will appear once the "
+            "experiment has written them."
+        )
+
+ return charts
+
+
+async def experiment_display(q: Q) -> None:
+ """Display a selected experiment."""
+
+ experiment_id = q.client["experiment/list/df_experiments"]["id"].iloc[
+ q.client["experiment/display/id"]
+ ]
+ q.client["experiment/display/experiment_id"] = experiment_id
+ experiment = q.client.app_db.get_experiment(experiment_id)
+ q.client["experiment/display/experiment"] = experiment
+
+ q.client["experiment/display/experiment_path"] = experiment.path
+
+ checkpoints_exists = os.path.exists(
+ os.path.join(q.client["experiment/display/experiment_path"], "checkpoint.pth")
+ )
+ adapter_exists = os.path.exists(
+ os.path.join(q.client["experiment/display/experiment_path"], "adapter_model")
+ )
+ status, _ = get_experiment_status(experiment.path)
+
+ charts = load_charts(q.client["experiment/display/experiment_path"])
+ q.client["experiment/display/charts"] = charts
+
+ if experiment.mode == "train":
+ if q.client["experiment/display/tab"] is None:
+ q.client["experiment/display/tab"] = "experiment/display/charts"
+ else:
+ if q.client["experiment/display/tab"] is None:
+ q.client["experiment/display/tab"] = "experiment/display/summary"
+
+ if q.args["experiment/display/charts"] is not None:
+ q.client["experiment/display/tab"] = "experiment/display/charts"
+ if q.args["experiment/display/summary"] is not None:
+ q.client["experiment/display/tab"] = "experiment/display/summary"
+ if q.args["experiment/display/train_data_insights"] is not None:
+ q.client["experiment/display/tab"] = "experiment/display/train_data_insights"
+ if q.args["experiment/display/validation_prediction_insights"] is not None:
+ q.client["experiment/display/tab"] = (
+ "experiment/display/validation_prediction_insights"
+ )
+ if q.args["experiment/display/config"] is not None:
+ q.client["experiment/display/tab"] = "experiment/display/config"
+ if q.args["experiment/display/deployment"] is not None:
+ q.client["experiment/display/tab"] = "experiment/display/deployment"
+ if q.args["experiment/display/logs"] is not None:
+ q.client["experiment/display/tab"] = "experiment/display/logs"
+ if q.args["experiment/display/chat"] is not None:
+ q.client["experiment/display/tab"] = "experiment/display/chat"
+
+ await clean_dashboard(q, mode=q.client["experiment/display/tab"])
+
+ tabs = [
+ ui.tab(name="experiment/display/charts", label="Charts"),
+ ui.tab(name="experiment/display/summary", label="Summary"),
+ ]
+    # checks all plot encodings; "html" covers legacy experiments
+ has_train_data_insights = any(
+ [
+ charts.get(plot_encoding, dict()).get("train_data") is not None
+ for plot_encoding in PLOT_ENCODINGS
+ ]
+ )
+ if has_train_data_insights:
+ tabs += [
+ ui.tab(
+ name="experiment/display/train_data_insights",
+ label="Train Data Insights",
+ )
+ ]
+ has_validation_prediction_insights = any(
+ [
+ charts.get(plot_encoding, dict()).get("validation_predictions") is not None
+ for plot_encoding in PLOT_ENCODINGS
+ ]
+ )
+ if has_validation_prediction_insights:
+ tabs += [
+ ui.tab(
+ name="experiment/display/validation_prediction_insights",
+ label="Validation Prediction Insights",
+ )
+ ]
+
+ tabs += [
+ ui.tab(name="experiment/display/logs", label="Logs"),
+ ui.tab(name="experiment/display/config", label="Config"),
+ ]
+
+ if status == "finished" and checkpoints_exists:
+ tabs += [ui.tab(name="experiment/display/chat", label="Chat")]
+
+ q.page["experiment/display/tab"] = ui.tab_card(
+ box="nav2", link=True, items=tabs, value=q.client["experiment/display/tab"]
+ )
+ q.client.delete_cards.add("experiment/display/tab")
+
+ if q.client["experiment/display/tab"] == "experiment/display/charts":
+ await charts_tab(q, [charts], [""])
+ elif q.client["experiment/display/tab"] in [
+ "experiment/display/train_data_insights",
+ "experiment/display/validation_prediction_insights",
+ ]:
+ await insights_tab(charts, q)
+ elif q.client["experiment/display/tab"] in ["experiment/display/summary"]:
+ await summary_tab(experiment_id, q)
+ elif q.client["experiment/display/tab"] in ["experiment/display/config"]:
+ await configs_tab(q)
+ elif q.client["experiment/display/tab"] in ["experiment/display/logs"]:
+ await logs_tab(q)
+ elif q.client["experiment/display/tab"] in ["experiment/display/chat"]:
+ await chat_tab(q)
+
+ await q.page.save()
+
+ buttons = [
+ ui.button(name="experiment/display/refresh", label="Refresh", primary=True)
+ ]
+
+ buttons += [
+ ui.button(
+ name="experiment/display/download_logs",
+ label="Download logs/config",
+ primary=False,
+ )
+ ]
+
+ if status == "finished":
+ buttons += [
+ ui.button(
+ name="experiment/display/download_predictions",
+ label="Download predictions",
+ primary=False,
+ disabled=False,
+ tooltip=None,
+ )
+ ]
+
+ if checkpoints_exists:
+ buttons += [
+ ui.button(
+ name="experiment/display/download_model",
+ label="Download model",
+ primary=False,
+ disabled=False,
+ tooltip=None,
+ ),
+ ]
+
+ if adapter_exists:
+ buttons += [
+ ui.button(
+ name="experiment/display/download_adapter",
+ label="Download adapter",
+ primary=False,
+ disabled=False,
+ tooltip=None,
+ ),
+ ]
+
+ if checkpoints_exists:
+ buttons += [
+ ui.button(
+ name="experiment/display/push_to_huggingface",
+ label="Push checkpoint to huggingface",
+ primary=False,
+ disabled=False,
+ tooltip=None,
+ ),
+ ]
+
+ buttons += [ui.button(name="experiment/list/current", label="Back", primary=False)]
+
+ q.page["experiment/display/footer"] = ui.form_card(
+ box="footer",
+ items=[
+ ui.inline(items=buttons, justify="start"),
+ ],
+ )
+ q.client.delete_cards.add("experiment/display/footer")
+
+
+async def insights_tab(charts, q):
+ if q.client["experiment/display/tab"] == "experiment/display/train_data_insights":
+ key = "train_data"
+ elif (
+ q.client["experiment/display/tab"]
+ == "experiment/display/validation_prediction_insights"
+ ):
+ key = "validation_predictions"
+ for k1 in PLOT_ENCODINGS:
+ if k1 not in charts:
+ continue
+ for k2, v2 in charts[k1].items():
+ if k2 != key:
+ continue
+ if k1 == "html":
+ q.page[f"experiment/display/charts/{k1}_{k2}"] = ui.markup_card(
+ box="first", title="", content=v2
+ )
+ q.client.delete_cards.add(f"experiment/display/charts/{k1}_{k2}")
+
+ continue
+
+ elif k1 == "image":
+ q.page[f"experiment/display/charts/{k1}_{k2}"] = ui.image_card(
+ box="first", title="", type="png", image=v2
+ )
+ q.client.delete_cards.add(f"experiment/display/charts/{k1}_{k2}")
+ continue
+
+ elif k1 == "df":
+ df = pd.read_parquet(v2)
+ min_widths = {
+ col: "350" for col in df.columns if "text" in str(col).lower()
+ }
+ if key == "train_data":
+ min_widths["Content"] = "800"
+ q.page[f"experiment/display/charts/{k1}_{k2}"] = ui.form_card(
+ box="first",
+ items=[
+ ui_table_from_df(
+ q=q,
+ df=df,
+ name=f"experiment/display/charts/{k1}_{k2}",
+ sortables=[
+ col for col in df.columns if col.startswith("Metric")
+ ],
+ markdown_cells=[
+ col
+ for col in df.columns
+ if not col.startswith("Metric")
+ ],
+ searchables=list(df.columns),
+ downloadable=True,
+ resettable=True,
+ min_widths=min_widths,
+ height="calc(100vh - 267px)",
+ max_char_length=50_000,
+ cell_overflow="tooltip",
+ )
+ ],
+ )
+ q.client.delete_cards.add(f"experiment/display/charts/{k1}_{k2}")
+ continue
+
+
+async def summary_tab(experiment_id, q):
+ experiment_df = get_experiments(q)
+ input_dict = experiment_df[experiment_df.id == experiment_id].iloc[0].to_dict()
+ cfg = load_config_yaml(
+ os.path.join(q.client["experiment/display/experiment_path"], "cfg.yaml")
+ )
+ _ = get_tokenizer(cfg)
+
+ # experiment card
+ card_name = "experiment/display/summary/experiment"
+ q.page[card_name] = ui.form_card(
+ box=ui.box(zone="first"),
+ items=[
+ ui.separator("Experiment"),
+ ui.stats(
+ [
+ ui.stat(
+ value=cfg.experiment_name,
+ label="Name",
+ ),
+ ],
+ justify="between",
+ inset=True,
+ ),
+ ui.stats(
+ [
+ ui.stat(
+ value=input_dict["config_file"],
+ label="Problem Type",
+ ),
+ ],
+ justify="between",
+ inset=True,
+ ),
+ ],
+ )
+ q.client.delete_cards.add(card_name)
+
+ # datasets card
+ card_name = "experiment/display/summary/datasets"
+ q.page[card_name] = ui.form_card(
+ box=ui.box(zone="first"),
+ items=[
+ ui.separator("Datasets"),
+ ui.stats(
+ [
+ ui.stat(
+ value=Path(cfg.dataset.train_dataframe).stem,
+ label="Training Dataset",
+ ),
+ ],
+ justify="between",
+ inset=True,
+ ),
+ ui.stats(
+ [
+ ui.stat(
+ value=(
+ "-"
+ if cfg.dataset.validation_dataframe in ["", "None", None]
+ else Path(cfg.dataset.validation_dataframe).stem
+ ),
+ label="Validation Dataset",
+ ),
+ ],
+ justify="between",
+ inset=True,
+ ),
+ ],
+ )
+ q.client.delete_cards.add(card_name)
+
+ # score card
+ card_name = "experiment/display/summary/score"
+ q.page[card_name] = ui.form_card(
+ box=ui.box(zone="first"),
+ items=[
+ ui.separator("Score"),
+ ui.stats(
+ [
+ ui.stat(
+ value=input_dict["metric"],
+ label="Metric",
+ ),
+ ],
+ justify="between",
+ inset=True,
+ ),
+ ui.stats(
+ [
+ ui.stat(
+ value=(
+ "-"
+ if input_dict["val metric"] in ["", "None", None]
+ else str(input_dict["val metric"])
+ ),
+ label="Validation Score",
+ ),
+ ],
+ justify="between",
+ inset=True,
+ ),
+ ],
+ )
+ q.client.delete_cards.add(card_name)
+
+ # main configs card
+ card_name = "experiment/display/summary/main_configs"
+ q.page[card_name] = ui.form_card(
+ box=ui.box(zone="second"),
+ items=[
+ ui.separator("Main Configurations"),
+ ui.stats(
+ [
+ ui.stat(
+ value=cfg.llm_backbone,
+ label="LLM Backbone",
+ ),
+ ui.stat(
+ value=str(cfg.training.lora),
+ label="Lora",
+ ),
+ ui.stat(
+ value=str(cfg.training.epochs),
+ label="Epochs",
+ ),
+ ui.stat(
+ value=str(cfg.training.batch_size),
+ label="Batch Size",
+ ),
+ ],
+ justify="between",
+ inset=True,
+ ),
+ ui.stats(
+ [
+ ui.stat(
+ value=str(input_dict["loss"]),
+ label="Loss Function",
+ ),
+ ui.stat(
+ value=cfg.architecture.backbone_dtype,
+ label="Backbone Dtype",
+ ),
+ ui.stat(
+ value=str(cfg.architecture.gradient_checkpointing),
+ label="Gradient Checkpointing",
+ ),
+ ui.stat(
+ value=input_dict["gpu_list"],
+ label="GPU List",
+ ),
+ ],
+ justify="between",
+ inset=True,
+ ),
+ ],
+ )
+ q.client.delete_cards.add(card_name)
+
+ # code card
+ card_name = "experiment/display/summary/code"
+ content = get_experiment_summary_code_card(cfg=cfg)
+ q.page[card_name] = ui.markdown_card(
+ box=ui.box(zone="third"),
+ title="",
+ content=content,
+ )
+ q.client.delete_cards.add(card_name)
+
+
+async def configs_tab(q):
+ experiment_cfg = load_config_yaml(
+ os.path.join(q.client["experiment/display/experiment_path"], "cfg.yaml")
+ )
+ items = get_cfg_list_items(experiment_cfg)
+ q.page["experiment/display/config"] = ui.stat_list_card(
+ box="first", items=items, title=""
+ )
+ q.client.delete_cards.add("experiment/display/config")
+
+
+async def logs_tab(q):
+ logs_path = os.path.join(q.client["experiment/display/experiment_path"], "logs.log")
+ text = ""
+ in_pre = 0
+ # Read log file only if it already exists
+ if os.path.exists(logs_path):
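+        # Wrap each log line in <div> tags so ui.text renders it on its own
+        # line; content inside <pre> blocks is passed through unwrapped.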
+ with open(logs_path, "r") as f:
+ for line in f.readlines():
+                if in_pre == 0:
+                    text += "<div>"
+                if "INFO: Lock" in line:
+                    continue
+                # maximum line length
+                n = 250
+                chunks = [line[i : i + n] for i in range(0, len(line), n)]
+                text += "</div><div>".join(chunks)
+
+                # Check for formatted HTML text
+                if "<pre>" in line:
+                    in_pre += 1
+                if "</pre>" in line:
+                    in_pre -= 1
+                if in_pre == 0:
+                    text += "</div>"
+ items = [ui.text(text)]
+ q.page["experiment/display/logs"] = ui.form_card(box="first", items=items, title="")
+ q.client.delete_cards.add("experiment/display/logs")
+
+
+def subsample(key1, key2, value, max_plot_points=1000):
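+    """Downsample a chart series to at most `max_plot_points` points.
+
+    Keeps every stride-th element of value["steps"] and value["values"];
+    e.g. 2500 points with max_plot_points=1000 gives stride=3 (834 points).
+    """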
+ act_plot_points = len(value["steps"])
+ if act_plot_points > max_plot_points:
+ stride = int(np.ceil(act_plot_points / max_plot_points))
+ value["steps"] = value["steps"][::stride]
+ value["values"] = value["values"][::stride]
+ logger.info(
+ f"{key1} {key2} sampled from size {act_plot_points} to size "
+ f"{len(value['steps'])} using stride {stride}."
+ )
+ return value
+
+
+def unite_validation_metric_charts(charts_list):
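+    """Unify validation metric keys across compared experiments.
+
+    If the compared experiments were trained with different validation
+    metrics, each metric entry (except "loss") is renamed to a common
+    "metric" key so all experiments can share a single chart.
+    """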
+ unique_metrics = []
+ for chart in charts_list:
+ unique_metrics.extend(list(chart.get("validation", {}).keys()))
+
+ unique_metrics = set([key for key in unique_metrics if key != "loss"])
+
+ if len(unique_metrics) > 1:
+ for chart in charts_list:
+ if "validation" in chart:
+ for key in unique_metrics:
+ if key in chart["validation"]:
+ chart["validation"]["metric"] = chart["validation"][key]
+ del chart["validation"][key]
+ return charts_list
+
+
+async def charts_tab(q, charts_list, legend_labels):
+ charts_list = unite_validation_metric_charts(charts_list)
+
+ box = ["top_left", "top_right", "bottom_left", "bottom_right"]
+ cnt = 0
+ for k1 in ["meta", "train", "validation"]:
+ if all([k1 not in charts for charts in charts_list]):
+ continue
+
+ all_second_keys: Set = set()
+ for charts in charts_list:
+ if k1 in charts:
+ all_second_keys = all_second_keys.union(set(charts[k1].keys()))
+
+ # Always plot loss in the lower left corner
+ if "loss" in all_second_keys:
+ all_second_keys.remove("loss")
+ list_all_second_keys = ["loss"] + list(all_second_keys)
+ else:
+ list_all_second_keys = list(all_second_keys)
+
+ for k2 in list_all_second_keys:
+ logger.info(f"{k1} {k2}")
+
+ items = []
+
+ if k1 == "meta" and k2 == "lr":
+ tooltip = "Current learning rate throughout the training process."
+ elif k1 == "train" and k2 == "loss":
+ tooltip = (
+ "Current training loss throughout the training process. "
+ "Loss is calculated as the average of the last ten batches."
+ )
+ elif k1 == "validation" and k2 == "loss":
+ tooltip = (
+ "Current validation loss throughout the training process. "
+ "Loss is calculated as the average of all validation batches. "
+ )
+ elif k1 == "validation" and k2 != "loss":
+ tooltip = (
+ "Current validation metric throughout the training process. "
+ "Metric is calculated on full validation set predictions."
+ )
+ else:
+ continue
+
+ title = f"{k1} {k2}".upper().replace("META LR", "LEARNING RATE")
+ if k2 == "loss":
+ title = title.replace("LOSS", "BATCH LOSS")
+
+ items.append(ui.text(title, tooltip=tooltip))
+
+ rows = []
+
+ max_samples = q.client["chart_plot_max_points"]
+ for charts, label in zip(charts_list, legend_labels):
+ if k1 not in charts or k2 not in charts[k1]:
+ continue
+
+ v2 = charts[k1][k2]
+ v2 = subsample(k1, k2, v2, max_samples)
+
+ if k2 == "lr" and "lr_diff" in charts["meta"]:
+ v3 = charts["meta"]["lr_diff"]
+ v3 = subsample("meta", "lr_diff", v3, max_samples)
+ rows.extend(
+ [
+ (v2["steps"][i], f"learning rate{label}", v2["values"][i])
+ for i in range(len(v2["values"]))
+ ]
+ + [
+ (
+ v3["steps"][i],
+ f"differential learning rate{label}",
+ v3["values"][i],
+ )
+ for i in range(len(v3["values"]))
+ ]
+ )
+ color = "=type"
+ fields = ["step", "type", "value"]
+
+ elif len(charts_list) > 1:
+ rows.extend(
+ [
+ (v2["steps"][i], label.strip(), v2["values"][i])
+ for i in range(len(v2["values"]))
+ ]
+ )
+ color = "=type"
+ fields = ["step", "type", "value"]
+ else:
+ rows.extend(
+ [
+ (v2["steps"][i], v2["values"][i]) # type: ignore
+ for i in range(len(v2["values"]))
+ ]
+ )
+ color = wave_theme.color
+ fields = ["step", "value"]
+
+ d = data(fields=fields, rows=rows, pack=True)
+
+ viz = ui.visualization(
+ plot=ui.plot(
+ [
+ ui.mark(
+ type="line",
+ x_title="step",
+ x_scale="linear",
+ y_scale="linear",
+ x="=step",
+ y="=value",
+ color=color,
+ y_min=0 if k1 == "meta" and k2 == "lr" else None,
+ color_range=wave_theme.color_range,
+ )
+ ]
+ ),
+ data=d, # type: ignore
+ interactions=["brush"],
+ height="max(calc((100vh - 275px)*0.41), 225px)",
+ width="100%",
+ )
+
+ items.append(viz)
+
+ if k1 == "validation" and k2 == "loss" and np.sum(v2["values"]) == 0:
+ items.append(
+ ui.message_bar(
+ type="info",
+ text="Validation batch loss cannot be \
+ calculated for this problem type.",
+ )
+ )
+
+ q.page[f"experiment/display/charts/{k1}_{k2}"] = ui.form_card(
+ box=box[cnt], items=items
+ )
+ q.client.delete_cards.add(f"experiment/display/charts/{k1}_{k2}")
+
+ cnt += 1
+
+
+async def experiment_artifact_build_error_dialog(q: Q, error: str):
+ q.page["meta"].dialog = ui.dialog(
+ "Failed to build artifact", items=[ui.text(error)], closable=True
+ )
+ q.client["keep_meta"] = True
+
+
+async def experiment_download_artifact(
+ q: Q,
+ get_artifact_path_fn: Callable[[str, str], str],
+ save_artifact_fn: Callable[[str, str], str],
+ additional_log: Optional[str] = "",
+ min_disk_space: Optional[float] = 0.0,
+):
+ """Download specific artifact, if it does not exist, create it on demand
+
+ Args:
+ q: Q
+ get_artifact_path_fn: function that returns path to the artifact
+ save_artifact_fn: function that generates the artifact and returns its path
+ additional_log: additional information to be logged
+ min_disk_space: minimal disk available needed to generate artifact
+ """
+
+ experiment = q.client["experiment/display/experiment"]
+ experiment_path = q.client["experiment/display/experiment_path"]
+
+ zip_path = get_artifact_path_fn(experiment.name, experiment_path)
+
+ if not os.path.exists(zip_path):
+ try:
+ check_available_space(experiment_path, min_disk_space)
+ except LLMResourceException as e:
+ error = f"Cannot create {os.path.basename(zip_path)}. {e}"
+ await experiment_artifact_build_error_dialog(q, error)
+ return
+
+ logger.info(f"Creating {zip_path} on demand")
+ zip_path = save_artifact_fn(experiment.name, experiment_path)
+
+ if additional_log:
+ logger.info(f"{additional_log}: {zip_path}")
+
+ q.page["meta"].script = ui.inline_script(
+ f'window.open("{get_download_link(q, zip_path)}", "_blank");'
+ )
+ await q.page.save()
+
+
+async def experiment_download_predictions(q: Q):
+ """Download experiment predictions."""
+ await experiment_download_artifact(
+ q, get_predictions_path, save_prediction_outputs, "Predictions path", None
+ )
+
+
+async def experiment_download_logs(q: Q):
+ """Download experiment logs."""
+
+ experiment = q.client["experiment/display/experiment"]
+ experiment_path = q.client["experiment/display/experiment_path"]
+ zip_path = get_logs_path(experiment.name, experiment_path)
+
+ if not os.path.exists(zip_path):
+ logs = q.client["experiment/display/charts"]
+ logger.info(f"Creating {zip_path} on demand")
+ zip_path = save_logs(experiment.name, experiment_path, logs)
+
+ download_url = get_download_link(q, zip_path)
+ logger.info(f"Logs URL: {download_url}")
+
+ q.page["meta"].script = ui.inline_script(
+ f'window.open("{download_url}", "_blank");'
+ )
+ await q.page.save()
+
+
+async def config_import_uploaded_file(q: Q):
+ """ "Importing a config file from drag and drop to the filesystem"""
+
+ file_url = q.args["experiment/upload_yaml"][0]
+ file_name = file_url.split("/")[-1]
+ path = f"{get_data_dir(q)}/{file_name}"
+
+ local_path = await q.site.download(file_url, path)
+
+ await q.site.unload(q.args["experiment/upload_yaml"][0])
+
+ with open(local_path, "r") as f:
+ yaml_data = yaml.safe_load(f)
+
+ yaml_data = flatten_dict(yaml_data)
+
+ q.client["experiment/yaml_data"] = yaml_data
+
+
+async def show_message(q, msg_key, page, idx, msg_type):
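+    """Show a one-shot message stored in q.client[msg_key] in the message bar
+    at item position idx of card page, then clear the stored text."""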
+ info = q.client[msg_key]
+ if info:
+ q.page[page].items[idx].message_bar.text = info
+ q.page[page].items[idx].message_bar.type = msg_type
+ q.client[msg_key] = ""
+
+
+def get_experiment_list_message_bar(q):
+ if q.client["experiment_halt_reason"]:
+ msg_bar = ui.message_bar(type="error", text=q.client["experiment_halt_reason"])
+ del q.client["experiment_halt_reason"]
+
+ elif q.client["force_disable_pipelines"]:
+ msg_bar = ui.message_bar(type="info", text=q.client["force_disable_pipelines"])
+ del q.client["force_disable_pipelines"]
+
+ else:
+ msg_bar = ui.message_bar(type="info", text="")
+
+ return msg_bar
+
+
+async def experiment_download_model(q: Q):
+ experiment = q.client["experiment/display/experiment"]
+ experiment_path = q.client["experiment/display/experiment_path"]
+ zip_path = get_model_path(experiment.name, experiment_path)
+
+ if not os.path.exists(zip_path):
+ logger.info(f"Creating {zip_path} on demand")
+ cfg = load_config_yaml(os.path.join(experiment_path, "cfg.yaml"))
+
+ experiments = get_experiments(q)
+ num_running_queued = len(
+ experiments[experiments["status"].isin(["queued", "running"])]
+ )
+ if num_running_queued > 0 or (
+ cfg.training.lora and cfg.architecture.backbone_dtype in ("int4", "int8")
+ ):
+ logger.info("Preparing model on CPU. This might slow down the progress.")
+ device = "cpu"
+ else:
+ device = q.client["gpu_used_for_download"]
+ logger.info(
+ f"Preparing model on {device}. In case of issues or OOM consider "
+ "changing the default device for downloading in settings."
+ )
+ with set_env(HF_TOKEN=q.client["default_huggingface_api_token"]):
+ cfg, model, tokenizer = load_cfg_model_tokenizer(
+ experiment_path, merge=True, device=device
+ )
+
+ model = unwrap_model(model)
+ checkpoint_path = cfg.output_directory
+
+ model_save_time = time.time()
+ model.backbone.save_pretrained(checkpoint_path)
+        # See PreTrainedTokenizerBase.save_pretrained for documentation.
+        # The `or []` safeguards against a None return if the tokenizer class
+        # does not inherit from PreTrainedTokenizerBase.
+ if cfg.problem_type in GENERATION_PROBLEM_TYPES:
+ tokenizer.chat_template = get_chat_template(cfg)
+ tokenizer_files = list(tokenizer.save_pretrained(checkpoint_path) or [])
+
+ card = get_model_card(cfg, model, repo_id="")
+ card.save(os.path.join(experiment_path, "model_card.md"))
+
+ logger.info(f"Creating Zip File at {zip_path}")
+ zf = zipfile.ZipFile(zip_path, "w")
+
+ FILES_TO_PUSH = [
+ "vocab.json",
+ "sentencepiece.bpe.model",
+ "bpe_encoder.bin",
+ "tokenizer_config.json",
+ "tokenizer.json",
+ "special_tokens_map.json",
+ "merges.txt",
+ "generation_config.json",
+ "config.json",
+ "added_tokens.json",
+ "model_card.md",
+ "classification_head.pth",
+ "regression_head.pth",
+ ]
+ FILES_TO_PUSH = set(
+ FILES_TO_PUSH
+ + [os.path.split(tokenizer_file)[-1] for tokenizer_file in tokenizer_files]
+ )
+
+ # Add tokenizer and config.json files, as well as potential classification head
+ paths_added = []
+ for file in FILES_TO_PUSH:
+ path = os.path.join(experiment_path, file)
+ if os.path.isfile(path):
+ paths_added.append(path)
+ add_file_to_zip(zf=zf, path=path)
+
+ # Add model weight files. save_pretrained() does not return the saved files
+ weight_paths = glob.glob(os.path.join(checkpoint_path, "pytorch_model*.*"))
+ for path in weight_paths:
+ paths_added.append(path)
+ add_file_to_zip(zf=zf, path=path)
+
+ # Add all files that were created after the model was saved.
+ # This is useful for potential changes/different
+ # naming conventions across different backbones.
+ # Also adds newly generated safetensor files.
+ for file in os.listdir(checkpoint_path):
+ file_path = os.path.join(checkpoint_path, file)
+ if (
+ os.path.getmtime(file_path) > model_save_time
+ and file_path not in paths_added
+ and file_path != zip_path
+ ):
+ add_file_to_zip(zf=zf, path=file_path)
+ paths_added.append(file_path)
+ logger.info(
+ f"Added {file_path} to zip file as it "
+ "was created when saving the model state."
+ )
+
+ # Add all files from subdirectories, which include the intermediate checkpoints
+ subdirectories = [
+ d
+ for d in os.listdir(checkpoint_path)
+ if os.path.isdir(os.path.join(checkpoint_path, d))
+ ]
+ for subdirectory in subdirectories:
+ for file in os.listdir(os.path.join(checkpoint_path, subdirectory)):
+ file_path = os.path.join(checkpoint_path, subdirectory, file)
+ add_file_to_zip(zf=zf, path=file_path, folder=subdirectory)
+ paths_added.append(file_path)
+ logger.info(f"Added {file_path} to zip file.")
+ zf.close()
+
+ download_url = get_download_link(q, zip_path)
+ logger.info(f"Logs URL: {download_url}")
+
+ q.page["meta"].script = ui.inline_script(
+ f'window.open("{download_url}", "_blank");'
+ )
+ await q.page.save()
+
+
+async def experiment_download_adapter(q: Q):
+ experiment = q.client["experiment/display/experiment"]
+ experiment_path = q.client["experiment/display/experiment_path"]
+
+ zip_path = get_adapter_model_path(experiment.name, experiment_path)
+
+    if not os.path.exists(zip_path):
+        logger.info(f"Creating {zip_path} on demand")
+        zf = zipfile.ZipFile(zip_path, "w")
+
+ FILES_TO_PUSH = [
+ "adapter_model/adapter_config.json",
+ "adapter_model/adapter_model.safetensors",
+ "adapter_model/README.md",
+ ]
+
+ paths_added = []
+ for file in FILES_TO_PUSH:
+ path = os.path.join(experiment_path, file)
+ if os.path.isfile(path):
+ paths_added.append(path)
+ add_file_to_zip(zf=zf, path=path)
+
+ zf.close()
+
+ download_url = get_download_link(q, zip_path)
+ logger.info(f"Logs URL: {download_url}")
+
+ q.page["meta"].script = ui.inline_script(
+ f'window.open("{download_url}", "_blank");'
+ )
+ await q.page.save()
+
+
+async def experiment_push_to_huggingface_dialog(q: Q, error: str = ""):
+ if q.args["experiment/display/push_to_huggingface"] or error:
+ devices = ["cpu", "cpu_shard"] + [
+ f"cuda:{idx}" for idx in range(torch.cuda.device_count())
+ ]
+ default_device = "cuda:0"
+
+ experiments = get_experiments(q)
+ num_running_queued = len(
+ experiments[experiments["status"].isin(["queued", "running"])]
+ )
+ experiment_path = q.client["experiment/display/experiment_path"]
+ cfg = load_config_yaml(os.path.join(experiment_path, "cfg.yaml"))
+ if num_running_queued > 0 or cfg.environment.use_deepspeed:
+ default_device = "cpu"
+
+ try:
+ huggingface_hub.login(q.client["default_huggingface_api_token"])
+ user_id = huggingface_hub.whoami()["name"]
+ except Exception:
+ user_id = ""
+
+ dialog_items = [
+ ui.message_bar("error", error, visible=True if error else False),
+ ui.textbox(
+ name="experiment/display/push_to_huggingface/account_name",
+ label="Account Name",
+ value=user_id,
+ width="500px",
+ required=False,
+ tooltip=(
+ "The account name on HF to push the model to. "
+ "Leaving it empty will push it to the default user account."
+ ),
+ ),
+ ui.textbox(
+ name="experiment/display/push_to_huggingface/model_name",
+ label="Model Name",
+ value=hf_repo_friendly_name(
+ q.client["experiment/display/experiment"].name
+ ),
+ width="500px",
+ required=True,
+ tooltip="The name of the model as shown on HF.",
+ ),
+ ui.dropdown(
+ name="experiment/display/push_to_huggingface/device",
+ label="Device for preparing the model",
+ required=True,
+ value=default_device,
+ width="500px",
+ choices=[ui.choice(str(d), str(d)) for d in devices],
+ tooltip=(
+ "The local device to prepare the model before pushing it to HF. "
+ "CPU will never load the weights to the GPU, which can be useful "
+ "for large models, but will be significantly slower. "
+ "Cpu_shard will first load on CPU and then shard on all GPUs "
+ "before pushing to HF."
+ ),
+ ),
+ ui.textbox(
+ name="experiment/display/push_to_huggingface/api_key",
+ label="Hugging Face API Key",
+ value=q.client["default_huggingface_api_token"],
+ width="500px",
+ password=True,
+ required=True,
+ tooltip="HF API key, needs write access.",
+ ),
+ ui.toggle(
+ name="default_safe_serialization",
+ label="Use Hugging Face safetensors for safe serialization",
+ value=q.client["default_safe_serialization"],
+ ),
+ ui.buttons(
+ [
+ ui.button(
+ name="experiment/display/push_to_huggingface_submit",
+ label="Export",
+ primary=True,
+ ),
+ ui.button(name="cancel", label="Cancel", primary=False),
+ ]
+ ),
+ ]
+ elif q.args["experiment/display/push_to_huggingface_submit"]:
+ await busy_dialog(
+ q=q,
+ title="Exporting to HuggingFace",
+ text="Model size can affect the export time significantly.",
+ )
+
+ experiment_path = q.client["experiment/display/experiment_path"]
+ device = q.client["experiment/display/push_to_huggingface/device"]
+ api_key = q.client["experiment/display/push_to_huggingface/api_key"]
+ user_id = q.client["experiment/display/push_to_huggingface/account_name"]
+ safe_serialization = q.client["default_safe_serialization"]
+ model_name = q.client[
+ "experiment/display/push_to_huggingface/model_name"
+ ].replace(".", "-")
+
+ publish_model_to_hugging_face(
+ path_to_experiment=experiment_path,
+ device=device,
+ api_key=api_key,
+ user_id=user_id,
+ model_name=model_name,
+ safe_serialization=safe_serialization,
+ hf_transfer=q.client["default_hf_hub_enable_hf_transfer"],
+ )
+
+ dialog_items = [
+ ui.message_bar("success", "Success"),
+ ui.buttons(
+ [
+ ui.button(name="ok", label="OK", primary=True),
+ ]
+ ),
+ ]
+
+ dialog = ui.dialog(
+ title="Push to HuggingFace Hub",
+ items=dialog_items,
+ closable=True,
+ name="push_to_huggingface_dialog",
+ )
+
+ q.page["meta"].dialog = dialog
+ q.client["keep_meta"] = True
+
+
+def get_experiment_summary_code_card(cfg) -> str:
+ repo_id: Optional[str] = None
+ hf_yaml_path = f"{cfg.output_directory}/hf.yaml"
+
+ with open(
+ os.path.join("model_cards", cfg.environment._summary_card_template), "r"
+ ) as f:
+ text = f.read()
+
+ if os.path.exists(hf_yaml_path):
+ with open(hf_yaml_path, "r") as fp:
+ repo_id = yaml.load(fp, Loader=yaml.FullLoader)["repo_id"]
+
+ if repo_id is None:
+ repo_id = "account/model"
+
+ # Model repo
+ text = text.replace("{{repo_id}}", repo_id)
+
+ # Versions
+ text = text.replace("{{transformers_version}}", transformers.__version__)
+ text = text.replace("{{einops_version}}", einops.__version__)
+ text = text.replace("{{accelerate_version}}", accelerate.__version__)
+ text = text.replace("{{torch_version}}", torch.__version__)
+
+ # Configs
+ text = text.replace("{{text_prompt_start}}", str(cfg.dataset.text_prompt_start))
+ text = text.replace(
+ "{{text_answer_separator}}", str(cfg.dataset.text_answer_separator)
+ )
+ text = text.replace(
+ "{{end_of_sentence}}",
+ (
+ str(cfg.tokenizer._tokenizer_eos_token)
+ if cfg.dataset.add_eos_token_to_prompt
+ else ""
+ ),
+ )
+
+ text = text.replace("{{trust_remote_code}}", str(cfg.environment.trust_remote_code))
+
+ if cfg.problem_type not in NON_GENERATION_PROBLEM_TYPES:
+ text = text.replace(
+ "{{min_new_tokens}}", str(cfg.prediction.min_length_inference)
+ )
+ text = text.replace(
+ "{{max_new_tokens}}", str(cfg.prediction.max_length_inference)
+ )
+ text = text.replace("{{do_sample}}", str(cfg.prediction.do_sample))
+ text = text.replace("{{num_beams}}", str(cfg.prediction.num_beams))
+ text = text.replace("{{temperature}}", str(cfg.prediction.temperature))
+ text = text.replace(
+ "{{repetition_penalty}}", str(cfg.prediction.repetition_penalty)
+ )
+
+ if cfg.dataset.system_column != "None":
+ text = text.replace(
+ "{{sample_messages}}",
+ """[
+ {
+ "role": "system",
+ "content": "You are a friendly and polite chatbot.",
+ },
+ {"role": "user", "content": "Hi, how are you?"},
+ {"role": "assistant", "content": "I'm doing great, how about you?"},
+ {"role": "user", "content": "Why is drinking water so healthy?"},
+]""",
+ )
+ else:
+ text = text.replace(
+ "{{sample_messages}}",
+ """[
+ {"role": "user", "content": "Hi, how are you?"},
+ {"role": "assistant", "content": "I'm doing great, how about you?"},
+ {"role": "user", "content": "Why is drinking water so healthy?"},
+]""",
+ )
+
+ return text
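+
+
+# Illustrative sketch (the template line below is hypothetical, not taken from
+# the shipped model cards): the summary card templates use double-brace
+# placeholders that are filled by plain string replacement, e.g. a template
+# line such as
+#     model = AutoModelForCausalLM.from_pretrained("{{repo_id}}")
+# becomes
+#     model = AutoModelForCausalLM.from_pretrained("account/model")
+# whenever no hf.yaml with a concrete repo_id exists in the output directory.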
diff --git a/llm_studio/app_utils/sections/histogram_card.py b/llm_studio/app_utils/sections/histogram_card.py
new file mode 100644
index 0000000000000000000000000000000000000000..e76eb796c3018c0ba0209d5ad29779a2a0d692f7
--- /dev/null
+++ b/llm_studio/app_utils/sections/histogram_card.py
@@ -0,0 +1,97 @@
+from typing import List
+
+import pandas as pd
+from h2o_wave import data, ui
+
+
+def histogram_card(
+ x,
+ a=0.1,
+ b=0.9,
+ x_axis_description="text_length",
+ histogram_box="first",
+ title="Text Length (split by whitespace)",
+):
+ assert " " not in x_axis_description, (
+ "x_axis_description in histogram card must not contain spaces, "
+ "as the card would not be rendered."
+ )
+ df_quantile = compute_quantile_df(x, a, b)
+ df_quantile = df_quantile.rename(columns={"length": x_axis_description})
+ card = ui.plot_card(
+ box=histogram_box,
+ title=title,
+ data=data(
+ fields=df_quantile.columns.tolist(),
+ rows=df_quantile.values.tolist(),
+ pack=True,
+ ),
+ plot=ui.plot(
+ marks=[
+ ui.mark(
+ type="area",
+ x=f"={x_axis_description}",
+ x_title=f"Total samples: {len(x)}",
+ y="=count",
+ y_title="Count",
+ color="=data_type",
+ shape="circle",
+ )
+ ]
+ ),
+ )
+ return card
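+
+
+# Usage sketch (illustrative; the card key and dataframe column are
+# assumptions, not part of this module):
+#
+#     lengths = [len(str(t).split(" ")) for t in df["text"]]
+#     q.page["dataset/display/histogram"] = histogram_card(
+#         lengths,
+#         a=0.1,
+#         b=0.9,
+#         x_axis_description="text_length",
+#         histogram_box="first",
+#     )
+#
+# Note that x_axis_description must not contain spaces (see the assert above).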
+
+
+def compute_quantile_df(x: List[int], a: float, b: float):
+ """
+ Compute the quantiles based on the input list x.
+
+ Returns a dataframe with the following columns:
+ - length: length of the text
+ - count: number of texts with this length
+ - data_type: quantile type
+            (first (a * 100)% quantile, (a * 100)%-(b * 100)% quantile,
+ last (100 * (1 - b))% quantile)
+
+ Note that quantiles are overlapping on the edges.
+ """
+ if not x:
+ raise ValueError("Input list x is empty")
+
+ if not 0.05 <= a <= b <= 0.95:
+ raise ValueError(
+ "Values of a and b must be in [0.05, 0.95] "
+ "and a should be less than or equal to b"
+ )
+
+ x_axis_description = "length"
+ df = pd.DataFrame(x, columns=[x_axis_description])
+ df["count"] = 1
+ df_quantile = (
+ df.groupby([x_axis_description])
+ .sum()
+ .reset_index()
+ .sort_values(by=x_axis_description)[[x_axis_description, "count"]]
+ )
+ sorted_data = sorted(x)
+ first_quantile = sorted_data[int(len(sorted_data) * a)]
+ last_quantile = sorted_data[-int(len(sorted_data) * (1 - b))]
+
+ df_first = df_quantile.loc[df_quantile[x_axis_description] <= first_quantile].copy()
+ df_first["data_type"] = f"first {int(a * 100)}% quantile"
+ df_last = df_quantile.loc[df_quantile[x_axis_description] >= last_quantile].copy()
+ df_last["data_type"] = f"last {100 - int(b * 100)}% quantile"
+ df_quantile["data_type"] = f"{int(a * 100)}%-{int(b * 100)}% quantile"
+ middle_quantile_min = max(0, len(df_first) - 1)
+ middle_quantile_max = (
+ min(len(df_quantile), (len(df_quantile) - len(df_last) - 1)) + 1
+ )
+ df_quantile = pd.concat(
+ [
+ df_first,
+ df_quantile.loc[middle_quantile_min:middle_quantile_max],
+ df_last,
+ ]
+ )
+ return df_quantile
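+
+
+# Worked example (illustrative): for a list of 100 text lengths with a=0.1
+# and b=0.9, first_quantile is roughly the 10th-smallest length and
+# last_quantile roughly the 10th-largest (up to integer truncation of the
+# index). Rows up to first_quantile are tagged "first 10% quantile", rows
+# from last_quantile on are tagged "last 10% quantile", and every length
+# also appears in the overlapping "10%-90% quantile" band.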
diff --git a/llm_studio/app_utils/sections/home.py b/llm_studio/app_utils/sections/home.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2db434c8e883318b719f52bb18ad018f2535d61
--- /dev/null
+++ b/llm_studio/app_utils/sections/home.py
@@ -0,0 +1,177 @@
+import psutil
+import torch
+from h2o_wave import Q, data, ui
+
+from llm_studio.app_utils.config import default_cfg
+from llm_studio.app_utils.sections.common import clean_dashboard
+from llm_studio.app_utils.utils import (
+ get_datasets,
+ get_experiments,
+ get_gpu_usage,
+ get_single_gpu_usage,
+)
+from llm_studio.app_utils.wave_utils import ui_table_from_df, wave_theme
+from llm_studio.src.utils.export_utils import get_size_str
+
+
+async def home(q: Q) -> None:
+ await clean_dashboard(q, mode="home")
+ q.client["nav/active"] = "home"
+
+ experiments = get_experiments(q)
+ hdd = psutil.disk_usage(default_cfg.llm_studio_workdir)
+
+ q.page["home/disk_usage"] = ui.tall_gauge_stat_card(
+ box=ui.box("content", order=2, width="20%" if len(experiments) > 0 else "30%"),
+ title="Disk usage",
+ value=f"{hdd.percent:.2f} %",
+ aux_value=f"{get_size_str(hdd.used, sig_figs=1)} /\
+ {get_size_str(hdd.total, sig_figs=1)}",
+ plot_color=wave_theme.get_primary_color(q),
+ progress=hdd.percent / 100,
+ )
+
+ if len(experiments) > 0:
+ num_finished = len(experiments[experiments["status"] == "finished"])
+ num_running_queued = len(
+ experiments[experiments["status"].isin(["queued", "running"])]
+ )
+ num_failed_stopped = len(
+ experiments[experiments["status"].isin(["failed", "stopped"])]
+ )
+
+ q.page["home/experiments_stats"] = ui.form_card(
+ box=ui.box("content", order=1, width="40%"),
+ title="Experiments",
+ items=[
+ ui.visualization(
+ plot=ui.plot(
+ [ui.mark(type="interval", x="=status", y="=count", y_min=0)]
+ ),
+ data=data(
+ fields="status count",
+ rows=[
+ ("finished", num_finished),
+ ("queued + running", num_running_queued),
+ ("failed + stopped", num_failed_stopped),
+ ],
+ pack=True,
+ ),
+ )
+ ],
+ )
+
+ stats = []
+ if torch.cuda.is_available():
+ stats.append(ui.stat(label="Current GPU load", value=f"{get_gpu_usage():.1f}%"))
+ stats += [
+ ui.stat(label="Current CPU load", value=f"{psutil.cpu_percent()}%"),
+ ui.stat(
+ label="Memory usage",
+ value=f"{get_size_str(psutil.virtual_memory().used, sig_figs=1)} /\
+ {get_size_str(psutil.virtual_memory().total, sig_figs=1)}",
+ ),
+ ]
+
+ q.page["home/compute_stats"] = ui.tall_stats_card(
+ box=ui.box("content", order=1, width="40%" if len(experiments) > 0 else "70%"),
+ items=stats,
+ )
+
+ if torch.cuda.is_available():
+ q.page["home/gpu_stats"] = ui.form_card(
+ box=ui.box("expander", width="100%"),
+ items=[
+ ui.expander(
+ name="expander",
+ label="Detailed GPU stats",
+ items=get_single_gpu_usage(
+ highlight=wave_theme.get_primary_color(q)
+ ),
+ expanded=True,
+ )
+ ],
+ )
+ q.client.delete_cards.add("home/gpu_stats")
+
+ q.client.delete_cards.add("home/compute_stats")
+ q.client.delete_cards.add("home/disk_usage")
+ q.client.delete_cards.add("home/experiments_stats")
+
+ q.client["dataset/list/df_datasets"] = get_datasets(q)
+ df_viz = q.client["dataset/list/df_datasets"].copy()
+ df_viz = df_viz[df_viz.columns.intersection(["name", "problem type"])]
+
+ if torch.cuda.is_available():
+ table_height = "max(calc(100vh - 660px), 400px)"
+ else:
+ table_height = "max(calc(100vh - 550px), 400px)"
+
+ q.page["dataset/list"] = ui.form_card(
+ box="datasets",
+ items=[
+ ui.inline(
+ [
+ ui.button(
+ name="dataset/list", icon="Database", label="", primary=True
+ ),
+ ui.label("List of Datasets"),
+ ]
+ ),
+ ui_table_from_df(
+ q=q,
+ df=df_viz,
+ name="dataset/list/table",
+ sortables=[],
+ searchables=[],
+ min_widths={"name": "240", "problem type": "130"},
+ link_col="name",
+ height=table_height,
+ ),
+ ],
+ )
+ q.client.delete_cards.add("dataset/list")
+
+ q.client["experiment/list/df_experiments"] = get_experiments(q, status="finished")
+
+ df_viz = q.client["experiment/list/df_experiments"].copy()
+ df_viz = df_viz.rename(columns={"process_id": "pid", "config_file": "problem type"})
+ df_viz = df_viz[
+ df_viz.columns.intersection(
+ ["name", "dataset", "problem type", "metric", "val metric"]
+ )
+ ]
+
+ q.page["experiment/list"] = ui.form_card(
+ box="experiments",
+ items=[
+ ui.inline(
+ [
+ ui.button(
+ name="experiment/list",
+ icon="FlameSolid",
+ label="",
+ primary=True,
+ ),
+ ui.label("List of Experiments"),
+ ]
+ ),
+ ui_table_from_df(
+ q=q,
+ df=df_viz,
+ name="experiment/list/table",
+ sortables=["val metric"],
+ numerics=["val metric"],
+ min_widths={
+ # "id": "50",
+ "name": "115",
+ "dataset": "100",
+ "problem type": "120",
+ "metric": "70",
+ "val metric": "85",
+ },
+ link_col="name",
+ height=table_height,
+ ),
+ ],
+ )
diff --git a/llm_studio/app_utils/sections/project.py b/llm_studio/app_utils/sections/project.py
new file mode 100644
index 0000000000000000000000000000000000000000..a86083b44ea07fc98fc61b5574d363c5daa735e4
--- /dev/null
+++ b/llm_studio/app_utils/sections/project.py
@@ -0,0 +1,125 @@
+import logging
+import os
+
+from h2o_wave import Q, ui
+
+from llm_studio.app_utils.sections.experiment import (
+ experiment_compare,
+ experiment_list,
+ experiment_rename_action,
+ experiment_rename_form,
+ get_table_and_message_item_indices,
+)
+from llm_studio.app_utils.utils import get_experiments_status
+
+logger = logging.getLogger(__name__)
+
+
+async def experiment_rename_action_workflow(q: Q):
+ if q.args["experiment/rename/name"]:
+ q.client["experiment/rename/name"] = q.args["experiment/rename/name"]
+
+ new_name = q.client["experiment/rename/name"]
+ if new_name and new_name.strip():
+ current_id = q.client["experiment/rename/id"]
+ experiment = q.client.app_db.get_experiment(current_id)
+ new_path = experiment.path.replace(experiment.name, new_name)
+
+ experiment_df = q.client.app_db.get_experiments_df()
+ experiment_df["status"], experiment_df["info"] = get_experiments_status(
+ experiment_df
+ )
+ status = experiment_df.set_index("id").loc[experiment.id, "status"]
+
+ if os.path.exists(new_path):
+ error = f"Experiment {new_name} already exists."
+ await experiment_rename_form(q, error=error)
+ elif status in ["running", "queued"]:
+ error = "Cannot rename running or queued experiments."
+ await experiment_rename_form(q, error=error)
+ else:
+ await experiment_rename_action(q, experiment, new_name)
+ await list_current_experiments(q)
+ else:
+ await experiment_rename_form(q, error="New name must be non-empty")
+
+
+async def list_current_experiments(q, allowed_statuses=None, actions=True, reset=True):
+ await experiment_list(
+ q,
+ allowed_statuses=allowed_statuses,
+ reset=reset,
+ actions=actions,
+ )
+
+    if not reset:  # in case of the abort button, disable multi-select
+ table_item_idx, message_item_idx = get_table_and_message_item_indices(q)
+ q.page["experiment/list"].items[table_item_idx].table.multiple = False
+
+
+async def current_experiment_list_stop(q: Q) -> None:
+ """Allow to select experiments to stop."""
+
+ table_item_idx, message_item_idx = get_table_and_message_item_indices(q)
+ stop_label = "Stop experiments"
+
+ q.page["experiment/list"].items[table_item_idx].table.multiple = True
+ q.page["dataset/display/footer"].items = [
+ ui.inline(
+ items=[
+ ui.button(name="experiment/stop", label=stop_label, primary=True),
+ ui.button(name="experiment/list/current/noreset", label="Abort"),
+ ]
+ )
+ ]
+
+
+async def current_experiment_list_delete(q: Q) -> None:
+ """Allow to select experiments to delete."""
+
+ table_item_idx, message_item_idx = get_table_and_message_item_indices(q)
+ delete_label = "Delete experiments"
+
+ q.page["experiment/list"].items[table_item_idx].table.multiple = True
+ q.page["dataset/display/footer"].items = [
+ ui.inline(
+ items=[
+ ui.button(
+ name="experiment/delete/dialog", label=delete_label, primary=True
+ ),
+ ui.button(name="experiment/list/current/noreset", label="Abort"),
+ ]
+ )
+ ]
+
+
+async def current_experiment_list_compare(q: Q) -> None:
+ """Allow to select previous experiment to start new one."""
+
+ table_item_idx, message_item_idx = get_table_and_message_item_indices(q)
+ q.page["experiment/list"].items[table_item_idx].table.multiple = True
+ q.page["dataset/display/footer"].items = [
+ ui.inline(
+ items=[
+ ui.button(
+ name="experiment/compare",
+ label="Compare experiments",
+ primary=True,
+ ),
+ ui.button(name="experiment/list/current/noreset", label="Abort"),
+ ]
+ )
+ ]
+
+
+async def current_experiment_compare(q: Q) -> None:
+ selected_rows = q.args["experiment/list/table"]
+ if selected_rows:
+ q.client["experiment/compare/selected"] = selected_rows
+ elif q.client["experiment/compare/selected"]:
+ selected_rows = q.client["experiment/compare/selected"]
+ else:
+ await list_current_experiments(q)
+ return
+
+ await experiment_compare(q, selected_rows)
diff --git a/llm_studio/app_utils/sections/settings.py b/llm_studio/app_utils/sections/settings.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b142b1e81528516a4c289164f8b314c1631e2eb
--- /dev/null
+++ b/llm_studio/app_utils/sections/settings.py
@@ -0,0 +1,624 @@
+import multiprocessing
+
+import torch
+from h2o_wave import Q, ui
+
+from llm_studio.app_utils.sections.common import clean_dashboard
+from llm_studio.app_utils.setting_utils import Secrets
+from llm_studio.src.loggers import ExternalLoggers
+
+
+async def settings(q: Q) -> None:
+ await clean_dashboard(q, mode="full")
+ q.client["nav/active"] = "settings"
+
+ label_width = "280px"
+ textbox_width = "350px"
+
+ q.page["settings/content"] = ui.form_card(
+ box="content",
+ items=[
+ ui.message_bar(
+ type="info",
+ text="Setting changes are directly applied for the \
+ current session and can be made persistent by using the \
+ ***Save settings persistently*** button below. To reload \
+ the persistently saved settings, use the ***Load settings*** button.",
+ ),
+ ui.separator("Credential Storage"),
+ ui.inline(
+ items=[
+ ui.label("Credential Handler", width=label_width),
+ ui.dropdown(
+ name="credential_saver",
+ value=q.client["credential_saver"],
+ choices=[ui.choice(name, name) for name in Secrets.names()],
+ trigger=False,
+ width="300px",
+ ),
+ ]
+ ),
+ ui.message_bar(
+ type="info",
+ text="""Method used to save credentials (passwords) \
+ for ***Save settings persistently***. \
+                The recommended approach is to use Keyring or to avoid \
+                permanent storage altogether \
+                (credentials then need to be re-entered upon app restart). \
+                Keyring will be disabled if it is not set up on the host machine. \
+                Only resort to a local .env file if access to your \
+                machine is restricted to you.\n\
+ When you select ***Save settings persistently***, \
+ credentials will be removed from all non-selected methods. \
+ ***Restore Default Settings*** will clear credentials from all methods.
+ """,
+ ),
+ ui.separator("Appearance"),
+ ui.inline(
+ items=[
+ ui.label("Delete Dialogs", width=label_width),
+ ui.toggle(
+ name="delete_dialogs",
+ value=q.client["delete_dialogs"],
+ trigger=False,
+ tooltip=(
+ "Whether to show delete dialogs before deleting "
+ "datasets or experiments."
+ ),
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Maximum Chart Points", width=label_width),
+ ui.spinbox(
+ name="chart_plot_max_points",
+ label=None,
+ min=1,
+ max=10000,
+ step=1000,
+ value=q.client["chart_plot_max_points"],
+ width=textbox_width,
+ trigger=False,
+ tooltip="Set the maximum number of points shown in the "
+ "experiment chart plots. Plots will be sub-sampled if "
+ "needed.",
+ ),
+ ]
+ ),
+ ui.separator("Default Connector Settings"),
+ ui.inline(
+ items=[
+ ui.label("AWS S3 bucket name", width=label_width),
+ ui.textbox(
+ name="default_aws_bucket_name",
+ label=None,
+ value=q.client["default_aws_bucket_name"],
+ width=textbox_width,
+ trigger=False,
+ tooltip="Set the value for the AWS bucket for \
+ dataset import. S3 bucket name including relative paths.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("AWS access key", width=label_width),
+ ui.textbox(
+ name="default_aws_access_key",
+ label=None,
+ value=q.client["default_aws_access_key"],
+ width=textbox_width,
+ password=True,
+ trigger=False,
+ tooltip="Set the value for the AWS access key \
+ for dataset import.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("AWS secret key", width=label_width),
+ ui.textbox(
+ name="default_aws_secret_key",
+ label=None,
+ value=q.client["default_aws_secret_key"],
+ width=textbox_width,
+ password=True,
+ trigger=False,
+ tooltip="Set the value for the AWS secret key \
+ for dataset import.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Azure Datalake connection string", width=label_width),
+ ui.textbox(
+ name="default_azure_conn_string",
+ label=None,
+ value=q.client["default_azure_conn_string"],
+ width=textbox_width,
+ password=True,
+ trigger=False,
+ tooltip="Set the value for the Azure Datalake \
+ connection string for dataset import.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Azure Datalake container name", width=label_width),
+ ui.textbox(
+ name="default_azure_container",
+ label=None,
+ value=q.client["default_azure_container"],
+ width=textbox_width,
+ password=False,
+ trigger=False,
+ tooltip="Set the value for the Azure Datalake \
+ container name for dataset import.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Kaggle username", width=label_width),
+ ui.textbox(
+ name="default_kaggle_username",
+ label=None,
+ value=q.client["default_kaggle_username"],
+ width=textbox_width,
+ password=False,
+ trigger=False,
+ tooltip="Set the value for the Kaggle username \
+ for dataset import.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Kaggle secret key", width=label_width),
+ ui.textbox(
+ name="default_kaggle_secret_key",
+ label=None,
+ value=q.client["default_kaggle_secret_key"],
+ width=textbox_width,
+ password=True,
+ trigger=False,
+ tooltip="Set the value for the Kaggle secret key \
+ for dataset import.",
+ ),
+ ]
+ ),
+ ui.separator("Default Experiment Settings"),
+ ui.inline(
+ items=[
+ ui.label("Number of Workers", width=label_width),
+ ui.spinbox(
+ name="default_number_of_workers",
+ label=None,
+ min=1,
+ max=multiprocessing.cpu_count(),
+ step=1,
+ value=q.client["default_number_of_workers"],
+ width=textbox_width,
+ trigger=False,
+ tooltip="Set the value for the number of workers \
+ sliders in the experiment setup.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Logger", width=label_width),
+ ui.dropdown(
+ name="default_logger",
+ value=q.client["default_logger"],
+ choices=[
+ ui.choice(name, name) for name in ExternalLoggers.names()
+ ],
+ trigger=False,
+ width="100px",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Neptune Project", width=label_width),
+ ui.textbox(
+ name="default_neptune_project",
+ label=None,
+ value=q.client["default_neptune_project"],
+ width=textbox_width,
+ trigger=False,
+ tooltip="Set the value for the neptune project \
+ in the experiment setup.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Neptune API Token", width=label_width),
+ ui.textbox(
+ name="default_neptune_api_token",
+ label=None,
+ value=q.client["default_neptune_api_token"],
+ width=textbox_width,
+ password=True,
+ trigger=False,
+ tooltip="Set the value for the Neptune API token \
+ in the experiment setup.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Wandb Project", width=label_width),
+ ui.textbox(
+ name="default_wandb_project",
+ label=None,
+ value=q.client["default_wandb_project"],
+ width=textbox_width,
+ trigger=False,
+ tooltip="Set the value for the Wandb project \
+ in the experiment setup.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Wandb Entity", width=label_width),
+ ui.textbox(
+ name="default_wandb_entity",
+ label=None,
+ value=q.client["default_wandb_entity"],
+ width=textbox_width,
+ trigger=False,
+ tooltip="Set the value for the Wandb entity \
+ in the experiment setup.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Wandb API Token", width=label_width),
+ ui.textbox(
+ name="default_wandb_api_token",
+ label=None,
+ value=q.client["default_wandb_api_token"],
+ width=textbox_width,
+ password=True,
+ trigger=False,
+ tooltip="Set the value for the Wandb API token \
+ in the experiment setup.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Hugging Face API Token", width=label_width),
+ ui.textbox(
+ name="default_huggingface_api_token",
+ label=None,
+ value=q.client["default_huggingface_api_token"],
+ width=textbox_width,
+ password=True,
+ trigger=False,
+ tooltip="Set the value for the Hugging Face API token \
+ in the experiment setup.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Hugging Face Hub Enable HF Transfer", width=label_width),
+ ui.toggle(
+ name="default_hf_hub_enable_hf_transfer",
+                    value=bool(q.client["default_hf_hub_enable_hf_transfer"]),
+                    tooltip=(
+                        "Toggle to enable HF Transfer for faster downloads. "
+                        "Disable it if you are experiencing issues during "
+                        "download or upload. EXPERIMENTAL."
+                    ),
+ trigger=False,
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("OpenAI API Token", width=label_width),
+ ui.textbox(
+ name="default_openai_api_token",
+ label=None,
+ value=q.client["default_openai_api_token"],
+ width=textbox_width,
+ password=True,
+ trigger=False,
+ tooltip="Set the value for the OpenAI API token \
+ in the experiment setup.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("GPT evaluation max samples", width=label_width),
+ ui.spinbox(
+ name="default_gpt_eval_max",
+ label=None,
+ value=q.client["default_gpt_eval_max"],
+ width=textbox_width,
+ min=1,
+ max=10000,
+ step=1,
+ trigger=False,
+ tooltip="Set the maximum samples for GPT evaluation. \
+ This is used to prevent unexpected high API costs. \
+ Increase at your own risk.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Use OpenAI API on Azure", width=label_width),
+ ui.toggle(
+ name="default_openai_azure",
+ value=q.client["default_openai_azure"],
+ tooltip=(
+ "Toggle to use Microsoft Azure Endpoints for the "
+ "OpenAI API."
+ ),
+ trigger=True,
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("OpenAI API Endpoint", width=label_width),
+ ui.textbox(
+ name="default_openai_api_base",
+ label=None,
+ value=q.client["default_openai_api_base"],
+ width=textbox_width,
+ password=False,
+ trigger=False,
+ tooltip=(
+ "Set the value for the OpenAI API endpoint. "
+ "Use when on Azure."
+ ),
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("OpenAI API Deployment ID", width=label_width),
+ ui.textbox(
+ name="default_openai_api_deployment_id",
+ label=None,
+ value=q.client["default_openai_api_deployment_id"],
+ width=textbox_width,
+ password=False,
+ trigger=False,
+ tooltip=(
+ "Set the value for the OpenAI API deployment ID. "
+ "Use when on Azure."
+ ),
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("OpenAI API version", width=label_width),
+ ui.textbox(
+ name="default_openai_api_version",
+ label=None,
+ value=q.client["default_openai_api_version"],
+ width=textbox_width,
+ password=False,
+ trigger=False,
+ tooltip=(
+ "Set the value for the OpenAI API version. "
+ "Use when on Azure."
+ ),
+ ),
+ ]
+ ),
+ ui.separator("Experiment Maximum Settings"),
+ ui.inline(
+ items=[
+ ui.label("Number of Epochs", width=label_width),
+ ui.spinbox(
+ name="set_max_epochs",
+ label=None,
+ min=1,
+ max=2000,
+ step=1,
+ value=q.client["set_max_epochs"],
+ width=textbox_width,
+ trigger=False,
+ tooltip="Set the maximum value for the epoch slider \
+ in the experiment setup.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Batch Size", width=label_width),
+ ui.spinbox(
+ name="set_max_batch_size",
+ label=None,
+ min=1,
+ max=4096,
+ step=1,
+ value=q.client["set_max_batch_size"],
+ width=textbox_width,
+ trigger=False,
+ tooltip="Set the maximum value for the batch size slider \
+ in the experiment setup.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Num Classes", width=label_width),
+ ui.spinbox(
+ name="set_max_num_classes",
+ label=None,
+ min=1,
+ max=16384,
+ step=1,
+ value=q.client["set_max_num_classes"],
+ width=textbox_width,
+ trigger=False,
+ tooltip="Set the maximum value for the num classes \
+ slider in the experiment setup.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Max Length", width=label_width),
+ ui.spinbox(
+ name="set_max_max_length",
+ label=None,
+ min=1,
+ max=16384 * 8,
+ step=1,
+ value=q.client["set_max_max_length"],
+ width=textbox_width,
+ trigger=False,
+ tooltip="Set the maximum value for the max length \
+ slider in the experiment setup.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("Gradient clip", width=label_width),
+ ui.spinbox(
+ name="set_max_gradient_clip",
+ label=None,
+ min=1,
+ max=16384,
+ step=1,
+ value=q.client["set_max_gradient_clip"],
+ width=textbox_width,
+ trigger=False,
+ tooltip="Set the maximum value for the gradient clip \
+ slider in the experiment setup.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("LoRA R", width=label_width),
+ ui.spinbox(
+ name="set_max_lora_r",
+ label=None,
+ min=1,
+ max=16384,
+ step=1,
+ value=q.client["set_max_lora_r"],
+ width=textbox_width,
+ trigger=False,
+ tooltip="Set the maximum value for the LoRA R \
+ slider in the experiment setup.",
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("LoRA alpha", width=label_width),
+ ui.spinbox(
+ name="set_max_lora_alpha",
+ label=None,
+ min=1,
+ max=16384,
+ step=1,
+ value=q.client["set_max_lora_alpha"],
+ width=textbox_width,
+ trigger=False,
+ tooltip="Set the maximum value for the LoRA Alpha \
+ slider in the experiment setup.",
+ ),
+ ]
+ ),
+ ui.separator("Default System Settings"),
+ ui.inline(
+ items=[
+ ui.label("GPU used for Download", width=label_width),
+ ui.dropdown(
+ name="gpu_used_for_download",
+ label=None,
+ value=q.client["gpu_used_for_download"],
+ width=textbox_width,
+ choices=[
+ ui.choice(str(d), str(d))
+ for d in ["cpu", "cpu_shard"]
+ + [
+ f"cuda:{idx}"
+ for idx in range(torch.cuda.device_count())
+ ]
+ ],
+ tooltip=(
+ "The local device to prepare the model before pushing "
+ "it to HF or downloading. CPU will never load the weights "
+ "to the GPU, which can be useful "
+ "for large models, but will be significantly slower. "
+ "Cpu_shard will first load on CPU and then shard "
+ "on all GPUs before pushing to HF."
+ ),
+ ),
+ ]
+ ),
+ ui.inline(
+ items=[
+ ui.label("GPU used for Chat", width=label_width),
+ ui.spinbox(
+ name="gpu_used_for_chat",
+ label=None,
+ min=1,
+ max=torch.cuda.device_count(),
+ step=1,
+ value=q.client["gpu_used_for_chat"],
+ width=textbox_width,
+ trigger=False,
+ tooltip="Set the gpu id that is used for the chat window.",
+ ),
+ ]
+ ),
+ ],
+ )
+
+ q.client.delete_cards.add("settings/content")
+
+ q.page["settings/footer"] = ui.form_card(
+ box="footer",
+ items=[
+ ui.inline(
+ items=[
+ ui.button(
+ name="save_settings",
+ label="Save settings persistently",
+ primary=True,
+ ),
+ ui.button(
+ name="load_settings", label="Load settings", primary=False
+ ),
+ ui.button(
+ name="restore_default_settings",
+ label="Restore default settings",
+ primary=False,
+ ),
+ ],
+ justify="start",
+ )
+ ],
+ )
+ q.client.delete_cards.add("settings/footer")
diff --git a/llm_studio/app_utils/setting_utils.py b/llm_studio/app_utils/setting_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc490ae1fb9ba73bb4dd3eec49ed91a7f1861b68
--- /dev/null
+++ b/llm_studio/app_utils/setting_utils.py
@@ -0,0 +1,344 @@
+import errno
+import functools
+import logging
+import os
+import pickle
+import signal
+import traceback
+from typing import Any, List
+
+import keyring
+import yaml
+from h2o_wave import Q, ui
+from keyring.errors import KeyringLocked, PasswordDeleteError
+
+from llm_studio.app_utils.config import default_cfg
+from llm_studio.app_utils.utils import get_database_dir, get_user_id
+
+logger = logging.getLogger(__name__)
+SECRET_KEYS = [
+ key
+ for key in default_cfg.user_settings
+ if any(password in key for password in ["api", "secret", "key"])
+]
+USER_SETTING_KEYS = [key for key in default_cfg.user_settings if key not in SECRET_KEYS]
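+
+# For instance, settings such as "default_aws_access_key",
+# "default_kaggle_secret_key", or "default_huggingface_api_token" match one of
+# the substrings above and land in SECRET_KEYS, while plain preferences such
+# as "default_aws_bucket_name" stay in USER_SETTING_KEYS.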
+
+
+async def save_user_settings_and_secrets(q: Q):
+ await _save_secrets(q)
+ _save_user_settings(q)
+
+
+def load_user_settings_and_secrets(q: Q):
+ _maybe_migrate_to_yaml(q)
+ _load_secrets(q)
+ _load_user_settings(q)
+
+
+def load_default_user_settings(q: Q, clear_secrets=True):
+ for key in default_cfg.user_settings:
+ q.client[key] = default_cfg.user_settings[key]
+ if clear_secrets:
+ _clear_secrets(q, key)
+
+
+class NoSaver:
+ """
+ Base class that provides methods for saving, loading, and deleting password entries.
+
+ Attributes:
+ username (str): The username associated with the password entries.
+ root_dir (str): The root directory.
+
+ Methods:
+ save(name: str, password: str) -> None:
+ Save a password entry with the given name and password.
+
+ load(name: str) -> str:
+ Load and return the password associated with the given name.
+
+ delete(name: str) -> None:
+ Delete the password entry with the given name.
+
+ """
+
+ def __init__(self, username: str, root_dir: str):
+ self.username = username
+ self.root_dir = root_dir
+
+ def save(self, name: str, password: str):
+ pass
+
+ def load(self, name: str) -> str:
+ return ""
+
+ def delete(self, name: str):
+ pass
+
+
+class KeyRingSaver(NoSaver):
+ """
+ A class for saving, loading, and deleting passwords using the keyring library.
+    Some machines lack a working keyring backend, in which case this saver is disabled.
+ """
+
+ def __init__(self, username: str, root_dir: str):
+ super().__init__(username, root_dir)
+ self.namespace = f"{username}_h2o_llmstudio"
+
+ def save(self, name: str, password: str):
+ keyring.set_password(self.namespace, name, password)
+
+ def load(self, name: str) -> str:
+ return keyring.get_password(self.namespace, name) or "" # type: ignore
+
+ def delete(self, name: str):
+ try:
+ keyring.delete_password(self.namespace, name)
+ except (KeyringLocked, PasswordDeleteError):
+ pass
+ except Exception as e:
+ logger.warning(f"Error deleting password for keyring: {e}")
+
+
+class EnvFileSaver(NoSaver):
+ """
+    A class for saving, loading, and deleting name-password pairs
+    in a per-user environment file.
+ Only use this class if you are sure that the environment file is secure.
+ """
+
+ @property
+ def filename(self):
+ return os.path.join(self.root_dir, f"{self.username}.env")
+
+ def save(self, name: str, password: str):
+ data = {}
+ if os.path.exists(self.filename):
+ with open(self.filename, "r") as f:
+                data = yaml.safe_load(f) or {}
+ data[name] = password
+ with open(self.filename, "w") as f:
+ yaml.safe_dump(data, f)
+
+ def load(self, name: str) -> str:
+ if not os.path.exists(self.filename):
+ return ""
+
+ with open(self.filename, "r") as f:
+            data = yaml.safe_load(f) or {}
+ return data.get(name, "")
+
+ def delete(self, name: str):
+ if os.path.exists(self.filename):
+ with open(self.filename, "r") as f:
+ data = yaml.safe_load(f)
+ if data and name in data:
+ del data[name]
+ with open(self.filename, "w") as f:
+ yaml.safe_dump(data, f)
+
+
+# https://stackoverflow.com/questions/2281850/timeout-function-if-it-takes-too-long-to-finish
+class TimeoutError(Exception):
+ pass
+
+
+def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
+ def decorator(func):
+ def _handle_timeout(signum, frame):
+ raise TimeoutError(error_message)
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ signal.signal(signal.SIGALRM, _handle_timeout)
+ signal.alarm(seconds)
+ try:
+ result = func(*args, **kwargs)
+ finally:
+ signal.alarm(0)
+ return result
+
+ return wrapper
+
+ return decorator
+
+
+@timeout(3)
+def check_if_keyring_works():
+ """
+ Test if keyring is working. On misconfigured machines,
+ Keyring may hang up to 2 minutes with the following error:
+ jeepney.wrappers.DBusErrorResponse:
+ [org.freedesktop.DBus.Error.TimedOut]
+ ("Failed to activate service 'org.freedesktop.secrets':
+ timed out (service_start_timeout=120000ms)",)
+
+ To avoid waiting for 2 minutes, we kill the process after 3 seconds.
+ """
+ keyring.get_password("service", "username")
+
+
+class Secrets:
+ """
+ Factory class to get the secrets' handler.
+ """
+
+ _secrets = {
+ "Do not save credentials permanently": NoSaver,
+ ".env File": EnvFileSaver,
+ }
+ try:
+ check_if_keyring_works()
+ logger.info("Keyring is correctly configured on this machine.")
+ _secrets["Keyring"] = KeyRingSaver
+ except TimeoutError:
+ logger.warning(
+ "Error loading keyring due to timeout. Disabling keyring save option."
+ )
+ except Exception as e:
+ logger.warning(f"Error loading keyring: {e}. Disabling keyring save option.")
+
+ @classmethod
+ def names(cls) -> List[str]:
+ return sorted(cls._secrets.keys())
+
+ @classmethod
+ def get(cls, name: str) -> Any:
+ return cls._secrets.get(name)
+
+
+def _save_user_settings(q: Q):
+ user_settings = {key: q.client[key] for key in USER_SETTING_KEYS}
+ with open(_get_usersettings_path(q), "w") as f:
+ yaml.dump(user_settings, f)
+
+
+def _load_user_settings(q: Q):
+ if os.path.isfile(_get_usersettings_path(q)):
+ logger.info("Reading user settings")
+ with open(_get_usersettings_path(q), "r") as f:
+ user_settings = yaml.load(f, Loader=yaml.FullLoader)
+ for key in USER_SETTING_KEYS:
+ q.client[key] = user_settings.get(key, default_cfg.user_settings[key])
+ else:
+ logger.info("No user settings found. Using default settings.")
+ # User may have deleted the user settings file. We load the default settings.
+ # Secrets may still be stored in keyring or env file.
+ load_default_user_settings(q, clear_secrets=False)
+
+
+async def _save_secrets(q: Q):
+ secret_name, secrets_handler = _get_secrets_handler(q)
+ for key in SECRET_KEYS:
+ try:
+            _clear_secrets(q, key, excludes=(secret_name,))
+            if q.client[key]:
+                secrets_handler.save(key, q.client[key])
+            else:
+                # also remove the secret from the active handler
+                # when the user cleared the field
+                secrets_handler.delete(key)
+
+ except Exception:
+ exception = str(traceback.format_exc())
+ logger.error(f"Could not save password {key} to {secret_name}")
+ q.page["meta"].dialog = ui.dialog(
+ title="Could not save secrets. "
+ "Please choose another Credential Handler.",
+ name="secrets_error",
+ items=[
+ ui.text(
+ f"The following error occurred when"
+ f" using {secret_name}: {exception}."
+ ),
+ ui.button(
+ name="settings/close_error_dialog", label="Close", primary=True
+ ),
+ ],
+ closable=True,
+ )
+ q.client["keep_meta"] = True
+ await q.page.save()
+ break
+ else: # if no exception
+ # force dataset connector updated when the user decides to click on save
+ q.client["dataset/import/s3_bucket"] = q.client["default_aws_bucket_name"]
+ q.client["dataset/import/s3_access_key"] = q.client["default_aws_access_key"]
+ q.client["dataset/import/s3_secret_key"] = q.client["default_aws_secret_key"]
+ q.client["dataset/import/kaggle_access_key"] = q.client[
+ "default_kaggle_username"
+ ]
+ q.client["dataset/import/kaggle_secret_key"] = q.client[
+ "default_kaggle_secret_key"
+ ]
+
+
+def _load_secrets(q: Q):
+ secret_name, secrets_handler = _get_secrets_handler(q)
+ for key in SECRET_KEYS:
+ try:
+ q.client[key] = secrets_handler.load(key) or default_cfg.user_settings[key]
+ except Exception:
+ logger.error(f"Could not load password {key} from {secret_name}")
+ q.client[key] = ""
+
+
+def _get_secrets_handler(q: Q):
+ secret_name = (
+ q.client["credential_saver"] or default_cfg.user_settings["credential_saver"]
+ )
+ secrets_handler = Secrets.get(secret_name)(
+ username=get_user_id(q), root_dir=get_database_dir(q)
+ )
+ return secret_name, secrets_handler
+
+
+def _clear_secrets(q: Q, name: str, excludes=tuple()):
+ for secret_name in Secrets.names():
+ if secret_name not in excludes:
+ secrets_handler = Secrets.get(secret_name)(
+ username=get_user_id(q), root_dir=get_database_dir(q)
+ )
+
+ secrets_handler.delete(name)
+
+
+def _maybe_migrate_to_yaml(q: Q):
+ """
+ Migrate user settings from a pickle file to a YAML file.
+ """
+ # prior, we used to save the user settings in a pickle file
+ old_usersettings_path = os.path.join(
+ get_database_dir(q), f"{get_user_id(q)}.settings"
+ )
+ if not os.path.isfile(old_usersettings_path):
+ return
+
+ try:
+ with open(old_usersettings_path, "rb") as f:
+ user_settings = pickle.load(f)
+
+ secret_name, secrets_handler = _get_secrets_handler(q)
+ logger.info(f"Migrating token using {secret_name}")
+ for key in SECRET_KEYS:
+ if key in user_settings:
+ secrets_handler.save(key, user_settings[key])
+
+ with open(_get_usersettings_path(q), "w") as f:
+ yaml.dump(
+ {
+ key: value
+ for key, value in user_settings.items()
+ if key in USER_SETTING_KEYS
+ },
+ f,
+ )
+ os.remove(old_usersettings_path)
+ logger.info(f"Successfully migrated tokens to {secret_name}. Old file deleted.")
+ except Exception as e:
+ logger.info(
+ f"Could not migrate tokens. "
+ f"Please delete {old_usersettings_path} and set your credentials again."
+ f"Error: \n\n {e} {traceback.format_exc()}"
+ )
+
+
+def _get_usersettings_path(q: Q):
+ return os.path.join(get_database_dir(q), f"{get_user_id(q)}.yaml")
diff --git a/llm_studio/app_utils/static/icon_300.svg b/llm_studio/app_utils/static/icon_300.svg
new file mode 100644
index 0000000000000000000000000000000000000000..c8e5b768fcadb18cca6d64cec5e2d8988f5725a3
--- /dev/null
+++ b/llm_studio/app_utils/static/icon_300.svg
@@ -0,0 +1,96 @@
diff --git a/llm_studio/app_utils/static/llm-studio-logo-light.png b/llm_studio/app_utils/static/llm-studio-logo-light.png
new file mode 100644
index 0000000000000000000000000000000000000000..4ceea9d349adcbdd1b66f6db6a7ca5175967d1a5
Binary files /dev/null and b/llm_studio/app_utils/static/llm-studio-logo-light.png differ
diff --git a/llm_studio/app_utils/static/llm-studio-logo.png b/llm_studio/app_utils/static/llm-studio-logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..b1750d691638982a30fc01eb1780afddd88312b7
Binary files /dev/null and b/llm_studio/app_utils/static/llm-studio-logo.png differ
diff --git a/llm_studio/app_utils/utils.py b/llm_studio/app_utils/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..bda8e7503096db7a651843b239664ef695b8d083
--- /dev/null
+++ b/llm_studio/app_utils/utils.py
@@ -0,0 +1,2394 @@
+import asyncio
+import collections
+import contextlib
+import dataclasses
+import glob
+import json
+import logging
+import math
+import os
+import random
+import re
+import shutil
+import socket
+import string
+import subprocess
+import time
+import uuid
+import zipfile
+from collections import defaultdict
+from contextlib import closing
+from functools import partial
+from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Type, Union
+
+import GPUtil
+import h2o_drive
+import numpy as np
+import pandas as pd
+import psutil
+import yaml
+from azure.storage.filedatalake import DataLakeServiceClient
+from boto3.session import Session
+from botocore.handlers import disable_signing
+from datasets import load_dataset
+from h2o_wave import Choice, Q, ui
+from pandas.core.frame import DataFrame
+from sqlitedict import SqliteDict
+
+from llm_studio.app_utils.db import Experiment
+from llm_studio.python_configs.base import DefaultConfigProblemBase
+from llm_studio.src import possible_values
+from llm_studio.src.utils.config_utils import (
+ _get_type_annotation_error,
+ load_config_yaml,
+ parse_cfg_dataclass,
+ save_config_yaml,
+)
+from llm_studio.src.utils.data_utils import is_valid_data_frame, read_dataframe
+from llm_studio.src.utils.export_utils import get_size_str
+from llm_studio.src.utils.type_annotations import KNOWN_TYPE_ANNOTATIONS
+
+from .config import default_cfg
+
+logger = logging.getLogger(__name__)
+
+
+class GridCheckError(Exception):
+ pass
+
+
+def get_user_id(q):
+ return q.auth.subject
+
+
+def get_user_name(q):
+ return q.auth.username
+
+
+def get_data_dir(q):
+ return os.path.join(default_cfg.llm_studio_workdir, default_cfg.data_folder, "user")
+
+
+def get_database_dir(q):
+ return os.path.join(default_cfg.llm_studio_workdir, default_cfg.data_folder, "dbs")
+
+
+def get_output_dir(q):
+ return os.path.join(
+ default_cfg.llm_studio_workdir, default_cfg.output_folder, "user"
+ )
+
+
+def get_download_dir(q):
+ return os.path.join(
+ default_cfg.llm_studio_workdir, default_cfg.output_folder, "download"
+ )
+
+
+def get_user_db_path(q):
+ return os.path.join(get_database_dir(q), "user.db")
+
+
+def get_usersettings_path(q):
+ return os.path.join(get_database_dir(q), f"{get_user_id(q)}.settings")
+
+
+def find_free_port():
+ with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
+ s.bind(("", 0))
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ return s.getsockname()[1]
+
+
+def start_process(
+ cfg: DefaultConfigProblemBase, gpu_list: List, process_queue: List, env_vars: Dict
+) -> subprocess.Popen:
+ """Starts train.py for a given configuration setting
+
+ Args:
+ cfg: DefaultConfigProblemBase config
+ gpu_list: list of GPUs to use for the training
+ process_queue: list of processes to wait for before starting the training
+ env_vars: dictionary of ENV variables to pass to the training process
+ Returns:
+ Process
+
+ """
+
+ num_gpus = len(gpu_list)
+ config_name = os.path.join(cfg.output_directory, "cfg.yaml")
+ env = {**os.environ, **env_vars}
+
+ if num_gpus == 0:
+ cmd = [
+ "python",
+ "llm_studio/train_wave.py",
+ "-Y",
+ config_name,
+ ]
+ # Do not delete for debug purposes
+ # elif num_gpus == 1:
+ # cmd = [
+ # "env",
+ # f"CUDA_VISIBLE_DEVICES={','.join(gpu_list)}",
+ # "python",
+ # "-u",
+ # "llm_studio/train_wave.py",
+ # "-P",
+ # config_name,
+ # ]
+ else:
+ free_port = find_free_port()
+ if cfg.environment.use_deepspeed:
+ logger.info("Starting deepspeed...")
+ cmd = [
+ "env",
+ "deepspeed",
+ "--include",
+ f"localhost:{','.join(gpu_list)}",
+ "--master_port",
+ f"{str(free_port)}",
+ "llm_studio/train_wave.py",
+ "-Y",
+ config_name,
+ ]
+ else:
+ logger.info("Starting torchrun...")
+ cmd = [
+ "env",
+ f"CUDA_VISIBLE_DEVICES={','.join(gpu_list)}",
+ "torchrun",
+ f"--nproc_per_node={str(num_gpus)}",
+ f"--master_port={str(free_port)}",
+ "llm_studio/train_wave.py",
+ "-Y",
+ config_name,
+ ]
+
+ if len(process_queue) > 0:
+ cmd.append("-Q")
+ cmd.append(",".join([str(x) for x in process_queue]))
+
+ p = subprocess.Popen(
+ cmd,
+ env=env,
+ )
+
+ logger.info(f"Percentage of RAM memory used: {psutil.virtual_memory().percent}")
+
+ return p
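+
+
+# For a multi-GPU run without deepspeed, the assembled command resembles
+# (illustrative values):
+#
+#     env CUDA_VISIBLE_DEVICES=0,1 torchrun --nproc_per_node=2 \
+#         --master_port=<free port> llm_studio/train_wave.py -Y <cfg.yaml path>
+#
+# with "-Q <pid,...>" appended when the run must wait for queued processes.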
+
+
+def clean_macos_artifacts(path: str) -> None:
+ """Cleans artifacts from MacOSX zip archives
+
+ Args:
+ path: path to the unzipped directory
+ """
+
+ shutil.rmtree(os.path.join(path, "__MACOSX/"), ignore_errors=True)
+
+ for ds_store in glob.glob(os.path.join(path, "**/.DS_Store"), recursive=True):
+ try:
+ os.remove(ds_store)
+ except OSError:
+ pass
+
+
+def s3_session(aws_access_key: str, aws_secret_key: str) -> Any:
+ """Establishes s3 session
+
+ Args:
+ aws_access_key: s3 access key
+ aws_secret_key: s3 secret key
+
+ Returns:
+ Session
+
+ """
+
+ session = Session(
+ aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key
+ )
+ s3 = session.resource("s3")
+ # if no key is present, disable signing
+ if aws_access_key == "" and aws_secret_key == "":
+ s3.meta.client.meta.events.register("choose-signer.s3.*", disable_signing)
+
+ return s3
+
+
+def filter_valid_files(files) -> List[str]:
+ valid_files = [
+ file
+ for file in files
+ if any([file.endswith(ext) for ext in default_cfg.allowed_file_extensions])
+ ]
+
+ return valid_files
+
+
+def s3_file_options(
+ bucket: str, aws_access_key: str, aws_secret_key: str
+) -> List[str] | Exception:
+ """ "Returns all zip files in the target s3 bucket
+
+ Args:
+ bucket: s3 bucket name
+ aws_access_key: s3 access key
+ aws_secret_key: s3 secret key
+
+ Returns:
+ List of zip files in bucket or Exception in case of access error
+
+ """
+
+ try:
+ bucket = bucket.replace("s3://", "")
+ if bucket[-1] == os.sep:
+ bucket = bucket[:-1]
+
+ bucket_split = bucket.split(os.sep)
+ bucket = bucket_split[0]
+ s3 = s3_session(aws_access_key, aws_secret_key)
+ s3_bucket = s3.Bucket(bucket)
+
+ folder = "/".join(bucket_split[1:])
+
+ files = []
+ for s3_file in s3_bucket.objects.filter(Prefix=f"{folder}/"):
+ if s3_file.key == f"{folder}/":
+ continue
+
+ files.append(s3_file.key)
+
+ files = filter_valid_files(files)
+ return files
+
+ except Exception as e:
+ logger.warning(f"Can't load S3 datasets list: {e}")
+ return e
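+
+
+# Illustrative parsing example: a bucket value of "s3://user-bucket/datasets"
+# is reduced to the bucket "user-bucket" with prefix "datasets", and only
+# objects below "datasets/" whose extension is listed in
+# default_cfg.allowed_file_extensions are returned.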
+
+
+def convert_file_size(size: float):
+ """Converts file size to human readable format
+
+ Args:
+ size: size in bytes
+
+ Returns:
+ size in readable format
+ """
+
+ if size == 0:
+ return "0B"
+ size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
+ i = int(math.floor(math.log(size, 1024)))
+ p = math.pow(1024, i)
+ s = round(size / p, 2)
+ return "%.2f %s" % (s, size_name[i])
+
+
+class S3Progress:
+ """Progress update for s3 downloads
+
+ Source:
+ https://stackoverflow.com/a/59843153/1281171
+
+ """
+
+ def __init__(self, q: Q, size: float) -> None:
+ """Initialize
+
+ Args:
+ q: Q
+ size: size of the file to download
+ """
+
+ self._q: Q = q
+ self._size: float = size
+ self._seen_so_far: float = 0.0
+ self._percentage: float = 0.0
+
+ def progress(self, bytes_amount: float):
+ """Update progress
+
+ Args:
+ bytes_amount: amount of bytes downloaded
+ """
+
+ self._seen_so_far += bytes_amount
+ self._percentage = (self._seen_so_far / self._size) * 100.0
+
+ async def update_ui(self):
+ """Update progress in UI"""
+
+ self._q.page["meta"].dialog = ui.dialog(
+ title="S3 file download in progress",
+ blocking=True,
+ items=[
+ ui.progress(
+ label="Please be patient...",
+ caption=(
+ f"{convert_file_size(self._seen_so_far)} of "
+ f"{convert_file_size(self._size)} "
+ f"({self._percentage:.2f}%)"
+ ),
+ value=self._percentage / 100,
+ )
+ ],
+ )
+ await self._q.page.save()
+
+ async def poll(self):
+ """Update wave ui"""
+
+ while self._percentage / 100 < 1:
+ await self.update_ui()
+ await self._q.sleep(0.1)
+ await self.update_ui()
+
+
+def s3_download_coroutine(q: Q, filename: str):
+ download_folder = f"{get_data_dir(q)}/tmp"
+ download_folder = get_valid_temp_data_folder(q, download_folder)
+
+ if os.path.exists(download_folder):
+ shutil.rmtree(download_folder)
+ os.makedirs(download_folder, exist_ok=True)
+
+ downloaded_zip = f"{download_folder}/{filename.split('/')[-1]}"
+
+ q.page["dataset/import"] = ui.form_card(box="content", items=[])
+ return downloaded_zip, download_folder
+
+
+def extract_if_zip(file, actual_path):
+ if file.endswith("zip"):
+ with zipfile.ZipFile(file, "r") as zip_ref:
+ zip_ref.extractall(actual_path)
+
+ os.remove(file)
+ clean_macos_artifacts(actual_path)
+
+
+async def s3_download(
+ q: Q, bucket, filename, aws_access_key, aws_secret_key
+) -> Tuple[str, str]:
+ """Downloads a file from s3
+
+ Args:
+ q: Q
+ bucket: s3 bucket name
+ filename: filename to download
+ aws_access_key: s3 access key
+ aws_secret_key: s3 secret key
+
+ Returns:
+ Download location path
+ """
+ bucket = bucket.replace("s3://", "")
+ if bucket[-1] == os.sep:
+ bucket = bucket[:-1]
+
+ bucket = bucket.split(os.sep)[0]
+
+ s3 = s3_session(aws_access_key, aws_secret_key)
+
+ file, s3_path = s3_download_coroutine(q, filename)
+
+ progress = S3Progress(
+ q, (s3.meta.client.head_object(Bucket=bucket, Key=filename))["ContentLength"]
+ )
+
+ poll_future = asyncio.create_task(progress.poll())
+
+ def download_file():
+ s3.Bucket(bucket).download_file(filename, file, Callback=progress.progress)
+
+ await q.run(download_file)
+ await poll_future
+
+ extract_if_zip(file, s3_path)
+
+ return s3_path, "".join(filename.split("/")[-1].split(".")[:-1])
+
+
+def azure_file_options(conn_string: str, container: str) -> List[str]:
+ """Returns all zip files in the target azure datalake container
+
+ Args:
+ conn_string: connection string
+ container: container including sub-paths
+
+ Returns:
+        List of files in storage or empty list in case of access error
+
+ """
+
+ try:
+ service_client = DataLakeServiceClient.from_connection_string( # type: ignore
+ conn_string
+ )
+
+ container_split = container.split(os.sep)
+ container = container_split[0]
+
+ folder = "/".join(container_split[1:])
+
+ file_system_client = service_client.get_file_system_client(
+ file_system=container
+ )
+
+ files = file_system_client.get_paths(path=folder)
+ files = next(files.by_page()) # type: ignore[arg-type]
+ files = [x.name for x in files] # type: ignore[assignment]
+ return filter_valid_files(files)
+
+ except Exception as e:
+ logger.warning(f"Can't load Azure datasets list: {e}")
+ return []
+
+
+async def download_progress(q: Q, title, seen_so_far, total_len):
+ if seen_so_far is not None and total_len is not None:
+ percentage = seen_so_far / total_len
+ value = percentage
+ caption = (
+ f"{convert_file_size(seen_so_far)} of "
+ f"{convert_file_size(total_len)} "
+ f"({percentage * 100:.2f}%)"
+ )
+ else:
+ value = None
+ caption = None
+
+ q.page["meta"].dialog = ui.dialog(
+ title=title,
+ blocking=True,
+ items=[ui.progress(label="Please be patient...", caption=caption, value=value)],
+ )
+ await q.page.save()
+
+
+async def azure_download(
+ q: Q, conn_string: str, container: str, filename: str
+) -> Tuple[str, str]:
+ """Downloads a file from azure
+
+ Args:
+ q: Q
+ conn_string: connection string
+ container: container
+ filename: filename to download
+
+ Returns:
+ Download location path
+ """
+
+ service_client = DataLakeServiceClient.from_connection_string( # type: ignore
+ conn_string
+ )
+
+ container_split = container.split(os.sep)
+ container = container_split[0]
+
+ file_system_client = service_client.get_file_system_client(file_system=container)
+
+ filename_split = filename.split(os.sep)
+ folder = "/".join(filename_split[:-1])
+ filename = filename_split[-1]
+
+ rnd_folder = "".join(random.choice(string.digits) for i in range(10))
+ azure_path = f"{get_data_dir(q)}/tmp_{rnd_folder}"
+ azure_path = get_valid_temp_data_folder(q, azure_path)
+
+ if os.path.exists(azure_path):
+ shutil.rmtree(azure_path)
+ os.makedirs(azure_path, exist_ok=True)
+
+ file = f"{azure_path}/{filename}"
+
+ file_client = file_system_client.get_file_client(f"{folder}/{filename}")
+
+ download = file_client.download_file()
+
+ blocks = download.chunks()
+
+ seen_so_far = 0
+ with open(file, "wb") as local_file:
+ for block in blocks:
+ local_file.write(block)
+
+ seen_so_far += len(block)
+
+ await download_progress(
+ q,
+ "Azure Datalake file download in progress",
+ seen_so_far,
+ len(blocks), # type: ignore[arg-type]
+ )
+
+ extract_if_zip(file, azure_path)
+
+ return azure_path, "".join(filename.split(".")[:-1])
+
+
+async def local_download(q: Q, filename: str) -> Tuple[str, str]:
+ """Downloads a file from local path
+
+ Args:
+ q: Q
+ filename: filename to download
+
+ Returns:
+ Download location path
+ """
+
+ local_path = f"{get_data_dir(q)}/tmp"
+ local_path = get_valid_temp_data_folder(q, local_path)
+
+ if os.path.exists(local_path):
+ shutil.rmtree(local_path)
+ os.makedirs(local_path, exist_ok=True)
+
+ shutil.copy2(filename, local_path)
+
+ zip_file = f"{local_path}/{filename.split('/')[-1]}"
+ extract_if_zip(zip_file, local_path)
+
+ return local_path, "".join(filename.split("/")[-1].split(".")[:-1])
+
+
+async def kaggle_download(
+ q: Q, command: str, kaggle_access_key: str, kaggle_secret_key: str
+) -> Tuple[str, str]:
+ """ "Downloads a file from kaggle
+
+ Args:
+ q: Q
+ command: kaggle api command
+ kaggle_access_key: kaggle access key
+ kaggle_secret_key: kaggle secret key
+
+ Returns:
+ Download location path
+ """
+
+ kaggle_path = f"{get_data_dir(q)}/tmp"
+ kaggle_path = get_valid_temp_data_folder(q, kaggle_path)
+
+ if os.path.exists(kaggle_path):
+ shutil.rmtree(kaggle_path)
+ os.makedirs(kaggle_path, exist_ok=True)
+
+ command_run = []
+ if kaggle_access_key != "":
+ command_run += ["env", f"KAGGLE_USERNAME={kaggle_access_key}"]
+ if kaggle_secret_key != "":
+ command_run += ["env", f"KAGGLE_KEY={kaggle_secret_key}"]
+ command_run += command.split(" ") + ["-p", kaggle_path]
+ subprocess.run(command_run)
+
+ try:
+ zip_file = f"{kaggle_path}/{command.split(' ')[-1].split('/')[-1]}.zip"
+ with zipfile.ZipFile(zip_file, "r") as zip_ref:
+ zip_ref.extractall(kaggle_path)
+ os.remove(zip_file)
+ except Exception:
+ pass
+
+ clean_macos_artifacts(kaggle_path)
+
+ for f in glob.glob(kaggle_path + "/*"):
+ if ".zip" in f and zip_file not in f:
+ with zipfile.ZipFile(f, "r") as zip_ref:
+ zip_ref.extractall(kaggle_path)
+
+ clean_macos_artifacts(kaggle_path)
+
+ return kaggle_path, "".join(command.split(" ")[-1].split("/")[-1])
+
+
+async def huggingface_download(
+ q: Q, huggingface_dataset: str, huggingface_split: str
+) -> Tuple[str, str]:
+ """Downloads a dataset from Hugging Face
+
+ Args:
+ q: Q
+ huggingface_dataset: HF dataset
+ huggingface_split: Dataset split
+
+ Returns:
+ Tuple of download folder path and file name
+ """
+
+ huggingface_path = f"{get_data_dir(q)}/tmp"
+ huggingface_path = get_valid_temp_data_folder(q, huggingface_path)
+
+ if os.path.exists(huggingface_path):
+ shutil.rmtree(huggingface_path)
+ os.makedirs(huggingface_path, exist_ok=True)
+
+ token = q.client["dataset/import/huggingface_api_token"]
+ if token == "":
+ token = None
+
+ # Download the dataset
+ dataset = load_dataset(
+ huggingface_dataset, split=huggingface_split, token=token
+ ).to_pandas()
+ filename = f"{huggingface_dataset.split('/')[-1]}_{huggingface_split}"
+ dataset_path = os.path.join(huggingface_path, f"{filename}.pq")
+ dataset.to_parquet(dataset_path, index=False)
+
+ return huggingface_path, filename
+
+
+async def h2o_drive_download(q: Q, filename: str) -> Tuple[str, str]:
+ """Downloads a file from H2O Drive
+
+ Args:
+ q: Q
+ filename: filename to download
+
+ Returns:
+ Tuple of download folder path and file name without extension
+ """
+ drive = await h2o_drive.Drive(q.auth.access_token)
+ my_home_space = drive.my_bucket().home()
+
+ local_path = f"{get_data_dir(q)}/tmp"
+ local_path = get_valid_temp_data_folder(q, local_path)
+
+ if os.path.exists(local_path):
+ shutil.rmtree(local_path)
+ os.makedirs(local_path, exist_ok=True)
+
+ zip_file = f"{local_path}/{filename.split('/')[-1]}"
+
+ await my_home_space.download_file(filename, zip_file)
+
+ extract_if_zip(zip_file, local_path)
+
+ return local_path, "".join(filename.split("/")[-1].split(".")[:-1])
+
+
+async def h2o_drive_file_options(q: Q) -> List[str] | Exception:
+ """ "Returns all zip files in the H2O Drive
+
+ Args:
+
+ Returns:
+ List of zip files in bucket or Exception in case of access error
+
+ """
+ try:
+ drive = await h2o_drive.Drive(q.auth.access_token)
+ my_home_space = drive.my_bucket().home()
+
+ files = []
+ for h2o_drive_file in await my_home_space.list_objects():
+ files.append(h2o_drive_file.key)
+
+ files = filter_valid_files(files)
+ return files
+
+ except Exception as e:
+ logger.warning(f"Can't connect to H2O Drive: {e}")
+ return e
+
+
+def clean_error(error: str):
+ """Cleans some error messages
+
+ Args:
+ error: original error message
+
+ Returns:
+ Cleaned error message
+
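+ Example (illustrative):
+ >>> clean_error("UNIQUE constraint failed: datasets.name")
+ 'Dataset name already exists, please choose a different one.'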
+ """
+
+ if "UNIQUE constraint failed: datasets.name" in error:
+ error = "Dataset name already exists, please choose a different one."
+ elif "No such file or directory" in error:
+ error = "Import failed."
+
+ return error
+
+
+def remove_model_type(problem_type: str) -> str:
+ """Removes model type from problem type
+
+ Args:
+ problem_type: problem type
+
+ Returns:
+ Cleaned raw problem type
+
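+ Example (illustrative; "rlhf" stands in for any model type suffix):
+ >>> remove_model_type("text_causal_language_modeling_config_rlhf")
+ 'text_causal_language_modeling_config'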
+ """
+ if "_config_" in problem_type:
+ problem_type = problem_type.split("_config_")[0] + "_config"
+ return problem_type
+
+
+def add_model_type(problem_type: str, model_type: str) -> str:
+ """Adds model type to problem type
+
+ Args:
+ problem_type: problem type
+ model_type: model type
+
+ Returns:
+ problem type including model type
+
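+ Example (illustrative; "rlhf" stands in for any model type suffix):
+ >>> add_model_type("text_causal_language_modeling_config", "rlhf")
+ 'text_causal_language_modeling_config_rlhf'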
+ """
+ problem_type = remove_model_type(problem_type)
+ if model_type != "":
+ problem_type = f"{problem_type}_{model_type}"
+ return problem_type
+
+
+def get_problem_categories() -> List[Tuple[str, str]]:
+ """Returns all available problem category choices
+
+ Returns:
+ List of tuples, each containing the raw problem category name
+ and the problem category name as label.
+ """
+
+ problem_categories: List[Tuple[str, str]] = []
+ for c in default_cfg.problem_categories:
+ cc = (c, make_label(c))
+ problem_categories.append(cc)
+ return problem_categories
+
+
+def get_problem_types(category: Optional[str] = None) -> List[Tuple[str, str]]:
+ """Returns all problem type choices
+
+ Args:
+ category: optional category to filter for
+
+ Returns:
+ List of tuples, each containing the raw problem type name
+ and the problem type name as label.
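+
+ For example, a raw problem type "text_causal_language_modeling_config"
+ is rendered with the label "Causal Language Modeling".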
+ """
+ problem_types: List[Tuple[str, str]] = []
+ for c in default_cfg.problem_types:
+ if category is not None and not c.startswith(category):
+ continue
+ cc = (c, make_label("_".join(c.split("_")[1:]).replace("_config", "")))
+ problem_types.append(cc)
+
+ return problem_types
+
+
+def get_model_types(problem_type: str) -> List[Tuple[str, str]]:
+ """Returns all model types for a given problem type
+
+ Args:
+ problem_type: problem type name
+
+ Returns:
+ List of model types and their labels
+ """
+
+ model_types = []
+ for c in sorted(os.listdir("llm_studio/python_configs")):
+ if "_config_" not in c:
+ continue
+ if problem_type in c:
+ c = c.replace(".py", "").split("_config_")[1]
+ model_types.append((c, make_label(c[1:])))
+
+ return model_types
+
+
+def get_dataset(
+ k: str,
+ v: Any,
+ q: Q,
+ limit: Optional[List[str]] = None,
+ pre: str = "experiment/start",
+) -> Tuple[List[str], Any]:
+ """
+ Get the dataset and the preliminary default value for a setting.
+ The default value may still be overridden by the `possible_values.DatasetValue`
+ instances if it is not a valid choice.
+
+ Args:
+ k: key for the setting
+ v: value for the setting
+ q: Q
+ limit: list of keys to limit
+ pre: prefix for client key
+
+ Returns:
+ The dataset dict (or None) and the preliminary default value.
+ """
+
+ if q.client[f"{pre}/dataset"] is None:
+ dataset_id = 1
+ else:
+ dataset_id = int(q.client[f"{pre}/dataset"])
+
+ dataset = q.client.app_db.get_dataset(dataset_id)
+
+ if dataset is None:
+ return None, ""
+
+ dataset = dataset.__dict__
+
+ dataset_cfg = load_config_yaml(dataset["config_file"]).dataset.__dict__
+
+ for kk, vv in dataset_cfg.items():
+ dataset[kk] = vv
+
+ dataset["dataframe"] = q.client[f"{pre}/cfg/dataframe"]
+
+ if q.client[f"{pre}/cfg_mode/from_dataset"] and (limit is None or k in limit):
+ v = dataset[k] if k in dataset else v
+
+ if limit is not None and k not in limit:
+ return None, v
+
+ # do not reset dataset settings when changing expert mode
+ if q.client[f"{pre}/cfg_mode/from_dataset_args"]:
+ v = q.client[f"{pre}/cfg/{k}"]
+
+ return dataset, v
+
+
+def _get_ui_element(
+ k: str,
+ v: Any,
+ poss_values: Any,
+ type_annotation: Type,
+ tooltip: str,
+ trigger: bool,
+ q: Q,
+ pre: str = "",
+) -> Any:
+ """Returns a single ui element for a given config entry
+
+ Args:
+ k: key
+ v: value
+ poss_values: possible values
+ type_annotation: type annotation
+ tooltip: tooltip
+ trigger: flag for triggering the element
+ q: Q
+ pre: optional prefix for ui key
+
+ Returns:
+ Ui element
+
+ """
+ assert type_annotation in KNOWN_TYPE_ANNOTATIONS
+
+ # Overwrite current values with values from yaml
+ if pre == "experiment/start/cfg/":
+ if q.args["experiment/upload_yaml"] and "experiment/yaml_data" in q.client:
+ if (k in q.client["experiment/yaml_data"].keys()) and (
+ k != "experiment_name"
+ ):
+ q.client[pre + k] = q.client["experiment/yaml_data"][k]
+
+ if type_annotation in (int, float):
+ if not isinstance(poss_values, possible_values.Number):
+ raise ValueError(
+ "Type annotations `int` and `float` need a `possible_values.Number`!"
+ )
+
+ val = q.client[pre + k] if q.client[pre + k] is not None else v
+
+ min_val = (
+ type_annotation(poss_values.min) if poss_values.min is not None else None
+ )
+ max_val = (
+ type_annotation(poss_values.max) if poss_values.max is not None else None
+ )
+
+ # Overwrite default maximum values with user_settings
+ if f"set_max_{k}" in q.client:
+ max_val = q.client[f"set_max_{k}"]
+
+ if isinstance(poss_values.step, (float, int)):
+ step_val = type_annotation(poss_values.step)
+ elif poss_values.step == "decad" and val < 1:
+ step_val = 10 ** -len(str(int(1 / val)))
+ else:
+ step_val = 1
+
+ if min_val is None or max_val is None:
+ t = [
+ # TODO: spinbox `trigger` https://github.com/h2oai/wave/pull/598
+ ui.spinbox(
+ name=pre + k,
+ label=make_label(k),
+ value=val,
+ # TODO: open issue in wave to make spinbox optionally unbounded
+ max=max_val if max_val is not None else 1e12,
+ min=min_val if min_val is not None else -1e12,
+ step=step_val,
+ tooltip=tooltip,
+ )
+ ]
+ else:
+ t = [
+ ui.slider(
+ name=pre + k,
+ label=make_label(k),
+ value=val,
+ min=min_val,
+ max=max_val,
+ step=step_val,
+ tooltip=tooltip,
+ trigger=trigger,
+ )
+ ]
+ elif type_annotation == bool:
+ val = q.client[pre + k] if q.client[pre + k] is not None else v
+
+ t = [
+ ui.toggle(
+ name=pre + k,
+ label=make_label(k),
+ value=val,
+ tooltip=tooltip,
+ trigger=trigger,
+ )
+ ]
+ elif type_annotation in (str, Tuple[str, ...]):
+ if poss_values is None:
+ val = q.client[pre + k] if q.client[pre + k] is not None else v
+
+ title_label = make_label(k)
+
+ t = [
+ ui.textbox(
+ name=pre + k,
+ label=title_label,
+ value=val,
+ required=False,
+ password=False,
+ tooltip=tooltip,
+ trigger=trigger,
+ multiline=False,
+ )
+ ]
+ else:
+ if isinstance(poss_values, possible_values.String):
+ options = list(poss_values.values)
+ allow_custom = poss_values.allow_custom
+ placeholder = poss_values.placeholder
+ else:
+ options = list(poss_values)
+ allow_custom = False
+ placeholder = None
+
+ is_tuple = type_annotation == Tuple[str, ...]
+
+ v = q.client[pre + k] if q.client[pre + k] is not None else v
+ if isinstance(v, str):
+ v = [v]
+
+ # `v` might be a tuple of strings here but Wave only accepts lists
+ v = list(v)
+
+ if allow_custom:
+ if not all(isinstance(option, str) for option in options):
+ raise ValueError(
+ "Combobox cannot handle (value, name) pairs for options."
+ )
+ if is_tuple:
+ choices = list(set(list(options) + list(v)))
+ else:
+ if isinstance(v, list):
+ for option in v:
+ if option not in options:
+ options.append(option)
+ choices = list(options)
+ else:
+ raise ValueError("Expected a list.")
+
+ t = [
+ ui.combobox(
+ name=pre + k,
+ label=make_label(k),
+ value=None if is_tuple else v[0],
+ values=v if is_tuple else None,
+ choices=choices,
+ tooltip=tooltip,
+ placeholder=placeholder,
+ trigger=trigger,
+ )
+ ]
+ else:
+ choices = [
+ (
+ ui.choice(option, option)
+ if isinstance(option, str)
+ else ui.choice(option[0], option[1])
+ )
+ for option in options
+ ]
+
+ t = [
+ ui.dropdown(
+ name=pre + k,
+ label=make_label(k),
+ value=None if is_tuple else v[0],
+ values=v if is_tuple else None,
+ required=False,
+ choices=choices,
+ tooltip=tooltip,
+ placeholder=placeholder,
+ trigger=trigger,
+ )
+ ]
+
+ return t
+
+
+def get_dataset_elements(cfg: DefaultConfigProblemBase, q: Q) -> List:
+ """For a given configuration setting return the according dataset ui components.
+
+ Args:
+ cfg: DefaultConfigProblemBase configuration settings
+ q: Q
+
+ Returns:
+ List of ui elements
+ """
+
+ cfg_dict = cfg.__dict__
+ type_annotations = cfg.get_annotations()
+
+ cfg_dict = {key: cfg_dict[key] for key in cfg._get_order()}
+
+ items = []
+ for k, v in cfg_dict.items():
+ # Show some fields only during dataset import
+ if k.startswith("_") or cfg._get_visibility(k) == -1:
+ continue
+
+ if not (
+ check_dependencies(
+ cfg=cfg, pre="dataset/import", k=k, q=q, dataset_import=True
+ )
+ ):
+ continue
+ tooltip = cfg._get_tooltips(k)
+
+ trigger = False
+ if k in default_cfg.dataset_trigger_keys or k == "data_format":
+ trigger = True
+
+ if type_annotations[k] in KNOWN_TYPE_ANNOTATIONS:
+ if k in default_cfg.dataset_keys:
+ dataset = cfg_dict.copy()
+ dataset["path"] = q.client["dataset/import/path"]
+
+ for kk, vv in q.client["dataset/import/cfg"].__dict__.items():
+ dataset[kk] = vv
+
+ for trigger_key in default_cfg.dataset_trigger_keys:
+ if q.client[f"dataset/import/cfg/{trigger_key}"] is not None:
+ dataset[trigger_key] = q.client[
+ f"dataset/import/cfg/{trigger_key}"
+ ]
+ if (
+ q.client["dataset/import/cfg/data_format"] is not None
+ and k == "data_format"
+ ):
+ v = q.client["dataset/import/cfg/data_format"]
+
+ dataset["dataframe"] = q.client["dataset/import/cfg/dataframe"]
+
+ type_annotation = type_annotations[k]
+ poss_values, v = cfg._get_possible_values(
+ field=k,
+ value=v,
+ type_annotation=type_annotation,
+ dataset_fn=lambda k, v: (
+ dataset,
+ dataset[k] if k in dataset else v,
+ ),
+ )
+
+ if k == "train_dataframe" and v != "None":
+ q.client["dataset/import/cfg/dataframe"] = read_dataframe(v)
+
+ q.client[f"dataset/import/cfg/{k}"] = v
+
+ t = _get_ui_element(
+ k,
+ v,
+ poss_values,
+ type_annotation,
+ tooltip=tooltip,
+ trigger=trigger,
+ q=q,
+ pre="dataset/import/cfg/",
+ )
+ else:
+ t = []
+ elif dataclasses.is_dataclass(v):
+ elements_group = get_dataset_elements(cfg=v, q=q)
+ t = elements_group
+ else:
+ raise _get_type_annotation_error(v, type_annotations[k])
+
+ items += t
+
+ return items
+
+
+def check_dependencies(
+ cfg: DefaultConfigProblemBase, pre: str, k: str, q: Q, dataset_import: bool = False
+):
+ """Checks all dependencies for a given key
+
+ Args:
+ cfg: DefaultConfigProblemBase configuration settings
+ pre: prefix for client keys
+ k: key to be checked
+ q: Q
+ dataset_import: flag whether dependencies are checked in dataset import
+
+ Returns:
+ True if dependencies are met
+ """
+
+ dependencies = cfg._get_nesting_dependencies(k)
+
+ if dependencies is None:
+ dependencies = []
+ # Do not respect some nesting during the dataset import
+ if dataset_import:
+ dependencies = [x for x in dependencies if x.key not in ["validation_strategy"]]
+ # Do not respect some nesting during experiment creation
+ else:
+ dependencies = [x for x in dependencies if x.key not in ["data_format"]]
+
+ if len(dependencies) > 0:
+ all_deps = 0
+ for d in dependencies:
+ if (
+ not dataset_import
+ and q.client["experiment/start/grid_search"]
+ and cfg._get_grid_search_values(d.key) is not None
+ ):
+ dependency_values = q.client[f"{pre}/cfg/{d.key}_grid_search"]
+ else:
+ if isinstance(q.client[f"{pre}/cfg/{d.key}"], (list, tuple)):
+ dependency_values = q.client[f"{pre}/cfg/{d.key}"]
+ else:
+ dependency_values = [q.client[f"{pre}/cfg/{d.key}"]]
+
+ all_deps += d.check(dependency_values)
+ return all_deps == len(dependencies)
+
+ return True
+
+
+def is_visible(k: str, cfg: DefaultConfigProblemBase, q: Q) -> bool:
+ """Returns a flag whether a given key should be visible on UI.
+
+ Args:
+ k: name of the hyperparameter
+ cfg: configuration settings,
+ q: Q
+ Returns:
+ True if the key should be visible, False otherwise
+ """
+
+ visibility = 1
+
+ if visibility < cfg._get_visibility(k):
+ return False
+
+ return True
+
+
+def get_grid_value(v: Any, type_annotation: Any) -> List[str]:
+ """Handles .0 for floats in the grid search
+
+ Args:
+ v: value of the hyperparameter
+ type_annotation: type of the parameter
+ Returns:
+ List with a grid search element
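+
+ Example (illustrative):
+ >>> get_grid_value(2.0, float)
+ ['2']
+ >>> get_grid_value(0.5, float)
+ ['0.5']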
+ """
+
+ v: str = str(v)
+
+ if type_annotation == float and v.endswith(".0"):
+ v = v[:-2]
+
+ return [v]
+
+
+def get_ui_elements_for_cfg(
+ cfg: DefaultConfigProblemBase,
+ q: Q,
+ limit: Optional[List[str]] = None,
+ pre: str = "experiment/start",
+) -> List:
+ """For a given configuration setting return the according ui components.
+
+ Args:
+ cfg: configuration settings
+ q: Q
+ limit: optional list of keys to limit
+ pre: prefix for client keys
+
+ Returns:
+ List of ui elements
+ """
+ items = []
+
+ cfg_dict = cfg.__dict__
+ type_annotations = cfg.get_annotations()
+
+ cfg_dict = {key: cfg_dict[key] for key in cfg._get_order()}
+
+ for k, v in cfg_dict.items():
+ if k.startswith("_") or cfg._get_visibility(k) < 0:
+ if q.client[f"{pre}/cfg_mode/from_cfg"]:
+ q.client[f"{pre}/cfg/{k}"] = v
+ continue
+ else:
+ type_annotation = type_annotations[k]
+ poss_values, v = cfg._get_possible_values(
+ field=k,
+ value=v,
+ type_annotation=type_annotation,
+ dataset_fn=partial(get_dataset, q=q, limit=limit, pre=pre),
+ )
+
+ if k in default_cfg.dataset_keys:
+ # reading dataframe
+ if k == "train_dataframe" and (v != ""):
+ q.client[f"{pre}/cfg/dataframe"] = read_dataframe(v, meta_only=True)
+ q.client[f"{pre}/cfg/{k}"] = v
+ elif k in default_cfg.dataset_extra_keys:
+ _, v = get_dataset(k, v, q=q, limit=limit, pre=pre)
+ q.client[f"{pre}/cfg/{k}"] = v
+ elif q.client[f"{pre}/cfg_mode/from_cfg"]:
+ q.client[f"{pre}/cfg/{k}"] = v
+ q.client[f"{pre}/cfg/{k}_grid_search"] = get_grid_value(
+ v, type_annotation
+ )
+ # Overwrite current default values with user_settings
+ if q.client[f"{pre}/cfg_mode/from_default"] and f"default_{k}" in q.client:
+ q.client[f"{pre}/cfg/{k}"] = q.client[f"default_{k}"]
+
+ if not (check_dependencies(cfg=cfg, pre=pre, k=k, q=q)):
+ continue
+
+ if not is_visible(k=k, cfg=cfg, q=q):
+ if type_annotation not in KNOWN_TYPE_ANNOTATIONS:
+ _ = get_ui_elements_for_cfg(cfg=v, q=q, limit=limit, pre=pre)
+ elif q.client[f"{pre}/cfg_mode/from_cfg"]:
+ q.client[f"{pre}/cfg/{k}"] = v
+
+ continue
+
+ tooltip = cfg._get_tooltips(k)
+
+ trigger = False
+ q.client[f"{pre}/trigger_ks"] = ["train_dataframe"]
+ q.client[f"{pre}/trigger_ks"] += cfg._get_nesting_triggers()
+ if k in q.client[f"{pre}/trigger_ks"]:
+ trigger = True
+
+ if (
+ pre == "experiment/start"
+ and q.client[f"{pre}/grid_search"]
+ and cfg._get_grid_search_values(k)
+ ):
+
+ grid_name = f"{pre}/cfg/{k}_grid_search"
+ add_choice = []
+
+ # Value is not in the grid range, add to choices
+ if v not in cfg._get_grid_search_values(k):
+ add_choice = get_grid_value(v, type_annotation)
+
+ v = get_grid_value(v, type_annotation)
+ v = q.client[grid_name] if q.client[grid_name] is not None else v
+
+ t = [
+ ui.message_bar(
+ type="info",
+ text=f"**{make_label(k)}** is a grid search hyperparameter.",
+ )
+ ]
+
+ # len(add_choice) == 1 is added due to a strange bug where iscustom
+ # gets overwritten to None in special cases
+ allow_custom = cfg._get_grid_search_iscustom(k) or len(add_choice) == 1
+
+ if allow_custom:
+ cust_choices: list[str] = [
+ str(c) for c in cfg._get_grid_search_values(k)
+ ] + add_choice
+ t += [
+ ui.combobox(
+ name=grid_name,
+ label=make_label(k) + " (grid search)",
+ values=v,
+ required=False,
+ choices=cust_choices,
+ tooltip=tooltip,
+ trigger=trigger,
+ )
+ ]
+ else:
+ choices: list[Choice] = [
+ ui.choice(str(c), str(c)) for c in cfg._get_grid_search_values(k)
+ ]
+
+ t += [
+ ui.dropdown(
+ name=grid_name,
+ label=make_label(k) + " (grid search)",
+ values=v,
+ required=False,
+ choices=choices,
+ tooltip=tooltip,
+ trigger=trigger,
+ )
+ ]
+ elif type_annotation in KNOWN_TYPE_ANNOTATIONS:
+ if limit is not None and k not in limit:
+ continue
+
+ t = _get_ui_element(
+ k=k,
+ v=v,
+ poss_values=poss_values,
+ type_annotation=type_annotation,
+ tooltip=tooltip,
+ trigger=trigger,
+ q=q,
+ pre=f"{pre}/cfg/",
+ )
+ elif dataclasses.is_dataclass(v):
+ if limit is not None and k in limit:
+ elements_group = get_ui_elements_for_cfg(
+ cfg=v, q=q, limit=None, pre=pre
+ )
+ else:
+ elements_group = get_ui_elements_for_cfg(
+ cfg=v, q=q, limit=limit, pre=pre
+ )
+
+ if k == "dataset" and pre != "experiment/start":
+ # get all the datasets available
+ df_datasets = q.client.app_db.get_datasets_df()
+ if not q.client[f"{pre}/dataset"]:
+ if len(df_datasets) >= 1:
+ q.client[f"{pre}/dataset"] = str(df_datasets["id"].iloc[-1])
+ else:
+ q.client[f"{pre}/dataset"] = "1"
+
+ elements_group = [
+ ui.dropdown(
+ name=f"{pre}/dataset",
+ label="Dataset",
+ required=True,
+ value=q.client[f"{pre}/dataset"],
+ choices=[
+ ui.choice(str(row["id"]), str(row["name"]))
+ for _, row in df_datasets.iterrows()
+ ],
+ trigger=True,
+ tooltip=tooltip,
+ )
+ ] + elements_group
+
+ if len(elements_group) > 0:
+ t = [
+ ui.separator(
+ name=k + "_expander", label=make_label(k, appendix=" settings")
+ )
+ ]
+ else:
+ t = []
+
+ t += elements_group
+ else:
+ raise _get_type_annotation_error(v, type_annotations[k])
+
+ items += t
+
+ q.client[f"{pre}/prev_dataset"] = q.client[f"{pre}/dataset"]
+
+ return items
+
+
+def parse_ui_elements(
+ cfg: DefaultConfigProblemBase, q: Q, limit: List | str = "", pre: str = ""
+) -> Any:
+ """Sets configuration settings with arguments from app
+
+ Args:
+ cfg: configuration
+ q: Q
+ limit: optional list of keys to limit
+ pre: prefix for keys
+
+ Returns:
+ Configuration with settings overwritten from arguments
+ """
+
+ cfg_dict = cfg.__dict__
+ type_annotations = cfg.get_annotations()
+ for k, v in cfg_dict.items():
+ if k.startswith("_") or cfg._get_visibility(k) == -1:
+ continue
+
+ if (
+ len(limit) > 0
+ and k not in limit
+ and type_annotations[k] in KNOWN_TYPE_ANNOTATIONS
+ ):
+ continue
+
+ if (
+ pre == "experiment/start/cfg/"
+ and q.client["experiment/start/grid_search"]
+ and cfg._get_grid_search_values(k)
+ ):
+ value = q.client[f"{pre}{k}_grid_search"]
+ setattr(cfg, k, value)
+ elif type_annotations[k] in KNOWN_TYPE_ANNOTATIONS:
+ value = q.client[f"{pre}{k}"]
+
+ if type_annotations[k] == Tuple[str, ...]:
+ if isinstance(value, str):
+ value = [value]
+ elif value is None:
+ value = ()
+ else:
+ value = tuple(value)
+ if isinstance(type_annotations[k], str) and isinstance(value, list):
+ # fix for combobox outputting custom values as list in wave 0.22
+ value = value[0]
+ setattr(cfg, k, value)
+ elif dataclasses.is_dataclass(v):
+ setattr(cfg, k, parse_ui_elements(cfg=v, q=q, limit=limit, pre=pre))
+ else:
+ raise _get_type_annotation_error(v, type_annotations[k])
+
+ return cfg
+
+
+def get_experiment_status(path: str) -> Tuple[str, str]:
+ """Get status information from experiment.
+
+ Args:
+ path: path to experiment folder
+ Returns:
+ Tuple of experiment status and experiment info
+ """
+
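+ # flags.json is assumed to look roughly like this (illustrative):
+ # {"status": "running", "info": "some optional detail"}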
+ try:
+ flag_json_path = f"{path}/flags.json"
+ if not os.path.exists(flag_json_path):
+ logger.debug(f"File {flag_json_path} does not exist yet.")
+ return "none", "none"
+ with open(flag_json_path) as file:
+ flags = json.load(file)
+ status = flags.get("status", "none")
+ info = flags.get("info", "none")
+
+ # Collect failed statuses from all GPUs
+ single_gpu_failures = []
+ for flag_json_path in glob.glob(f"{path}/flags?*.json"):
+ if os.path.exists(flag_json_path):
+ with open(flag_json_path) as file:
+ flags = json.load(file)
+ status = flags.get("status", "none")
+ info = flags.get("info", "none")
+
+ if status == "failed":
+ single_gpu_failures.append(info)
+ # Get the most detailed failure info
+ if len(single_gpu_failures) > 0:
+ detailed_gpu_failures = [x for x in single_gpu_failures if x != "See logs"]
+ if len(detailed_gpu_failures) > 0:
+ return "failed", detailed_gpu_failures[0]
+ else:
+ return "failed", single_gpu_failures[0]
+ return status, info
+
+ except Exception:
+ logger.debug("Could not get experiment status:", exc_info=True)
+ return "none", "none"
+
+
+def get_experiments_status(df: DataFrame) -> Tuple[List[str], List[str]]:
+ """For each experiment in given dataframe, return the status of the process
+
+ Args:
+ df: experiment dataframe
+
+ Returns:
+ A list with each status and a list with all infos
+ """
+
+ status_all = []
+ info_all = []
+ for idx, row in df.iterrows():
+ status, info = get_experiment_status(row.path)
+
+ if info == "none":
+ info = ""
+ info_all.append(info)
+
+ pid = row.process_id
+
+ zombie = False
+ try:
+ p = psutil.Process(pid)
+ zombie = p.status() == "zombie"
+ except psutil.NoSuchProcess:
+ pass
+ if not psutil.pid_exists(pid) or zombie:
+ running = False
+ else:
+ running = True
+
+ if running:
+ if status == "none":
+ status_all.append("queued")
+ elif status == "running":
+ status_all.append("running")
+ elif status == "queued":
+ status_all.append("queued")
+ elif status == "finished":
+ status_all.append("finished")
+ elif status == "stopped":
+ status_all.append("stopped")
+ elif status == "failed":
+ status_all.append("failed")
+ else:
+ status_all.append("finished")
+ else:
+ if status == "none":
+ status_all.append("failed")
+ elif status == "queued":
+ status_all.append("failed")
+ elif status == "running":
+ status_all.append("failed")
+ elif status == "finished":
+ status_all.append("finished")
+ elif status == "stopped":
+ status_all.append("stopped")
+ elif status == "failed":
+ status_all.append("failed")
+ else:
+ status_all.append("failed")
+
+ return status_all, info_all
+
+
+def get_experiments_info(df: DataFrame, q: Q) -> DefaultDict:
+ """For each experiment in given dataframe, return certain configuration settings
+
+ Args:
+ df: experiment dataframe
+ q: Q
+
+ Returns:
+ A dictionary of lists of additional information
+ """
+
+ info = defaultdict(list)
+ for _, row in df.iterrows():
+ try:
+ # load_config_yaml issues a warning if the yaml file contains keys
+ # that are no longer part of the dataclass fields.
+ # This can happen if the codebase has changed since the experiment was run.
+ # Ignore those warnings here
+
+ original_level = logging.getLogger().level
+ logging.getLogger().setLevel(logging.ERROR)
+ try:
+ cfg = load_config_yaml(f"{row.path}/cfg.yaml").__dict__
+ finally:
+ logging.getLogger().setLevel(original_level)
+
+ except Exception:
+ cfg = None
+
+ metric = ""
+ loss_function = ""
+
+ if cfg is not None:
+ try:
+ metric = cfg["prediction"].metric
+ loss_function = cfg["training"].loss_function
+ except KeyError:
+ metric = ""
+ loss_function = ""
+
+ charts_db_path = os.path.join(row.path, "charts.db")
+ if os.path.exists(charts_db_path):
+ with SqliteDict(charts_db_path) as logs:
+ if "internal" in logs.keys():
+ if "current_step" in logs["internal"].keys():
+ curr_step = int(logs["internal"]["current_step"]["values"][-1])
+ else:
+ curr_step = 0
+
+ if "total_training_steps" in logs["internal"].keys():
+ total_training_steps = int(
+ logs["internal"]["total_training_steps"]["values"][-1]
+ )
+ else:
+ total_training_steps = 0
+
+ if "current_val_step" in logs["internal"].keys():
+ curr_val_step = int(
+ logs["internal"]["current_val_step"]["values"][-1]
+ )
+ else:
+ curr_val_step = 0
+
+ if "total_validation_steps" in logs["internal"].keys():
+ total_validation_steps = int(
+ logs["internal"]["total_validation_steps"]["values"][-1]
+ )
+ else:
+ total_validation_steps = 0
+
+ curr_total_step = curr_step + curr_val_step
+
+ total_steps = max(total_training_steps + total_validation_steps, 1)
+
+ if (
+ "global_start_time" in logs["internal"].keys()
+ and curr_total_step > 0
+ ):
+ elapsed = (
+ time.time()
+ - logs["internal"]["global_start_time"]["values"][-1]
+ )
+ remaining_steps = total_steps - curr_total_step
+ eta = elapsed * (remaining_steps / curr_total_step)
+ if eta == 0:
+ eta = ""
+ else:
+ # if more than one day, show days
+ # need to subtract 1 day from eta since strftime shows
+ # day of year, which starts counting at 1
+ if eta > 86400:
+ eta = time.strftime(
+ "%-jd %H:%M:%S", time.gmtime(float(eta - 86400))
+ )
+ else:
+ eta = time.strftime("%H:%M:%S", time.gmtime(float(eta)))
+ else:
+ eta = "N/A"
+ else:
+ # Default values for when charts.db is not available
+ # (experiment deleted manually)
+ eta = "N/A"
+ total_steps = 1
+ curr_total_step = 0
+
+ if (
+ "validation" in logs
+ and metric in logs["validation"]
+ and logs["validation"][metric]["values"][-1] is not None
+ ):
+ score_val = np.round(logs["validation"][metric]["values"][-1], 4)
+ else:
+ score_val = ""
+
+ else:
+ logs = {}
+ eta = "N/A"
+ total_steps = 1
+ curr_total_step = 0
+ score_val = ""
+ logger.info(f"Experiment path {charts_db_path} not found.")
+
+ try:
+ dataset = q.client.app_db.get_dataset(row.dataset).name
+ except Exception:
+ dataset = ""
+
+ config_file = make_config_label(row.config_file)
+
+ info["config_file"].append(config_file)
+ info["dataset"].append(dataset)
+ info["loss"].append(loss_function)
+ info["metric"].append(metric)
+ info["eta"].append(eta)
+ info["val metric"].append(score_val)
+ info["progress"].append(f"{np.round(curr_total_step / total_steps, 2)}")
+
+ del cfg
+
+ return info
+
+
+def make_config_label(config_file: str) -> str:
+ """Makes a label from a config file name
+
+ Args:
+ config_file: config file name
+
+ Returns:
+ Label
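+
+ Example (illustrative):
+ >>> make_config_label("text_causal_language_modeling_config.yaml")
+ 'Text Causal Language Modeling'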
+ """
+
+ config_file = config_file.replace(".yaml", "")
+ if "_config_" in config_file:
+ config_file_split = config_file.split("_config_")
+ config_file = (
+ f"{make_label(config_file_split[0])} "
+ f"({make_label(config_file_split[1][1:])})"
+ )
+ else:
+ config_file = make_label(config_file.replace("_config", ""))
+
+ return config_file
+
+
+def get_datasets_info(df: DataFrame, q: Q) -> Tuple[DataFrame, DefaultDict]:
+ """For each dataset in given dataframe, return certain configuration settings
+
+ Args:
+ df: dataset dataframe
+ q: Q
+
+ Returns:
+ A dictionary of lists of additional information
+ """
+
+ info = defaultdict(list)
+ for idx, row in df.iterrows():
+ config_file = q.client.app_db.get_dataset(row.id).config_file
+ path = row.path + "/"
+
+ try:
+ logging_level = logging.getLogger().level
+ logging.getLogger().setLevel(logging.ERROR)
+ cfg = load_config_yaml(config_file)
+ logging.getLogger().setLevel(logging_level)
+ except Exception as e:
+ logger.warning(f"Could not load configuration from {config_file}. {e}")
+ cfg = None
+
+ if cfg is not None:
+ cfg_dataset = cfg.dataset.__dict__
+
+ config_file = make_config_label(row.config_file.replace(path, ""))
+
+ info["problem type"].append(config_file)
+ info["train dataframe"].append(
+ cfg_dataset["train_dataframe"].replace(path, "")
+ )
+ info["validation dataframe"].append(
+ cfg_dataset["validation_dataframe"].replace(path, "")
+ )
+
+ info["labels"].append(cfg.dataset.answer_column)
+
+ del cfg, cfg_dataset
+ else:
+ df = df.drop(idx)
+
+ return df, info
+
+
+def get_experiments(
+ q: Q,
+ status: Union[Optional[str], Optional[List[str]]] = None,
+) -> pd.DataFrame:
+ """Return all experiments given certain restrictions
+
+ Args:
+ q: Q
+ status: option to filter for certain experiment status
+ Returns:
+ experiment df
+ """
+
+ df = q.client.app_db.get_experiments_df()
+
+ info = get_experiments_info(df, q)
+ for k, v in info.items():
+ df[k] = v
+
+ df["status"], df["info"] = get_experiments_status(df)
+
+ if status is not None:
+ if type(status) is str:
+ status = [status]
+ df = df[df["status"].isin(status)]
+
+ if len(df) > 0:
+ # make sure progress is 100% for finished experiments
+ df.loc[df.status == "finished", "progress"] = "1.0"
+
+ # make sure that if experiment is running the progress is at most 99%
+ df.loc[
+ (df.status == "running")
+ & (pd.to_numeric(df.progress, errors="coerce") > 0.99),
+ "progress",
+ ] = ".99"
+
+ df["info"] = np.where(
+ (df["status"] == "running") & (df["eta"] != ""),
+ df["eta"].apply(lambda x: f"ETA: {x}"),
+ df["info"],
+ )
+
+ return df
+
+
+def get_datasets(
+ q: Q,
+ show_experiment_datasets: bool = True,
+) -> pd.DataFrame:
+ """Return all datasets given certain restrictions
+
+ Args:
+ q: Q
+ show_experiment_datasets: whether to also show datasets linked to experiments
+
+ Returns:
+ dataset df
+ """
+
+ df = q.client.app_db.get_datasets_df()
+
+ df, info = get_datasets_info(df, q)
+ for k, v in info.items():
+ df[k] = v
+
+ for type in ["train", "validation"]:
+ col_name = f"{type}_rows"
+ if col_name not in df:
+ continue
+ rows = df[col_name].astype(float).map("{:.0f}".format)
+ del df[col_name]
+ rows[rows == "nan"] = "None"
+
+ if f"{type} dataframe" in df.columns:
+ idx = df.columns.get_loc(f"{type} dataframe") + 1
+ df.insert(idx, f"{type} rows", rows)
+
+ if not show_experiment_datasets:
+ experiment_datasets = get_experiments(q).dataset.unique()
+ df = df.loc[~df["name"].isin(experiment_datasets)]
+
+ return df
+
+
+def filter_grid_search_combination(grid: Dict[str, Any], cfg: Any) -> Dict[str, Any]:
+ """Filters grid search combination in order not to start multiple same experiments.
+
+ Args:
+ grid: grid combination from the full grid search
+ cfg: configuration settings
+
+ Returns:
+ Filtered grid combination after checking the dependencies
+ """
+
+ grid = grid.copy()
+ cfg_dict = cfg.__dict__
+
+ for k, v in cfg_dict.items():
+ if dataclasses.is_dataclass(v):
+ grid = filter_grid_search_combination(grid=grid, cfg=v)
+
+ if k in grid:
+ dependencies = cfg._get_nesting_dependencies(k)
+ if dependencies is None:
+ continue
+
+ if all(
+ [d.key in grid and not d.check([grid[d.key]]) for d in dependencies]
+ ):
+ grid.pop(k)
+
+ return grid
+
+
+def get_grid_search(cfg: Any, q: Q, pre: str) -> Dict[str, List]:
+ """Creates a dictionary with grid search values.
+
+ Args:
+ cfg: configuration settings
+ q: Q
+ pre: prefix for client keys
+ """
+
+ grid_search = {}
+ cfg_dict = cfg.__dict__
+
+ type_annotations = cfg.get_annotations()
+ for k, v in cfg_dict.items():
+ if k.startswith("_") or cfg._get_visibility(k) < 0:
+ continue
+ elif (
+ pre == "experiment/start"
+ and q.client["experiment/start/grid_search"]
+ and cfg._get_grid_search_values(k)
+ ):
+ if type_annotations[k] == bool:
+ grid_search[k] = [True if x == "True" else False for x in v]
+ else:
+ try:
+ grid_search[k] = [type_annotations[k](x) for x in v]
+ except ValueError:
+ raise GridCheckError(f"{make_label(k)}")
+ elif type_annotations[k] in KNOWN_TYPE_ANNOTATIONS:
+ pass
+ elif dataclasses.is_dataclass(v):
+ grid_search.update(get_grid_search(cfg=v, q=q, pre=pre))
+ else:
+ raise _get_type_annotation_error(v, type_annotations[k])
+
+ return grid_search
+
+
+def set_grid_to_cfg(cfg: Any, grid: Dict[str, List]) -> Any:
+ """Sets individual run config for the Grid Search.
+
+ Args:
+ cfg: configuration settings
+ grid: dictionary of a single grid search element
+ Returns:
+ config for the corresponding grid search run
+ """
+
+ cfg_dict = cfg.__dict__
+ type_annotations = cfg.get_annotations()
+
+ for k, v in cfg_dict.items():
+ if k.startswith("_") or cfg._get_visibility(k) < 0:
+ continue
+ elif k in grid and cfg._get_grid_search_values(k):
+ setattr(cfg, k, grid[k])
+ elif type_annotations[k] in KNOWN_TYPE_ANNOTATIONS:
+ pass
+ elif dataclasses.is_dataclass(v):
+ setattr(cfg, k, set_grid_to_cfg(cfg=v, grid=grid))
+ else:
+ raise _get_type_annotation_error(v, type_annotations[k])
+
+ return cfg
+
+
+def start_experiment(
+ cfg: DefaultConfigProblemBase, q: Q, pre: str, gpu_list: Optional[List] = None
+) -> None:
+ """Starts an experiment
+
+ Args:
+ cfg: DefaultConfigProblemBase configuration settings
+ q: Q
+ pre: prefix for client keys
+ gpu_list: list of GPUs available
+ """
+ if gpu_list is None:
+ gpu_list = cfg.environment.gpus
+
+ # Get queue of the processes to wait for
+ running_experiments = get_experiments(q=q)
+ running_experiments = running_experiments[
+ running_experiments.status.isin(["queued", "running"])
+ ]
+ all_process_queue = []
+ for _, row in running_experiments.iterrows():
+ for gpu_id in row["gpu_list"].split(","):
+ if gpu_id in gpu_list:
+ all_process_queue.append(row["process_id"])
+
+ process_queue = list(set(all_process_queue))
+
+ env_vars = {
+ "NEPTUNE_API_TOKEN": q.client["default_neptune_api_token"],
+ "WANDB_API_KEY": q.client["default_wandb_api_token"],
+ "OPENAI_API_KEY": q.client["default_openai_api_token"],
+ "GPT_EVAL_MAX": str(q.client["default_gpt_eval_max"]),
+ "HF_HUB_ENABLE_HF_TRANSFER": str(q.client["default_hf_hub_enable_hf_transfer"]),
+ }
+ if q.client["default_openai_azure"]:
+ env_vars.update(
+ {
+ "OPENAI_API_TYPE": "azure",
+ "OPENAI_API_BASE": q.client["default_openai_api_base"],
+ "OPENAI_API_VERSION": q.client["default_openai_api_version"],
+ "OPENAI_API_DEPLOYMENT_ID": q.client[
+ "default_openai_api_deployment_id"
+ ],
+ }
+ )
+ if q.client["default_huggingface_api_token"]:
+ env_vars.update({"HF_TOKEN": q.client["default_huggingface_api_token"]})
+
+ env_vars = {k: v or "" for k, v in env_vars.items()}
+
+ cfg = copy_config(cfg, q)
+ cfg.output_directory = f"{get_output_dir(q)}/{cfg.experiment_name}/"
+ os.makedirs(cfg.output_directory)
+ save_config_yaml(f"{cfg.output_directory}/cfg.yaml", cfg)
+
+ # Start the training process
+ p = start_process(
+ cfg=cfg, gpu_list=gpu_list, process_queue=process_queue, env_vars=env_vars
+ )
+
+ logger.info(f"Process: {p.pid}, Queue: {process_queue}, GPUs: {gpu_list}")
+
+ experiment = Experiment(
+ name=cfg.experiment_name,
+ mode="train",
+ dataset=q.client[f"{pre}/dataset"],
+ config_file=q.client[f"{pre}/cfg_file"],
+ path=cfg.output_directory,
+ seed=cfg.environment.seed,
+ process_id=p.pid,
+ gpu_list=",".join(gpu_list),
+ )
+
+ q.client.app_db.add_experiment(experiment)
+
+
+def get_frame_stats(frame):
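+ """Computes descriptive statistics for string and numeric columns of a frame.
+
+ Non-string object columns (e.g. arrays or lists) are dropped. Returns the
+ transposed describe() output with an added "unique" count per column, or
+ None if no suitable columns remain.
+ """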
+ non_numeric_cols = frame.select_dtypes(object).columns
+ is_str_cols = [
+ x
+ for x in non_numeric_cols
+ if frame[x].dropna().size and (frame[x].dropna().apply(type) == str).all()
+ ]
+ cols_to_drop = [x for x in non_numeric_cols if x not in is_str_cols]
+
+ if len(cols_to_drop): # drop array/list/non-str object columns
+ frame = frame.drop(columns=cols_to_drop)
+ non_numeric_cols = frame.select_dtypes(object).columns
+
+ if len(frame.columns) == 0:
+ return None
+
+ numeric_cols = [col for col in frame if col not in non_numeric_cols]
+
+ if len(non_numeric_cols) == 0 or len(numeric_cols) == 0:
+ stats = frame.describe()
+ if len(numeric_cols):
+ stats = stats.round(decimals=3)
+ stats.loc["unique"] = frame.nunique() # unique is part of describe for str
+
+ else:
+ stats1 = frame[non_numeric_cols].describe()
+ stats2 = frame[numeric_cols].describe().round(decimals=3)
+
+ stats2.loc["unique"] = frame[numeric_cols].nunique()
+ stats = (
+ stats1.reset_index()
+ .merge(stats2.reset_index(), how="outer", on="index")
+ .fillna("")
+ ).set_index("index")
+
+ stats = stats.T.reset_index().rename(columns={"index": "column"})
+
+ for col in ["count", "unique"]:
+ if col in stats:
+ stats[col] = stats[col].astype(int)
+
+ return stats
+
+
+def dir_file_table(current_path: str) -> pd.DataFrame:
+ results = [".."]
+ try:
+ if os.path.isdir(current_path):
+ files = os.listdir(current_path)
+ files = sorted([f for f in files if not f.startswith(".")], key=str.lower)
+ results.extend(files)
+ except Exception:
+ logger.error(f"Error while listing folder '{current_path}':", exc_info=True)
+
+ return pd.DataFrame({current_path: results})
+
+
+def get_download_link(q: Q, artifact_path: str) -> str:
+ new_path = os.path.relpath(artifact_path, get_output_dir(q))
+ new_path = os.path.join(get_download_dir(q), new_path)
+ url_path = os.path.relpath(new_path, get_output_dir(q))
+
+ if not os.path.exists(new_path):
+ os.makedirs(os.path.dirname(new_path), exist_ok=True)
+ os.symlink(os.path.abspath(artifact_path), os.path.abspath(new_path))
+
+ # return a relative path so that downloads work when the instance is
+ # behind a reverse proxy or being accessed by a public IP in a public
+ # cloud.
+
+ # add the BASE_URL to the path
+ if "H2O_CLOUD_ENVIRONMENT" in os.environ:
+ url_path = f"{os.environ['H2O_WAVE_BASE_URL']}/{url_path}"
+
+ return url_path
+
+
+def check_valid_upload_content(upload_path: str) -> Tuple[bool, str]:
+ if upload_path.endswith("zip"):
+ valid = zipfile.is_zipfile(upload_path)
+ error = "" if valid else "File is not a zip file"
+ else:
+ valid = is_valid_data_frame(upload_path)
+ error = "" if valid else "File does not have valid format"
+
+ if not valid:
+ os.remove(upload_path)
+
+ return valid, error
+
+
+def flatten_dict(d: collections.abc.MutableMapping) -> dict:
+ """
+ Adapted from https://stackoverflow.com/a/6027615
+ Does not work with nesting and multiple keys with the same name!
+
+ Args:
+ d: dict style object
+ Return:
+ A flattened dict
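+
+ Example (illustrative):
+ >>> flatten_dict({"a": 1, "b": {"c": 2}})
+ {'a': 1, 'c': 2}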
+ """
+
+ items: List[Tuple[Any, Any]] = []
+ for k, v in d.items():
+ if isinstance(v, collections.abc.MutableMapping):
+ items.extend(flatten_dict(v).items())
+ else:
+ items.append((k, v))
+ return dict(items)
+
+
+def get_unique_name(expected_name, existing_names, is_invalid_function=None):
+ """
+ Return a new name that does not exist in list of existing names
+
+ Args:
+ expected_name: preferred name
+ existing_names: list of existing names
+ is_invalid_function: optional callable, to determine if the new name is
+ invalid
+ Return:
+ new name
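+
+ Example (illustrative):
+ >>> get_unique_name("data", ["data", "data.1"])
+ 'data.2'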
+ """
+
+ new_name = expected_name
+ cnt = 1
+
+ while new_name in existing_names or (
+ is_invalid_function is not None and is_invalid_function(new_name)
+ ):
+ new_name = f"{expected_name}.{cnt}"
+ cnt += 1
+
+ return new_name
+
+
+def get_unique_dataset_name(q, dataset_name, include_all_folders=True):
+ """
+ Return a dataset name that does not exist yet
+
+ Args:
+ q: Q
+ dataset_name: preferred dataset name
+ include_all_folders: whether to also consider all (temp) dataset folders
+ Return:
+ new dataset_name
+ """
+ datasets_df = q.client.app_db.get_datasets_df()
+
+ existing_names = datasets_df["name"].values.tolist()
+ if include_all_folders:
+ existing_names.extend(os.listdir(get_data_dir(q)))
+
+ return get_unique_name(dataset_name, existing_names)
+
+
+def get_valid_temp_data_folder(q: Q, folder_path: str) -> str:
+ """
+ Return new temporary data folder path not associated with any existing dataset
+
+ Args:
+ q: Q
+ folder_path: original folder_path
+ Return:
+ new folder path not associated with any existing dataset
+ """
+ dirname = os.path.dirname(folder_path)
+ basename = os.path.basename(folder_path)
+ unique_name = get_unique_dataset_name(q, basename, include_all_folders=False)
+ return os.path.join(dirname, unique_name)
+
+
+def remove_temp_files(q: Q):
+ """
+ Remove any temp folders leftover from dataset import
+ """
+
+ datasets_df = q.client.app_db.get_datasets_df()
+ all_files = glob.glob(os.path.join(get_data_dir(q), "*"))
+ for file in all_files:
+ if not any([path in file for path in datasets_df["path"].values]):
+ if os.path.isdir(file):
+ shutil.rmtree(file)
+ else:
+ os.remove(file)
+
+
+def get_gpu_usage() -> float:
+ usage: float = 0.0
+ all_gpus: List[GPUtil.GPU] = GPUtil.getGPUs()
+ if not all_gpus:
+ # avoid division by zero on machines without GPUs
+ return usage
+ for gpu in all_gpus:
+ usage += float(gpu.load)
+
+ usage /= len(all_gpus)
+ return usage * 100.0
+
+
+def get_single_gpu_usage(sig_figs: int = 1, highlight: Optional[str] = None):
+ all_gpus = GPUtil.getGPUs()
+ items = []
+ for i, gpu in enumerate(all_gpus):
+ gpu_load = f"{round(gpu.load * 100, sig_figs)}%"
+ memory_used = get_size_str(
+ gpu.memoryUsed, sig_figs=1, input_unit="MB", output_unit="GB"
+ )
+ memory_total = get_size_str(
+ gpu.memoryTotal, sig_figs=1, input_unit="MB", output_unit="GB"
+ )
+
+ if highlight is not None:
+ gpu_load = f"**{gpu_load} **"
+ memory_used = f"**{memory_used} **"
+ memory_total = f"**{memory_total} **"
+
+ items.append(
+ ui.text(
+ f"GPU #{i + 1} - current utilization: {gpu_load} - "
+ f"VRAM usage: {memory_used} / {memory_total} - {gpu.name}"
+ )
+ )
+ return items
+
+
+def copy_config(cfg: DefaultConfigProblemBase, q: Q) -> Any:
+ """Makes a copy of the config
+
+ Args:
+ cfg: DefaultConfigProblemBase config object
+ Returns:
+ copy of the config
+ """
+ # make unique yaml file using uuid
+ os.makedirs(get_output_dir(q), exist_ok=True)
+ tmp_file = os.path.join(f"{get_output_dir(q)}/", str(uuid.uuid4()) + ".yaml")
+ save_config_yaml(tmp_file, cfg)
+ cfg = load_config_yaml(tmp_file)
+ os.remove(tmp_file)
+ return cfg
+
+
+def make_label(title: str, appendix: str = "") -> str:
+ """Cleans a label
+
+ Args:
+ title: title to clean
+ appendix: optional appendix
+
+ Returns:
+ Cleaned label
+
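+ Example (illustrative):
+ >>> make_label("llm_backbone")
+ 'LLM Backbone'
+ >>> make_label("learning_rate", appendix=" settings")
+ 'Learning Rate settings'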
+ """
+ label = " ".join(w.capitalize() for w in title.split("_")) + appendix
+ label = label.replace("Llm", "LLM")
+ return label
+
+
+def get_cfg_list_items(cfg: DefaultConfigProblemBase) -> List:
+ items = parse_cfg_dataclass(cfg)
+ x = []
+ for item in items:
+ for k, v in item.items():
+ x.append(ui.stat_list_item(label=make_label(k), value=str(v)))
+ return x
+
+
+# https://stackoverflow.com/questions/2059482/temporarily-modify-the-current-processs-environment
+@contextlib.contextmanager
+def set_env(**environ):
+ """
+ Temporarily set the process environment variables.
+
+ >>> with set_env(PLUGINS_DIR='test/plugins'):
+ ... "PLUGINS_DIR" in os.environ
+ True
+
+ >>> "PLUGINS_DIR" in os.environ
+ False
+
+ :type environ: dict[str, unicode]
+ :param environ: Environment variables to set
+ """
+ old_environ = dict(os.environ)
+ os.environ.update(environ)
+ try:
+ yield
+ finally:
+ os.environ.clear()
+ os.environ.update(old_environ)
+
+
+def hf_repo_friendly_name(name: str) -> str:
+ """
+ Converts the given string into a huggingface-repository-friendly name.
+
+ • Repo id must use only alphanumeric chars plus '-', '_', and '.'
+ • '--' and '..' are forbidden
+ • '-' and '.' cannot start or end the name
+ • max length is 96
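+
+ Example (illustrative):
+ >>> hf_repo_friendly_name("my experiment (v2)")
+ 'my-experiment-v2'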
+ """
+ name = re.sub("[^0-9a-zA-Z]+", "-", name)
+ name = name[1:] if name.startswith("-") else name
+ name = name[:-1] if name.endswith("-") else name
+ name = name[:96]
+ return name
+
+
+def save_hf_yaml(
+ path: str, account_name: str, model_name: str, repo_id: Optional[str] = None
+):
+ with open(path, "w") as fp:
+ yaml.dump(
+ {
+ "account_name": account_name,
+ "model_name": model_name,
+ "repo_id": repo_id if repo_id else f"{account_name}/{model_name}",
+ },
+ fp,
+ indent=4,
+ )
diff --git a/llm_studio/app_utils/wave_utils.py b/llm_studio/app_utils/wave_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b26006db80610955c60a1a5e65f89128b51896c8
--- /dev/null
+++ b/llm_studio/app_utils/wave_utils.py
@@ -0,0 +1,365 @@
+import subprocess
+import sys
+import traceback
+from typing import Literal, TypedDict
+
+import pandas as pd
+from h2o_wave import Q, expando_to_dict, ui
+from h2o_wave.types import Component
+
+from llm_studio.app_utils.sections.common import clean_dashboard
+
+from .config import default_cfg
+
+
+class ThemeColors(TypedDict):
+ primary: str
+ background_color: str
+
+
+class WaveTheme:
+ _theme_colors: ThemeColors = {
+ "primary": "#FEC925",
+ "background_color": "#121212",
+ }
+
+ states = {
+ "zombie": "#E0E0E0",
+ "queued": "#B8B8B8",
+ "running": "#FFE52B",
+ "finished": "#92E95A",
+ "failed": "#DA0000",
+ "stopped": "#DA0000",
+ }
+ color = "#2196F3"
+ color_range = "#2196F3 #CC7722 #2CA02C #D62728 #9467BD #17BECF #E377C2 #DDAA22"
+
+ def __repr__(self) -> str:
+ return "WaveTheme"
+
+ def get_primary_color(self, q: Q):
+ primary_color = self._theme_colors["primary"]
+ return primary_color
+
+ def get_background_color(self, q: Q):
+ background_color = self._theme_colors["background_color"]
+ return background_color
+
+
+wave_theme = WaveTheme()
+
+
+def ui_table_from_df(
+ q: Q,
+ df: pd.DataFrame,
+ name: str,
+ sortables: list = None,
+ filterables: list = None,
+ searchables: list = None,
+ markdown_cells: list = None,
+ numerics: list = None,
+ times: list = None,
+ tags: list = None,
+ progresses: list = None,
+ min_widths: dict = None,
+ max_widths: dict = None,
+ link_col: str = None,
+ multiple: bool = False,
+ groupable: bool = False,
+ downloadable: bool = False,
+ resettable: bool = False,
+ height: str = None,
+ checkbox_visibility: str = None,
+ actions: dict = None,
+ max_char_length: int = 500,
+ cell_overflow: Literal["tooltip", "wrap"] = "tooltip",
+) -> Component:
+ """
+ Convert a Pandas dataframe into Wave ui.table format.
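+
+ Minimal usage sketch (assumes a Wave context `q` and a dataframe `df`):
+ ui_table_from_df(q, df, name="experiments", sortables=["name"])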
+ """
+
+ df = df.reset_index(drop=True)
+ sortables = sortables or []
+ filterables = filterables or []
+ searchables = searchables or []
+ numerics = numerics or []
+ times = times or []
+ tags = tags or []
+ progresses = progresses or []
+ markdown_cells = markdown_cells or []
+ min_widths = min_widths or {}
+ max_widths = max_widths or {}
+
+ if numerics == []:
+ numerics = df.select_dtypes(include=["number"]).columns.tolist()
+
+ cell_types = {}
+ for col in tags:
+ cell_types[col] = ui.tag_table_cell_type(
+ name="tags",
+ tags=[
+ ui.tag(label=state, color=wave_theme.states[state])
+ for state in wave_theme.states
+ ],
+ )
+ for col in progresses:
+ cell_types[col] = ui.progress_table_cell_type(
+ wave_theme.get_primary_color(q),
+ )
+ for col in markdown_cells:
+ # enables rendering of code in wave table
+ cell_types[col] = ui.markdown_table_cell_type()
+
+ columns = [
+ ui.table_column(
+ name=str(col),
+ label=str(col),
+ sortable=True if col in sortables else False,
+ filterable=True if col in filterables else False,
+ searchable=True if col in searchables else False,
+ data_type=(
+ "number" if col in numerics else ("time" if col in times else "string")
+ ),
+ cell_type=cell_types[col] if col in cell_types else None,
+ min_width=min_widths[col] if col in min_widths else None,
+ max_width=max_widths[col] if col in max_widths else None,
+ link=True if col == link_col else False,
+ cell_overflow=cell_overflow,
+ )
+ for col in df.columns.values
+ ]
+
+ if actions:
+ commands = [ui.command(name=key, label=val) for key, val in actions.items()]
+ action_column = ui.table_column(
+ name="actions",
+ label="action" if int(min_widths["actions"]) > 30 else "",
+ cell_type=ui.menu_table_cell_type(name="commands", commands=commands),
+ min_width=min_widths["actions"],
+ )
+ columns.append(action_column)
+
+ rows = []
+ for i, row in df.iterrows():
+ cells = []
+
+ for cell in row:
+ str_repr = str(cell)
+
+ if len(str_repr) >= max_char_length:
+ str_repr = str_repr[:max_char_length] + "..."
+
+ cells.append(str_repr)
+
+ rows.append(ui.table_row(name=str(i), cells=cells))
+
+ table = ui.table(
+ name=name,
+ columns=columns,
+ rows=rows,
+ multiple=multiple,
+ groupable=groupable,
+ downloadable=downloadable,
+ resettable=resettable,
+ height=height,
+ checkbox_visibility=checkbox_visibility,
+ )
+
+ return table
+
+
+def wave_utils_error_card(
+ q: Q,
+ box: str,
+ app_name: str,
+ github: str,
+ q_app: dict,
+ error: Exception,
+ q_user: dict,
+ q_client: dict,
+ q_events: dict,
+ q_args: dict,
+) -> ui.FormCard:
+ """
+ Card for handling crash.
+ """
+
+ q_app_str = (
+ "### q.app\n```"
+ + "\n".join(
+ [
+ f"{k}: {v}"
+ for k, v in q_app.items()
+ if "_key" not in k and "_token not in k"
+ ]
+ )
+ + "\n```"
+ )
+ q_user_str = (
+ "### q.user\n```"
+ + "\n".join(
+ [
+ f"{k}: {v}"
+ for k, v in q_user.items()
+ if "_key" not in k and "_token" not in k
+ ]
+ )
+ + "\n```"
+ )
+
+ q_client_str = (
+ "### q.client\n```"
+ + "\n".join(
+ [
+ f"{k}: {v}"
+ for k, v in q_client.items()
+ if "_key" not in k and "_token" not in k
+ ]
+ )
+ + "\n```"
+ )
+ q_events_str = (
+ "### q.events\n```"
+ + "\n".join(
+ [
+ f"{k}: {v}"
+ for k, v in q_events.items()
+ if "_key" not in k and "_token" not in k
+ ]
+ )
+ + "\n```"
+ )
+ q_args_str = (
+ "### q.args\n```"
+ + "\n".join(
+ [
+ f"{k}: {v}"
+ for k, v in q_args.items()
+ if "_key" not in k and "_token" not in k
+ ]
+ )
+ + "\n```"
+ )
+
+ type_, value_, traceback_ = sys.exc_info()
+ stack_trace = traceback.format_exception(type_, value_, traceback_)
+ git_version = subprocess.getoutput("git rev-parse HEAD")
+ if not q.app.wave_utils_stack_trace_str:
+ q.app.wave_utils_stack_trace_str = "### stacktrace\n" + "\n".join(stack_trace)
+
+ card = ui.form_card(
+ box=box,
+ items=[
+ ui.stats(
+ items=[
+ ui.stat(
+ label="",
+ value="Oops!",
+ caption="Something went wrong",
+ icon="Error",
+ icon_color="#CDDD38",
+ )
+ ],
+ justify="center",
+ ),
+ ui.separator(),
+ ui.text_l(content="Apologies for the inconvenience! "),
+ ui.buttons(
+ items=[
+ ui.button(name="home", label="Restart", primary=True),
+ ui.button(name="report_error", label="Report", primary=True),
+ ],
+ justify="center",
+ ),
+ ui.separator(visible=False),
+ ui.text(
+ content=f"""
+ To report this error,
+ please open an issue on GitHub {github}
+ with the details below: """,
+ visible=False,
+ ),
+ ui.text_l(content=f"Report Issue: {app_name}", visible=False),
+ ui.text_xs(content=q_app_str, visible=False),
+ ui.text_xs(content=q_user_str, visible=False),
+ ui.text_xs(content=q_client_str, visible=False),
+ ui.text_xs(content=q_events_str, visible=False),
+ ui.text_xs(content=q_args_str, visible=False),
+ ui.text_xs(content=q.app.wave_utils_stack_trace_str, visible=False),
+ ui.text_xs(content=f"### Error\n {error}", visible=False),
+ ui.text_xs(content=f"### Git Version\n {git_version}", visible=False),
+ ],
+ )
+
+ return card
+
+
+async def wave_utils_handle_error(q: Q, error: Exception):
+ """
+ Handle any app error.
+ """
+
+ await clean_dashboard(q, mode="error")
+
+ card_name = "wave_utils_error"
+
+ q.page[card_name] = wave_utils_error_card(
+ q,
+ box="content",
+ error=error,
+ app_name=f"{default_cfg.name} at {default_cfg.url}",
+ github=default_cfg.github,
+ q_app=expando_to_dict(q.app),
+ q_user=expando_to_dict(q.user),
+ q_client=expando_to_dict(q.client),
+ q_events=expando_to_dict(q.events),
+ q_args=expando_to_dict(q.args),
+ )
+ q.client.delete_cards.add("wave_utils_error")
+
+ await q.page.save()
+
+
+async def report_error(q: Q):
+ """
+ Report error details.
+ """
+ card_name = "wave_utils_error"
+ # Show card again. Required since card can be cleared
+ await wave_utils_handle_error(
+ q,
+ error=q.app.wave_utils_error_str,
+ )
+
+ q.page[card_name].items[4].separator.visible = True
+ q.page[card_name].items[5].text.visible = True
+ q.page[card_name].items[6].text_l.visible = True
+ q.page[card_name].items[7].text_xs.visible = True
+ q.page[card_name].items[8].text_xs.visible = True
+ q.page[card_name].items[9].text_xs.visible = True
+ q.page[card_name].items[10].text_xs.visible = True
+ q.page[card_name].items[11].text_xs.visible = True
+ q.page[card_name].items[12].text_xs.visible = True
+ q.page[card_name].items[13].text_xs.visible = True
+ q.page[card_name].items[14].text_xs.visible = True
+
+ await q.page.save()
+
+
+async def busy_dialog(
+ q: Q, title: str = "", text: str = "", force_wait: bool = False
+) -> None:
+ """Creates busy dialog"""
+
+ q.page["meta"].dialog = ui.dialog(
+ title=title,
+ primary=True,
+ items=[
+ ui.progress(label=text),
+ ],
+ blocking=True,
+ )
+ await q.page.save()
+ if force_wait:
+ await q.sleep(1)
+ q.page["meta"].dialog = None
diff --git a/llm_studio/download_default_datasets.py b/llm_studio/download_default_datasets.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b1ed5d7c82c58747451ccd4871c8ff31415fa07
--- /dev/null
+++ b/llm_studio/download_default_datasets.py
@@ -0,0 +1,44 @@
+import os
+
+import pandas as pd
+from datasets import load_dataset
+
+
+def download_default_datasets_to_local_folder() -> None:
+ """
+ Downloads the default datasets to a local folder.
+
+ The temporary folder is given by the ENV var H2O_LLM_STUDIO_DEMO_DATASETS.
+ If the ENV var is not set, this function will raise an error.
+ The datasets are transformed to parquet format and saved in the folder.
+ """
+
+ path = os.environ.get("H2O_LLM_STUDIO_DEMO_DATASETS")
+ if path is None:
+ raise ValueError("H2O_LLM_STUDIO_DEMO_DATASETS is not set.")
+
+ if not os.path.exists(path):
+ os.makedirs(path, exist_ok=True)
+
+ # Prepare Causal Language Modeling Dataset
+ ds = load_dataset("OpenAssistant/oasst2")
+ train = ds["train"].to_pandas()
+ val = ds["validation"].to_pandas()
+ df = pd.concat([train, val], axis=0).reset_index(drop=True)
+ df.to_parquet(os.path.join(path, "causal_language_modeling.pq"), index=False)
+
+ # Prepare DPO Modeling Dataset
+ df = load_dataset("Intel/orca_dpo_pairs")["train"].to_pandas()
+ df.to_parquet(os.path.join(path, "dpo_modeling.pq"), index=False)
+
+ # Prepare Classification Modeling Dataset
+ df = load_dataset("stanfordnlp/imdb")["train"].to_pandas()
+ df.to_parquet(os.path.join(path, "classification_modeling.pq"), index=False)
+
+ # Prepare Regression Modeling Dataset
+ df = load_dataset("nvidia/HelpSteer2")["train"].to_pandas()
+ df.to_parquet(os.path.join(path, "regression_modeling.pq"), index=False)
+
+
+if __name__ == "__main__":
+ download_default_datasets_to_local_folder()
diff --git a/llm_studio/prompt.py b/llm_studio/prompt.py
new file mode 100644
index 0000000000000000000000000000000000000000..8102dcbb447aff1e19ab1b549cc70b9f5240221b
--- /dev/null
+++ b/llm_studio/prompt.py
@@ -0,0 +1,153 @@
+import os
+
+from llm_studio.src.utils.config_utils import load_config_yaml
+
+os.environ["TOKENIZERS_PARALLELISM"] = "false"
+os.environ["OMP_NUM_THREADS"] = "1"
+os.environ["MKL_NUM_THREADS"] = "1"
+os.environ["OPENBLAS_NUM_THREADS"] = "1"
+os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
+os.environ["NUMEXPR_NUM_THREADS"] = "1"
+os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+
+import argparse
+
+import numpy as np
+import torch
+
+from llm_studio.src.datasets.text_utils import get_tokenizer
+from llm_studio.src.utils.modeling_utils import load_checkpoint, set_generation_config
+
+
+def parse_param(cfg, prompt):
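+ """Applies an inference parameter override given as e.g. "--num_beams 4".
+
+ A sketch of the expected input, assuming space-separated key/value pairs:
+ parse_param(cfg, "--num_beams 4 --top_k 30") casts each value to the
+ current type of the corresponding cfg.prediction attribute and sets it.
+ """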
+ prompt = prompt.replace("--", "")
+ parts = prompt.split(" ")
+ args = [" ".join(parts[i : i + 2]) for i in range(0, len(parts), 2)]
+ for arg in args:
+ splitted_arg = arg.split(" ")
+ setattr(
+ cfg.prediction,
+ splitted_arg[0],
+ type(getattr(cfg.prediction, splitted_arg[0]))(splitted_arg[1]),
+ )
+ print(
+ f"Permanently changed {splitted_arg[0]} to",
+ getattr(cfg.prediction, splitted_arg[0]),
+ )
+ return cfg
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Sample prompting.")
+ parser.add_argument(
+ "-e",
+ "--experiment",
+ type=str,
+ required=True,
+ help="Name of the experiment output folder",
+ )
+ parser.add_argument(
+ "-d", "--device", type=str, required=False, default="cuda:0", help="Device"
+ )
+
+ args, unknown = parser.parse_known_args()
+ DEVICE = args.device
+
+ cfg = load_config_yaml(os.path.join(args.experiment, "cfg.yaml"))
+
+ cfg.training.epochs = 0
+
+ cfg.environment._device = DEVICE
+ cfg.environment._local_rank = 0
+
+ cfg.tokenizer.padding_quantile = 0
+
+ cfg.environment.mixed_precision = True
+ cfg.architecture.gradient_checkpointing = False
+ cfg.architecture.pretrained = False
+
+ cfg.prediction.max_length_inference = 256
+
+ if cfg.dataset.text_prompt_start == "":
+ cfg.dataset.text_prompt_start = "\n"
+
+ # cfg.prediction.min_length_inference = 2
+ # cfg.prediction.max_length_inference = 256
+ # cfg.prediction.repetition_penalty = 1.5
+ # cfg.prediction.temperature = 0.3
+ # cfg.prediction.num_beams = 2
+ # cfg.prediction.do_sample = False
+ # cfg.prediction.top_p = 0.9
+ # cfg.prediction.top_k = 40
+
+ tokenizer = get_tokenizer(cfg)
+
+ print("Loading model weights...")
+
+ with torch.device(DEVICE):
+ model = cfg.architecture.model_class(cfg)
+ cfg.architecture.pretrained_weights = os.path.join(
+ args.experiment, "checkpoint.pth"
+ )
+ load_checkpoint(cfg, model, strict=True)
+
+ model = model.to(DEVICE).eval()
+ model.backbone.use_cache = True
+ model.backbone = set_generation_config(model.backbone, cfg.prediction)
+
+ print()
+ print("=============")
+ print(
+ "You can change inference parameters on the fly by typing --param value, "
+ "such as --num_beams 4. You can also chain them such as --num_beams 4 "
+ "--top_k 30."
+ )
+ print()
+
+ while True:
+ prompt = input("Please enter some prompt (type 'exit' to stop): ")
+
+ try:
+ if prompt.lower() == "exit":
+ break
+
+ if prompt.lower().startswith("--"):
+ cfg = parse_param(cfg, prompt)
+ model.backbone = set_generation_config(model.backbone, cfg.prediction)
+ continue
+
+ prompt = cfg.dataset.dataset_class.parse_prompt(cfg, prompt)
+
+ print(prompt)
+
+ inputs = cfg.dataset.dataset_class.encode(
+ tokenizer, prompt, cfg.tokenizer.max_length, "left"
+ )
+ inputs["prompt_input_ids"] = inputs.pop("input_ids").unsqueeze(0).to(DEVICE)
+ inputs["prompt_attention_mask"] = (
+ inputs.pop("attention_mask").unsqueeze(0).to(DEVICE)
+ )
+
+ output = {}
+ with torch.no_grad():
+ with torch.cuda.amp.autocast():
+ output["predicted_answer_ids"] = (
+ model.generate(inputs, cfg).detach().cpu()
+ )
+
+ predicted_text = [
+ tokenizer.decode(ids, skip_special_tokens=True)
+ for ids in output["predicted_answer_ids"]
+ ]
+ output["predicted_text"] = np.array(predicted_text)
+
+ output = cfg.dataset.dataset_class.clean_output(output, cfg)
+
+ output = output["predicted_text"][0]
+
+ print(output)
+ print()
+ except Exception as e:
+ print("Error: {}".format(e))
+ print("Something went wrong, please try again.")
diff --git a/llm_studio/publish_to_hugging_face.py b/llm_studio/publish_to_hugging_face.py
new file mode 100644
index 0000000000000000000000000000000000000000..1fc8ccc55434fa106e122011db4b9bd2aa504d23
--- /dev/null
+++ b/llm_studio/publish_to_hugging_face.py
@@ -0,0 +1,90 @@
+import argparse
+import logging
+import os
+import sys
+
+from huggingface_hub.constants import _is_true
+
+sys.path.append(os.path.dirname(os.path.dirname(__file__)))
+
+from llm_studio.app_utils.hugging_face_utils import publish_model_to_hugging_face
+from llm_studio.app_utils.utils import hf_repo_friendly_name
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="")
+ parser.add_argument(
+ "-p",
+ "--path_to_experiment",
+ required=True,
+ help="Path to the experiment data",
+ default=argparse.SUPPRESS,
+ )
+
+ parser.add_argument(
+ "-d",
+ "--device",
+ required=False,
+ help="'cpu' or 'cuda:0', if the GPU device id is 0",
+ default="cuda:0",
+ )
+
+ parser.add_argument(
+ "-a",
+ "--api_key",
+ required=False,
+ help="Hugging Face API Key",
+ default=argparse.SUPPRESS,
+ )
+
+ parser.add_argument(
+ "-u",
+ "--user_id",
+ required=False,
+ help="Hugging Face User ID",
+ default=argparse.SUPPRESS,
+ )
+
+ parser.add_argument(
+ "-m",
+ "--model_name",
+ required=False,
+ help="Hugging Face Model Name",
+ default=argparse.SUPPRESS,
+ )
+
+ parser.add_argument(
+ "-s",
+ "--safe_serialization",
+ required=False,
+ help="A flag indicating whether safe serialization should be used.",
+ default=True,
+ )
+
+    parser_args, unknown = parser.parse_known_args()
+
+ path_to_experiment = parser_args.path_to_experiment
+ device = parser_args.device
+ safe_serialization = parser_args.safe_serialization
+
+ api_key = getattr(parser_args, "api_key", "")
+ user_id = getattr(parser_args, "user_id", "")
+ model_name = getattr(parser_args, "model_name", "")
+
+ # If the model_name argument is not provided,
+    # the script derives a model name from the experiment folder name
+ if model_name == "":
+ path_to_experiment = path_to_experiment.rstrip("/")
+ model_name = hf_repo_friendly_name(os.path.basename(path_to_experiment))
+
+ try:
+ publish_model_to_hugging_face(
+ path_to_experiment=path_to_experiment,
+ device=device,
+ api_key=api_key,
+ user_id=user_id,
+ model_name=model_name,
+ safe_serialization=safe_serialization,
+ hf_transfer=_is_true(os.getenv("HF_HUB_ENABLE_HF_TRANSFER", "1")),
+ )
+ except Exception:
+ logging.error("Exception occurred during the run:", exc_info=True)
diff --git a/llm_studio/python_configs/__init__.py b/llm_studio/python_configs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llm_studio/python_configs/base.py b/llm_studio/python_configs/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f51d2c6488a75d507edb19b7769bf2c44abbe3a
--- /dev/null
+++ b/llm_studio/python_configs/base.py
@@ -0,0 +1,251 @@
+import dataclasses
+import logging
+from dataclasses import dataclass, fields
+from typing import Any, Dict, List, Optional, Sequence, Set, Tuple
+
+from llm_studio.src import possible_values
+from llm_studio.src.nesting import Dependency, Nesting
+from llm_studio.src.order import Order
+from llm_studio.src.tooltips import tooltips
+
+logger = logging.getLogger(__name__)
+
+
+def _get_bases_below_parent(cls: type, parent: type, bases=None) -> Set[type]:
+ if bases is None:
+ bases = set()
+
+ if parent not in cls.__bases__:
+ for base in cls.__bases__:
+ bases.update(_get_bases_below_parent(base, parent, bases))
+ else:
+ # don't support multiple inheritance when
+        # inheriting directly from the parent
+ assert len(cls.__bases__) == 1
+
+ bases.add(cls)
+
+ return bases
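+
+
+# Illustration (classes defined elsewhere in this change): with the hierarchy
+# DefaultConfig -> ConfigNLPCausalLMDataset -> ConfigNLPCausalClassificationDataset,
+# _get_bases_below_parent(ConfigNLPCausalClassificationDataset, DefaultConfig)
+# yields {ConfigNLPCausalLMDataset}, the unique class directly below the parent.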
+
+
+@dataclass
+class DefaultConfig:
+ """
+ Template for any configuration file
+ """
+
+ def __post_init__(self) -> None:
+ self._possible_values: Dict[str, Any] = {k: None for k in self.__dict__}
+ self._visibility: Dict[str, int] = {k: 0 for k in self.__dict__}
+ self._grid_search_values: Dict[str, Any] = {k: None for k in self.__dict__}
+ self._grid_search_iscustom: Dict[str, Any] = {k: None for k in self.__dict__}
+
+ # go up the class hierarchy until we are one below the `DefaultConfig`
+ bases = _get_bases_below_parent(self.__class__, DefaultConfig)
+
+ # there must be exactly one unique class up the class hierarchy
+ # which inherits directly from the `DefaultConfig`
+ assert len(bases) == 1
+ base = next(iter(bases))
+
+ # initialize the order to the fields this class has
+ self._order = Order([field.name for field in fields(base)])
+
+ # initialize nesting dependencies
+ self._nesting = Nesting()
+
+ def _get_possible_values(
+ self, field: str, value: Any, type_annotation: type, dataset_fn=None
+ ) -> Optional[Tuple[Optional[possible_values.Value], Any]]:
+ """
+ Returns a set of possible values for the field provided, and the current value.
+
+ Args:
+ field: the field
+ value: the preliminary value of the field.
+ type_annotation: Type Annotation of the field.
+ dataset_fn: A function returning a tuple (dataset, value). Will be called
+ if the possible values depend on the dataset.
+
+ Returns:
+ Possible values for the field, the current value.
+ """
+
+ poss_values = self._possible_values.get(field, None)
+
+ if isinstance(poss_values, possible_values.DatasetValue):
+ if dataset_fn is None:
+ raise ValueError(
+ f"{poss_values} needs a dataset to compute possible values!\n"
+ "`dataset_fn` must be provided."
+ )
+
+ dataset, value = dataset_fn(field, value)
+ poss_values, value = poss_values.get_value(
+ dataset=dataset, value=value, type_annotation=type_annotation
+ )
+ elif isinstance(poss_values, Sequence):
+ if all(isinstance(x, (float, int)) for x in poss_values):
+ poss_values = possible_values.Number(
+ min=poss_values[0], max=poss_values[1], step=poss_values[2]
+ )
+ elif all(isinstance(x, str) for x in poss_values):
+ poss_values = possible_values.String(tuple(poss_values))
+ else:
+ raise ValueError(
+ f"Could not interpret {poss_values} as any possible value class."
+ )
+
+ return poss_values, value
+
+ def _get_tooltips(self, field: str, predict: bool = False) -> Optional[str]:
+ """
+ Returns a tooltip for the field provided
+ """
+ return tooltips.get(f"experiments_{field}", None)
+
+ def _get_visibility(self, field: str) -> Optional[int]:
+ """Returns a visibility level for the field provided.
+ 0 -- visible in the Wave app
+ -1 -- not visible in the Wave App
+ -2 -- visible in Dataset Import, but not visible in Create Experiment
+ """
+
+ return self._visibility.get(field, None)
+
+ def _get_grid_search_values(self, field: str) -> Optional[Tuple]:
+ """Returns a Tuple of possible values for Grid Search."""
+
+ return self._grid_search_values.get(field, None)
+
+ def _get_grid_search_iscustom(self, field: str) -> Optional[Tuple]:
+ """Returns "True" if this param is customizable in grid search mode.
+
+ Returns False if not customizable.
+ """
+
+ return self._grid_search_iscustom.get(field, None)
+
+ def _get_nesting_triggers(self) -> Set[str]:
+ """Returns a Set of keys other elements are depending on"""
+
+ return self._nesting.triggers
+
+    def _get_nesting_dependencies(self, key: str) -> Optional[List[Dependency]]:
+        """Returns all dependencies for a given key"""
+
+ if key in self._nesting.dependencies:
+ dependencies = self._nesting.dependencies[key]
+ else:
+ dependencies = None
+ return dependencies
+
+ def _get_order(self, warn_if_unset=True) -> List[str]:
+ """
+ Returns the order in which to show the keys in the config.
+
+ Args:
+ warn_if_unset: Whether to log a warning if order is unset for multiple keys.
+
+ Returns:
+ A list of the same length and with same elements as `self.__dict__.keys()`.
+ """
+
+ keys = self.__dict__.keys()
+
+ ordered_keys = [key for key in self._order if key in keys]
+ unordered_keys = list(set(keys) - set(ordered_keys))
+
+ unordered_ui_keys = [
+ key
+ for key in unordered_keys
+ if not (key.startswith("_") or self._get_visibility(key) == -1)
+ ]
+
+ # warn if there is more than one key without order.
+ # one is not problematic since it will just always be last
+ if warn_if_unset and len(unordered_ui_keys) > 1:
+ logger.warning(f"No order set for keys: {unordered_ui_keys}.")
+
+ return ordered_keys + unordered_keys
+
+ @classmethod
+ def get_annotations(cls) -> Dict[str, Any]:
+ """Returns type annotations through all the Parent config classes"""
+
+ d: Dict[str, Any] = {}
+ for c in cls.mro()[::-1]:
+ try:
+ d.update(**c.__annotations__)
+ except AttributeError:
+ # object, at least, has no __annotations__ attribute.
+ pass
+ return d
+
+ @classmethod
+ def from_dict(cls, d: dict):
+ """Creates a config object from a dictionary"""
+ d_filtered = {k: v for k, v in d.items() if k in cls.get_annotations()}
+ if len(d) != len(d_filtered):
+ logger.warning(
+ f"Keys {set(d.keys()) - set(d_filtered.keys())} are not in the config."
+ )
+ return cls(**d_filtered) # mypy: ignore
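+
+    # Sketch: from_dict drops unknown keys with a warning, e.g. (illustrative
+    # values) ConfigNLPCausalLMTokenizer.from_dict({"max_length": 1024,
+    # "typo_key": 1}) warns about "typo_key" and sets max_length=1024.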
+
+
+@dataclass
+class DefaultConfigProblemBase(DefaultConfig):
+ """
+ Base class for all problem configs.
+ Defines the interface for all problem configs.
+ """
+
+ experiment_name: str
+ output_directory: str
+ llm_backbone: str
+
+ dataset: Any
+ tokenizer: Any
+ architecture: Any
+ training: Any
+ augmentation: Any
+ prediction: Any
+ environment: Any
+ logging: Any
+
+ @property
+ def problem_type(self) -> str:
+ """
+        Parse problem_type from the config module name, for example:
+        text_causal_language_modeling_config -> text_causal_language_modeling
+ """
+ return type(self).__dict__["__module__"].split(".")[-1].replace("_config", "")
+
+ @classmethod
+ def from_dict(cls, cfg_dict: dict):
+ class_fields = {f.name: f for f in dataclasses.fields(cls)}
+
+ # Prepare arguments for creating a new dataclass instance
+ init_args = {}
+ for field_name, field_obj in class_fields.items():
+ if hasattr(field_obj.type, "from_dict"):
+ attr_value = cfg_dict.get(field_name, {})
+ init_args[field_name] = field_obj.type.from_dict(attr_value)
+            else:
+                # Fall back to the default (or default_factory) when missing
+                if field_name in cfg_dict:
+                    init_args[field_name] = cfg_dict[field_name]
+                elif field_obj.default_factory is not dataclasses.MISSING:
+                    init_args[field_name] = field_obj.default_factory()
+                else:
+                    init_args[field_name] = field_obj.default
+
+ return cls(**init_args)
+
+ def check(self) -> Dict[str, List]:
+ """
+ Checks for errors (incompatible settings) for the specific problem type.
+ Returns:
+ A dictionary with three keys:
+ - "title": A list of error titles.
+ - "message": A list of error messages.
+ - "type": A list of error types, can be "error", "warning", "deprecated"
+ """
+ errors: Dict[str, List] = {"title": [], "message": [], "type": []}
+ return errors
diff --git a/llm_studio/python_configs/cfg_checks.py b/llm_studio/python_configs/cfg_checks.py
new file mode 100644
index 0000000000000000000000000000000000000000..24ade406fea454ccbe4f5ac380e07e512376b75e
--- /dev/null
+++ b/llm_studio/python_configs/cfg_checks.py
@@ -0,0 +1,111 @@
+import logging
+import os
+from typing import Dict, List
+
+import torch
+
+from llm_studio.app_utils.config import default_cfg
+from llm_studio.python_configs.base import DefaultConfigProblemBase
+from llm_studio.src.utils.export_utils import get_size_str
+
+logger = logging.getLogger(__name__)
+
+
+def check_config_for_errors(cfg: DefaultConfigProblemBase) -> dict:
+ """
+ Checks the configuration for consistency.
+ Parameters:
+ - cfg (DefaultConfigProblemBase):
+ The config object to be checked.
+
+ Returns:
+    A dictionary with three keys:
+    - "title": A list of error titles.
+    - "message": A list of error messages.
+    - "type": A list of error types ("error", "warning", "deprecated").
+ """
+ errors = check_for_common_errors(cfg)
+ problem_type_errors = cfg.check()
+ errors["title"].extend(problem_type_errors["title"])
+ errors["message"].extend(problem_type_errors["message"])
+ errors["type"].extend(problem_type_errors["type"])
+ return errors
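+
+
+# Sketch of the intended contract: entries are appended position-wise, so
+# errors["title"][i], errors["message"][i] and errors["type"][i] describe the
+# same issue; a "type" of "error" is meant to block the run, while "warning"
+# and "deprecated" are advisory (a reading of the messages in this module).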
+
+
+def check_for_common_errors(cfg: DefaultConfigProblemBase) -> dict:
+ errors: Dict[str, List] = {"title": [], "message": [], "type": []}
+    if len(cfg.environment.gpus) == 0:
+        errors["title"] += ["No GPU selected"]
+        errors["message"] += [
+            "Please select at least one GPU to start the experiment!"
+        ]
+ errors["type"].append("error")
+
+ if len(cfg.environment.gpus) > torch.cuda.device_count():
+ errors["title"] += ["More GPUs selected than available"]
+ errors["message"] += [
+ f"There are {cfg.environment.gpus} GPUs selected but only "
+ f"{torch.cuda.device_count()} GPUs available."
+ "This error can happen when you start from an experiment configuration "
+ "that was created on a different machine. Please deselect all GPUs and "
+ "select the GPUs you want to use again. "
+ ]
+ errors["type"].append("error")
+
+ stats = os.statvfs(".")
+ available_size = stats.f_frsize * stats.f_bavail
+ if available_size < default_cfg.min_experiment_disk_space:
+ errors["title"] += ["Not enough disk space."]
+ errors["message"] += [
+ f"Not enough disk space. Available space is {get_size_str(available_size)}."
+ f" Required space is "
+ f"{get_size_str(default_cfg.min_experiment_disk_space)}. "
+ "Experiment has not started. "
+ "Please ensure that you have enough disk space before "
+ "starting the experiment."
+ ]
+ errors["type"].append("error")
+
+ # see create_nlp_backbone
+ if (
+ cfg.architecture.backbone_dtype in ["int4", "int8"]
+ and not cfg.architecture.pretrained
+ ):
+ errors["title"] += ["Quantization without pretrained weights."]
+ errors["message"] += [
+ "Quantization is only supported for pretrained models. "
+ "Please enable pretrained model or disable quantization."
+ ]
+ errors["type"].append("error")
+
+ if (
+ not cfg.training.lora
+ and cfg.architecture.backbone_dtype not in ["bfloat16", "float32"]
+ and cfg.training.epochs > 0
+ ):
+ errors["title"] += [f"Pure {cfg.architecture.backbone_dtype} training."]
+ errors["message"] += [
+ f"When not using LORA, {cfg.architecture.backbone_dtype} training will "
+ "likely lead to unstable training. "
+ "Please use LORA or set Backbone Dtype to bfloat16 or float32."
+ ]
+ errors["type"].append("warning")
+
+ if cfg.environment.use_deepspeed and cfg.architecture.backbone_dtype in [
+ "int8",
+ "int4",
+ ]:
+ errors["title"] += ["Deepspeed does not support quantization."]
+ errors["message"] += [
+ "Deepspeed do not support backbone type "
+ f"{cfg.architecture.backbone_dtype}. "
+ "Please set backbone type to float16 or bfloat16 for using deepspeed."
+ ]
+ errors["type"].append("error")
+ if cfg.environment.use_deepspeed and len(cfg.environment.gpus) < 2:
+ errors["title"] += ["Deepspeed not supported for single GPU."]
+ errors["message"] += [
+ "Deepspeed does not support single GPU training. "
+ "Please select more than one GPU or disable deepspeed."
+ ]
+ errors["type"].append("error")
+ return errors
diff --git a/llm_studio/python_configs/text_causal_classification_modeling_config.py b/llm_studio/python_configs/text_causal_classification_modeling_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2f5b7fdca9b1c0d854d067da64072e9de40014d
--- /dev/null
+++ b/llm_studio/python_configs/text_causal_classification_modeling_config.py
@@ -0,0 +1,260 @@
+import os
+from dataclasses import dataclass, field
+from typing import Any, Dict, List, Tuple
+
+import llm_studio.src.datasets.text_causal_classification_ds
+import llm_studio.src.plots.text_causal_classification_modeling_plots
+from llm_studio.app_utils.config import default_cfg
+from llm_studio.python_configs.base import DefaultConfig, DefaultConfigProblemBase
+from llm_studio.python_configs.text_causal_language_modeling_config import (
+ ConfigNLPAugmentation,
+ ConfigNLPCausalLMArchitecture,
+ ConfigNLPCausalLMDataset,
+ ConfigNLPCausalLMEnvironment,
+ ConfigNLPCausalLMLogging,
+ ConfigNLPCausalLMTokenizer,
+ ConfigNLPCausalLMTraining,
+)
+from llm_studio.src import possible_values
+from llm_studio.src.losses import text_causal_classification_modeling_losses
+from llm_studio.src.metrics import text_causal_classification_modeling_metrics
+from llm_studio.src.models import text_causal_classification_modeling_model
+from llm_studio.src.utils.modeling_utils import generate_experiment_name
+
+
+@dataclass
+class ConfigNLPCausalClassificationDataset(ConfigNLPCausalLMDataset):
+ dataset_class: Any = (
+ llm_studio.src.datasets.text_causal_classification_ds.CustomDataset
+ )
+ system_column: str = "None"
+ prompt_column: Tuple[str, ...] = ("instruction", "input")
+ answer_column: Tuple[str, ...] = ("label", "output") # type: ignore
+ num_classes: int = 1
+ parent_id_column: str = "None"
+
+ text_system_start: str = ""
+ text_prompt_start: str = ""
+ text_answer_separator: str = ""
+
+ add_prompt_answer_tokens: bool = False
+
+ add_eos_token_to_system: bool = False
+ add_eos_token_to_prompt: bool = False
+ add_eos_token_to_answer: bool = False
+
+ _allowed_file_extensions: Tuple[str, ...] = ("csv", "pq", "parquet")
+
+ def __post_init__(self):
+ self.prompt_column = (
+ tuple(
+ self.prompt_column,
+ )
+ if isinstance(self.prompt_column, str)
+ else tuple(self.prompt_column)
+ )
+ super().__post_init__()
+
+ self._possible_values["num_classes"] = (1, 100, 1)
+
+ self._visibility["system_column"] = -1
+ self._visibility["parent_id_column"] = -1
+ self._visibility["text_system_start"] = -1
+ self._visibility["add_prompt_answer_tokens"] = -1
+ self._visibility["add_eos_token_to_system"] = -1
+ self._visibility["add_eos_token_to_answer"] = -1
+ self._visibility["personalize"] = -1
+ self._visibility["chatbot_name"] = -1
+ self._visibility["chatbot_author"] = -1
+ self._visibility["mask_prompt_labels"] = -1
+ self._visibility["only_last_answer"] = -1
+
+
+@dataclass
+class ConfigNLPCausalClassificationTraining(ConfigNLPCausalLMTraining):
+ loss_class: Any = text_causal_classification_modeling_losses.Losses
+ loss_function: str = "BinaryCrossEntropyLoss"
+
+ learning_rate: float = 0.0001
+ differential_learning_rate_layers: Tuple[str, ...] = ("classification_head",)
+ differential_learning_rate: float = 0.00001
+
+ def __post_init__(self):
+ super().__post_init__()
+ self._possible_values["loss_function"] = self.loss_class.names()
+
+ self._possible_values["differential_learning_rate_layers"] = (
+ possible_values.String(
+ values=("backbone", "embed", "classification_head"),
+ allow_custom=False,
+ placeholder="Select optional layers...",
+ )
+ )
+
+
+@dataclass
+class ConfigNLPCausalClassificationTokenizer(ConfigNLPCausalLMTokenizer):
+ max_length: int = 512
+
+ def __post_init__(self):
+ super().__post_init__()
+
+
+@dataclass
+class ConfigNLPCausalClassificationArchitecture(ConfigNLPCausalLMArchitecture):
+ model_class: Any = text_causal_classification_modeling_model.Model
+
+ def __post_init__(self):
+ super().__post_init__()
+
+
+@dataclass
+class ConfigNLPCausalClassificationAugmentation(ConfigNLPAugmentation):
+ skip_parent_probability: float = 0.0
+ random_parent_probability: float = 0.0
+
+ def __post_init__(self):
+ super().__post_init__()
+ self._visibility["skip_parent_probability"] = -1
+ self._visibility["random_parent_probability"] = -1
+
+
+@dataclass
+class ConfigNLPCausalClassificationPrediction(DefaultConfig):
+ metric_class: Any = text_causal_classification_modeling_metrics.Metrics
+ metric: str = "AUC"
+ batch_size_inference: int = 0
+
+ def __post_init__(self):
+ super().__post_init__()
+
+ self._possible_values["metric"] = self.metric_class.names()
+ self._possible_values["batch_size_inference"] = (0, 512, 1)
+
+ self._visibility["metric_class"] = -1
+
+
+@dataclass
+class ConfigNLPCausalClassificationEnvironment(ConfigNLPCausalLMEnvironment):
+ _model_card_template: str = "text_causal_classification_model_card_template.md"
+ _summary_card_template: str = (
+ "text_causal_classification_experiment_summary_card_template.md"
+ )
+
+ def __post_init__(self):
+ super().__post_init__()
+
+
+@dataclass
+class ConfigNLPCausalClassificationLogging(ConfigNLPCausalLMLogging):
+ plots_class: Any = (
+ llm_studio.src.plots.text_causal_classification_modeling_plots.Plots
+ )
+
+
+@dataclass
+class ConfigProblemBase(DefaultConfigProblemBase):
+ output_directory: str = f"output/{os.path.basename(__file__).split('.')[0]}"
+ experiment_name: str = field(default_factory=generate_experiment_name)
+ llm_backbone: str = (
+ "h2oai/h2o-danube3-500m-chat"
+ if "h2oai/h2o-danube3-500m-chat" in default_cfg.default_causal_language_models
+ else default_cfg.default_causal_language_models[0]
+ )
+
+ dataset: ConfigNLPCausalClassificationDataset = field(
+ default_factory=ConfigNLPCausalClassificationDataset
+ )
+ tokenizer: ConfigNLPCausalClassificationTokenizer = field(
+ default_factory=ConfigNLPCausalClassificationTokenizer
+ )
+ architecture: ConfigNLPCausalClassificationArchitecture = field(
+ default_factory=ConfigNLPCausalClassificationArchitecture
+ )
+ training: ConfigNLPCausalClassificationTraining = field(
+ default_factory=ConfigNLPCausalClassificationTraining
+ )
+ augmentation: ConfigNLPCausalClassificationAugmentation = field(
+ default_factory=ConfigNLPCausalClassificationAugmentation
+ )
+ prediction: ConfigNLPCausalClassificationPrediction = field(
+ default_factory=ConfigNLPCausalClassificationPrediction
+ )
+ environment: ConfigNLPCausalClassificationEnvironment = field(
+ default_factory=ConfigNLPCausalClassificationEnvironment
+ )
+ logging: ConfigNLPCausalClassificationLogging = field(
+ default_factory=ConfigNLPCausalClassificationLogging
+ )
+
+ def __post_init__(self):
+ super().__post_init__()
+
+ self._visibility["output_directory"] = -1
+
+ self._possible_values["llm_backbone"] = possible_values.String(
+ values=default_cfg.default_causal_language_models,
+ allow_custom=True,
+ )
+
+ def check(self) -> Dict[str, List]:
+ errors: Dict[str, List] = {"title": [], "message": [], "type": []}
+
+ if isinstance(self.dataset.answer_column, str):
+ errors["title"].append("Invalid answer_column type")
+ errors["message"].append(
+ "Providing the answer_column as a string is deprecated. "
+ "Please provide the answer_column as a list."
+ )
+ errors["type"].append("deprecated")
+ self.dataset.answer_column = [self.dataset.answer_column]
+
+ if len(self.dataset.answer_column) > 1:
+ if self.training.loss_function == "CrossEntropyLoss":
+ errors["title"] += [
+ "CrossEntropyLoss not supported for multilabel classification"
+ ]
+ errors["message"] += [
+ "CrossEntropyLoss requires a single multi-class answer column, "
+ "but multiple answer columns are set."
+ ]
+ errors["type"].append("error")
+ if self.dataset.num_classes != len(self.dataset.answer_column):
+ errors["title"] += [
+ "Wrong number of classes for multilabel classification"
+ ]
+ error_msg = (
+ "Multilabel classification requires "
+ "num_classes == num_answer_columns, "
+ "but num_classes is set to {} and num_answer_columns is set to {}."
+ ).format(self.dataset.num_classes, len(self.dataset.answer_column))
+ errors["message"] += [error_msg]
+ errors["type"].append("error")
+ else:
+ if self.training.loss_function == "CrossEntropyLoss":
+ if self.dataset.num_classes == 1:
+ errors["title"] += ["CrossEntropyLoss requires num_classes > 1"]
+ errors["message"] += [
+ "CrossEntropyLoss requires num_classes > 1, "
+ "but num_classes is set to 1."
+ ]
+ errors["type"].append("error")
+ elif self.training.loss_function == "BinaryCrossEntropyLoss":
+ if self.dataset.num_classes != 1:
+ errors["title"] += [
+ "BinaryCrossEntropyLoss requires num_classes == 1"
+ ]
+ errors["message"] += [
+ "BinaryCrossEntropyLoss requires num_classes == 1, "
+ "but num_classes is set to {}.".format(self.dataset.num_classes)
+ ]
+ errors["type"].append("error")
+
+ if self.dataset.parent_id_column not in ["None", None]:
+ errors["title"] += ["Parent ID column is not supported for classification"]
+ errors["message"] += [
+ "Parent ID column is not supported for classification datasets."
+ ]
+ errors["type"].append("error")
+
+ return errors
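+
+# Sketch (illustrative class counts): a binary task pairs
+# loss_function="BinaryCrossEntropyLoss" with num_classes=1; a 5-class task
+# pairs "CrossEntropyLoss" with num_classes=5; multilabel uses one answer
+# column per label with num_classes == len(answer_column), per the checks above.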
diff --git a/llm_studio/python_configs/text_causal_language_modeling_config.py b/llm_studio/python_configs/text_causal_language_modeling_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..c276b805db0651bf1c335c07c9428f7bb7c37f3a
--- /dev/null
+++ b/llm_studio/python_configs/text_causal_language_modeling_config.py
@@ -0,0 +1,693 @@
+import logging
+import multiprocessing
+import os
+from dataclasses import dataclass, field
+from typing import Any, Dict, List, Tuple
+
+import torch
+
+import llm_studio.src.datasets.text_causal_language_modeling_ds
+from llm_studio.app_utils.config import default_cfg
+from llm_studio.python_configs.base import DefaultConfig, DefaultConfigProblemBase
+from llm_studio.src import possible_values
+from llm_studio.src.augmentations.nlp_aug import BaseNLPAug
+from llm_studio.src.loggers import ExternalLoggers
+from llm_studio.src.losses import text_causal_language_modeling_losses
+from llm_studio.src.metrics import text_causal_language_modeling_metrics
+from llm_studio.src.models import text_causal_language_modeling_model
+from llm_studio.src.nesting import Dependency
+from llm_studio.src.optimizers import Optimizers
+from llm_studio.src.plots import text_causal_language_modeling_plots
+from llm_studio.src.schedulers import Schedulers
+from llm_studio.src.utils.data_utils import sanity_check
+from llm_studio.src.utils.modeling_utils import generate_experiment_name
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class ConfigNLPCausalLMDataset(DefaultConfig):
+ dataset_class: Any = (
+ llm_studio.src.datasets.text_causal_language_modeling_ds.CustomDataset
+ )
+
+ personalize: bool = False
+ chatbot_name: str = "h2oGPT"
+ chatbot_author: str = "H2O.ai"
+
+ train_dataframe: str = "/path/to/train.csv"
+ validation_strategy: str = "automatic"
+ validation_dataframe: str = ""
+ validation_size: float = 0.01
+
+ data_sample: float = 1.0
+ data_sample_choice: Tuple[str, ...] = ("Train", "Validation")
+
+ system_column: str = "system"
+ prompt_column: Tuple[str, ...] = ("instruction", "input")
+ prompt_column_separator: str = "\\n\\n"
+ answer_column: str = "output"
+ parent_id_column: str = "parent_id"
+ id_column: str = "id"
+
+ text_system_start: str = "<|system|>"
+ text_prompt_start: str = "<|prompt|>"
+ text_answer_separator: str = "<|answer|>"
+
+ add_eos_token_to_system: bool = True
+ add_eos_token_to_prompt: bool = True
+ add_eos_token_to_answer: bool = True
+ limit_chained_samples: bool = False
+ mask_prompt_labels: bool = True
+ only_last_answer: bool = False
+
+ _allowed_file_extensions: Tuple[str, ...] = ("csv", "pq", "parquet")
+
+ def __post_init__(self):
+ self.prompt_column = (
+ tuple(
+ self.prompt_column,
+ )
+ if isinstance(self.prompt_column, str)
+ else tuple(self.prompt_column)
+ )
+ super().__post_init__()
+
+ self._possible_values["train_dataframe"] = possible_values.Files(
+ prefer_with=lambda path: "train" in path
+ )
+ self._possible_values["validation_strategy"] = possible_values.String(
+ values=(
+ ("custom", "Custom holdout validation"),
+ ("automatic", "Automatic holdout validation"),
+ ),
+ allow_custom=False,
+ )
+ self._possible_values["validation_dataframe"] = possible_values.Files(
+ add_none=True, prefer_with=lambda path: "val" in path
+ )
+ self._possible_values["validation_size"] = (0.01, 0.95, 0.01)
+ self._possible_values["data_sample"] = (0.01, 1, 0.01)
+ self._possible_values["data_sample_choice"] = ["Train", "Validation"]
+ self._possible_values["system_column"] = possible_values.Columns(
+ prefer_with=lambda column: column in ("system",), add_none=True
+ )
+ self._possible_values["prompt_column"] = possible_values.Columns(
+ prefer_with=lambda column: column
+ in ("instruction", "prompt", "question", "input", "user")
+ )
+ self._possible_values["answer_column"] = possible_values.Columns(
+ prefer_with=lambda column: column
+ in ("answer", "output", "response", "assistant", "chosen")
+ )
+ self._possible_values["parent_id_column"] = possible_values.Columns(
+ prefer_with=lambda column: column in ("parent", "parent_id"), add_none=True
+ )
+
+ self._possible_values["id_column"] = possible_values.Columns(
+ prefer_with=lambda column: column in ("id", "ID", "index"), add_none=True
+ )
+
+ self._nesting.add(
+ ["chatbot_name", "chatbot_author"],
+ [Dependency(key="personalize", value=True, is_set=True)],
+ )
+
+ self._nesting.add(
+ ["validation_dataframe"],
+ [Dependency(key="validation_strategy", value="custom", is_set=True)],
+ )
+
+ self._nesting.add(
+ ["validation_size"],
+ [Dependency(key="validation_strategy", value="automatic", is_set=True)],
+ )
+
+ self._nesting.add(
+ ["data_sample_choice"],
+ [Dependency(key="data_sample", value=1, is_set=False)],
+ )
+
+ self._nesting.add(
+ ["limit_chained_samples"],
+ [Dependency(key="parent_id_column", value="None", is_set=False)],
+ )
+
+ self._nesting.add(
+ ["id_column"],
+ [Dependency(key="parent_id_column", value="None", is_set=False)],
+ )
+
+ self._nesting.add(
+ ["text_system_start", "add_eos_token_to_system"],
+ [Dependency(key="system_column", value="None", is_set=False)],
+ )
+
+ self._nesting.add(
+ ["only_last_answer"],
+ [
+ Dependency(key="parent_id_column", value="None", is_set=False),
+ Dependency(key="mask_prompt_labels", value=True, is_set=True),
+ ],
+ )
+
+ self._visibility["dataset_class"] = -1
+
+
+@dataclass
+class ConfigNLPCausalLMTraining(DefaultConfig):
+ loss_class: Any = text_causal_language_modeling_losses.Losses
+ loss_function: str = "TokenAveragedCrossEntropy"
+ optimizer: str = "AdamW"
+
+ learning_rate: float = 0.0001
+ differential_learning_rate_layers: Tuple[str, ...] = ()
+ differential_learning_rate: float = 0.00001
+ freeze_layers: Tuple[str, ...] = ()
+
+ attention_implementation: str = "auto"
+ batch_size: int = 2
+ drop_last_batch: bool = True
+ epochs: int = 1
+ schedule: str = "Cosine"
+ min_learning_rate_ratio: float = 0.0
+ warmup_epochs: float = 0.0
+
+ weight_decay: float = 0.0
+ gradient_clip: float = 0.0
+ grad_accumulation: int = 1
+
+ lora: bool = True
+ use_dora: bool = False
+ lora_r: int = 4
+ lora_alpha: int = 16
+ lora_dropout: float = 0.05
+ use_rslora: bool = False
+ lora_target_modules: str = ""
+ lora_unfreeze_layers: Tuple[str, ...] = ()
+
+ save_checkpoint: str = "last"
+ evaluation_epochs: float = 1.0
+ evaluate_before_training: bool = False
+ train_validation_data: bool = False
+
+ def __post_init__(self):
+ super().__post_init__()
+ self._possible_values["loss_function"] = self.loss_class.names()
+ self._possible_values["optimizer"] = Optimizers.names()
+
+ self._possible_values["learning_rate"] = possible_values.Number(
+ step=1e-9, min=1e-9
+ )
+ self._possible_values["differential_learning_rate_layers"] = (
+ possible_values.String(
+ values=("backbone", "embed", "head"),
+ allow_custom=True,
+ placeholder="Select optional layers...",
+ )
+ )
+ self._possible_values["differential_learning_rate"] = self._possible_values[
+ "learning_rate"
+ ]
+ self._possible_values["freeze_layers"] = possible_values.String(
+ values=("embed", "layer", "head"),
+ allow_custom=True,
+ placeholder="Select optional layers to freeze...",
+ )
+ self._possible_values["attention_implementation"] = possible_values.String(
+ values=(
+ ("auto", "Auto"),
+ ("eager", "Eager"),
+ ("flash_attention_2", "Flash Attention 2"),
+ ("sdpa", "SDPA"),
+ ),
+ allow_custom=False,
+ )
+
+ self._possible_values["batch_size"] = (1, 256, 1)
+ self._possible_values["epochs"] = (0, 10, 1)
+ self._possible_values["schedule"] = Schedulers.names()
+ self._possible_values["min_learning_rate_ratio"] = (0.0, 0.1, 0.0001)
+ self._possible_values["warmup_epochs"] = (0.0, 5.0, 0.05)
+
+ self._possible_values["weight_decay"] = possible_values.Number(step=1e-5, min=0)
+ self._possible_values["gradient_clip"] = (0.0, 10.0, 0.1)
+ self._possible_values["grad_accumulation"] = (1, 8, 1)
+
+ self._possible_values["lora_r"] = (1, 256, 1)
+ self._possible_values["lora_alpha"] = (1, 256, 1)
+ self._possible_values["lora_dropout"] = (0.0, 0.5, 0.01)
+ self._possible_values["lora_unfreeze_layers"] = possible_values.String(
+ values=("embed", "head"),
+ allow_custom=True,
+ placeholder="Select optional layers to unfreeze...",
+ )
+
+ self._possible_values["save_checkpoint"] = possible_values.String(
+ values=(
+ ("last", "Last"),
+ ("best", "Best"),
+ ("each_evaluation_epoch", "Each evaluation epoch"),
+ ("disable", "Disable"),
+ ),
+ allow_custom=False,
+ )
+
+ self._possible_values["evaluation_epochs"] = (0.01, 1, 0.01)
+
+ self._grid_search_values["loss_function"] = self._possible_values[
+ "loss_function"
+ ]
+ self._grid_search_values["learning_rate"] = (
+ 0.000001,
+ 0.000005,
+ 0.00001,
+ 0.00005,
+ 0.0001,
+ 0.0003,
+ 0.0005,
+ )
+ self._grid_search_values["differential_learning_rate"] = (
+ 0.000001,
+ 0.000005,
+ 0.00001,
+ 0.00005,
+ 0.0001,
+ 0.0003,
+ 0.0005,
+ )
+ self._grid_search_values["weight_decay"] = (0.0, 0.01, 0.1, 0.2)
+ self._grid_search_values["warmup_epochs"] = (0.0, 0.25)
+ self._grid_search_values["gradient_clip"] = (0.0, 0.5, 1, 2, 4, 8)
+ self._grid_search_values["grad_accumulation"] = (1, 2, 4, 8, 16, 32)
+ self._grid_search_values["batch_size"] = (1, 2, 4, 8, 16, 32, 64)
+ self._grid_search_values["epochs"] = (1, 2, 4)
+ self._grid_search_values["lora_r"] = (2, 4, 8, 16, 32, 64, 128)
+ self._grid_search_values["lora_alpha"] = (4, 8, 16, 32, 64, 128, 256)
+
+ self._grid_search_iscustom["loss_function"] = False
+ self._grid_search_iscustom["learning_rate"] = True
+ self._grid_search_iscustom["differential_learning_rate"] = True
+ self._grid_search_iscustom["weight_decay"] = True
+ self._grid_search_iscustom["warmup_epochs"] = True
+ self._grid_search_iscustom["gradient_clip"] = True
+ self._grid_search_iscustom["grad_accumulation"] = True
+
+ self._visibility["loss_class"] = -1
+ self._visibility["drop_last_batch"] = -1
+ self._visibility["differential_learning_rate_layers"] = 1
+ self._visibility["differential_learning_rate"] = 1
+
+ self._nesting.add(
+ ["differential_learning_rate"],
+ [
+ Dependency(
+ key="differential_learning_rate_layers", value=None, is_set=False
+ )
+ ],
+ )
+ self._nesting.add(
+ ["freeze_layers"],
+ [Dependency(key="lora", value=False, is_set=True)],
+ )
+ self._nesting.add(
+ [
+ "use_dora",
+ "lora_r",
+ "lora_alpha",
+ "lora_dropout",
+ "use_rslora",
+ "lora_target_modules",
+ "lora_unfreeze_layers",
+ ],
+ [Dependency(key="lora", value=False, is_set=False)],
+ )
+ self._nesting.add(
+ ["min_learning_rate_ratio"],
+ [Dependency(key="schedule", value="Constant", is_set=False)],
+ )
+
+
+@dataclass
+class ConfigNLPCausalLMTokenizer(DefaultConfig):
+ max_length: int = 512
+ add_prompt_answer_tokens: bool = False
+ padding_quantile: float = 1.0
+ tokenizer_kwargs: str = '{"use_fast": true, "add_prefix_space": false}'
+ _tokenizer_mask_token_id: int = 0
+ _tokenizer_eos_token: str = ""
+ _vocab_length: int = 0
+ _stop_words_ids: List[int] = field(default_factory=list)
+
+ def __post_init__(self):
+ super().__post_init__()
+ self._possible_values["max_length"] = (32, 1024 * 16, 32)
+ self._possible_values["padding_quantile"] = (0, 1, 0.01)
+
+ self._grid_search_values["max_length"] = (256, 512, 1024)
+
+ self._grid_search_iscustom["max_length"] = True
+
+ self._padding_side = "left"
+
+
+@dataclass
+class ConfigNLPCausalLMArchitecture(DefaultConfig):
+ model_class: Any = text_causal_language_modeling_model.Model
+ pretrained: bool = True
+
+ backbone_dtype: str = "int4"
+ gradient_checkpointing: bool = True
+ intermediate_dropout: float = 0
+ pretrained_weights: str = ""
+
+ def __post_init__(self):
+ super().__post_init__()
+
+ self._possible_values["backbone_dtype"] = possible_values.String(
+ values=("float32", "bfloat16", "float16", "int8", "int4"),
+ allow_custom=False,
+ )
+ self._possible_values["intermediate_dropout"] = (0, 0.5, 0.05)
+
+ self._grid_search_values["intermediate_dropout"] = (0.0, 0.05, 0.1, 0.15)
+
+ self._grid_search_iscustom["intermediate_dropout"] = True
+
+ self._visibility["model_class"] = -1
+ self._visibility["pretrained"] = -1
+
+
+@dataclass
+class ConfigNLPAugmentation(DefaultConfig):
+ nlp_augmentations_class: Any = BaseNLPAug
+ token_mask_probability: float = 0.0
+ skip_parent_probability: float = 0.0
+ random_parent_probability: float = 0.0
+ neftune_noise_alpha: float = 0.0
+
+ def __post_init__(self):
+ super().__post_init__()
+ self._possible_values["token_mask_probability"] = (0.0, 0.9, 0.05)
+ self._possible_values["skip_parent_probability"] = (0.0, 1.0, 0.05)
+ self._possible_values["random_parent_probability"] = (0.0, 1.0, 0.05)
+ self._possible_values["neftune_noise_alpha"] = (0.0, 15, 0.05)
+
+ self._grid_search_values["token_mask_probability"] = (0.0, 0.1, 0.2, 0.3)
+ self._grid_search_values["skip_parent_probability"] = (0.0, 0.1, 0.2, 0.3)
+ self._grid_search_values["random_parent_probability"] = (0.0, 0.1, 0.2, 0.3)
+ self._grid_search_values["neftune_noise_alpha"] = (0.0, 5, 10, 15)
+
+ self._grid_search_iscustom["token_mask_probability"] = True
+ self._grid_search_iscustom["skip_parent_probability"] = True
+ self._grid_search_iscustom["random_parent_probability"] = True
+ self._grid_search_iscustom["neftune_noise_alpha"] = True
+
+ self._visibility["nlp_augmentations_class"] = -1
+
+
+@dataclass
+class ConfigNLPCausalLMPrediction(DefaultConfig):
+ metric_class: Any = text_causal_language_modeling_metrics.Metrics
+ metric: str = "GPT"
+ metric_gpt_model: str = "gpt-3.5-turbo-0301"
+ metric_gpt_template: str = "general"
+
+ min_length_inference: int = 2
+ max_length_inference: int = 256
+ max_time: float = 0
+ batch_size_inference: int = 0
+
+ do_sample: bool = False
+ num_beams: int = 1
+ temperature: float = 0.0
+ repetition_penalty: float = 1.0
+ stop_tokens: str = ""
+ top_k: int = 0
+ top_p: float = 1.0
+
+ num_history: int = 4
+
+ def __post_init__(self):
+ super().__post_init__()
+ self._possible_values["metric"] = self.metric_class.names()
+
+ self._possible_values["metric_gpt_model"] = possible_values.String(
+ values=(
+ "gpt-3.5-turbo-0301",
+ "gpt-3.5-turbo-0613",
+ "gpt-4-0314",
+ "gpt-4-0613",
+ "gpt-4-1106-preview",
+ ),
+ allow_custom=True,
+ )
+ self._possible_values["metric_gpt_template"] = possible_values.String(
+ values=tuple(f.split(".")[0] for f in os.listdir("prompts"))
+ )
+
+ self._possible_values["batch_size_inference"] = (0, 512, 1)
+ self._possible_values["min_length_inference"] = (0, 1024, 1)
+ self._possible_values["max_length_inference"] = (1, 4096, 1)
+ self._possible_values["max_time"] = (0.0, 600.0, 1.0)
+
+ self._possible_values["num_beams"] = (1, 4, 1)
+ self._possible_values["temperature"] = (0, 10, 0.05)
+ self._possible_values["repetition_penalty"] = (1, 10, 0.025)
+ self._possible_values["top_k"] = (0, 100, 1)
+ self._possible_values["top_p"] = (0.5, 1, 0.05)
+ self._possible_values["num_history"] = (1, 50, 1)
+
+ self._visibility["metric_class"] = -1
+ # possible values for num_history are only used in chatbot tab
+ self._visibility["num_history"] = -1
+
+ self._nesting.add(
+ ["metric_gpt_model", "metric_gpt_template"],
+ [Dependency(key="metric", value="GPT", is_set=True)],
+ )
+
+
+@dataclass
+class ConfigNLPCausalLMEnvironment(DefaultConfig):
+ gpus: Tuple[str, ...] = tuple(str(x) for x in range(torch.cuda.device_count()))
+
+ mixed_precision: bool = True
+ mixed_precision_dtype: str = "bfloat16"
+
+ compile_model: bool = False
+ use_deepspeed: bool = False
+ deepspeed_method: str = "ZeRO2"
+ deepspeed_allgather_bucket_size: int = int(1e6)
+ deepspeed_reduce_bucket_size: int = int(1e6)
+ deepspeed_stage3_prefetch_bucket_size: int = int(1e6)
+ deepspeed_stage3_param_persistence_threshold: int = int(1e6)
+ # deepspeed_offload_optimizer: bool = False
+ # deepspeed_stage3_max_live_parameters: int = 1e9
+ # deepspeed_stage3_max_reuse_distance: int = 1e9
+
+ find_unused_parameters: bool = False
+ trust_remote_code: bool = True
+ huggingface_branch: str = "main"
+ number_of_workers: int = 4
+ seed: int = -1
+
+ _seed: int = 0 # internal seed set in train.py (equals seed if seed is not -1)
+ _distributed: bool = False
+ _distributed_inference: bool = True
+ _local_rank: int = 0
+ _world_size: int = 1
+ _curr_step: int = 0
+ _step_log_denominator: int = 1
+ _curr_val_step: int = 0
+ _rank: int = 0 # global rank
+ _device: str = "cuda"
+ _cpu_comm: Any = None
+ _model_card_template: str = "text_causal_language_modeling_model_card_template.md"
+ _summary_card_template: str = (
+ "text_causal_language_modeling_experiment_summary_card_template.md"
+ )
+
+ def __post_init__(self):
+ super().__post_init__()
+ self._possible_values["gpus"] = possible_values.String(
+ values=tuple(
+ [(str(x), f"GPU #{x+1}") for x in range(torch.cuda.device_count())]
+ ),
+ allow_custom=False,
+ )
+
+ self._possible_values["mixed_precision_dtype"] = possible_values.String(
+ values=("bfloat16", "float16"),
+ allow_custom=False,
+ )
+
+ self._possible_values["number_of_workers"] = (1, multiprocessing.cpu_count(), 1)
+ self._possible_values["seed"] = possible_values.Number(step=1, min=-1)
+ self._possible_values["deepspeed_method"] = ["ZeRO2", "ZeRO3"]
+ self._possible_values["deepspeed_allgather_bucket_size"] = (
+ possible_values.Number(step=1, min=1e6)
+ )
+ self._possible_values["deepspeed_reduce_bucket_size"] = possible_values.Number(
+ step=1, min=1e6
+ )
+ self._possible_values["deepspeed_stage3_prefetch_bucket_size"] = (
+ possible_values.Number(step=1, min=1e6)
+ )
+ self._possible_values["deepspeed_stage3_param_persistence_threshold"] = (
+ possible_values.Number(step=1, min=1e6)
+ )
+ self._possible_values["deepspeed_stage3_max_live_parameters"] = (
+ possible_values.Number(step=1, min=1e6)
+ )
+ self._possible_values["deepspeed_stage3_max_reuse_distance"] = (
+ possible_values.Number(step=1, min=1e6)
+ )
+
+ self._nesting.add(
+ [
+ "mixed_precision_dtype",
+ ],
+ [Dependency(key="mixed_precision", value=True, is_set=True)],
+ )
+ self._nesting.add(
+ [
+ "deepspeed_method",
+ "deepspeed_reduce_bucket_size",
+ ],
+ [Dependency(key="use_deepspeed", value=True, is_set=True)],
+ )
+ self._nesting.add(
+ [
+ "deepspeed_allgather_bucket_size",
+ ],
+ [
+ Dependency(key="use_deepspeed", value=True, is_set=True),
+ Dependency(key="deepspeed_method", value="ZeRO2", is_set=True),
+ ],
+ )
+ self._nesting.add(
+ [
+ "deepspeed_stage3_prefetch_bucket_size",
+ "deepspeed_stage3_param_persistence_threshold",
+ # "deepspeed_offload_optimizer",
+ ],
+ [
+ Dependency(key="use_deepspeed", value=True, is_set=True),
+ Dependency(key="deepspeed_method", value="ZeRO3", is_set=True),
+ ],
+ )
+ # self._nesting.add(
+ # [
+ # "deepspeed_stage3_max_live_parameters",
+ # "deepspeed_stage3_max_reuse_distance",
+ # ],
+ # [Dependency(key="deepspeed_offload_optimizer", value=False, is_set=False)], # noqa: E501
+ # )
+
+
+@dataclass
+class ConfigNLPCausalLMLogging(DefaultConfig):
+ log_step_size: str = "absolute"
+ log_all_ranks: bool = False
+ logger: str = "None"
+ neptune_project: str = ""
+ wandb_project: str = ""
+ wandb_entity: str = ""
+ _neptune_debug: bool = False
+
+ plots_class: Any = text_causal_language_modeling_plots.Plots
+
+ # the actual logger, will be set dynamically at runtime
+ _logger: Any = None
+
+ def __post_init__(self):
+ super().__post_init__()
+ self._possible_values["log_step_size"] = possible_values.String(
+ values=(
+ ("absolute", "Absolute"),
+ ("relative", "Relative"),
+ ),
+ allow_custom=False,
+ )
+ self._possible_values["logger"] = ExternalLoggers.names()
+
+ self._nesting.add(
+ ["neptune_project"],
+ [Dependency(key="logger", value="Neptune", is_set=True)],
+ )
+ self._nesting.add(
+ ["wandb_project", "wandb_entity"],
+ [Dependency(key="logger", value="W&B", is_set=True)],
+ )
+
+ self._visibility["plots_class"] = -1
+
+
+@dataclass
+class ConfigProblemBase(DefaultConfigProblemBase):
+ output_directory: str = f"output/{os.path.basename(__file__).split('.')[0]}"
+ experiment_name: str = field(default_factory=generate_experiment_name)
+ llm_backbone: str = (
+ "h2oai/h2o-danube3-500m-base"
+ if "h2oai/h2o-danube3-500m-base" in default_cfg.default_causal_language_models
+ else default_cfg.default_causal_language_models[0]
+ )
+
+ dataset: ConfigNLPCausalLMDataset = field(default_factory=ConfigNLPCausalLMDataset)
+ tokenizer: ConfigNLPCausalLMTokenizer = field(
+ default_factory=ConfigNLPCausalLMTokenizer
+ )
+ architecture: ConfigNLPCausalLMArchitecture = field(
+ default_factory=ConfigNLPCausalLMArchitecture
+ )
+ training: ConfigNLPCausalLMTraining = field(
+ default_factory=ConfigNLPCausalLMTraining
+ )
+ augmentation: ConfigNLPAugmentation = field(default_factory=ConfigNLPAugmentation)
+ prediction: ConfigNLPCausalLMPrediction = field(
+ default_factory=ConfigNLPCausalLMPrediction
+ )
+ environment: ConfigNLPCausalLMEnvironment = field(
+ default_factory=ConfigNLPCausalLMEnvironment
+ )
+ logging: ConfigNLPCausalLMLogging = field(default_factory=ConfigNLPCausalLMLogging)
+
+ def __post_init__(self):
+ super().__post_init__()
+
+ self._visibility["output_directory"] = -1
+
+ self._possible_values["llm_backbone"] = possible_values.String(
+ values=default_cfg.default_causal_language_models,
+ allow_custom=True,
+ )
+
+ def check(self) -> Dict[str, List]:
+ # Define returned dictionary of errors/warnings
+ errors: Dict[str, List] = {"title": [], "message": [], "type": []}
+ logger.debug("Checking for common errors in the configuration.")
+ try:
+ sanity_check(self)
+ except AssertionError as exception:
+ logger.error(f"Experiment start. Sanity check failed: {exception}")
+ logger.error(f"Error while validating data: {exception}", exc_info=True)
+ # Remove end-of-line from exception
+ exception_str = str(exception).replace("\n", " ")
+ errors["title"] += ["Dataset Validation Error"]
+ errors["message"] += [exception_str]
+ errors["type"].append("error")
+
+ if self.prediction.temperature > 0 and not self.prediction.do_sample:
+ errors["title"] += ["Do sample needs to be enabled for temperature > 0"]
+ errors["message"] += [
+ "Please enable do sample if you want to use temperature > 0."
+ ]
+ errors["type"].append("warning")
+ if self.prediction.temperature == 0 and self.prediction.do_sample:
+ errors["title"] += ["Temperature needs to be > 0 for do sample"]
+ errors["message"] += [
+ "Please increase temperature if you want to use do sample."
+ ]
+ errors["type"].append("warning")
+ return errors
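+
+# e.g. prediction.temperature=0.3 with prediction.do_sample=False triggers the
+# first warning above (greedy/beam search ignores temperature), while
+# do_sample=True with temperature=0 triggers the second.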
diff --git a/llm_studio/python_configs/text_causal_regression_modeling_config.py b/llm_studio/python_configs/text_causal_regression_modeling_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f6f448c4750c0694835a5f1cb3fe922ef79f014
--- /dev/null
+++ b/llm_studio/python_configs/text_causal_regression_modeling_config.py
@@ -0,0 +1,173 @@
+import os
+from dataclasses import dataclass, field
+from typing import Any, Dict, List, Tuple
+
+import llm_studio.src.datasets.text_causal_regression_ds
+import llm_studio.src.plots.text_causal_classification_modeling_plots
+from llm_studio.app_utils.config import default_cfg
+from llm_studio.python_configs.base import DefaultConfig, DefaultConfigProblemBase
+from llm_studio.python_configs.text_causal_classification_modeling_config import (
+ ConfigNLPCausalClassificationAugmentation as ConfigNLPCausalRegressionAugmentation,
+)
+from llm_studio.python_configs.text_causal_classification_modeling_config import (
+ ConfigNLPCausalClassificationDataset,
+)
+from llm_studio.python_configs.text_causal_classification_modeling_config import (
+ ConfigNLPCausalClassificationLogging as ConfigNLPCausalRegressionLogging,
+)
+from llm_studio.python_configs.text_causal_classification_modeling_config import (
+ ConfigNLPCausalClassificationTokenizer as ConfigNLPCausalRegressionTokenizer,
+)
+from llm_studio.python_configs.text_causal_classification_modeling_config import (
+ ConfigNLPCausalClassificationTraining,
+)
+from llm_studio.python_configs.text_causal_language_modeling_config import (
+ ConfigNLPCausalLMArchitecture,
+ ConfigNLPCausalLMEnvironment,
+)
+from llm_studio.src import possible_values
+from llm_studio.src.losses import text_causal_regression_modeling_losses
+from llm_studio.src.metrics import text_causal_regression_modeling_metrics
+from llm_studio.src.models import text_causal_regression_modeling_model
+from llm_studio.src.utils.modeling_utils import generate_experiment_name
+
+
+@dataclass
+class ConfigNLPCausalRegressionDataset(ConfigNLPCausalClassificationDataset):
+ dataset_class: Any = llm_studio.src.datasets.text_causal_regression_ds.CustomDataset
+ num_classes: int = 1
+
+ def __post_init__(self):
+ self.prompt_column = (
+ tuple(
+ self.prompt_column,
+ )
+ if isinstance(self.prompt_column, str)
+ else tuple(self.prompt_column)
+ )
+ super().__post_init__()
+
+ self._visibility["num_classes"] = -1
+
+
+@dataclass
+class ConfigNLPCausalRegressionTraining(ConfigNLPCausalClassificationTraining):
+ loss_class: Any = text_causal_regression_modeling_losses.Losses
+ loss_function: str = "MSELoss"
+
+ learning_rate: float = 0.0001
+ differential_learning_rate_layers: Tuple[str, ...] = ("regression_head",)
+ differential_learning_rate: float = 0.00001
+
+ def __post_init__(self):
+ super().__post_init__()
+ self._possible_values["loss_function"] = self.loss_class.names()
+
+ self._possible_values["differential_learning_rate_layers"] = (
+ possible_values.String(
+ values=("backbone", "embed", "regression_head"),
+ allow_custom=False,
+ placeholder="Select optional layers...",
+ )
+ )
+
+
+@dataclass
+class ConfigNLPCausalRegressionArchitecture(ConfigNLPCausalLMArchitecture):
+ model_class: Any = text_causal_regression_modeling_model.Model
+
+ def __post_init__(self):
+ super().__post_init__()
+
+
+@dataclass
+class ConfigNLPCausalRegressionPrediction(DefaultConfig):
+ metric_class: Any = text_causal_regression_modeling_metrics.Metrics
+ metric: str = "MSE"
+ batch_size_inference: int = 0
+
+ def __post_init__(self):
+ super().__post_init__()
+
+ self._possible_values["metric"] = self.metric_class.names()
+ self._possible_values["batch_size_inference"] = (0, 512, 1)
+
+ self._visibility["metric_class"] = -1
+
+
+@dataclass
+class ConfigNLPCausalRegressionEnvironment(ConfigNLPCausalLMEnvironment):
+ _model_card_template: str = "text_causal_regression_model_card_template.md"
+ _summary_card_template: str = (
+ "text_causal_regression_experiment_summary_card_template.md"
+ )
+
+ def __post_init__(self):
+ super().__post_init__()
+
+
+@dataclass
+class ConfigProblemBase(DefaultConfigProblemBase):
+ output_directory: str = f"output/{os.path.basename(__file__).split('.')[0]}"
+ experiment_name: str = field(default_factory=generate_experiment_name)
+ llm_backbone: str = (
+ "h2oai/h2o-danube3-500m-chat"
+ if "h2oai/h2o-danube3-500m-chat" in default_cfg.default_causal_language_models
+ else default_cfg.default_causal_language_models[0]
+ )
+
+ dataset: ConfigNLPCausalRegressionDataset = field(
+ default_factory=ConfigNLPCausalRegressionDataset
+ )
+ tokenizer: ConfigNLPCausalRegressionTokenizer = field(
+ default_factory=ConfigNLPCausalRegressionTokenizer
+ )
+ architecture: ConfigNLPCausalRegressionArchitecture = field(
+ default_factory=ConfigNLPCausalRegressionArchitecture
+ )
+ training: ConfigNLPCausalRegressionTraining = field(
+ default_factory=ConfigNLPCausalRegressionTraining
+ )
+ augmentation: ConfigNLPCausalRegressionAugmentation = field(
+ default_factory=ConfigNLPCausalRegressionAugmentation
+ )
+ prediction: ConfigNLPCausalRegressionPrediction = field(
+ default_factory=ConfigNLPCausalRegressionPrediction
+ )
+ environment: ConfigNLPCausalRegressionEnvironment = field(
+ default_factory=ConfigNLPCausalRegressionEnvironment
+ )
+ logging: ConfigNLPCausalRegressionLogging = field(
+ default_factory=ConfigNLPCausalRegressionLogging
+ )
+
+ def __post_init__(self):
+ super().__post_init__()
+
+ self._visibility["output_directory"] = -1
+
+ self._possible_values["llm_backbone"] = possible_values.String(
+ values=default_cfg.default_causal_language_models,
+ allow_custom=True,
+ )
+
+ def check(self) -> Dict[str, List]:
+ errors: Dict[str, List] = {"title": [], "message": [], "type": []}
+
+ if isinstance(self.dataset.answer_column, str):
+ errors["title"].append("Invalid answer_column type")
+ errors["message"].append(
+ "Providing the answer_column as a string is deprecated. "
+ "Please provide the answer_column as a list."
+ )
+ errors["type"].append("deprecated")
+ self.dataset.answer_column = [self.dataset.answer_column]
+
+ if self.dataset.parent_id_column not in ["None", None]:
+ errors["title"] += ["Parent ID column is not supported for regression"]
+ errors["message"] += [
+ "Parent ID column is not supported for regression datasets."
+ ]
+ errors["type"].append("error")
+
+ return errors
diff --git a/llm_studio/python_configs/text_dpo_modeling_config.py b/llm_studio/python_configs/text_dpo_modeling_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..06cc7c5d0cd40a9a0f302548981d7e6066bc3b0b
--- /dev/null
+++ b/llm_studio/python_configs/text_dpo_modeling_config.py
@@ -0,0 +1,139 @@
+import os
+from dataclasses import dataclass, field
+from typing import Any
+
+import llm_studio.src.datasets.text_dpo_modeling_ds
+from llm_studio.app_utils.config import default_cfg
+from llm_studio.python_configs.base import DefaultConfigProblemBase
+from llm_studio.python_configs.text_causal_language_modeling_config import (
+ ConfigNLPAugmentation,
+ ConfigNLPCausalLMArchitecture,
+ ConfigNLPCausalLMDataset,
+ ConfigNLPCausalLMEnvironment,
+ ConfigNLPCausalLMLogging,
+ ConfigNLPCausalLMPrediction,
+ ConfigNLPCausalLMTokenizer,
+ ConfigNLPCausalLMTraining,
+)
+from llm_studio.src import possible_values
+from llm_studio.src.losses import text_dpo_modeling_losses
+from llm_studio.src.models import text_dpo_modeling_model
+from llm_studio.src.nesting import Dependency
+from llm_studio.src.plots import text_dpo_modeling_plots
+from llm_studio.src.utils.modeling_utils import generate_experiment_name
+
+
+@dataclass
+class ConfigDPODataset(ConfigNLPCausalLMDataset):
+ dataset_class: Any = llm_studio.src.datasets.text_dpo_modeling_ds.CustomDataset
+ # Always have full chat history.
+ # Chosen/Rejected prompt are only at the end of a conversation.
+ limit_chained_samples: bool = True
+
+ rejected_prompt_column: str = "None"
+ answer_column: str = "chosen_response"
+ rejected_answer_column: str = "rejected_response"
+
+ def __post_init__(self):
+ super().__post_init__()
+ self._possible_values["rejected_prompt_column"] = possible_values.Columns(
+ prefer_with=lambda column: column
+ in (
+ "rejected_input",
+ "rejected_prompt",
+ "rejected_instruction",
+ "rejected_question",
+ ),
+ add_none=True,
+ )
+ self._possible_values["rejected_answer_column"] = possible_values.Columns(
+ prefer_with=lambda column: column
+ in (
+ "rejected_answer",
+ "rejected_response",
+ "rejected",
+ )
+ )
+
+ self._visibility["limit_chained_samples"] = -1
+ self._visibility["mask_prompt_labels"] = -1
+ self._order.insert("rejected_prompt_column", after="prompt_column")
+ self._order.insert("rejected_answer_column", after="answer_column")
+
+
+@dataclass
+class ConfigDPOTraining(ConfigNLPCausalLMTraining):
+ learning_rate: float = 1e-4 # relatively high, as we use LoRA
+ beta: float = 0.2
+ simpo_gamma: float = 1.0
+ gradient_clip: float = 10.0
+ loss_class: Any = text_dpo_modeling_losses.Losses
+ loss_function: str = "DPOLoss"
+ optimizer: str = "AdamW"
+ # Needs to be enabled, as we need logits from the original model; see the forward pass
+ lora: bool = True
+
+ def __post_init__(self):
+ super().__post_init__()
+ self._possible_values["beta"] = possible_values.Number(0.05, 1.0, 0.05)
+ self._possible_values["simpo_gamma"] = possible_values.Number(0.05, 2.0, 0.05)
+
+ self._grid_search_values["loss_function"] = None
+ self._grid_search_values["beta"] = (0.1, 0.15, 0.20, 0.25, 0.4, 0.5)
+ self._grid_search_values["simpo_gamma"] = (0.5, 0.75, 1, 1.25, 1.5, 1.75, 2)
+
+ self._grid_search_iscustom["beta"] = True
+ self._grid_search_iscustom["simpo_gamma"] = True
+
+ self._nesting.add(
+ ["simpo_gamma"],
+ [Dependency(key="loss_function", value="SimPOLoss", is_set=True)],
+ )
+
+ self._order.insert("beta", after="learning_rate")
+ self._order.insert("simpo_gamma", after="beta")
+
+
+@dataclass
+class ConfigDPOArchitecture(ConfigNLPCausalLMArchitecture):
+ model_class: Any = text_dpo_modeling_model.Model
+
+
+@dataclass
+class ConfigDPOPLogging(ConfigNLPCausalLMLogging):
+ plots_class: Any = text_dpo_modeling_plots.Plots
+
+
+@dataclass
+class ConfigProblemBase(DefaultConfigProblemBase):
+ output_directory: str = f"output/{os.path.basename(__file__).split('.')[0]}"
+ experiment_name: str = field(default_factory=generate_experiment_name)
+
+ llm_backbone: str = (
+ "h2oai/h2o-danube3-500m-chat"
+ if "h2oai/h2o-danube3-500m-chat" in default_cfg.default_causal_language_models
+ else default_cfg.default_causal_language_models[0]
+ )
+
+ dataset: ConfigDPODataset = field(default_factory=ConfigDPODataset)
+ tokenizer: ConfigNLPCausalLMTokenizer = field(
+ default_factory=ConfigNLPCausalLMTokenizer
+ )
+ architecture: ConfigDPOArchitecture = field(default_factory=ConfigDPOArchitecture)
+ training: ConfigDPOTraining = field(default_factory=ConfigDPOTraining)
+ augmentation: ConfigNLPAugmentation = field(default_factory=ConfigNLPAugmentation)
+ prediction: ConfigNLPCausalLMPrediction = field(
+ default_factory=ConfigNLPCausalLMPrediction
+ )
+ environment: ConfigNLPCausalLMEnvironment = field(
+ default_factory=ConfigNLPCausalLMEnvironment
+ )
+ logging: ConfigDPOPLogging = field(default_factory=ConfigDPOPLogging)
+
+ def __post_init__(self):
+ super().__post_init__()
+ self._visibility["output_directory"] = -1
+ self._possible_values["llm_backbone"] = possible_values.String(
+ values=default_cfg.default_causal_language_models,
+ allow_custom=True,
+ )
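+
+
+# Illustrative sketch (not part of the original change): the sigmoid DPO
+# objective these settings feed into (Rafailov et al., 2023), using the beta
+# configured above. All log-probability numbers are hypothetical.
+if __name__ == "__main__":
+    import torch
+    import torch.nn.functional as F
+
+    beta = ConfigDPOTraining().beta
+    # summed log-probs of chosen/rejected answers under policy and reference
+    pi_chosen, pi_rejected = torch.tensor(-5.0), torch.tensor(-9.0)
+    ref_chosen, ref_rejected = torch.tensor(-6.0), torch.tensor(-8.0)
+    margin = (pi_chosen - ref_chosen) - (pi_rejected - ref_rejected)
+    loss = -F.logsigmoid(beta * margin)  # small when chosen is preferred
+    print(f"DPO loss: {loss.item():.4f}")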
diff --git a/llm_studio/python_configs/text_sequence_to_sequence_modeling_config.py b/llm_studio/python_configs/text_sequence_to_sequence_modeling_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc7e18723f4f06e18aab49d98f99fc7aaec3ac77
--- /dev/null
+++ b/llm_studio/python_configs/text_sequence_to_sequence_modeling_config.py
@@ -0,0 +1,119 @@
+import os
+from dataclasses import dataclass, field
+from typing import Any, Dict, List
+
+from llm_studio.app_utils.config import default_cfg
+from llm_studio.python_configs.base import DefaultConfigProblemBase
+from llm_studio.python_configs.text_causal_language_modeling_config import (
+ ConfigNLPAugmentation,
+ ConfigNLPCausalLMArchitecture,
+ ConfigNLPCausalLMDataset,
+ ConfigNLPCausalLMEnvironment,
+ ConfigNLPCausalLMLogging,
+ ConfigNLPCausalLMPrediction,
+ ConfigNLPCausalLMTokenizer,
+ ConfigNLPCausalLMTraining,
+)
+from llm_studio.src import possible_values
+from llm_studio.src.models import text_sequence_to_sequence_modeling_model
+from llm_studio.src.utils.modeling_utils import generate_experiment_name
+
+
+@dataclass
+class ConfigNLPSeq2SeqDataset(ConfigNLPCausalLMDataset):
+ text_system_start: str = ""
+ text_prompt_start: str = ""
+ text_answer_separator: str = ""
+
+ limit_chained_samples: bool = False
+ add_eos_token_to_system: bool = True
+ add_eos_token_to_prompt: bool = True
+ add_eos_token_to_answer: bool = True
+ mask_prompt_labels: bool = True
+
+ def __post_init__(self):
+ # wrap a single column name in a one-element tuple;
+ # tuple("col") would split the string into characters
+ self.prompt_column = (
+ (self.prompt_column,)
+ if isinstance(self.prompt_column, str)
+ else tuple(self.prompt_column)
+ )
+ super().__post_init__()
+
+ self._visibility["limit_chained_samples"] = -1
+ self._visibility["mask_prompt_labels"] = -1
+
+
+@dataclass
+class ConfigNLPSeq2SeqArchitecture(ConfigNLPCausalLMArchitecture):
+ model_class: Any = text_sequence_to_sequence_modeling_model.Model
+ backbone_dtype: str = "bfloat16"
+
+ def __post_init__(self):
+ super().__post_init__()
+
+
+@dataclass
+class ConfigNLPSeq2SeqEnvironment(ConfigNLPCausalLMEnvironment):
+ mixed_precision: bool = False
+
+ _model_card_template: str = (
+ "text_sequence_to_sequence_modeling_model_card_template.md"
+ )
+ _summary_card_template: str = (
+ "text_sequence_to_sequence_modeling_experiment_summary_card_template.md"
+ )
+
+ def __post_init__(self):
+ super().__post_init__()
+
+
+@dataclass
+class ConfigProblemBase(DefaultConfigProblemBase):
+ output_directory: str = f"output/{os.path.basename(__file__).split('.')[0]}"
+ experiment_name: str = field(default_factory=generate_experiment_name)
+ llm_backbone: str = (
+ "t5-small"
+ if "t5-small" in default_cfg.default_sequence_to_sequence_models
+ else default_cfg.default_sequence_to_sequence_models[0]
+ )
+
+ dataset: ConfigNLPSeq2SeqDataset = field(default_factory=ConfigNLPSeq2SeqDataset)
+ tokenizer: ConfigNLPCausalLMTokenizer = field(
+ default_factory=ConfigNLPCausalLMTokenizer
+ )
+ architecture: ConfigNLPSeq2SeqArchitecture = field(
+ default_factory=ConfigNLPSeq2SeqArchitecture
+ )
+ training: ConfigNLPCausalLMTraining = field(
+ default_factory=ConfigNLPCausalLMTraining
+ )
+ augmentation: ConfigNLPAugmentation = field(default_factory=ConfigNLPAugmentation)
+ prediction: ConfigNLPCausalLMPrediction = field(
+ default_factory=ConfigNLPCausalLMPrediction
+ )
+ environment: ConfigNLPSeq2SeqEnvironment = field(
+ default_factory=ConfigNLPSeq2SeqEnvironment
+ )
+ logging: ConfigNLPCausalLMLogging = field(default_factory=ConfigNLPCausalLMLogging)
+
+ def __post_init__(self):
+ super().__post_init__()
+
+ self._visibility["output_directory"] = -1
+
+ self._possible_values["llm_backbone"] = possible_values.String(
+ values=default_cfg.default_sequence_to_sequence_models,
+ allow_custom=True,
+ )
+
+ def check(self) -> Dict[str, List]:
+ errors: Dict[str, List] = {"title": [], "message": [], "type": []}
+ if self.prediction.temperature > 0 and not self.prediction.do_sample:
+ errors["title"] += ["Do sample needs to be enabled for temperature > 0"]
+ errors["message"] += [
+ "Please enable do sample if you want to use temperature > 0."
+ ]
+ errors["type"].append("warning")
+ return errors
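+
+
+# Illustrative sketch (not part of the original change): check() flags
+# temperature > 0 with sampling disabled. Field values are hypothetical.
+if __name__ == "__main__":
+    cfg = ConfigProblemBase()
+    cfg.prediction.temperature = 0.7
+    cfg.prediction.do_sample = False
+    print(cfg.check()["title"])  # warns that do_sample must be enabled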
diff --git a/llm_studio/src/__init__.py b/llm_studio/src/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llm_studio/src/augmentations/__init__.py b/llm_studio/src/augmentations/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llm_studio/src/augmentations/nlp_aug.py b/llm_studio/src/augmentations/nlp_aug.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7fd2ed69f5765a6c9f947b03eda135698427f33
--- /dev/null
+++ b/llm_studio/src/augmentations/nlp_aug.py
@@ -0,0 +1,55 @@
+import logging
+from abc import abstractmethod
+from typing import Any, Dict
+
+import torch
+from torch import nn
+
+logger = logging.getLogger(__name__)
+
+
+class BaseNLPAug(nn.Module):
+ """Base class for NLP augmentation"""
+
+ def __init__(self, cfg: Any):
+ """
+ Args:
+ cfg: config with all the hyperparameters
+ """
+
+ super().__init__()
+ self.cfg = cfg
+
+ @abstractmethod
+ def forward(self, batch: Dict) -> Dict:
+ """Augmenting the batch.
+ This is an inplace operation.
+
+ Args:
+ batch: current batch
+
+ Returns:
+ augmented batch
+ """
+
+ if self.cfg.augmentation.token_mask_probability > 0:
+ input_ids = batch["input_ids"].clone()
+ # special_mask = ~batch["special_tokens_mask"].clone().bool()
+ mask = (
+ torch.bernoulli(
+ torch.full(
+ input_ids.shape,
+ float(self.cfg.augmentation.token_mask_probability),
+ )
+ )
+ .to(input_ids.device)
+ .bool()
+ # & special_mask
+ ).bool()
+ input_ids[mask] = self.cfg.tokenizer._tokenizer_mask_token_id
+ batch["input_ids"] = input_ids.clone()
+ batch["attention_mask"][mask] = 0
+ if batch["labels"].shape[1] == batch["input_ids"].shape[1]:
+ batch["labels"][mask] = -100
+
+ return batch
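+
+
+# Illustrative sketch (not part of the original change): token masking on a
+# dummy batch. The cfg stub below is hypothetical; real configs come from
+# llm_studio.python_configs.
+if __name__ == "__main__":
+    from types import SimpleNamespace
+
+    cfg = SimpleNamespace(
+        augmentation=SimpleNamespace(token_mask_probability=0.5),
+        tokenizer=SimpleNamespace(_tokenizer_mask_token_id=0),
+    )
+    batch = {
+        "input_ids": torch.arange(1, 11).reshape(1, 10),
+        "attention_mask": torch.ones(1, 10, dtype=torch.long),
+        "labels": torch.arange(1, 11).reshape(1, 10),
+    }
+    batch = BaseNLPAug(cfg)(batch)
+    # roughly half of the tokens are replaced by the mask id, attention is
+    # zeroed there, and the corresponding labels are set to -100
+    print(batch["input_ids"], batch["labels"])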
diff --git a/llm_studio/src/datasets/__init__.py b/llm_studio/src/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llm_studio/src/datasets/conversation_chain_handler.py b/llm_studio/src/datasets/conversation_chain_handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..9aa55b8f0ce70fac04612f404df41c1d00da257d
--- /dev/null
+++ b/llm_studio/src/datasets/conversation_chain_handler.py
@@ -0,0 +1,241 @@
+import logging
+from typing import Dict, List
+
+import numpy as np
+
+from llm_studio.src.datasets.text_utils import get_texts
+from llm_studio.src.utils.utils import PatchedAttribute
+
+logger = logging.getLogger(__name__)
+
+
+class ConversationChainHandler:
+ """
+ This class partitions the dataset into chains of conversations.
+ Each chain comprises a list of conversation rounds.
+ Each round within a conversation is represented as a triplet:
+ (system, prompt, answer).
+
+ The resulting structure of the chains depends on
+ the DataFrame's structure and configuration:
+
+ - Without a 'parent_id' in the DataFrame, each conversation chain is a single round.
+ So, for every `i`-th row in the DataFrame, 0 <= `i` < len(df),
+ the chain would look like: [(system_i, prompt_i, answer_i)]
+
+ - With a 'parent_id' in the DataFrame and
+ if `cfg.dataset.limit_chained_samples` is set to False,
+ each chain encapsulates all preceding conversations
+ for every `i`-th row in the DataFrame,
+ 0 <= `i` < len(df).
+ The resultant chain would take shape:
+ [(system_start_conversation_i,
+ prompt_start_conversation_i,
+ answer_start_conversation_i),
+ ...,
+ (system_i, prompt_i, answer_i)]
+
+ - With a 'parent_id' in the DataFrame and
+ if `cfg.dataset.limit_chained_samples` is set to True,
+ each conversation chain incorporates only full conversations.
+ The chain hence condenses into:
+ [(system_start_conversation_i,
+ prompt_start_conversation_i,
+ answer_start_conversation_i),
+ ...,
+ (system_end_conversation_i,
+ prompt_end_conversation_i,
+ answer_end_conversation_i)]
+ where `i` represents complete conversations only.
+ """
+
+ def __init__(
+ self,
+ df,
+ cfg,
+ ):
+ # Do not set self.cfg = cfg, as ConversationChainHandler
+ # will be used with PatchedAttribute context manager.
+ self.conversation_chain_ids = self.get_conversation_chain_ids(cfg, df)
+ self.prompts = get_texts(df, cfg)
+ self.answers = self.get_answers(df, cfg)
+ self.systems = self.get_systems(cfg, df)
+
+ def get_conversation_chain_ids(self, cfg, df):
+ """
+ Gets the conversation chain IDs for the given DataFrame.
+ E.g. if conversation_chain_ids = [[13, 44, 8], ...],
+ the first conversation chain consists of
+ [df.iloc[13], df.iloc[44], df.iloc[8]], where
+ - df.iloc[13] is the first round of the conversation,
+ - df.iloc[44] is the second round, and
+ - df.iloc[8] is the end of the conversation.
+ If limit_chained_samples is True, df.iloc[13] will have no parent_id,
+ i.e. it is the start of the conversation.
+ """
+ if (
+ cfg.dataset.parent_id_column in ["None", None]
+ # Handle the case where the train DataFrame has conversation chains,
+ # but the val DataFrame does not
+ or cfg.dataset.parent_id_column not in df.columns
+ ):
+ # no parent id column, so each triplet (system_i, prompt_i, answer_i)
+ # is a conversation chain
+ return [[idx] for idx in range(len(df))]
+
+ logging.info(f"Dataset columns:{df.columns}")
+ assert cfg.dataset.id_column in df.columns, (
+ "ID column is required for conversation chaining, \n"
+ "Set Id Column in the dataset configuration screen. \n"
+ f"Dataset has the following columns: {df.columns.to_list()}."
+ )
+ # sample and parent ids can have any dtype, such as str, int, float, etc.
+ # id column can be int, while parent_id column can be float
+ # (as some values are NaN) so we cast id to the same dtype
+ sample_ids = (
+ df[cfg.dataset.id_column]
+ .astype(df[cfg.dataset.parent_id_column].dtype)
+ .tolist()
+ )
+ parent_ids = df[cfg.dataset.parent_id_column].tolist()
+ # Some datasets may include parent ids that are not in the dataset.
+ sample_ids_set = set(sample_ids)
+ parent_ids = [idx if idx in sample_ids_set else "None" for idx in parent_ids]
+
+ id2parent_id = {
+ idx: parent_id
+ for idx, parent_id in zip(sample_ids, parent_ids)
+ if parent_id not in [None, "None"]
+ and (
+ not isinstance(parent_id, float)
+ or (not np.isnan(parent_id) and not np.isinf(parent_id))
+ )
+ }
+ if cfg.dataset.limit_chained_samples:
+ # conversation end ids are ids that never appear as a parent id
+ valid_parent_ids = set(id2parent_id.values())
+ conversation_end_ids = [
+ idx for idx in sample_ids if idx not in valid_parent_ids
+ ]
+ else:
+ conversation_end_ids = sample_ids
+ conversation_chain_ids = [
+ self.get_conversation_ids(id2parent_id, conversation_end_id)
+ for conversation_end_id in conversation_end_ids
+ ]
+ # map from df["id"] to enumeration index
+ dataframeid2idx = {id: idx for idx, id in enumerate(sample_ids)}
+ conversation_chain_ids = [
+ [dataframeid2idx[conversation_id] for conversation_id in conversation_ids]
+ for conversation_ids in conversation_chain_ids
+ ]
+ return conversation_chain_ids
+
+ def get_answers(self, df, cfg):
+ answer_column = cfg.dataset.answer_column
+ if isinstance(answer_column, (list, tuple)):
+ answers = []
+ for col in answer_column:
+ if col in df.columns:
+ answers.append(df[col].astype(str).tolist())
+ else:
+ answers.append(["" for _ in range(len(self.prompts))])
+ answers = [",".join(ans) for ans in zip(*answers)]
+ else:
+ if answer_column in df.columns:
+ answers = df[answer_column].astype(str).tolist()
+ else:
+ answers = ["" for _ in range(len(self.prompts))]
+ return answers
+
+ def get_systems(self, cfg, df):
+ if cfg.dataset.system_column != "None":
+ if cfg.dataset.system_column not in df.columns:
+ logger.warning(
+ f"System column '{cfg.dataset.system_column}' not found."
+ " Disabling functionality."
+ )
+ systems = ["" for _ in range(len(self.prompts))]
+ else:
+ systems = df[cfg.dataset.system_column].astype(str).tolist()
+ else:
+ systems = ["" for _ in range(len(self.prompts))]
+ return systems
+
+ @staticmethod
+ def get_conversation_ids(id2parent_id, end_id):
+ """
+ Gets the conversation chain that ends at a given conversation ID.
+ Args:
+ id2parent_id: A dictionary mapping each ID to its parent ID.
+ end_id: The ID of the last round of the conversation chain.
+ Returns:
+ A list of conversation IDs representing the conversation chain,
+ ordered from the first round of the conversation to end_id.
+ """
+ # prevent infinite loops in case
+ # of circular parent chains (dataframe issue)
+ loop_counter = 0
+
+ conversation_chain_ids = [end_id]
+ parent_id = end_id
+ while parent_id in id2parent_id:
+ loop_counter += 1
+
+ parent_id = id2parent_id[parent_id]
+ conversation_chain_ids = [parent_id] + conversation_chain_ids
+ if loop_counter > 1000:
+ raise ValueError(
+ f"Parent chain of sample with idx {end_id} "
+ f"exceeds max loop count of 1000. "
+ f"Please ensure that parent chain is not circular."
+ )
+ return conversation_chain_ids
+
+ def __len__(self):
+ return len(self.conversation_chain_ids)
+
+ def __getitem__(self, idx):
+ """
+ Gets a single conversation chain.
+ The conversation may be:
+ - a single (system, prompt, answer) round,
+ if cfg.dataset.parent_id_column == "None" or
+ the conversation has no parent_id
+ - a chain that may start in the middle of a conversation,
+ if the conversation is chained and
+ limit_chained_samples is set to False
+ - always a complete conversation, if the conversation is chained
+ and limit_chained_samples is True
+
+ """
+ prompts = [self.prompts[i] for i in self.conversation_chain_ids[idx]]
+ answers = [self.answers[i] for i in self.conversation_chain_ids[idx]]
+ systems = [self.systems[i] for i in self.conversation_chain_ids[idx]]
+ return {
+ "prompts": prompts,
+ "answers": answers,
+ "systems": systems,
+ }
+
+ def get_conversation_end_ids(self):
+ """
+ Gets the end conversation IDs for each conversation chain.
+ """
+ return [
+ conversation_chain[-1] for conversation_chain in self.conversation_chain_ids
+ ]
+
+
+def get_conversation_chains(
+ df, cfg, limit_chained_samples=True
+) -> List[Dict[str, List[str]]]:
+ with PatchedAttribute(cfg.dataset, "limit_chained_samples", limit_chained_samples):
+ conversation_chain_handler = ConversationChainHandler(df, cfg)
+ conversations = [
+ conversation
+ for conversation in conversation_chain_handler # type: ignore[attr-defined]
+ ]
+ return conversations
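+
+
+# Illustrative sketch (not part of the original change): chaining three rows
+# into a single conversation. The cfg stub is hypothetical; real configs
+# provide these dataset attributes.
+if __name__ == "__main__":
+    import pandas as pd
+    from types import SimpleNamespace
+
+    df = pd.DataFrame(
+        {
+            "id": [0, 1, 2],
+            "parent_id": [None, 0, 1],
+            "prompt": ["Hi", "How are you?", "Thanks"],
+            "answer": ["Hello!", "Great, and you?", "You're welcome."],
+        }
+    )
+    cfg = SimpleNamespace(
+        dataset=SimpleNamespace(
+            prompt_column="prompt",
+            answer_column="answer",
+            system_column="None",
+            parent_id_column="parent_id",
+            id_column="id",
+            limit_chained_samples=True,
+        )
+    )
+    handler = ConversationChainHandler(df, cfg)
+    # one chain covering all three rounds: [[0, 1, 2]]
+    print(handler.conversation_chain_ids, handler[0]["prompts"])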
diff --git a/llm_studio/src/datasets/text_causal_classification_ds.py b/llm_studio/src/datasets/text_causal_classification_ds.py
new file mode 100644
index 0000000000000000000000000000000000000000..77b75de6a5851bac8062a3d12514961792806815
--- /dev/null
+++ b/llm_studio/src/datasets/text_causal_classification_ds.py
@@ -0,0 +1,118 @@
+import logging
+from typing import Any, Dict
+
+import numpy as np
+import pandas as pd
+import torch
+
+from llm_studio.src.datasets.text_causal_language_modeling_ds import (
+ CustomDataset as TextCausalLanguageModelingCustomDataset,
+)
+from llm_studio.src.utils.exceptions import LLMDataException
+
+logger = logging.getLogger(__name__)
+
+
+class CustomDataset(TextCausalLanguageModelingCustomDataset):
+ def __init__(self, df: pd.DataFrame, cfg: Any, mode: str = "train"):
+ super().__init__(df=df, cfg=cfg, mode=mode)
+ check_for_non_int_answers(cfg, df)
+ self.answers_int = df[cfg.dataset.answer_column].astype(int).values
+ max_value = np.max(self.answers_int)
+ min_value = np.min(self.answers_int)
+
+ if 1 < cfg.dataset.num_classes <= max_value:
+ raise LLMDataException(
+ "Number of classes is smaller than max label "
+ f"{max_value}. Please increase the setting accordingly."
+ )
+ elif cfg.dataset.num_classes == 1 and max_value > 1:
+ raise LLMDataException(
+ "For binary classification, max label should be 1 but is "
+ f"{max_value}."
+ )
+ if min_value < 0:
+ raise LLMDataException(
+ "Labels should be non-negative but min label is " f"{min_value}."
+ )
+ if min_value != 0 or max_value != np.unique(self.answers_int).size - 1:
+ logger.warning(
+ "Labels should start at 0 and be continuous but are "
+ f"{sorted(np.unique(self.answers_int))}."
+ )
+
+ if cfg.dataset.parent_id_column != "None":
+ raise LLMDataException(
+ "Parent ID column is not supported for classification datasets."
+ )
+
+ def __getitem__(self, idx: int) -> Dict:
+ sample = super().__getitem__(idx)
+ sample["class_label"] = self.answers_int[idx]
+ return sample
+
+ def postprocess_output(self, cfg, df: pd.DataFrame, output: Dict) -> Dict:
+ output["logits"] = output["logits"].float()
+
+ if cfg.training.loss_function == "CrossEntropyLoss":
+ output["probabilities"] = torch.softmax(output["logits"], dim=-1)
+ else:
+ output["probabilities"] = torch.sigmoid(output["logits"])
+
+ if len(cfg.dataset.answer_column) == 1:
+ if cfg.dataset.num_classes == 1:
+ output["predictions"] = (output["probabilities"] > 0.5).long()
+ else:
+ output["predictions"] = output["probabilities"].argmax(
+ dim=-1, keepdim=True
+ )
+ else:
+ output["predictions"] = (output["probabilities"] > 0.5).long()
+
+ preds = []
+ for col in np.arange(output["probabilities"].shape[1]):
+ preds.append(
+ np.round(output["probabilities"][:, col].cpu().numpy(), 3).astype(str)
+ )
+ preds = [",".join(pred) for pred in zip(*preds)]
+ output["predicted_text"] = preds
+ return super().postprocess_output(cfg, df, output)
+
+ def clean_output(self, output, cfg):
+ return output
+
+ @classmethod
+ def sanity_check(cls, df: pd.DataFrame, cfg: Any, mode: str = "train"):
+
+ for answer_col in cfg.dataset.answer_column:
+ assert answer_col in df.columns, (
+ f"Answer column {answer_col} not found in the " f"{mode} DataFrame."
+ )
+ assert df.shape[0] == df[answer_col].dropna().shape[0], (
+ f"The {mode} DataFrame"
+ f" column {answer_col}"
+ " contains missing values."
+ )
+
+ check_for_non_int_answers(cfg, df)
+
+
+def check_for_non_int_answers(cfg, df):
+ answers_non_int: list = []
+ for column in cfg.dataset.answer_column:
+ answers_non_int.extend(
+ x for x in df[column].values if not is_castable_to_int(x)
+ )
+ if len(answers_non_int) > 0:
+ raise LLMDataException(
+ f"Column {cfg.dataset.answer_column} contains non int items. "
+ f"Sample values: {answers_non_int[:5]}."
+ )
+
+
+def is_castable_to_int(s):
+ try:
+ int(s)
+ return True
+ except ValueError:
+ return False
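+
+
+# Illustrative sketch (not part of the original change): what counts as
+# castable. int("1.5") raises ValueError, so float-like strings are rejected,
+# while actual floats such as 2.0 are accepted by int().
+if __name__ == "__main__":
+    print(is_castable_to_int("3"))    # True
+    print(is_castable_to_int(2.0))    # True, int(2.0) == 2
+    print(is_castable_to_int("1.5"))  # False, ValueError
+    print(is_castable_to_int("cat"))  # False, ValueError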
diff --git a/llm_studio/src/datasets/text_causal_language_modeling_ds.py b/llm_studio/src/datasets/text_causal_language_modeling_ds.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a094ea96ac493ee5bafea1a812e3624cf34fe8e
--- /dev/null
+++ b/llm_studio/src/datasets/text_causal_language_modeling_ds.py
@@ -0,0 +1,592 @@
+import codecs
+import collections.abc
+import logging
+from typing import Any, Dict, List, Tuple, Union
+
+import numpy as np
+import pandas as pd
+import torch
+from torch.utils.data import Dataset
+
+from llm_studio.src.datasets.conversation_chain_handler import ConversationChainHandler
+from llm_studio.src.datasets.text_utils import get_tokenizer
+
+logger = logging.getLogger(__name__)
+
+
+class CustomDataset(Dataset):
+ """Dataset for Causal Language modeling."""
+
+ def __init__(self, df: pd.DataFrame, cfg: Any, mode: str = "train"):
+ """
+ Args:
+ df: input DataFrame
+ cfg: config with all the hyperparameters
+ mode: dataset mode. One of {"train", "validation"}
+ """
+ self.cfg = cfg
+ self.mode = mode
+ self.df = df.copy()
+ self.tokenizer = get_tokenizer(self.cfg)
+ self.conversation_chain_handler = ConversationChainHandler(self.df, cfg)
+
+ def __len__(self) -> int:
+ return len(self.conversation_chain_handler)
+
+ def __getitem__(self, idx: int) -> Dict:
+ """Reads a single text observation."""
+ input_text_dict = self.conversation_chain_handler[idx]
+ input_text_dict["systems"] = [
+ self.parse_system(self.cfg, system) for system in input_text_dict["systems"]
+ ]
+ input_text_dict["prompts"] = [
+ self.parse_prompt(self.cfg, prompt) for prompt in input_text_dict["prompts"]
+ ]
+ input_text_dict["answers"] = [
+ self.parse_answer(self.cfg, answer) for answer in input_text_dict["answers"]
+ ]
+
+ sample = dict()
+ system_encoding, prompt_encodings, answer_encodings = self.get_encodings(
+ input_text_dict=input_text_dict
+ )
+
+ input_ids = torch.cat(
+ [
+ torch.cat([prompt_encoding, answer_encoding])
+ for prompt_encoding, answer_encoding in zip(
+ prompt_encodings, answer_encodings
+ )
+ ]
+ )
+
+ sample.update(self.get_labels(prompt_encodings, answer_encodings))
+ sample.update(
+ self.pad_tokens(
+ input_ids,
+ attention_mask=torch.ones_like(input_ids),
+ max_length=self.cfg.tokenizer.max_length,
+ pad_token_id=self.tokenizer.pad_token_id,
+ )
+ )
+
+ # get answer encodings
+ sample.update(
+ self.pad_tokens(
+ answer_encodings[-1],
+ attention_mask=torch.ones_like(answer_encodings[-1]),
+ max_length=self.cfg.tokenizer.max_length,
+ pad_token_id=self.tokenizer.pad_token_id,
+ direction="right",
+ prefix="answer_",
+ )
+ )
+
+ # Remove last answer from encoding to create the prompt for inference
+ answer_encodings[-1] = torch.empty(0)
+ prompt_input_ids = torch.cat(
+ [
+ torch.cat([prompt_encoding, answer_encoding])
+ for prompt_encoding, answer_encoding in zip(
+ prompt_encodings, answer_encodings
+ )
+ ]
+ )
+ sample.update(
+ self.pad_tokens(
+ prompt_input_ids,
+ attention_mask=torch.ones_like(prompt_input_ids),
+ max_length=self.cfg.tokenizer.max_length,
+ pad_token_id=self.tokenizer.pad_token_id,
+ prefix="prompt_",
+ )
+ )
+
+ return sample
+
+ @staticmethod
+ def parse_prompt(cfg: Any, prompt: str):
+ prompt = (
+ f"{codecs.decode(cfg.dataset.text_prompt_start, 'unicode_escape')}{prompt}"
+ )
+ if cfg.dataset.add_eos_token_to_prompt:
+ prompt += cfg.tokenizer._tokenizer_eos_token
+ prompt = (
+ f"{prompt}"
+ f"{codecs.decode(cfg.dataset.text_answer_separator, 'unicode_escape')}"
+ )
+ return prompt
+
+ @staticmethod
+ def parse_answer(cfg: Any, answer: str):
+ if cfg.dataset.add_eos_token_to_answer:
+ answer += cfg.tokenizer._tokenizer_eos_token
+ return answer
+
+ @staticmethod
+ def parse_system(cfg: Any, system: str):
+ # no system tokens if empty
+ if system == "":
+ return system
+ system = (
+ f"{codecs.decode(cfg.dataset.text_system_start, 'unicode_escape')}{system}"
+ )
+ if cfg.dataset.add_eos_token_to_system:
+ system += cfg.tokenizer._tokenizer_eos_token
+ return system
+
+ @staticmethod
+ def batch_to_device(
+ batch: Union[Dict, List, torch.Tensor], device: str
+ ) -> Union[Dict, List, torch.Tensor, str]:
+ """Function to send the batch to the device specified
+
+ Args:
+ batch: input batch
+ device: device to send the data to
+ Returns:
+ batch with the elements on the device specified
+ """
+ if isinstance(batch, torch.Tensor):
+ return batch.to(device)
+ elif isinstance(batch, (list, tuple)) and all(
+ isinstance(item, str) for item in batch
+ ):
+ # Do not move list of strings to device
+ return batch
+ elif isinstance(batch, collections.abc.Mapping):
+ return {
+ key: CustomDataset.batch_to_device(value, device)
+ for key, value in batch.items()
+ }
+ elif isinstance(batch, collections.abc.Sequence):
+ return [CustomDataset.batch_to_device(value, device) for value in batch]
+ else:
+ raise ValueError(f"Can not move {type(batch)} to device.")
+
+ @staticmethod
+ def preprocess_dataframe(df: pd.DataFrame, cfg: Any) -> pd.DataFrame:
+ """
+ Preprocesses the input dataframe
+
+ Args:
+ df: the full training dataframe
+ cfg: config
+ Returns:
+ the processed dataframe
+ """
+
+ def personalize(text):
+ text = text.replace("Open Assistant", cfg.dataset.chatbot_name)
+ text = text.replace("Open-Assistant", cfg.dataset.chatbot_name)
+ text = text.replace("open-assistant", cfg.dataset.chatbot_name)
+ text = text.replace("OpenAssistant", cfg.dataset.chatbot_name)
+ text = text.replace("open assistant", cfg.dataset.chatbot_name)
+ text = text.replace("Open Assistand", cfg.dataset.chatbot_name)
+ text = text.replace("Open Assitant", cfg.dataset.chatbot_name)
+ text = text.replace("Open Assistent", cfg.dataset.chatbot_name)
+ text = text.replace("Open Assisstant", cfg.dataset.chatbot_name)
+ text = text.replace("Open Assitent", cfg.dataset.chatbot_name)
+ text = text.replace("Open Assitiant", cfg.dataset.chatbot_name)
+ text = text.replace("Open Assistiant", cfg.dataset.chatbot_name)
+ text = text.replace("Open Assitan ", cfg.dataset.chatbot_name + " ")
+ text = text.replace("Open Assistan ", cfg.dataset.chatbot_name + " ")
+ text = text.replace("Open Asistant", cfg.dataset.chatbot_name)
+ text = text.replace("Open Assiant", cfg.dataset.chatbot_name)
+ text = text.replace("Assistant", cfg.dataset.chatbot_name)
+ text = text.replace("ChatGPT", cfg.dataset.chatbot_name)
+ text = text.replace("LAION AI", cfg.dataset.chatbot_author)
+ text = text.replace("LAION-AI", cfg.dataset.chatbot_author)
+ text = text.replace("LAION,", cfg.dataset.chatbot_author + ",")
+ text = text.replace("LAION.ai", cfg.dataset.chatbot_author)
+ text = text.replace("LAION.", cfg.dataset.chatbot_author + ".")
+ text = text.replace("LAION", cfg.dataset.chatbot_author)
+ text = text.replace("Laion AI", cfg.dataset.chatbot_author)
+ text = text.replace("OpenAI", cfg.dataset.chatbot_author)
+ text = text.replace("Open AI", cfg.dataset.chatbot_author)
+ text = text.replace("openai", cfg.dataset.chatbot_author)
+ text = text.replace("open ai", cfg.dataset.chatbot_author)
+
+ return text
+
+ if cfg.dataset.personalize:
+ for prompt_col in cfg.dataset.prompt_column:
+ df[prompt_col] = df[prompt_col].apply(personalize)
+ df[cfg.dataset.answer_column] = df[cfg.dataset.answer_column].apply(
+ personalize
+ )
+
+ return df
+
+ def get_train_collate_fn(self):
+ """
+ Returns the train batch collate function for the PyTorch DataLoader.
+ By default returns None, which uses the default PyTorch collate function.
+ """
+
+ return None
+
+ def get_validation_collate_fn(self):
+ """
+ Returns the validation batch collate function for the PyTorch DataLoader.
+ By default returns None, which uses the default PyTorch collate function.
+ """
+
+ return None
+
+ def postprocess_batch_predictions(self, output: Dict) -> Dict:
+ if "predicted_answer_ids" in output.keys():
+ predicted_text = [
+ self.tokenizer.decode(ids, skip_special_tokens=True).strip()
+ for ids in output["predicted_answer_ids"]
+ ]
+
+ output["predicted_text"] = np.array(predicted_text)
+ del output["predicted_answer_ids"]
+ return output
+
+ @staticmethod
+ def clean_output(
+ output: Dict,
+ cfg: Any,
+ ):
+ output["predicted_text"] = output["predicted_text"].tolist()
+ for j in range(len(output["predicted_text"])):
+ curr_text = output["predicted_text"][j].strip()
+ for stop_token in cfg.tokenizer._stop_words:
+ if curr_text.find(stop_token) != -1:
+ curr_text = curr_text[: curr_text.find(stop_token)]
+ output["predicted_text"][j] = curr_text.strip()
+
+ return output
+
+ def postprocess_output(self, cfg, df: pd.DataFrame, output: Dict) -> Dict:
+ if cfg.prediction.metric != "Perplexity":
+ output = self.clean_output(output, cfg)
+
+ output["target_text"] = self.conversation_chain_handler.answers
+
+ metric_func, _, _ = cfg.prediction.metric_class.get(cfg.prediction.metric)
+
+ if "GPT" in cfg.prediction.metric:
+ metrics, explanations = metric_func(
+ cfg,
+ output,
+ df,
+ raw_results=True,
+ )
+ output["explanations"] = explanations
+ else:
+ metrics = metric_func(
+ cfg,
+ output,
+ df,
+ )
+ output["metrics"] = metrics
+
+ return output
+
+ def format_output(
+ self, cfg, df: pd.DataFrame, output: Dict
+ ) -> Tuple[Dict, pd.DataFrame]:
+ output = {
+ key: value
+ for key, value in output.items()
+ if key not in ["loss", "target", "losses"]
+ }
+ output.pop("target_text", None)
+
+ # in case limit_chained_samples is True, only the last answer is predicted
+ end_conversation_ids = (
+ self.conversation_chain_handler.get_conversation_end_ids()
+ )
+
+ if "predicted_text" in output.keys():
+ output["predicted_text"] = np.array(output["predicted_text"])
+
+ if "logits" in output.keys():
+ output["logits"] = np.array(output["logits"].float())
+
+ if isinstance(cfg.dataset.prompt_column, tuple):
+ for col in cfg.dataset.prompt_column:
+ output[col] = df.loc[end_conversation_ids, col].values
+ else:
+ output[cfg.dataset.prompt_column] = df.loc[
+ end_conversation_ids, cfg.dataset.prompt_column
+ ].values
+
+ if "predicted_text" in output.keys():
+ col_name = cfg.dataset.answer_column
+ if isinstance(col_name, list):
+ col_name = ", ".join(col_name)
+ df[f"pred_{col_name}"] = (
+ "NO ANSWER GENERATED. "
+ "ONLY LAST ANSWER OF A CONVERSATION IS PREDICTED."
+ )
+ df.loc[end_conversation_ids, f"pred_{col_name}"] = output["predicted_text"]
+ return output, df
+
+ @classmethod
+ def sanity_check(cls, df: pd.DataFrame, cfg: Any, mode: str = "train"):
+ """
+ Quick check whether the DataFrame and configuration are correctly set up.
+ """
+ if cfg.dataset.parent_id_column != "None":
+ assert (
+ cfg.dataset.id_column != cfg.dataset.parent_id_column
+ ), "'Id Column' should be different from 'Parent column'"
+
+ if (
+ cfg.dataset.parent_id_column is not None
+ and cfg.dataset.parent_id_column in df.columns
+ and cfg.dataset.id_column in df.columns
+ ):
+ assert (
+ df[cfg.dataset.parent_id_column] != df[cfg.dataset.id_column]
+ ).all(), (
+ f"Parent id column:{cfg.dataset.parent_id_column}"
+ " is the same as id column for some rows"
+ )
+ assert (df[cfg.dataset.parent_id_column].fillna("") == "").sum() > 0, (
+ "Did not find any conversation chain. "
+ "Please ensure that some parent ids are empty."
+ "\n"
+ "Conversations are chained using parent id, "
+ "start conversation record should have empty parent id."
+ "\n"
+ f"Parent id column checked:{cfg.dataset.parent_id_column}"
+ )
+
+ assert cfg.dataset.answer_column in df.columns, (
+ f"Answer column {cfg.dataset.answer_column} not found in the "
+ f"{mode} DataFrame."
+ )
+ assert df.shape[0] == df[[cfg.dataset.answer_column]].dropna().shape[0], (
+ f"The {mode} DataFrame"
+ f" column {cfg.dataset.answer_column}"
+ " contains missing values."
+ )
+ if cfg.dataset.parent_id_column != "None":
+ assert (
+ cfg.dataset.id_column in df.columns
+ ), "When using Parent Column, set 'Id Column' in the previous screen. "
+
+ if (
+ cfg.dataset.parent_id_column != "None"
+ and df[cfg.dataset.parent_id_column].notna().any()
+ ):
+ # Comprehensive checks for conversation chaining
+ parent_id_list = df[cfg.dataset.parent_id_column].tolist()
+ id_list = df[cfg.dataset.id_column].tolist()
+ # Track if any valid parent_id is found in the id_list
+ found_valid_parent = False
+
+ # 1. Check if at least one parent_id element is present in id_list
+ for pid in parent_id_list:
+ if pid is not None and not pd.isna(pid) and pid in id_list:
+ found_valid_parent = True
+ break
+
+ # If no valid parent_id is found in the id_list, raise an error
+ if not found_valid_parent:
+ raise AssertionError(
+ "None of the Parent IDs in the 'Parent Id Column' were found "
+ "in the 'Id Column'. "
+ "Please ensure that at least one ID in 'Parent Id Column' "
+ "is present in the 'Id Column'. "
+ f"Checked 'Parent Id Column': '{cfg.dataset.parent_id_column}', "
+ f"and 'Id Column': '{cfg.dataset.id_column}'."
+ )
+ # 2. Check if all elements in id_list are unique
+ if len(id_list) != len(set(id_list)):
+ raise AssertionError("ID list contains duplicate values.")
+ # 3. Check if parent_id[i] is not the same as id_list[i] (self-referencing)
+ for i in range(len(id_list)):
+ if parent_id_list[i] == id_list[i]:
+ raise AssertionError(f"ID '{id_list[i]}' is self-referencing.")
+ # 4. Check if there is at least one root element (where parent_id is None)
+ if not (None in parent_id_list or pd.isna(parent_id_list).any()):
+ raise AssertionError(
+ "There must be at least one root element (with no parent). "
+ "Currently, all records have a parent."
+ )
+
+ # 5. Check for circular references
+ def find_ancestor(node, parent_map):
+ seen = set()
+ while node in parent_map:
+ if node in seen:
+ raise AssertionError(
+ f"Circular reference detected for ID '{node}'."
+ )
+ seen.add(node)
+ node = parent_map[node]
+ return False
+
+ parent_map = {
+ id_list[i]: parent_id_list[i]
+ for i in range(len(id_list))
+ if parent_id_list[i] is not None
+ }
+
+ for id_ in id_list:
+ find_ancestor(id_, parent_map)
+
+ def get_labels(self, prompt_encodings, answer_encodings):
+ labels = torch.cat(
+ [
+ torch.cat([prompt_encoding, answer_encoding])
+ for prompt_encoding, answer_encoding in zip(
+ prompt_encodings, answer_encodings
+ )
+ ]
+ ).clone()
+
+ if self.cfg.dataset.mask_prompt_labels:
+ masks = []
+ for idx, (prompt_encoding, answer_encoding) in enumerate(
+ zip(prompt_encodings, answer_encodings)
+ ):
+ if (
+ not self.cfg.dataset.only_last_answer
+ or idx == len(answer_encodings) - 1
+ ):
+ mask = [
+ torch.ones_like(prompt_encoding),
+ torch.zeros_like(answer_encoding),
+ ]
+ else:
+ mask = [
+ torch.ones_like(prompt_encoding),
+ torch.ones_like(answer_encoding),
+ ]
+ masks.append(torch.cat(mask))
+ masks = torch.cat(masks).to(torch.bool)
+ labels.masked_fill_(masks, -100)
+ if self.cfg.tokenizer.max_length < len(labels):
+ labels = labels[-self.cfg.tokenizer.max_length :]
+
+ sample = dict(labels=torch.full((self.cfg.tokenizer.max_length,), -100))
+ sample["labels"][-len(labels) :] = labels
+ return sample
+
+ def get_encodings(self, input_text_dict: Dict[str, List[str]]):
+ """
+ Get encodings for a single conversation history.
+ Args:
+ input_text_dict: A dictionary containing the input text for a single sample.
+ Contains the keys "systems", "prompts", "answers".
+ System may be an empty string.
+ """
+ encodings = [
+ self._get_sample_encoding(system, prompt, answer)
+ for idx, (system, prompt, answer) in enumerate(
+ zip(
+ input_text_dict["systems"],
+ input_text_dict["prompts"],
+ input_text_dict["answers"],
+ )
+ )
+ ]
+
+ if self.mode == "train":
+ encodings = self.augment_data(encodings)
+
+ system_encoding = encodings[0][0]
+ prompt_encodings = [encoding[1] for encoding in encodings]
+ answer_encodings = [encoding[2] for encoding in encodings]
+ # concatenate system encoding with root prompt encoding
+ prompt_encodings[0] = torch.cat([system_encoding, prompt_encodings[0]])
+ return (
+ system_encoding,
+ prompt_encodings,
+ answer_encodings,
+ )
+
+ def augment_data(self, encodings):
+ parent_encodings = encodings[:-1]
+ # randomly skip parent
+ parent_encodings = [
+ encoding
+ for idx, encoding in enumerate(parent_encodings)
+ if np.random.random() > self.cfg.augmentation.skip_parent_probability
+ ]
+ # randomly replace parent with another parent
+ if np.random.random() < self.cfg.augmentation.random_parent_probability:
+ idx = np.random.randint(len(self.conversation_chain_handler.prompts))
+ parent_encodings = [
+ self._get_sample_encoding(
+ self.parse_system(
+ self.cfg, self.conversation_chain_handler.systems[idx]
+ ),
+ self.parse_prompt(
+ self.cfg, self.conversation_chain_handler.prompts[idx]
+ ),
+ self.conversation_chain_handler.answers[idx],
+ )
+ ] + parent_encodings[1:]
+ encodings = parent_encodings + [encodings[-1]]
+ return encodings
+
+ def _get_sample_encoding(self, system: str, prompt: str, answer: str) -> List:
+ if len(system) > 0:
+ system_encoding = self.encode(
+ self.tokenizer, system, self.cfg.tokenizer.max_length, "right"
+ )["input_ids"]
+ else:
+ system_encoding = torch.empty(0)
+ prompt_encoding = self.encode(
+ self.tokenizer, prompt, self.cfg.tokenizer.max_length, "left"
+ )["input_ids"]
+ answer_encoding = self.encode(
+ self.tokenizer, answer, self.cfg.tokenizer.max_length, "right"
+ )["input_ids"]
+
+ return [system_encoding, prompt_encoding, answer_encoding]
+
+ @staticmethod
+ def pad_tokens(
+ input_ids,
+ attention_mask,
+ max_length,
+ pad_token_id,
+ direction="left",
+ prefix="",
+ ):
+ sample = {}
+
+ if max_length < len(input_ids):
+ logger.info(f"Input exceeds max_length of {max_length}, truncating sample.")
+ input_ids = input_ids[-max_length:]
+ attention_mask = attention_mask[-max_length:]
+
+ if len(input_ids) > 0:
+ if direction == "left":
+ sample[f"{prefix}input_ids"] = torch.full((max_length,), pad_token_id)
+ sample[f"{prefix}input_ids"][-len(input_ids) :] = input_ids
+ sample[f"{prefix}attention_mask"] = torch.zeros(max_length)
+ sample[f"{prefix}attention_mask"][-len(input_ids) :] = attention_mask
+ else:
+ sample[f"{prefix}input_ids"] = torch.full((max_length,), pad_token_id)
+ sample[f"{prefix}input_ids"][: len(input_ids)] = input_ids
+ sample[f"{prefix}attention_mask"] = torch.zeros(max_length)
+ sample[f"{prefix}attention_mask"][: len(input_ids)] = attention_mask
+ else:
+ # Pad everything if empty (continued pretraining)
+ sample[f"{prefix}input_ids"] = torch.full((max_length,), pad_token_id)
+ sample[f"{prefix}attention_mask"] = torch.zeros(max_length)
+
+ return sample
+
+ @staticmethod
+ def encode(tokenizer, text: str, max_length: int, truncation_side: str) -> Dict:
+ encodings = tokenizer(text, return_tensors="pt", add_special_tokens=False)
+ encodings["input_ids"] = encodings["input_ids"][0]
+ encodings["attention_mask"] = encodings["attention_mask"][0]
+ if truncation_side == "right":
+ encodings["input_ids"] = encodings["input_ids"][:max_length]
+ encodings["attention_mask"] = encodings["attention_mask"][:max_length]
+ else:
+ encodings["input_ids"] = encodings["input_ids"][-max_length:]
+ encodings["attention_mask"] = encodings["attention_mask"][-max_length:]
+ return encodings
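+
+
+# Illustrative sketch (not part of the original change): left- vs right-
+# padding with pad_tokens. Values are hypothetical.
+if __name__ == "__main__":
+    ids = torch.tensor([5, 6, 7])
+    mask = torch.ones_like(ids)
+    left = CustomDataset.pad_tokens(ids, mask, max_length=5, pad_token_id=0)
+    # input_ids -> [0, 0, 5, 6, 7], attention_mask -> [0, 0, 1, 1, 1]
+    right = CustomDataset.pad_tokens(
+        ids, mask, max_length=5, pad_token_id=0, direction="right", prefix="answer_"
+    )
+    # answer_input_ids -> [5, 6, 7, 0, 0], answer_attention_mask -> [1, 1, 1, 0, 0]
+    print(left["input_ids"], right["answer_input_ids"])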
diff --git a/llm_studio/src/datasets/text_causal_regression_ds.py b/llm_studio/src/datasets/text_causal_regression_ds.py
new file mode 100644
index 0000000000000000000000000000000000000000..db9624873d2e0ebbb888e8e6abf7a8f026cda4e6
--- /dev/null
+++ b/llm_studio/src/datasets/text_causal_regression_ds.py
@@ -0,0 +1,55 @@
+import logging
+from typing import Any, Dict
+
+import numpy as np
+import pandas as pd
+
+from llm_studio.src.datasets.text_causal_language_modeling_ds import (
+ CustomDataset as TextCausalLanguageModelingCustomDataset,
+)
+from llm_studio.src.utils.exceptions import LLMDataException
+
+logger = logging.getLogger(__name__)
+
+
+class CustomDataset(TextCausalLanguageModelingCustomDataset):
+ def __init__(self, df: pd.DataFrame, cfg: Any, mode: str = "train"):
+ super().__init__(df=df, cfg=cfg, mode=mode)
+ self.answers_float = df[cfg.dataset.answer_column].astype(float).values
+
+ if cfg.dataset.parent_id_column != "None":
+ raise LLMDataException(
+ "Parent ID column is not supported for regression datasets."
+ )
+
+ def __getitem__(self, idx: int) -> Dict:
+ sample = super().__getitem__(idx)
+ sample["class_label"] = self.answers_float[idx]
+ return sample
+
+ def postprocess_output(self, cfg, df: pd.DataFrame, output: Dict) -> Dict:
+ output["predictions"] = output["predictions"].float()
+ preds = []
+ for col in np.arange(len(cfg.dataset.answer_column)):
+ preds.append(
+ np.round(output["predictions"][:, col].cpu().numpy(), 3).astype(str)
+ )
+ preds = [",".join(pred) for pred in zip(*preds)]
+ output["predicted_text"] = preds
+ return super().postprocess_output(cfg, df, output)
+
+ def clean_output(self, output, cfg):
+ return output
+
+ @classmethod
+ def sanity_check(cls, df: pd.DataFrame, cfg: Any, mode: str = "train"):
+
+ for answer_col in cfg.dataset.answer_column:
+ assert answer_col in df.columns, (
+ f"Answer column {answer_col} not found in the " f"{mode} DataFrame."
+ )
+ assert df.shape[0] == df[answer_col].dropna().shape[0], (
+ f"The {mode} DataFrame"
+ f" column {answer_col}"
+ " contains missing values."
+ )
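+
+
+# Note (illustrative, not part of the original change): with two answer
+# columns and a predictions tensor of [[0.1234, 2.5]], postprocess_output
+# above yields predicted_text == ["0.123,2.5"], i.e. per-row values rounded
+# to three decimals and comma-joined.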
diff --git a/llm_studio/src/datasets/text_dpo_modeling_ds.py b/llm_studio/src/datasets/text_dpo_modeling_ds.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c58b110504f5aacc7f5225a06b19ddaa7231fc3
--- /dev/null
+++ b/llm_studio/src/datasets/text_dpo_modeling_ds.py
@@ -0,0 +1,117 @@
+import logging
+from typing import Any, Dict
+
+import pandas as pd
+import torch
+
+import llm_studio.src.datasets.text_causal_language_modeling_ds as text_causal_language_modeling_ds  # noqa: F401
+from llm_studio.src.datasets.conversation_chain_handler import ConversationChainHandler
+from llm_studio.src.utils.utils import PatchedAttribute
+
+logger = logging.getLogger(__name__)
+
+
+class CustomDataset(text_causal_language_modeling_ds.CustomDataset):
+ """
+ Dataset for DPO optimization.
+ The data is assumed to be in the same format as for causal language modeling,
+ but an additional column with rejected answers is required.
+ For chained conversations, rejected answers are equal to the normal answers
+ up to the last answer; only the last answers differ.
+ """
+
+ def __init__(self, df: pd.DataFrame, cfg: Any, mode: str = "train"):
+ assert (
+ cfg.dataset.limit_chained_samples
+ ), "Need to enable limit_chained_samples for dpo training"
+ super().__init__(df=df, cfg=cfg, mode=mode)
+
+ with PatchedAttribute(
+ cfg.dataset, "answer_column", cfg.dataset.rejected_answer_column
+ ):
+ if cfg.dataset.rejected_prompt_column != "None":
+ with PatchedAttribute(
+ cfg.dataset, "prompt_column", cfg.dataset.rejected_prompt_column
+ ):
+ self.conversation_chain_handler_rejected = ConversationChainHandler(
+ self.df, cfg
+ )
+ else:
+ self.conversation_chain_handler_rejected = ConversationChainHandler(
+ self.df, cfg
+ )
+
+ def __getitem__(self, idx: int) -> Dict:
+ """Reads a single text observation."""
+ chosen_sample = super().__getitem__(idx)
+ keys = ["input_ids", "attention_mask", "token_type_ids", "labels"]
+ prompt_keys = [
+ "prompt_input_ids",
+ "prompt_attention_mask",
+ "prompt_token_type_ids",
+ ]
+ prompt_sample = {k: v for k, v in chosen_sample.items() if k in prompt_keys}
+ chosen_sample = {
+ f"chosen_{k}": v for k, v in chosen_sample.items() if k in keys
+ }
+
+ with PatchedAttribute(
+ self, "conversation_chain_handler", self.conversation_chain_handler_rejected
+ ):
+ rejected_sample = {
+ f"rejected_{k}": v
+ for k, v in super().__getitem__(idx).items()
+ if k in keys
+ }
+
+ sample = {**chosen_sample, **rejected_sample, **prompt_sample}
+ return sample
+
+ def get_labels(self, prompt_encodings, answer_encodings):
+ """
+ Mask all but the last answer.
+ """
+ labels = torch.cat(
+ [
+ torch.cat(
+ [
+ torch.full_like(
+ prompt_encoding,
+ fill_value=-100,
+ ),
+ torch.full_like(
+ answer_encoding,
+ fill_value=-100,
+ ),
+ ]
+ )
+ for prompt_encoding, answer_encoding in zip(
+ prompt_encodings, answer_encodings
+ )
+ ]
+ ).clone()
+
+ if len(answer_encodings[-1]):
+ # empty answers would create a RuntimeError
+ labels[-len(answer_encodings[-1]) :] = answer_encodings[-1]
+
+ if self.cfg.dataset.add_eos_token_to_answer:
+ # eos_token may be equal to pad_token. Add the label back manually.
+ labels[-1] = self.tokenizer.eos_token_id
+ if self.cfg.tokenizer.max_length < len(labels):
+ labels = labels[-self.cfg.tokenizer.max_length :]
+
+ sample = dict(labels=torch.full((self.cfg.tokenizer.max_length,), -100))
+ sample["labels"][-len(labels) :] = labels
+ return sample
+
+ @classmethod
+ def sanity_check(cls, df: pd.DataFrame, cfg: Any, mode: str = "train"):
+ """
+ Quick check whether Dataframe and configurations are correctly set.
+ """
+ super().sanity_check(df=df, cfg=cfg, mode=mode)
+ assert cfg.dataset.rejected_answer_column in df.columns, (
+ f"Answer column {cfg.dataset.rejected_answer_column} not found in the "
+ f"{mode} DataFrame."
+ )
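+
+
+# Note (illustrative, not part of the original change): each sample returned
+# by __getitem__ combines three views of the conversation, e.g. the keys
+#   chosen_input_ids, chosen_attention_mask, chosen_labels,
+#   rejected_input_ids, rejected_attention_mask, rejected_labels,
+#   prompt_input_ids, prompt_attention_mask
+# so the model can score the chosen and rejected answers over a shared prompt.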
diff --git a/llm_studio/src/datasets/text_utils.py b/llm_studio/src/datasets/text_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..299c7a78278bead67c68d600d3f9d23f165fdb86
--- /dev/null
+++ b/llm_studio/src/datasets/text_utils.py
@@ -0,0 +1,138 @@
+import codecs
+import json
+import logging
+import os
+
+from pandas import DataFrame
+from transformers import AutoTokenizer
+
+from llm_studio.python_configs.base import DefaultConfigProblemBase
+
+logger = logging.getLogger(__name__)
+
+
+def get_texts(df: DataFrame, cfg: DefaultConfigProblemBase):
+ if isinstance(cfg.dataset.prompt_column, str):
+ # single column dataset
+ texts = df[cfg.dataset.prompt_column].astype(str)
+ texts = texts.values
+ else:
+ # multi-column dataset - prepend (if necessary) and join
+ columns = list(cfg.dataset.prompt_column)
+
+ for column in columns:
+ df[column] = df[column].astype(str)
+
+ join_str = codecs.decode(cfg.dataset.prompt_column_separator, "unicode_escape")
+
+ texts = df[columns].astype(str)
+ texts = texts.apply(lambda x: join_str.join(x), axis=1).values
+
+ return texts
+
+
+def get_tokenizer(cfg: DefaultConfigProblemBase):
+
+ kwargs = dict(
+ revision=cfg.environment.huggingface_branch,
+ trust_remote_code=cfg.environment.trust_remote_code,
+ token=os.getenv("HF_TOKEN"),
+ )
+
+ # We will be able to remove this after
+ # https://github.com/huggingface/transformers/pull/30964
+ tokenizer_class = AutoTokenizer.from_pretrained(
+ cfg.llm_backbone, **kwargs
+ ).__class__
+ if tokenizer_class.__name__ in ["LlamaTokenizer", "LlamaTokenizerFast"]:
+ kwargs["from_slow"] = True
+
+ kwargs.update(json.loads(cfg.tokenizer.tokenizer_kwargs.strip()))
+
+ try:
+ tokenizer = AutoTokenizer.from_pretrained(cfg.llm_backbone, **kwargs)
+ except TypeError as e:
+ error_message = str(e)
+ if "token" in error_message:
+ # TypeError: RWForCausalLM.__init__() got
+ # an unexpected keyword argument 'token'
+ kwargs.pop("token")
+ tokenizer = AutoTokenizer.from_pretrained(cfg.llm_backbone, **kwargs)
+ elif "not a string" in error_message:
+ # https://github.com/h2oai/h2o-llmstudio/issues/623
+ kwargs.pop("add_prefix_space", None)
+ kwargs.pop("from_slow", None)
+ tokenizer = AutoTokenizer.from_pretrained(cfg.llm_backbone, **kwargs)
+ else:
+ raise e
+
+ tokenizer.padding_side = getattr(
+ cfg.tokenizer, "_padding_side", tokenizer.padding_side
+ )
+
+ tokenizer.add_bos_token = False
+ tokenizer.add_eos_token = False
+
+ # if the eos token is an empty string, assign it a real token
+ if tokenizer.eos_token == "":
+ tokenizer.add_special_tokens({"eos_token": "</s>"})
+ tokenizer.eos_token = "</s>"
+
+ if tokenizer.pad_token is None:
+ if tokenizer.unk_token is not None:
+ tokenizer.pad_token = tokenizer.unk_token
+ else:
+ tokenizer.pad_token = tokenizer.eos_token
+ if tokenizer.bos_token is None:
+ tokenizer.bos_token = tokenizer.eos_token
+ if tokenizer.cls_token is None:
+ tokenizer.cls_token = tokenizer.eos_token
+
+ if tokenizer.unk_token_id is not None:
+ cfg.tokenizer._tokenizer_mask_token_id = tokenizer.unk_token_id
+ elif tokenizer.mask_token_id is not None:
+ cfg.tokenizer._tokenizer_mask_token_id = tokenizer.mask_token_id
+ elif tokenizer.pad_token_id is not None:
+ cfg.tokenizer._tokenizer_mask_token_id = tokenizer.pad_token_id
+ else:
+ # setting the mask token id to the last token in the vocabulary
+ # is usually a safe choice; it mostly refers to the eos token
+ cfg.tokenizer._tokenizer_mask_token_id = len(tokenizer) - 1
+
+ cfg.tokenizer._tokenizer_eos_token = tokenizer.eos_token
+
+ if hasattr(cfg.prediction, "stop_tokens"):
+ set_stop_token_ids(cfg, tokenizer)
+ cfg.tokenizer._vocab_length = len(tokenizer)
+
+ return tokenizer
+
+
+def set_stop_token_ids(cfg, tokenizer):
+ cfg.tokenizer._stop_words = list(
+ filter(None, cfg.prediction.stop_tokens.split(","))
+ )
+ for stop_word in [
+ cfg.dataset.text_system_start,
+ cfg.dataset.text_prompt_start,
+ cfg.dataset.text_answer_separator,
+ ]:
+ stop_word = codecs.decode(stop_word, "unicode_escape").strip()
+ if (
+ stop_word != ""
+ and cfg.tokenizer.add_prompt_answer_tokens
+ and (stop_word not in tokenizer.get_vocab())
+ ):
+ tokenizer.add_tokens([stop_word])
+ cfg.tokenizer._stop_words.append(stop_word)
+ cfg.tokenizer._stop_words = [
+ stop_word for stop_word in cfg.tokenizer._stop_words if stop_word != ""
+ ]
+ cfg.tokenizer._stop_words_ids = []
+ for stop_word in set(cfg.tokenizer._stop_words):
+ cfg.tokenizer._stop_words_ids.append(
+ tokenizer(stop_word, return_tensors="pt", add_special_tokens=False)[
+ "input_ids"
+ ][0]
+ )
+ logger.info(f"Stop token ids: {cfg.tokenizer._stop_words_ids}")
diff --git a/llm_studio/src/loggers.py b/llm_studio/src/loggers.py
new file mode 100644
index 0000000000000000000000000000000000000000..d52dcd84c94b4d47d404c4c61b9837b0a22ab9a5
--- /dev/null
+++ b/llm_studio/src/loggers.py
@@ -0,0 +1,207 @@
+import dataclasses
+import logging
+import os
+from typing import Any, Dict, List, Optional
+
+import numpy as np
+from sqlitedict import SqliteDict
+
+from llm_studio.src.utils.plot_utils import PLOT_ENCODINGS
+
+logger = logging.getLogger(__name__)
+
+
+def get_cfg(cfg: Any) -> Dict:
+ """Returns simplified config elements
+
+ Args:
+ cfg: configuration
+
+ Returns:
+ Dict of config elements
+ """
+
+ items: Dict = {}
+ type_annotations = cfg.get_annotations()
+
+ cfg_dict = cfg.__dict__
+
+ cfg_dict = {key: cfg_dict[key] for key in cfg._get_order(warn_if_unset=False)}
+
+ for k, v in cfg_dict.items():
+ if k.startswith("_") or cfg._get_visibility(k) < 0:
+ continue
+
+ if any(x in k for x in ["api", "secret", "key"]):
+ continue
+
+ if dataclasses.is_dataclass(v):
+ elements_group = get_cfg(cfg=v)
+ items = {**items, **elements_group}
+ else:
+ type_annotation = type_annotations[k]
+ if type_annotation == float:
+ items[k] = float(v)
+ else:
+ items[k] = v
+
+ return items
+
+
+class NeptuneLogger:
+ def __init__(self, cfg: Any):
+ import neptune
+ from neptune.utils import stringify_unsupported
+
+ if cfg.logging._neptune_debug:
+ mode = "debug"
+ else:
+ mode = "async"
+
+ self.logger = neptune.init_run(
+ project=cfg.logging.neptune_project,
+ api_token=os.getenv("NEPTUNE_API_TOKEN", ""),
+ name=cfg.experiment_name,
+ mode=mode,
+ capture_stdout=False,
+ capture_stderr=False,
+ source_files=[],
+ )
+
+ self.logger["cfg"] = stringify_unsupported(get_cfg(cfg))
+
+ def log(self, subset: str, name: str, value: Any, step: Optional[int] = None):
+ name = f"{subset}/{name}"
+ self.logger[name].append(value, step=step)
+
+
+class WandbLogger:
+ def __init__(self, cfg: Any) -> None:
+
+ os.environ["WANDB_DISABLE_CODE"] = "true"
+ os.environ["WANDB_DISABLE_GIT"] = "true"
+ os.environ["WANDB_ERROR_REPORTING"] = "false"
+ os.environ["WANDB_CONSOLE"] = "off"
+ os.environ["WANDB_IGNORE_GLOBS"] = "*.*"
+ os.environ["WANDB_HOST"] = "H2O LLM Studio"
+
+ import wandb
+
+ self.logger = wandb.init(
+ project=cfg.logging.wandb_project,
+ entity=cfg.logging.wandb_entity,
+ name=cfg.experiment_name,
+ config=get_cfg(cfg),
+ save_code=False,
+ )
+
+ def log(self, subset: str, name: str, value: Any, step: Optional[int] = None):
+ name = f"{subset}/{name}"
+ self.logger.log({name: value}, step=step)
+
+
+class LocalLogger:
+ def __init__(self, cfg: Any):
+ logging.getLogger("sqlitedict").setLevel(logging.ERROR)
+
+ self.logs = os.path.join(cfg.output_directory, "charts.db")
+
+ params = get_cfg(cfg)
+
+ with SqliteDict(self.logs) as logs:
+ logs["cfg"] = params
+ logs.commit()
+
+ def log(self, subset: str, name: str, value: Any, step: Optional[int] = None):
+ if subset in PLOT_ENCODINGS:
+ with SqliteDict(self.logs) as logs:
+ if subset not in logs:
+ subset_dict = dict()
+ else:
+ subset_dict = logs[subset]
+ subset_dict[name] = value
+ logs[subset] = subset_dict
+ logs.commit()
+ return
+
+ # https://github.com/h2oai/wave/issues/447
+ if np.isnan(value):
+ value = None
+ else:
+ value = float(value)
+ with SqliteDict(self.logs) as logs:
+ if subset not in logs:
+ subset_dict = dict()
+ else:
+ subset_dict = logs[subset]
+ if name not in subset_dict:
+ subset_dict[name] = {"steps": [], "values": []}
+
+ subset_dict[name]["steps"].append(step)
+ subset_dict[name]["values"].append(value)
+
+ logs[subset] = subset_dict
+ logs.commit()
+
+
+class DummyLogger:
+ def __init__(self, cfg: Optional[Any] = None):
+ return
+
+ def log(self, subset: str, name: str, value: Any, step: Optional[int] = None):
+ return
+
+
+class MainLogger:
+ """Main logger"""
+
+ def __init__(self, cfg: Any):
+ self.loggers = {
+ "local": LocalLogger(cfg),
+ "external": ExternalLoggers.get(cfg.logging.logger),
+ }
+
+ try:
+ self.loggers["external"] = self.loggers["external"](cfg)
+ except Exception as e:
+ logger.warning(
+ f"Error when initializing logger. "
+ f"Disabling custom logging functionality. "
+ f"Please ensure logger configuration is correct and "
+ f"you have a stable Internet connection: {e}"
+ )
+ self.loggers["external"] = DummyLogger(cfg)
+
+ def reset_external(self):
+ self.loggers["external"] = DummyLogger()
+
+ def log(self, subset: str, name: str, value: str | float, step: Optional[float] = None):
+ for k, logger in self.loggers.items():
+ if "validation_predictions" in name and k == "external":
+ continue
+ if subset == "internal" and not isinstance(logger, LocalLogger):
+ continue
+ logger.log(subset=subset, name=name, value=value, step=step)
+
+
+class ExternalLoggers:
+ """ExternalLoggers factory."""
+
+ _loggers = {"None": DummyLogger, "Neptune": NeptuneLogger, "W&B": WandbLogger}
+
+ @classmethod
+ def names(cls) -> List[str]:
+ return list(cls._loggers.keys())
+
+ @classmethod
+ def get(cls, name: str) -> Any:
+ """Access to ExternalLoggers.
+
+ Args:
+ name: external loggers name
+ Returns:
+ A class to build the ExternalLoggers
+ """
+
+ return cls._loggers.get(name, DummyLogger)
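+
+
+# Illustrative sketch (not part of the original change): resolving a logger
+# backend by name; unknown names fall back to DummyLogger.
+if __name__ == "__main__":
+    logger_cls = ExternalLoggers.get("W&B")   # WandbLogger
+    fallback = ExternalLoggers.get("MLflow")  # DummyLogger, not registered
+    print(ExternalLoggers.names(), logger_cls.__name__, fallback.__name__)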
diff --git a/llm_studio/src/losses/__init__.py b/llm_studio/src/losses/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llm_studio/src/losses/text_causal_classification_modeling_losses.py b/llm_studio/src/losses/text_causal_classification_modeling_losses.py
new file mode 100644
index 0000000000000000000000000000000000000000..4eb0696c02a6a0526d2a4395279c9c44b8d77208
--- /dev/null
+++ b/llm_studio/src/losses/text_causal_classification_modeling_losses.py
@@ -0,0 +1,50 @@
+import logging
+from typing import Any, KeysView
+
+from torch import Tensor, nn
+
+logger = logging.getLogger(__name__)
+
+
+class CrossEntropyLoss(nn.Module):
+ def __init__(self, cfg: Any):
+ super().__init__()
+ self.cfg = cfg
+ self.loss_fn = nn.CrossEntropyLoss()
+
+ def forward(self, logits: Tensor, labels: Tensor) -> Tensor:
+ return self.loss_fn(logits, labels.reshape(-1).long())
+
+
+class BinaryCrossEntropyLoss(nn.Module):
+ def __init__(self, cfg: Any):
+ super().__init__()
+ self.cfg = cfg
+ self.loss_fn = nn.BCEWithLogitsLoss()
+
+ def forward(self, logits: Tensor, labels: Tensor) -> Tensor:
+ return self.loss_fn(logits, labels)
+
+
+class Losses:
+ """Losses factory."""
+
+ _losses = {
+ "CrossEntropyLoss": CrossEntropyLoss,
+ "BinaryCrossEntropyLoss": BinaryCrossEntropyLoss,
+ }
+
+ @classmethod
+ def names(cls) -> KeysView:
+ return cls._losses.keys()
+
+ @classmethod
+ def get(cls, name: str) -> Any:
+ """Access to Losses.
+
+ Args:
+ name: losses name
+ Returns:
+ A class to build the Losses
+ """
+ return cls._losses.get(name, CrossEntropyLoss)
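
A minimal sketch of the factory in use (`None` stands in for the real `cfg`; these losses only store it). Note that `CrossEntropyLoss` flattens the label tensor and casts it to `long` internally:

```python
import torch

loss_fn = Losses.get("CrossEntropyLoss")(None)  # None is a hypothetical cfg
logits = torch.randn(4, 3)                      # (batch, num_classes)
labels = torch.tensor([[0], [2], [1], [2]])     # flattened to (batch,) internally
loss = loss_fn(logits, labels)

# Unknown names fall back to CrossEntropyLoss rather than raising:
assert Losses.get("DoesNotExist") is CrossEntropyLoss
```
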
diff --git a/llm_studio/src/losses/text_causal_language_modeling_losses.py b/llm_studio/src/losses/text_causal_language_modeling_losses.py
new file mode 100644
index 0000000000000000000000000000000000000000..bef2e0392160a6e957b15201517868905d534f17
--- /dev/null
+++ b/llm_studio/src/losses/text_causal_language_modeling_losses.py
@@ -0,0 +1,63 @@
+import logging
+from typing import Any, KeysView
+
+from torch import nn
+
+logger = logging.getLogger(__name__)
+
+
+class TokenAveragedCrossEntropyLoss(nn.Module):
+ def __init__(self, cfg: Any):
+ super().__init__()
+ self.cfg = cfg
+ self.loss_fn = nn.CrossEntropyLoss()
+
+ def forward(self, logits, labels):
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+
+ shift_logits = shift_logits.view(-1, shift_logits.size(-1))
+ shift_labels = shift_labels.view(-1)
+
+ return self.loss_fn(shift_logits, shift_labels)
+
+
+class SampleAveragedCrossEntropyLoss(nn.Module):
+ def __init__(self, cfg: Any):
+ super().__init__()
+ self.cfg = cfg
+ self.loss_fn = nn.CrossEntropyLoss()
+
+ def forward(self, logits, labels):
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+
+ loss = 0
+ for i in range(labels.shape[0]):
+ loss += self.loss_fn(shift_logits[i], shift_labels[i])
+ loss /= labels.shape[0]
+ return loss
+
+
+class Losses:
+ """Losses factory."""
+
+ _losses = {
+ "TokenAveragedCrossEntropy": TokenAveragedCrossEntropyLoss,
+ "SampleAveragedCrossEntropy": SampleAveragedCrossEntropyLoss,
+ }
+
+ @classmethod
+ def names(cls) -> KeysView:
+ return cls._losses.keys()
+
+ @classmethod
+ def get(cls, name: str) -> Any:
+ """Access to Losses.
+
+ Args:
+ name: losses name
+ Returns:
+ A class to build the Losses
+ """
+ return cls._losses.get(name, TokenAveragedCrossEntropyLoss)
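
The two losses differ only in how tokens are pooled: token averaging runs one cross-entropy call over all shifted tokens, while sample averaging computes one loss per sequence and averages those, weighting short and long samples equally. A small sketch with hypothetical shapes (`cfg` is unused by the math):

```python
import torch

logits = torch.randn(2, 6, 32)         # (batch, seq_len, vocab)
labels = torch.randint(0, 32, (2, 6))

token_avg = Losses.get("TokenAveragedCrossEntropy")(None)(logits, labels)
sample_avg = Losses.get("SampleAveragedCrossEntropy")(None)(logits, labels)

# The two coincide only when every sample contributes the same number
# of non-ignored tokens.
```
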
diff --git a/llm_studio/src/losses/text_causal_regression_modeling_losses.py b/llm_studio/src/losses/text_causal_regression_modeling_losses.py
new file mode 100644
index 0000000000000000000000000000000000000000..4415b9393a3cdf11e2e863c9875818bb90d26ce6
--- /dev/null
+++ b/llm_studio/src/losses/text_causal_regression_modeling_losses.py
@@ -0,0 +1,50 @@
+import logging
+from typing import Any, KeysView
+
+from torch import Tensor, nn
+
+logger = logging.getLogger(__name__)
+
+
+class MSELoss(nn.Module):
+ def __init__(self, cfg: Any):
+ super().__init__()
+ self.cfg = cfg
+ self.loss_fn = nn.MSELoss()
+
+ def forward(self, logits: Tensor, labels: Tensor) -> Tensor:
+ return self.loss_fn(logits, labels)
+
+
+class MAELoss(nn.Module):
+ def __init__(self, cfg: Any):
+ super().__init__()
+ self.cfg = cfg
+ self.loss_fn = nn.L1Loss()
+
+ def forward(self, logits: Tensor, labels: Tensor) -> Tensor:
+ return self.loss_fn(logits, labels)
+
+
+class Losses:
+ """Losses factory."""
+
+ _losses = {
+ "MSELoss": MSELoss,
+ "MAELoss": MAELoss,
+ }
+
+ @classmethod
+ def names(cls) -> KeysView:
+ return cls._losses.keys()
+
+ @classmethod
+ def get(cls, name: str) -> Any:
+ """Access to Losses.
+
+ Args:
+ name: losses name
+ Returns:
+ A class to build the Losses
+ """
+ return cls._losses.get(name, MSELoss)
diff --git a/llm_studio/src/losses/text_dpo_modeling_losses.py b/llm_studio/src/losses/text_dpo_modeling_losses.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7fd24851146ed84c60beb8f89ee1ac8457182af
--- /dev/null
+++ b/llm_studio/src/losses/text_dpo_modeling_losses.py
@@ -0,0 +1,232 @@
+"""
+Loss Implementation based upon
+https://github.com/eric-mitchell/direct-preference-optimization
+https://github.com/huggingface/trl
+"""
+
+import logging
+from typing import Any, KeysView
+
+import torch
+import torch.nn.functional as F
+from torch import nn
+
+logger = logging.getLogger(__name__)
+
+
+class DPOLoss(nn.Module):
+ """
+ Implements
+ "Direct Preference Optimization:
+ Your Language Model is Secretly a Reward Model"
+ from https://arxiv.org/abs/2305.18290
+ """
+
+ def __init__(self, cfg: Any):
+ super().__init__()
+ self.cfg = cfg
+ self.requires_reference_model = True
+
+ def forward(
+ self,
+ policy_chosen_logps: torch.FloatTensor,
+ policy_rejected_logps: torch.FloatTensor,
+ reference_chosen_logps: torch.FloatTensor,
+ reference_rejected_logps: torch.FloatTensor,
+ ):
+ pi_logratios = policy_chosen_logps - policy_rejected_logps
+ ref_logratios = reference_chosen_logps - reference_rejected_logps
+
+ losses = self.get_losses(logits=pi_logratios - ref_logratios)
+ chosen_rewards = (
+ self.cfg.training.beta
+ * (policy_chosen_logps - reference_chosen_logps).detach()
+ )
+ rejected_rewards = (
+ self.cfg.training.beta
+ * (policy_rejected_logps - reference_rejected_logps).detach()
+ )
+
+ return losses.mean(), chosen_rewards.mean(), rejected_rewards.mean()
+
+ def get_losses(self, logits):
+ # The beta is a temperature parameter for the DPO loss,
+ # typically something in the range of 0.1 to 0.5.
+ # We ignore the reference model as beta -> 0.
+ # The label_smoothing parameter encodes our uncertainty about the labels and
+ # calculates a conservative DPO loss.
+
+ # For now, set label_smoothing to 0 (original DPO loss).
+ # See https://ericmitchell.ai/cdpo.pdf for more details
+ label_smoothing = 0
+
+ losses = (
+ -F.logsigmoid(self.cfg.training.beta * logits) * (1 - label_smoothing)
+ - F.logsigmoid(-self.cfg.training.beta * logits) * label_smoothing
+ )
+ return losses
+
+
+class DPOHingeLoss(DPOLoss):
+ def get_losses(self, logits):
+ losses = torch.relu(1 - self.cfg.training.beta * logits)
+ return losses
+
+
+class DPOIPOLoss(DPOLoss):
+ """
+ Implements "A General Theoretical Paradigm
+ to Understand Learning from Human Preferences"
+ from https://arxiv.org/pdf/2310.12036.pdf
+ """
+
+ def get_losses(self, logits):
+ # eqn (17) of the https://arxiv.org/pdf/2310.12036.pdf
+ # where beta is the real, positive KL parameter for the IPO loss,
+ # denoted by tau in the paper (see also eqn (6)).
+ losses = (logits - 1 / (2 * self.cfg.training.beta)) ** 2
+ return losses
+
+
+class KTOPairLoss(nn.Module):
+ """
+ Implements original paired KTO implementation
+ Adopted from https://github.com/ContextualAI/HALOs
+ and https://github.com/huggingface/trl
+ """
+
+ def __init__(self, cfg: Any):
+ super().__init__()
+ self.cfg = cfg
+ self.requires_reference_model = True
+
+ def forward(
+ self,
+ policy_chosen_logps: torch.FloatTensor,
+ policy_rejected_logps: torch.FloatTensor,
+ reference_chosen_logps: torch.FloatTensor,
+ reference_rejected_logps: torch.FloatTensor,
+ ):
+ chosen_KL = (policy_chosen_logps - reference_chosen_logps).mean().clamp(min=0)
+ rejected_KL = (
+ (policy_rejected_logps - reference_rejected_logps).mean().clamp(min=0)
+ )
+
+ chosen_logratios = policy_chosen_logps - reference_chosen_logps
+ rejected_logratios = policy_rejected_logps - reference_rejected_logps
+ losses = torch.cat(
+ (
+ 1
+ - F.sigmoid(self.cfg.training.beta * (chosen_logratios - rejected_KL)),
+ 1
+ - F.sigmoid(self.cfg.training.beta * (chosen_KL - rejected_logratios)),
+ ),
+ 0,
+ )
+
+ chosen_rewards = (
+ self.cfg.training.beta
+ * (policy_chosen_logps - reference_chosen_logps).detach()
+ ).float()
+ rejected_rewards = (
+ self.cfg.training.beta
+ * (policy_rejected_logps - reference_rejected_logps).detach()
+ ).float()
+
+ return losses.mean(), chosen_rewards.mean(), rejected_rewards.mean()
+
+
+class CPOLoss(nn.Module):
+ """
+ Implements CPO Loss https://arxiv.org/abs/2401.08417
+ Adopted from https://github.com/huggingface/trl
+ """
+
+ def __init__(self, cfg: Any):
+ super().__init__()
+ self.cfg = cfg
+ self.requires_reference_model = False
+
+ def forward(
+ self,
+ policy_chosen_logps: torch.FloatTensor,
+ policy_rejected_logps: torch.FloatTensor,
+ ):
+
+ logits = policy_chosen_logps - policy_rejected_logps
+
+ losses = self.get_losses(logits)
+
+ chosen_rewards = (self.cfg.training.beta * policy_chosen_logps.detach()).float()
+ rejected_rewards = (
+ self.cfg.training.beta * policy_rejected_logps.detach()
+ ).float()
+
+ return losses.mean(), chosen_rewards.mean(), rejected_rewards.mean()
+
+ def get_losses(self, logits):
+ label_smoothing = 0
+
+ losses = (
+ -F.logsigmoid(self.cfg.training.beta * logits) * (1 - label_smoothing)
+ - F.logsigmoid(-self.cfg.training.beta * logits) * label_smoothing
+ )
+ return losses
+
+
+class SimPOLoss(CPOLoss):
+ """
+ Implements SimPO Loss https://arxiv.org/abs/2405.14734
+ Adopted from https://github.com/princeton-nlp/SimPO
+ and https://github.com/huggingface/trl
+ """
+
+ def get_losses(self, logits):
+ label_smoothing = 0
+ gamma = self.cfg.training.simpo_gamma
+ gamma_logratios = gamma / self.cfg.training.beta
+ logits = logits - gamma_logratios
+
+ losses = (
+ -F.logsigmoid(self.cfg.training.beta * logits) * (1 - label_smoothing)
+ - F.logsigmoid(-self.cfg.training.beta * logits) * label_smoothing
+ )
+ return losses
+
+
+class Losses:
+ """Losses factory."""
+
+ _losses = {
+ "DPOLoss": DPOLoss,
+ "DPOHingeLoss": DPOHingeLoss,
+ "DPOIPOLoss": DPOIPOLoss,
+ "KTOPairLoss": KTOPairLoss,
+ "CPOLoss": CPOLoss,
+ "SimPOLoss": SimPOLoss,
+ }
+
+ @classmethod
+ def names(cls) -> KeysView:
+ return cls._losses.keys()
+
+ @classmethod
+ def get(cls, name: str) -> Any:
+ """Access to Losses.
+ Args:
+ name: losses name
+ Returns:
+ A class to build the Losses
+ """
+ return cls._losses.get(name, DPOLoss)
+
+
+# see https://github.com/huggingface/trl/commit/29d439a2043edf4455b05cae5a1e2ade69d22794
+LOSS_REDUCTION = {
+ "DPOLoss": False,
+ "KTOPairLoss": False,
+ "DPOHingeLoss": True,
+ "DPOIPOLoss": True,
+ "CPOLoss": False,
+ "SimPOLoss": True,
+}
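
A numeric sketch of the DPO loss contract: all inputs are per-sample log probabilities, and the returned rewards are the beta-scaled policy/reference log-ratios. The values below are made up; only `training.beta` is read from the (hypothetical) config:

```python
import torch
from types import SimpleNamespace

cfg = SimpleNamespace(training=SimpleNamespace(beta=0.1))  # hypothetical cfg
loss_fn = Losses.get("DPOLoss")(cfg)

loss, chosen_rewards, rejected_rewards = loss_fn(
    policy_chosen_logps=torch.tensor([-10.0, -12.0]),
    policy_rejected_logps=torch.tensor([-11.0, -11.5]),
    reference_chosen_logps=torch.tensor([-10.5, -12.0]),
    reference_rejected_logps=torch.tensor([-10.8, -11.4]),
)

# LOSS_REDUCTION records which losses expect length-averaged log-probs:
assert LOSS_REDUCTION["SimPOLoss"] is True
```
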
diff --git a/llm_studio/src/metrics/__init__.py b/llm_studio/src/metrics/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llm_studio/src/metrics/text_causal_classification_modeling_metrics.py b/llm_studio/src/metrics/text_causal_classification_modeling_metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..325bde7bec2da8e43d46293f15df7e7ef977085b
--- /dev/null
+++ b/llm_studio/src/metrics/text_causal_classification_modeling_metrics.py
@@ -0,0 +1,175 @@
+from typing import Any, Dict, List, Tuple, Union
+
+import numpy as np
+import pandas as pd
+from numpy.typing import NDArray
+from sklearn.metrics import log_loss, roc_auc_score
+
+from llm_studio.python_configs.base import DefaultConfigProblemBase
+
+
+def accuracy_score(
+ cfg: DefaultConfigProblemBase,
+ results: Dict,
+ val_df: pd.DataFrame,
+ raw_results: bool = False,
+) -> Union[NDArray, Tuple[NDArray, List[str]]]:
+ """Calculate accuracy score.
+
+ Only considers the predicted value (results["predictions"]) and target value
+ (results["target_text"]).
+ It supports both binary and multiclass classification.
+
+ Args:
+ cfg: DefaultConfigProblemBase, ignored
+ results: Dict, model results including 'predictions' and 'target_text'
+ val_df: pd.DataFrame, validation dataframe
+ raw_results: bool, ignored
+
+ Returns:
+ Numpy array of 0.0 or 1.0 for each sample
+
+ Raises:
+ ValueError: If input data is invalid or inconsistent
+ """
+ predictions = np.array(results["predictions"])
+ target = np.array(
+ [[int(t) for t in text.split(",")] for text in results["target_text"]]
+ )
+
+ # Input validation
+ if len(target) != len(predictions):
+ raise ValueError(
+ f"Length of target ({len(target)}) and predicted ({len(predictions)}) "
+ "should be the same."
+ )
+ if len(target) == 0:
+ raise ValueError("No data to calculate accuracy score")
+
+ return (predictions == target).mean(axis=1).reshape(-1).astype("float")
+
+
+def auc_score(
+ cfg: DefaultConfigProblemBase,
+ results: Dict,
+ val_df: pd.DataFrame,
+ raw_results: bool = False,
+) -> Union[NDArray, Tuple[NDArray, List[str]]]:
+ """Calculate Area Under the ROC Curve (AUC) score.
+
+ This function computes the AUC score using the predicted logits and target values.
+ It supports both binary and multiclass classification.
+
+ Args:
+ cfg: DefaultConfigProblemBase, configuration
+ results: Dict, model results including 'logits' and 'target_text'
+ val_df: pd.DataFrame, ignored
+ raw_results: bool, ignored
+
+ Returns:
+ float: AUC score for binary classification
+ NDArray: AUC scores for multiclass classification (one-vs-rest)
+
+ Raises:
+ ValueError: If input data is invalid or inconsistent
+ """
+ logits = np.array(results["logits"])
+ target = np.array(
+ [[int(t) for t in text.split(",")] for text in results["target_text"]]
+ )
+
+ # Input validation
+ if len(target) != len(logits):
+ raise ValueError(
+ f"Length of target ({len(target)}) and logits ({len(logits)}) "
+ "should be the same."
+ )
+ if len(target) == 0:
+ raise ValueError("No data to calculate AUC score.")
+
+ if target.shape[1] == 1 and cfg.dataset.num_classes > 1:
+ target = np.eye(cfg.dataset.num_classes)[target.reshape(-1)]
+ return roc_auc_score(target, logits, multi_class="ovr")
+
+
+def logloss_score(
+ cfg: DefaultConfigProblemBase,
+ results: Dict,
+ val_df: pd.DataFrame,
+ raw_results: bool = False,
+) -> Union[NDArray, Tuple[NDArray, List[str]]]:
+ """Calculate the Log Loss (Cross-Entropy Loss) score.
+
+ This function computes the log loss using the predicted probabilities and target.
+ It supports binary, multiclass, and multilabel classification.
+
+ Args:
+ cfg: DefaultConfigProblemBase, configuration
+ results: Dict, model results including 'probabilities' and 'target_text'
+ val_df: pd.DataFrame, ignored
+ raw_results: bool, ignored
+
+ Returns:
+ float: Log Loss score
+
+ Raises:
+ ValueError: If input data is invalid or inconsistent
+ """
+ predictions = np.array(results["probabilities"])
+ target = np.array(
+ [[int(t) for t in text.split(",")] for text in results["target_text"]]
+ )
+
+ # Input validation
+ if len(target) != len(predictions):
+ raise ValueError(
+ f"Length of target ({len(target)}) and predictions ({len(predictions)}) "
+ "should be the same."
+ )
+ if len(target) == 0:
+ raise ValueError("No data to calculate log loss.")
+
+ # Handle multilabel case
+ if len(cfg.dataset.answer_column) > 1:
+ log_losses = []
+ for col in range(len(cfg.dataset.answer_column)):
+ log_losses.append(log_loss(target[:, col], predictions[:, col]))
+ return np.mean(log_losses)
+
+ # Handle binary and multiclass cases
+ if cfg.dataset.num_classes > 1:
+ target = np.eye(cfg.dataset.num_classes)[target.reshape(-1)]
+ return log_loss(target, predictions)
+
+
+class Metrics:
+ """
+ Metrics factory. Returns:
+ - metric value
+ - should it be maximized or minimized
+ - Reduce function
+
+ Maximized or minimized is needed for early stopping (saving best checkpoint)
+ Reduce function to generate a single metric value, usually "mean" or "none"
+ """
+
+ _metrics = {
+ "AUC": (auc_score, "max", "mean"),
+ "Accuracy": (accuracy_score, "max", "mean"),
+ "LogLoss": (logloss_score, "min", "mean"),
+ }
+
+ @classmethod
+ def names(cls) -> List[str]:
+ return sorted(cls._metrics.keys())
+
+ @classmethod
+ def get(cls, name: str) -> Any:
+ """Access to Metrics.
+
+ Args:
+ name: metrics name
+ Returns:
+ A class to build the Metrics
+ """
+ return cls._metrics.get(name, cls._metrics["LogLoss"])
diff --git a/llm_studio/src/metrics/text_causal_language_modeling_metrics.py b/llm_studio/src/metrics/text_causal_language_modeling_metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..5659dce556289c182a30423463f0a4866de3ffce
--- /dev/null
+++ b/llm_studio/src/metrics/text_causal_language_modeling_metrics.py
@@ -0,0 +1,242 @@
+import logging
+import os
+from typing import Any, Dict, List, Tuple, Union
+
+import numpy as np
+import pandas as pd
+import torch
+from joblib import Parallel, delayed
+from numpy.typing import NDArray
+from openai import AzureOpenAI, OpenAI
+from sacrebleu import BLEU
+from torch import nn
+from tqdm import tqdm
+
+from llm_studio.python_configs.base import DefaultConfigProblemBase
+from llm_studio.src.datasets.text_utils import get_texts
+from llm_studio.src.utils.logging_utils import TqdmToLogger
+
+logger = logging.getLogger(__name__)
+
+
+LLM_RETRY_ATTEMPTS = int(os.getenv("LLM_RETRY_ATTEMPTS", 3))
+LLM_TIMEOUT = int(os.getenv("LLM_TIMEOUT", 60))
+
+
+def sacrebleu_score(
+ cfg: DefaultConfigProblemBase, results: Dict, val_df: pd.DataFrame
+) -> NDArray:
+ """
+ Calculate BLEU scores for predicted texts against target texts.
+
+ This function computes the BLEU score for each pair of predicted and target texts.
+ It handles empty target texts by assigning a score of 0.0.
+ BLEU scores are given in the range [0.0, 100.0].
+
+ Args:
+ cfg: DefaultConfigProblemBase, ignored
+ results: Dict, containing 'predicted_text' and 'target_text' lists
+ val_df: pd.DataFrame, ignored
+
+ Returns:
+ NDArray: An array of BLEU scores for each text pair
+
+ Note:
+ - Empty target texts are assigned a score of 0.0
+ """
+ # Input validation
+ if len(results["target_text"]) != len(results["predicted_text"]):
+ raise ValueError(
+ f"Length of target_text ({len(results['target_text'])}) and predicted_text "
+ f"({len(results['predicted_text'])}) should be the same."
+ )
+ if len(results["target_text"]) == 0:
+ raise ValueError("No data to calculate BLEU score")
+
+ scores = []
+ for predicted_text, target_text in zip(
+ results["predicted_text"], results["target_text"]
+ ):
+ if target_text == "":
+ score = 0.0
+ else:
+ score = (
+ BLEU(effective_order=True)
+ .sentence_score(predicted_text, [target_text])
+ .score
+ )
+ scores.append(score)
+ return np.array(scores)
+
+
+def get_openai_client() -> AzureOpenAI | OpenAI:
+ if os.getenv("OPENAI_API_TYPE", "open_ai") == "azure":
+ endpoint = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")
+ client: AzureOpenAI | OpenAI = AzureOpenAI(
+ api_key=os.getenv("OPENAI_API_KEY", ""),
+ azure_deployment=os.getenv("OPENAI_API_DEPLOYMENT_ID"),
+ # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning
+ api_version=os.getenv("OPENAI_API_VERSION", "2023-05-15"),
+ # https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource
+ azure_endpoint=endpoint,
+ max_retries=LLM_RETRY_ATTEMPTS,
+ timeout=LLM_TIMEOUT, # unit is seconds
+ )
+ logger.info("Using Microsoft Azure Endpoint for OpenAI API")
+ logger.info(f"Endpoint: {endpoint}")
+ else:
+ client = OpenAI(
+ api_key=os.getenv("OPENAI_API_KEY", ""),
+ base_url=os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1"),
+ max_retries=LLM_RETRY_ATTEMPTS,
+ timeout=LLM_TIMEOUT, # unit is seconds
+ )
+ return client
+
+
+def call_openai_api(template: str, model: str):
+ client = get_openai_client()
+ response = client.chat.completions.create(
+ model=model,
+ messages=[
+ {
+ "role": "system",
+ "content": "You are a helpful and precise assistant "
+ "for checking the quality of the answer.",
+ },
+ {
+ "role": "user",
+ "content": template,
+ },
+ ],
+ temperature=0.0,
+ max_tokens=1024,
+ )
+ ret = response.choices[0].message.content
+ try:
+ score = float(ret.split("SCORE:")[-1].split()[0].split("/")[0])
+ except ValueError:
+ raise ValueError(f"Could not parse score from response: {ret}")
+ return score, ret
+
+
+def rate_reply(filled_eval_template: str, model: str):
+ try:
+ return call_openai_api(filled_eval_template, model)
+ except Exception as e:
+ logger.warning(f"Exception caught in api call: {e}")
+ return 0.0, ""
+
+
+def gpt_score(
+ cfg: DefaultConfigProblemBase,
+ results: Dict,
+ val_df: pd.DataFrame,
+ raw_results: bool = False,
+) -> Union[NDArray, Tuple[NDArray, List[str]]]:
+ vdf = val_df.copy()
+ vdf["_PROMPT"] = get_texts(val_df, cfg)
+ vdf["_PREDICTED_TEXT"] = results["predicted_text"]
+ vdf["_TARGET_TEXT"] = results["target_text"]
+
+ model = cfg.prediction.metric_gpt_model
+ template_name = cfg.prediction.metric_gpt_template
+
+ if template_name == "mt-bench":
+ eval_template = open("prompts/mt-bench/general.txt", "r").read()
+ else:
+ eval_template = open(f"prompts/{template_name}.txt", "r").read()
+ vdf["filled_eval_template"] = eval_template
+ if template_name == "mt-bench":
+ eval_template = open("prompts/mt-bench/reference.txt", "r").read()
+ vdf.loc[
+ vdf.category.isin(["math", "reasoning", "coding"]), "filled_eval_template"
+ ] = eval_template
+
+ vdf["filled_eval_template"] = vdf.apply(
+ lambda row: row["filled_eval_template"].format(**row), axis=1
+ )
+
+ ret = Parallel(n_jobs=8, backend="multiprocessing")(
+ delayed(rate_reply)(
+ filled_eval_template,
+ model,
+ )
+ for filled_eval_template in tqdm(
+ vdf["filled_eval_template"].values,
+ file=TqdmToLogger(logger, level=logging.INFO),
+ desc=f"GPT eval {model} - {template_name}",
+ total=len(vdf),
+ )
+ )
+ scores = [x[0] for x in ret]
+ explanations = [x[1] for x in ret]
+
+ if template_name == "mt-bench":
+ vdf["score"] = scores
+ score_by_category = vdf.groupby("category").agg({"score": "mean"}).reset_index()
+ logger.info(
+ "MT-Bench scores by category:\n" + score_by_category.to_string(index=False)
+ )
+
+ if raw_results:
+ return np.array(scores), explanations
+ return np.mean(scores)
+
+
+class Perplexity(nn.Module):
+ def __init__(self, cfg: DefaultConfigProblemBase, reduce: bool = True):
+ super().__init__()
+ self.cfg = cfg
+ self.loss_fn = nn.CrossEntropyLoss()
+ self.reduce = reduce
+
+ def forward(self, logits, labels):
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+
+ perplexity = []
+ for i in range(labels.shape[0]):
+ perplexity.append(self.loss_fn(shift_logits[i], shift_labels[i]))
+ perplexity = torch.stack(perplexity, dim=0)
+ perplexity = torch.exp(perplexity)
+ if self.reduce:
+ perplexity = torch.mean(perplexity)
+ return perplexity
+
+
+def perplexity(cfg: DefaultConfigProblemBase, results: Dict, val_df: pd.DataFrame):
+ return results["perplexity"].detach().float().cpu().numpy()
+
+
+class Metrics:
+ """
+ Metrics factory. Returns:
+ - metric value
+ - should it be maximized or minimized
+ - Reduce function
+
+ Maximized or minimized is needed for early stopping (saving best checkpoint)
+ Reduce function to generate a single metric value, usually "mean" or "none"
+ """
+
+ _metrics = {
+ "Perplexity": (perplexity, "min", "mean"),
+ "BLEU": (sacrebleu_score, "max", "mean"),
+ "GPT": (gpt_score, "max", "mean"),
+ }
+
+ @classmethod
+ def names(cls) -> List[str]:
+ return sorted(cls._metrics.keys())
+
+ @classmethod
+ def get(cls, name: str) -> Any:
+ """Access to Metrics.
+
+ Args:
+ name: metrics name
+ Returns:
+ A class to build the Metrics
+ """
+ return cls._metrics.get(name, cls._metrics["BLEU"])
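
Of the three metrics, only `Perplexity` runs inside the model's forward pass; it is exp(per-sample cross entropy) over the usual shifted logits/labels. A small sketch with hypothetical shapes (`cfg` is stored but unused by the computation):

```python
import torch

ppl = Perplexity(cfg=None, reduce=False)   # reduce=False: one value per sample
logits = torch.randn(2, 8, 100)            # (batch, seq_len, vocab)
labels = torch.randint(0, 100, (2, 8))
per_sample_ppl = ppl(logits, labels)       # shape (2,)
```
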
diff --git a/llm_studio/src/metrics/text_causal_regression_modeling_metrics.py b/llm_studio/src/metrics/text_causal_regression_modeling_metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab168dd989296056dc40c811f80320d3ec881c8e
--- /dev/null
+++ b/llm_studio/src/metrics/text_causal_regression_modeling_metrics.py
@@ -0,0 +1,84 @@
+import logging
+from typing import Any, Dict, List, Tuple, Union
+
+import numpy as np
+import pandas as pd
+from numpy.typing import NDArray
+
+logger = logging.getLogger(__name__)
+
+
+def mse_score(
+ cfg: Any,
+ results: Dict,
+ val_df: pd.DataFrame,
+ raw_results: bool = False,
+) -> Union[NDArray, Tuple[NDArray, List[str]]]:
+ target = np.array(
+ [[float(t) for t in text.split(",")] for text in results["target_text"]]
+ )
+ predictions = np.array(results["predictions"])
+
+ if len(target) != len(predictions):
+ raise ValueError(
+ f"Length of target ({len(target)}) and predictions ({len(predictions)}) "
+ "should be the same."
+ )
+ if len(target) == 0:
+ raise ValueError("No data to calculate MSE score")
+
+ return ((target - predictions) ** 2).mean(axis=1).reshape(-1).astype("float")
+
+
+def mae_score(
+ cfg: Any,
+ results: Dict,
+ val_df: pd.DataFrame,
+ raw_results: bool = False,
+) -> Union[NDArray, Tuple[NDArray, List[str]]]:
+ target = np.array(
+ [[float(t) for t in text.split(",")] for text in results["target_text"]]
+ )
+ predictions = np.array(results["predictions"])
+
+ if len(target) != len(predictions):
+ raise ValueError(
+ f"Length of target ({len(target)}) and predictions ({len(predictions)}) "
+ "should be the same."
+ )
+ if len(target) == 0:
+ raise ValueError("No data to calculate MAE score")
+
+ return np.abs(target - predictions).mean(axis=1).reshape(-1).astype("float")
+
+
+class Metrics:
+ """
+ Metrics factory. Returns:
+ - metric value
+ - should it be maximized or minimized
+ - Reduce function
+
+ Maximized or minimized is needed for early stopping (saving best checkpoint)
+ Reduce function to generate a single metric value, usually "mean" or "none"
+ """
+
+ _metrics = {
+ "MSE": (mse_score, "min", "mean"),
+ "MAE": (mae_score, "min", "mean"),
+ }
+
+ @classmethod
+ def names(cls) -> List[str]:
+ return sorted(cls._metrics.keys())
+
+ @classmethod
+ def get(cls, name: str) -> Any:
+ """Access to Metrics.
+
+ Args:
+ name: metrics name
+ Returns:
+ A class to build the Metrics
+ """
+ return cls._metrics.get(name, cls._metrics["MSE"])
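
Both scores support several comma-separated target values per row (one per answer column) and average across columns before the factory's "mean" reduce averages across samples. A tiny worked example for `mse_score`:

```python
import numpy as np

results = {
    "target_text": ["1.0,2.0", "0.5,0.5"],   # two target columns per row
    "predictions": [[1.0, 1.0], [0.5, 1.5]],
}
mse_score(None, results, val_df=None)  # -> array([0.5, 0.5])
```
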
diff --git a/llm_studio/src/models/__init__.py b/llm_studio/src/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llm_studio/src/models/text_causal_classification_modeling_model.py b/llm_studio/src/models/text_causal_classification_modeling_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b66031183e4d135f321ab605436665b0320577f
--- /dev/null
+++ b/llm_studio/src/models/text_causal_classification_modeling_model.py
@@ -0,0 +1,92 @@
+import logging
+from typing import Any, Dict
+
+from torch import nn
+from transformers import AutoModelForCausalLM
+
+from llm_studio.src.utils.data_utils import batch_padding
+from llm_studio.src.utils.modeling_utils import (
+ create_nlp_backbone,
+ forward,
+ prepare_lora,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class Model(nn.Module):
+ """
+    Model for causal classification modeling problem type.
+ """
+
+ def __init__(self, cfg: Any):
+ """
+ Args:
+ cfg: config with all the hyperparameters
+ """
+
+ super(Model, self).__init__()
+
+ self.cfg = cfg
+ self.backbone, self.backbone_config = create_nlp_backbone(
+ cfg, model_class=AutoModelForCausalLM
+ )
+
+ if cfg.training.lora:
+ self.backbone = prepare_lora(cfg, self.backbone)
+
+ self.classification_head = nn.Linear(
+ self.backbone_config.vocab_size, cfg.dataset.num_classes, bias=False
+ )
+
+ self.loss_fn = self.cfg.training.loss_class.get(
+ self.cfg.training.loss_function
+ )(self.cfg)
+
+ def forward(
+ self,
+ batch: Dict,
+ padding: bool = True,
+ ) -> Dict:
+ # disable cache if gradient checkpointing is enabled
+ if self.cfg.architecture.gradient_checkpointing:
+ self.backbone.config.use_cache = False
+
+ outputs: Dict = {}
+ mask_key = "prompt_attention_mask"
+ pad_keys = [
+ "prompt_input_ids",
+ "prompt_attention_mask",
+ "special_tokens_mask",
+ "labels",
+ ]
+
+ if padding:
+ batch = batch_padding(
+ self.cfg,
+ batch,
+ self.training,
+ mask_key=mask_key,
+ pad_keys=pad_keys,
+ padding_side=self.cfg.tokenizer._padding_side,
+ )
+
+ output = forward(
+ self.backbone,
+ input_ids=batch["prompt_input_ids"],
+ attention_mask=batch["prompt_attention_mask"],
+ )
+
+ output.logits = self.classification_head(output[0][:, -1].float())
+
+ if "labels" in batch:
+ loss = self.loss_fn(output.logits, batch["class_label"].float())
+ outputs["loss"] = loss
+
+ outputs["logits"] = output.logits
+
+ # enable cache again if gradient checkpointing is enabled
+ if self.cfg.architecture.gradient_checkpointing:
+ self.backbone.config.use_cache = True
+
+ return outputs
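
The classification head deliberately reuses the backbone's vocab-sized output at the last sequence position as a pooled representation, rather than a hidden state. A shape-only sketch of that step (sizes are hypothetical):

```python
import torch
from torch import nn

vocab_size, num_classes, seq_len = 32000, 3, 16
head = nn.Linear(vocab_size, num_classes, bias=False)

lm_output = torch.randn(2, seq_len, vocab_size)  # backbone output[0]
class_logits = head(lm_output[:, -1].float())    # (batch, num_classes)
```
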
diff --git a/llm_studio/src/models/text_causal_language_modeling_model.py b/llm_studio/src/models/text_causal_language_modeling_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..22e5f034a727d96f47c8919415d1d3fe3feacd99
--- /dev/null
+++ b/llm_studio/src/models/text_causal_language_modeling_model.py
@@ -0,0 +1,109 @@
+import logging
+from typing import Any, Dict
+
+from torch import nn
+from transformers import AutoModelForCausalLM
+
+from llm_studio.src.metrics.text_causal_language_modeling_metrics import Perplexity
+from llm_studio.src.utils.data_utils import batch_padding
+from llm_studio.src.utils.modeling_utils import (
+ create_nlp_backbone,
+ forward,
+ generate,
+ prepare_lora,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class Model(nn.Module):
+ """
+ Model for causal language modeling problem type.
+ """
+
+ def __init__(self, cfg: Any):
+ """
+ Args:
+ cfg: config with all the hyperparameters
+ """
+
+ super(Model, self).__init__()
+
+ self.cfg = cfg
+ self.backbone, self.backbone_config = create_nlp_backbone(
+ cfg, model_class=AutoModelForCausalLM
+ )
+
+ if cfg.training.lora:
+ self.backbone = prepare_lora(cfg, self.backbone)
+
+ self.loss_fn = self.cfg.training.loss_class.get(
+ self.cfg.training.loss_function
+ )(self.cfg)
+
+ if self.cfg.prediction.metric == "Perplexity":
+ self.perplexity = Perplexity(self.cfg, reduce=False)
+
+ def init_deepspeed(self):
+ self.backward = self.backbone.backward
+ self.save_checkpoint = self.backbone.save_checkpoint
+ self.save_16bit_model = self.backbone.save_16bit_model
+ if self.cfg.training.lora:
+ self.backbone.base_model.model.config = (
+ self.backbone.base_model.model.module.config
+ )
+ self.backbone.base_model.model.generation_config = (
+ self.backbone.base_model.model.module.generation_config
+ )
+ else:
+ self.backbone.config = self.backbone.module.config
+ self.backbone.generation_config = self.backbone.module.generation_config
+
+ def generate(self, batch: Dict, cfg: Any, streamer=None):
+ if cfg.environment.use_deepspeed and cfg.training.lora:
+ return generate(self.backbone.base_model.model, batch, cfg, streamer)
+ else:
+ return generate(self.backbone, batch, cfg, streamer)
+
+ def forward(
+ self,
+ batch: Dict,
+ padding: bool = True,
+ ) -> Dict:
+ # disable cache if gradient checkpointing is enabled
+ if self.cfg.architecture.gradient_checkpointing:
+ self.backbone.config.use_cache = False
+
+ outputs: Dict = {}
+ mask_key = "attention_mask"
+ pad_keys = [
+ "input_ids",
+ "attention_mask",
+ "special_tokens_mask",
+ "labels",
+ ]
+
+ if padding:
+ batch = batch_padding(
+ self.cfg,
+ batch,
+ self.training,
+ mask_key=mask_key,
+ pad_keys=pad_keys,
+ padding_side=self.cfg.tokenizer._padding_side,
+ )
+
+ output = forward(self.backbone, batch["input_ids"], batch["attention_mask"])
+
+ if "labels" in batch:
+ loss = self.loss_fn(output.logits, batch["labels"])
+ outputs["loss"] = loss
+
+ if not self.training and self.cfg.prediction.metric == "Perplexity":
+ outputs["perplexity"] = self.perplexity(output.logits, batch["labels"])
+
+ # enable cache again if gradient checkpointing is enabled
+ if self.cfg.architecture.gradient_checkpointing:
+ self.backbone.config.use_cache = True
+
+ return outputs
diff --git a/llm_studio/src/models/text_causal_regression_modeling_model.py b/llm_studio/src/models/text_causal_regression_modeling_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed8f7504ea93a88f5a818135d007bcb030bf7718
--- /dev/null
+++ b/llm_studio/src/models/text_causal_regression_modeling_model.py
@@ -0,0 +1,92 @@
+import logging
+from typing import Any, Dict
+
+from torch import nn
+from transformers import AutoModelForCausalLM
+
+from llm_studio.src.utils.data_utils import batch_padding
+from llm_studio.src.utils.modeling_utils import (
+ create_nlp_backbone,
+ forward,
+ prepare_lora,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class Model(nn.Module):
+ """
+    Model for causal regression modeling problem type.
+ """
+
+ def __init__(self, cfg: Any):
+ """
+ Args:
+ cfg: config with all the hyperparameters
+ """
+
+ super(Model, self).__init__()
+
+ self.cfg = cfg
+ self.backbone, self.backbone_config = create_nlp_backbone(
+ cfg, model_class=AutoModelForCausalLM
+ )
+
+ if cfg.training.lora:
+ self.backbone = prepare_lora(cfg, self.backbone)
+
+ self.regression_head = nn.Linear(
+ self.backbone_config.vocab_size, len(cfg.dataset.answer_column), bias=False
+ )
+
+ self.loss_fn = self.cfg.training.loss_class.get(
+ self.cfg.training.loss_function
+ )(self.cfg)
+
+ def forward(
+ self,
+ batch: Dict,
+ padding: bool = True,
+ ) -> Dict:
+ # disable cache if gradient checkpointing is enabled
+ if self.cfg.architecture.gradient_checkpointing:
+ self.backbone.config.use_cache = False
+
+ outputs: Dict = {}
+ mask_key = "prompt_attention_mask"
+ pad_keys = [
+ "prompt_input_ids",
+ "prompt_attention_mask",
+ "special_tokens_mask",
+ "labels",
+ ]
+
+ if padding:
+ batch = batch_padding(
+ self.cfg,
+ batch,
+ self.training,
+ mask_key=mask_key,
+ pad_keys=pad_keys,
+ padding_side=self.cfg.tokenizer._padding_side,
+ )
+
+ output = forward(
+ self.backbone,
+ input_ids=batch["prompt_input_ids"],
+ attention_mask=batch["prompt_attention_mask"],
+ )
+
+ output.logits = self.regression_head(output[0][:, -1].float())
+
+ if "labels" in batch:
+ loss = self.loss_fn(output.logits, batch["class_label"].float())
+ outputs["loss"] = loss
+
+ outputs["predictions"] = output.logits
+
+ # enable cache again if gradient checkpointing is enabled
+ if self.cfg.architecture.gradient_checkpointing:
+ self.backbone.config.use_cache = True
+
+ return outputs
diff --git a/llm_studio/src/models/text_dpo_modeling_model.py b/llm_studio/src/models/text_dpo_modeling_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..983bb8f4cdd1f99d3e631c2380fae8f0245afd96
--- /dev/null
+++ b/llm_studio/src/models/text_dpo_modeling_model.py
@@ -0,0 +1,232 @@
+import logging
+from typing import Any, Dict
+
+import torch
+from torch import nn
+from transformers import AutoModelForCausalLM
+
+from llm_studio.src.losses.text_causal_language_modeling_losses import (
+ SampleAveragedCrossEntropyLoss,
+)
+from llm_studio.src.losses.text_dpo_modeling_losses import LOSS_REDUCTION
+from llm_studio.src.metrics.text_causal_language_modeling_metrics import Perplexity
+from llm_studio.src.utils.data_utils import batch_padding
+from llm_studio.src.utils.modeling_utils import (
+ create_nlp_backbone,
+ forward,
+ generate,
+ prepare_lora,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def get_batch_logps(
+ logits: torch.FloatTensor,
+ labels: torch.LongTensor,
+ average_log_prob: bool = False,
+) -> torch.Tensor:
+ """
+ Based upon the official implementation of DPO:
+ https://github.com/eric-mitchell/direct-preference-optimization
+
+ Compute the log probabilities of the given labels under the given logits.
+ Args:
+ logits:
+ Logits of the model (unnormalized).
+ Shape: (batch_size, sequence_length, vocab_size)
+ labels:
+ Labels for which to compute the log probabilities.
+ Label tokens with a value of -100 are ignored.
+ Shape: (batch_size, sequence_length)
+ average_log_prob:
+ If True, return the average log probability per (non-masked) token.
+ Otherwise, return the sum of the
+ log probabilities of the (non-masked) tokens.
+ Returns:
+ A tensor of shape (batch_size,) containing the average/sum
+ log probabilities of the given labels under the given logits.
+ """
+ assert logits.shape[:-1] == labels.shape
+
+ # shift labels and logits to account for next token prediction
+ # See also text_causal_language_modeling_losses.py
+ labels = labels[:, 1:].clone()
+ logits = logits[:, :-1, :]
+ loss_mask = labels != -100
+
+ # dummy token; we'll ignore the losses on these tokens when loss_mask is applied
+ # Needed to be able to apply torch.gather with index=labels.unsqueeze(2)
+ labels[labels == -100] = 0
+
+ per_token_logps = torch.gather(
+ logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)
+ ).squeeze(2)
+
+ if average_log_prob:
+ return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
+ else:
+ return (per_token_logps * loss_mask).sum(-1)
+
+
+class Model(nn.Module):
+ """
+ Model for DPO language modeling problem type.
+ """
+
+ def __init__(self, cfg: Any):
+ super().__init__()
+
+ self.cfg = cfg
+ self.backbone, self.backbone_config = create_nlp_backbone(
+ cfg, model_class=AutoModelForCausalLM
+ )
+
+ if cfg.training.lora:
+ self.backbone = prepare_lora(cfg=cfg, backbone=self.backbone)
+
+ self.loss_fn = self.cfg.training.loss_class.get(
+ self.cfg.training.loss_function
+ )(self.cfg)
+
+ if self.loss_fn.requires_reference_model:
+ if cfg.training.lora and not cfg.training.lora_unfreeze_layers:
+ self.backbone_reference = None
+ else:
+ logger.info("Duplicating backbone for reference model.")
+ self.backbone_reference, _ = create_nlp_backbone(
+ cfg, model_class=AutoModelForCausalLM
+ )
+ for _, param in self.backbone_reference.named_parameters():
+ # freeze base model's layers
+ param.requires_grad = False
+ self.backbone_reference = self.backbone_reference.eval()
+
+ if self.cfg.prediction.metric == "Perplexity":
+ self.perplexity = Perplexity(self.cfg, reduce=False)
+
+ def generate(self, batch: Dict, cfg: Any, streamer=None):
+ return generate(self.backbone, batch, cfg, streamer)
+
+ def forward(
+ self,
+ batch: Dict,
+ padding: bool = True,
+ ) -> Dict:
+ """
+ Forward pass of DPO model.
+        Runtime is roughly four times that of the causal language modeling model,
+ as we need to compute
+ - logits for chosen answer
+ - logits for rejected answer
+ - logits for chosen answer with reference model
+ - logits for rejected answer with reference model
+ """
+ # disable cache if gradient checkpointing is enabled
+ if self.cfg.architecture.gradient_checkpointing:
+ self.backbone.config.use_cache = False
+
+ outputs: Dict = {}
+
+ logits_dict = {}
+ labels_dict = {}
+
+ for answer in ["chosen", "rejected"]:
+ if padding:
+ batch = batch_padding(
+ self.cfg,
+ batch,
+ self.training,
+ mask_key=f"{answer}_attention_mask",
+ pad_keys=[
+ f"{answer}_input_ids",
+ f"{answer}_attention_mask",
+ f"{answer}_labels",
+ ],
+ )
+ logits = forward(
+ self.backbone,
+ input_ids=batch[f"{answer}_input_ids"],
+ attention_mask=batch[f"{answer}_attention_mask"],
+ ).logits
+
+ logits_dict[answer] = logits
+ labels_dict[answer] = batch[f"{answer}_labels"]
+
+ outputs[f"{answer}_logps"] = get_batch_logps(
+ logits,
+ batch[f"{answer}_labels"],
+ average_log_prob=LOSS_REDUCTION[self.cfg.training.loss_function],
+ )
+
+ if self.loss_fn.requires_reference_model:
+ with torch.no_grad():
+ if self.backbone_reference:
+ reference_logits = forward(
+ self.backbone_reference,
+ input_ids=batch[f"{answer}_input_ids"],
+ attention_mask=batch[f"{answer}_attention_mask"],
+ ).logits
+ else:
+ with self.backbone.disable_adapter():
+ reference_logits = forward(
+ self.backbone,
+ input_ids=batch[f"{answer}_input_ids"],
+ attention_mask=batch[f"{answer}_attention_mask"],
+ ).logits
+
+ outputs[f"{answer}_reference_logps"] = get_batch_logps(
+ reference_logits,
+ batch[f"{answer}_labels"],
+ average_log_prob=LOSS_REDUCTION[
+ self.cfg.training.loss_function
+ ],
+ )
+
+ if self.loss_fn.requires_reference_model:
+ loss, chosen_rewards, rejected_rewards = self.loss_fn(
+ policy_chosen_logps=outputs["chosen_logps"],
+ policy_rejected_logps=outputs["rejected_logps"],
+ reference_chosen_logps=outputs["chosen_reference_logps"],
+ reference_rejected_logps=outputs["rejected_reference_logps"],
+ )
+ else:
+ loss, chosen_rewards, rejected_rewards = self.loss_fn(
+ policy_chosen_logps=outputs["chosen_logps"],
+ policy_rejected_logps=outputs["rejected_logps"],
+ )
+ outputs["loss"] = loss
+
+ # These values will be logged to external logger if enabled, see train.py
+ outputs["additional_log_chosen_rewards"] = chosen_rewards.detach()
+ outputs["additional_log_rejected_rewards"] = rejected_rewards.detach()
+ # Reward margin should increase over time
+ outputs["additional_log_reward_margin"] = (
+ chosen_rewards - rejected_rewards
+ ).detach()
+
+ # log sample average cross entropy, perplexity metric is also sample averaged
+ outputs["additional_log_chosen_cross_entropy_loss"] = (
+ SampleAveragedCrossEntropyLoss(self.cfg)(
+ logits_dict["chosen"], labels_dict["chosen"]
+ ).detach()
+ )
+ outputs["additional_log_rejected_cross_entropy_loss"] = (
+ SampleAveragedCrossEntropyLoss(self.cfg)(
+ logits_dict["rejected"], labels_dict["rejected"]
+ ).detach()
+ )
+
+ if not self.training and self.cfg.prediction.metric == "Perplexity":
+ outputs["perplexity"] = self.perplexity(
+ logits_dict["chosen"], labels_dict["chosen"]
+ )
+ outputs["additional_log_rejected_perplexity"] = self.perplexity(
+ logits_dict["rejected"], labels_dict["rejected"]
+ )
+
+ # enable cache again if gradient checkpointing is enabled
+ if self.cfg.architecture.gradient_checkpointing:
+ self.backbone.config.use_cache = True
+
+ return outputs
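
A tiny numeric check of `get_batch_logps`, the function everything above hinges on: labels of -100 are excluded from both the sum and the average after the one-token shift.

```python
import torch

logits = torch.randn(1, 4, 10)
labels = torch.tensor([[-100, 3, 7, -100]])

summed = get_batch_logps(logits, labels, average_log_prob=False)
averaged = get_batch_logps(logits, labels, average_log_prob=True)
# Only the two unmasked labels (3 and 7) contribute; LOSS_REDUCTION
# decides which form a given DPO-family loss receives.
```
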
diff --git a/llm_studio/src/models/text_sequence_to_sequence_modeling_model.py b/llm_studio/src/models/text_sequence_to_sequence_modeling_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..bef31c0bf6ad7941a286264ecb53b0a97e4549cf
--- /dev/null
+++ b/llm_studio/src/models/text_sequence_to_sequence_modeling_model.py
@@ -0,0 +1,117 @@
+import logging
+from typing import Any, Dict
+
+import torch.nn as nn
+from transformers import AutoModelForSeq2SeqLM
+
+from llm_studio.src.metrics.text_causal_language_modeling_metrics import Perplexity
+from llm_studio.src.utils.data_utils import batch_padding
+from llm_studio.src.utils.modeling_utils import (
+ create_nlp_backbone,
+ generate,
+ prepare_lora,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class Model(nn.Module):
+ """
+    Model for sequence to sequence modeling problem type.
+ """
+
+ def __init__(self, cfg: Any):
+ """
+ Args:
+ cfg: config with all the hyperparameters
+ """
+
+ super(Model, self).__init__()
+
+ self.cfg = cfg
+ self.backbone, self.backbone_config = create_nlp_backbone(
+ cfg, model_class=AutoModelForSeq2SeqLM
+ )
+
+ if cfg.training.lora:
+ self.backbone = prepare_lora(cfg, self.backbone)
+
+ self.loss_fn = self.cfg.training.loss_class.get(
+ self.cfg.training.loss_function
+ )(self.cfg)
+
+ if self.cfg.prediction.metric == "Perplexity":
+ self.perplexity = Perplexity(self.cfg, reduce=False)
+
+ def generate(self, batch: Dict, cfg: Any, streamer=None):
+ return generate(
+ backbone=self.backbone,
+ batch=batch,
+ cfg=cfg,
+ streamer=streamer,
+ remove_prompt=False,
+ )
+
+ def forward(
+ self,
+ batch: Dict,
+ padding: bool = True,
+ ) -> Dict:
+ # disable cache if gradient checkpointing is enabled
+ if self.cfg.architecture.gradient_checkpointing:
+ self.backbone.config.use_cache = False
+
+ outputs: Dict = {}
+ kwargs: Dict = {}
+
+ if padding:
+ mask_key = "prompt_attention_mask"
+ pad_keys = [
+ "prompt_input_ids",
+ "prompt_attention_mask",
+ ]
+
+ batch = batch_padding(
+ self.cfg,
+ batch,
+ self.training,
+ mask_key=mask_key,
+ pad_keys=pad_keys,
+ padding_side=self.cfg.tokenizer._padding_side,
+ )
+
+ mask_key = "answer_attention_mask"
+ pad_keys = [
+ "answer_input_ids",
+ "answer_attention_mask",
+ ]
+
+ batch = batch_padding(
+ self.cfg,
+ batch,
+ self.training,
+ mask_key=mask_key,
+ pad_keys=pad_keys,
+ padding_side="right",
+ )
+
+ labels = batch["answer_input_ids"]
+ labels[batch["answer_attention_mask"] == 0] = -100
+
+ output = self.backbone(
+ input_ids=batch["prompt_input_ids"],
+ attention_mask=batch["prompt_attention_mask"],
+ labels=labels,
+ **kwargs,
+ )
+
+ outputs["loss"] = output.loss
+
+ if not self.training and self.cfg.prediction.metric == "Perplexity":
+ outputs["perplexity"] = self.perplexity(output.logits, labels)
+
+ # enable cache again if gradient checkpointing is enabled
+ if self.cfg.architecture.gradient_checkpointing:
+ self.backbone.config.use_cache = True
+
+ return outputs
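
The label construction here is the key detail: answer tokens double as labels, and padded positions are set to -100 so the seq2seq loss ignores them. In isolation:

```python
import torch

answer_input_ids = torch.tensor([[5, 6, 0, 0]])       # 0 = pad id (hypothetical)
answer_attention_mask = torch.tensor([[1, 1, 0, 0]])

labels = answer_input_ids.clone()
labels[answer_attention_mask == 0] = -100             # -> [[5, 6, -100, -100]]
```
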
diff --git a/llm_studio/src/nesting.py b/llm_studio/src/nesting.py
new file mode 100644
index 0000000000000000000000000000000000000000..c72d408c4642231895d01234b39a994571546b05
--- /dev/null
+++ b/llm_studio/src/nesting.py
@@ -0,0 +1,85 @@
+from collections import defaultdict
+from typing import DefaultDict, List, Set, Union
+
+from pydantic.dataclasses import dataclass
+
+
+@dataclass
+class Dependency:
+ """
+ Represents a dependency with a key, value, and set condition.
+
+ Attributes:
+ key (str): The key of the dependency (parent).
+ value (Union[str, bool, int, None]): The value of the dependency to look for. \
+ None for empty condition (dependency only needs to exist).
+ is_set (bool): Whether the value of the dependency should be set (True) or not \
+ set (False).
+ """
+
+ key: str
+ value: Union[str, bool, int, None]
+ is_set: bool
+
+ def check(self, dependency_values: List[str]) -> bool:
+ """
+ Check if dependency is satisfied
+
+ Args:
+ dependency_values (List[str]): List of dependency values to check against.
+
+ Returns:
+ bool: True if the dependency is satisfied, False otherwise.
+ """
+
+ if self.value is None and self.is_set and len(dependency_values):
+ return False
+ elif self.value is None and not self.is_set and not len(dependency_values):
+ return False
+ elif self.is_set and self.value not in dependency_values:
+ return False
+ elif (
+ not self.is_set
+ and len([v for v in dependency_values if v != self.value]) == 0
+ ):
+ return False
+ return True
+
+
+class Nesting:
+ """
+ A tree-like structure to specify nested dependencies of type `Dependency`.
+
+    This class maps keys to any number of dependencies of type `Dependency`. \
+    It is primarily useful for specifying nested dependencies of UI elements \
+    shown in Wave.
+
+ Attributes:
+ dependencies (DefaultDict[str, List[Dependency]]): A dictionary mapping keys \
+ to their dependencies of type `Dependency`.
+ triggers (Set[str]): A set of all dependency keys that can trigger changes.
+ """
+
+ def __init__(self) -> None:
+ self.dependencies: DefaultDict[str, List[Dependency]] = defaultdict(list)
+ self.triggers: Set[str] = set()
+
+ def add(self, keys: List[str], dependencies: List[Dependency]) -> None:
+ """
+ Append dependencies of type `Dependency` for given keys.
+
+ Args:
+ keys (List[str]): Keys to add dependencies for.
+            dependencies (List[Dependency]): The `Dependency` objects to depend on.
+
+ Raises:
+ ValueError: If the input keys are not unique.
+ """
+
+ if len(set(keys)) != len(keys):
+ raise ValueError("Nesting keys must be unique.")
+
+ for dependency in dependencies:
+ self.triggers.add(dependency.key)
+ for key in set(keys):
+ self.dependencies[key].append(dependency)
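
A minimal sketch of the intended use, with hypothetical UI keys: show `lora_r` and `lora_alpha` only while the parent `lora` toggle is set to True.

```python
nesting = Nesting()
nesting.add(
    keys=["lora_r", "lora_alpha"],
    dependencies=[Dependency(key="lora", value=True, is_set=True)],
)

dep = nesting.dependencies["lora_r"][0]
dep.check([True])   # -> True: parent holds the required value
dep.check([False])  # -> False: condition not met, elements stay hidden
```
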
diff --git a/llm_studio/src/optimizers.py b/llm_studio/src/optimizers.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6a265fba2617f84f2dd16341022438c902a8f82
--- /dev/null
+++ b/llm_studio/src/optimizers.py
@@ -0,0 +1,33 @@
+from functools import partial
+from typing import Any, List
+
+import bitsandbytes as bnb
+from torch import optim
+
+
+class Optimizers:
+ """Optimizers factory."""
+
+ _optimizers = {
+ "Adam": optim.Adam,
+ "AdamW": optim.AdamW,
+ "SGD": partial(optim.SGD, momentum=0.9, nesterov=True),
+ "RMSprop": partial(optim.RMSprop, momentum=0.9, alpha=0.9),
+ "Adadelta": optim.Adadelta,
+ "AdamW8bit": bnb.optim.Adam8bit,
+ }
+
+ @classmethod
+ def names(cls) -> List[str]:
+ return sorted(cls._optimizers.keys())
+
+ @classmethod
+ def get(cls, name: str) -> Any:
+ """Access to Optimizers.
+
+ Args:
+ name: optimizer name
+ Returns:
+ A class to build the Optimizer
+ """
+ return cls._optimizers.get(name)
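
The `partial` entries pre-bind momentum-style defaults, so every optimizer in the table is constructed the same way from parameters plus a learning rate. A usage sketch:

```python
from torch import nn

model = nn.Linear(4, 2)
optimizer_cls = Optimizers.get("SGD")  # partial with momentum=0.9, nesterov=True
optimizer = optimizer_cls(model.parameters(), lr=1e-3)
```
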
diff --git a/llm_studio/src/order.py b/llm_studio/src/order.py
new file mode 100644
index 0000000000000000000000000000000000000000..b56281b0baaa2039552a77aea106517f9bdc01c9
--- /dev/null
+++ b/llm_studio/src/order.py
@@ -0,0 +1,99 @@
+from typing import Iterable, List, Optional
+
+
+class Order:
+ """
+ A list-like structure to specify the order of items in a dictionary.
+ The main characteristics are:
+ - Append and insert only. Cannot remove elements. This is not strictly required
+ by the use-case but probably good practice.
+ - Elements must be unique. Inserting an element which is already in the list
+ will throw an error.
+
+ Primarily useful for specifying the order in which UI elements
+ should be shown in Wave.
+ """
+
+ def __init__(self, keys: Optional[List[str]] = None):
+ if keys is not None:
+ self._list = list(keys)
+ else:
+ self._list = list()
+
+ def _unique_guard(self, *keys: str) -> None:
+ for key in keys:
+ if key in self._list:
+ raise ValueError(f"`{key}` is already in the list!")
+
+ def append(self, key: str) -> None:
+ """
+        Append a key at the end of the list.
+
+ Args:
+ key: String to append.
+
+ Raises:
+ - `ValueError` if the key is already in the list.
+ """
+
+ self._unique_guard(key)
+
+ self._list.append(key)
+
+ def extend(self, keys: Iterable[str]) -> None:
+ """
+ Extend the list by multiple keys.
+
+ Args:
+ keys: Iterable of keys.
+
+ Raises:
+ - `ValueError` if one or more key is already in the list.
+ """
+
+ self._unique_guard(*keys)
+
+ self._list.extend(keys)
+
+ def insert(
+ self, *keys: str, before: Optional[str] = None, after: Optional[str] = None
+ ) -> None:
+ """
+ Insert one or more keys. Either `before` or `after`, but not both, must be set
+ to determine position.
+
+ Args:
+            keys: One or more keys to insert.
+            after: A key immediately after which the `keys` are inserted.
+            before: A key immediately before which the `keys` are inserted.
+
+ Raises:
+ - `ValueError` if one or more key is already in the list.
+ - `ValueError` if `before` / `after` does not exist in the list.
+ - `ValueError` if an invalid combination of arguments is set.
+ """
+
+ self._unique_guard(*keys)
+
+ if before is None and after is None:
+ raise ValueError("Either `before` or `after` must be set.")
+
+ if before and after:
+ raise ValueError("Can't set `before` and `after` at the same time.")
+
+ if before is not None:
+ for key in keys:
+ self._list.insert(self._list.index(before), key)
+
+ if after is not None:
+ for key in keys:
+ self._list.insert(self._list.index(after) + 1, key)
+
+ def __getitem__(self, idx):
+ return self._list[idx]
+
+ def __len__(self) -> int:
+ return len(self._list)
+
+ def __iter__(self):
+ return iter(self._list)
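
A short usage sketch (hypothetical keys); note that `insert` positions relative to an existing key and that duplicates raise:

```python
order = Order(["dataset", "training", "prediction"])
order.append("environment")
order.insert("augmentation", after="dataset")

list(order)
# -> ['dataset', 'augmentation', 'training', 'prediction', 'environment']
```
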
diff --git a/llm_studio/src/plots/__init__.py b/llm_studio/src/plots/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llm_studio/src/plots/text_causal_classification_modeling_plots.py b/llm_studio/src/plots/text_causal_classification_modeling_plots.py
new file mode 100644
index 0000000000000000000000000000000000000000..1997989a8a305e73f87a92b4793d06e7b25d0f2c
--- /dev/null
+++ b/llm_studio/src/plots/text_causal_classification_modeling_plots.py
@@ -0,0 +1,22 @@
+import os
+
+from llm_studio.src.datasets.text_utils import get_tokenizer
+from llm_studio.src.plots.text_causal_language_modeling_plots import (
+ Plots as TextCausalLanguageModelingPlots,
+)
+from llm_studio.src.plots.text_causal_language_modeling_plots import (
+ create_batch_prediction_df,
+)
+from llm_studio.src.utils.plot_utils import PlotData
+
+
+class Plots(TextCausalLanguageModelingPlots):
+ @classmethod
+ def plot_batch(cls, batch, cfg) -> PlotData:
+ tokenizer = get_tokenizer(cfg)
+ df = create_batch_prediction_df(
+ batch, tokenizer, ids_for_tokenized_text="prompt_input_ids"
+ )
+ path = os.path.join(cfg.output_directory, "batch_viz.parquet")
+ df.to_parquet(path)
+ return PlotData(path, encoding="df")
diff --git a/llm_studio/src/plots/text_causal_language_modeling_plots.py b/llm_studio/src/plots/text_causal_language_modeling_plots.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2aeda47b4105ce959294fdc0a5ff384b900ecad
--- /dev/null
+++ b/llm_studio/src/plots/text_causal_language_modeling_plots.py
@@ -0,0 +1,250 @@
+import hashlib
+import os
+from typing import Any, Dict
+
+import pandas as pd
+
+from llm_studio.src.datasets.conversation_chain_handler import get_conversation_chains
+from llm_studio.src.datasets.text_utils import get_tokenizer
+from llm_studio.src.utils.data_utils import read_dataframe_drop_missing_labels
+from llm_studio.src.utils.plot_utils import (
+ PlotData,
+ format_for_markdown_visualization,
+ list_to_markdown_representation,
+)
+
+
+class Plots:
+ @classmethod
+ def plot_batch(cls, batch, cfg) -> PlotData:
+ tokenizer = get_tokenizer(cfg)
+ df = create_batch_prediction_df(batch, tokenizer)
+ path = os.path.join(cfg.output_directory, "batch_viz.parquet")
+ df.to_parquet(path)
+ return PlotData(path, encoding="df")
+
+ @classmethod
+ def plot_data(cls, cfg) -> PlotData:
+ """
+ Plots the data in a scrollable table.
+ We limit the number of rows to max 600 to avoid rendering issues in Wave.
+ As the data visualization is instantiated on every page load, we cache the
+ data visualization in a parquet file.
+ """
+ config_id = (
+ str(cfg.dataset.train_dataframe)
+ + str(cfg.dataset.system_column)
+ + str(cfg.dataset.prompt_column)
+ + str(cfg.dataset.answer_column)
+ + str(cfg.dataset.parent_id_column)
+ )
+ config_hash = hashlib.md5(config_id.encode()).hexdigest()
+ path = os.path.join(
+ os.path.dirname(cfg.dataset.train_dataframe),
+ f"__meta_info__{config_hash}_data_viz.parquet",
+ )
+ if os.path.exists(path):
+ return PlotData(path, encoding="df")
+
+ df = read_dataframe_drop_missing_labels(cfg.dataset.train_dataframe, cfg)
+
+ conversations = get_conversation_chains(df, cfg, limit_chained_samples=True)
+
+ # Limit to max 15 prompt-conversation-answer rounds
+        # This yields at most 5 * sum_{i=1}^{15} i = 600 rows in the DataFrame
+ max_conversation_length = min(
+ max([len(conversation["prompts"]) for conversation in conversations]), 15
+ )
+
+ conversations_to_display = []
+ for conversation_length in range(1, max_conversation_length + 1):
+ conversations_to_display += [
+ conversation
+ for conversation in conversations
+ if len(conversation["prompts"]) == conversation_length
+ ][:5]
+
+ # Convert into a scrollable table by transposing the dataframe
+ df_transposed = pd.DataFrame(columns=["Sample Number", "Field", "Content"])
+
+ i = 0
+ for sample_number, conversation in enumerate(conversations_to_display):
+ if conversation["systems"][0] != "":
+ df_transposed.loc[i] = [
+ sample_number,
+ "System",
+ conversation["systems"][0],
+ ]
+ i += 1
+ for prompt, answer in zip(conversation["prompts"], conversation["answers"]):
+ df_transposed.loc[i] = [
+ sample_number,
+ "Prompt",
+ prompt,
+ ]
+ i += 1
+ df_transposed.loc[i] = [
+ sample_number,
+ "Answer",
+ answer,
+ ]
+ i += 1
+
+ df_transposed["Content"] = df_transposed["Content"].apply(
+ format_for_markdown_visualization
+ )
+
+ df_transposed.to_parquet(path)
+
+ return PlotData(path, encoding="df")
+
+ @classmethod
+ def plot_validation_predictions(
+ cls, val_outputs: Dict, cfg: Any, val_df: pd.DataFrame, mode: str
+ ) -> PlotData:
+ return plot_validation_predictions(val_outputs, cfg, val_df, mode)
+
+
+def plot_validation_predictions(
+ val_outputs: Dict, cfg: Any, val_df: pd.DataFrame, mode: str
+) -> PlotData:
+ conversations = get_conversation_chains(
+ val_df, cfg, limit_chained_samples=cfg.dataset.limit_chained_samples
+ )
+ prompt_column_name = (
+ cfg.dataset.prompt_column
+ if len(cfg.dataset.prompt_column) > 1
+ else cfg.dataset.prompt_column[0]
+ )
+
+ target_texts = [conversation["answers"][-1] for conversation in conversations]
+
+ input_texts = []
+ for conversation in conversations:
+ input_text = conversation["systems"][0]
+ prompts = conversation["prompts"]
+ answers = conversation["answers"]
+        # blank out the last answer: it is the prediction target, not model input
+        answers[-1] = ""
+ for prompt, answer in zip(prompts, answers):
+ input_text += (
+ f" **{prompt_column_name}:** "
+ f"{prompt}\n\n"
+ f"**{cfg.dataset.answer_column}:** "
+ f"{answer}\n\n"
+ )
+ input_texts += [input_text]
+
+ if "predicted_text" in val_outputs.keys():
+ predicted_texts = val_outputs["predicted_text"]
+ else:
+ predicted_texts = [
+ "No predictions are generated for the selected metric"
+ ] * len(target_texts)
+
+ input_text_column_name = (
+ "Input Text (tokenization max length setting "
+ "may truncate the input text during training/inference)"
+ )
+ df = pd.DataFrame(
+ {
+ input_text_column_name: input_texts,
+ "Target Text": target_texts,
+ "Predicted Text": predicted_texts,
+ }
+ )
+ df[input_text_column_name] = df[input_text_column_name].apply(
+ format_for_markdown_visualization
+ )
+ df["Target Text"] = df["Target Text"].apply(format_for_markdown_visualization)
+ df["Predicted Text"] = df["Predicted Text"].apply(format_for_markdown_visualization)
+
+ if val_outputs.get("metrics") is not None:
+ metric_column_name = f"Metric ({cfg.prediction.metric})"
+ df[metric_column_name] = val_outputs["metrics"]
+ df[metric_column_name] = df[metric_column_name].round(decimals=3)
+ if len(df) > 900:
+ df.sort_values(by=metric_column_name, inplace=True)
+ df = pd.concat(
+ [
+ df.iloc[:300],
+ df.iloc[300:-300].sample(n=300, random_state=42),
+ df.iloc[-300:],
+ ]
+ ).reset_index(drop=True)
+
+ elif len(df) > 900:
+ df = df.sample(n=900, random_state=42).reset_index(drop=True)
+
+ if val_outputs.get("explanations") is not None:
+ df["Explanation"] = val_outputs["explanations"]
+
+ path = os.path.join(cfg.output_directory, f"{mode}_viz.parquet")
+ df.to_parquet(path)
+ return PlotData(data=path, encoding="df")
+
+
+def create_batch_prediction_df(
+ batch, tokenizer, ids_for_tokenized_text="input_ids", labels_column="labels"
+):
+ df = pd.DataFrame(
+ {
+ "Prompt Text": [
+ tokenizer.decode(input_ids, skip_special_tokens=True)
+ for input_ids in batch["prompt_input_ids"].detach().cpu().numpy()
+ ]
+ }
+ )
+ df["Prompt Text"] = df["Prompt Text"].apply(format_for_markdown_visualization)
+ if labels_column in batch.keys():
+ df["Answer Text"] = [
+ tokenizer.decode(
+ [label for label in labels if label != -100],
+ skip_special_tokens=True,
+ )
+ for labels in batch.get(labels_column, batch[ids_for_tokenized_text])
+ .detach()
+ .cpu()
+ .numpy()
+ ]
+ tokens_list = [
+ tokenizer.convert_ids_to_tokens(input_ids)
+ for input_ids in batch[ids_for_tokenized_text].detach().cpu().numpy()
+ ]
+ masks_list = [
+ [label != -100 for label in labels]
+ for labels in batch.get(labels_column, batch[ids_for_tokenized_text])
+ .detach()
+ .cpu()
+ .numpy()
+ ]
+ df["Tokenized Text"] = [
+ list_to_markdown_representation(
+ tokens, masks, pad_token=tokenizer.pad_token, num_chars=100
+ )
+ for tokens, masks in zip(tokens_list, masks_list)
+ ]
+ # limit to 2000 rows, still renders fast in wave
+ df = df.iloc[:2000]
+ # Convert into a scrollable table by transposing the dataframe
+ df_transposed = pd.DataFrame(columns=["Sample Number", "Field", "Content"])
+    has_answer = "Answer Text" in df.columns
+    offset = 2 + int(has_answer)
+    for i, row in df.iterrows():
+ df_transposed.loc[i * offset] = [
+ i,
+ "Prompt Text",
+ row["Prompt Text"],
+ ]
+ if has_answer:
+ df_transposed.loc[i * offset + 1] = [
+ i,
+ "Answer Text",
+ row["Answer Text"],
+ ]
+ df_transposed.loc[i * offset + 1 + int(has_answer)] = [
+ i,
+ "Tokenized Text",
+ row["Tokenized Text"],
+ ]
+ return df_transposed
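+
+
+# Resulting layout of the transposed batch table (editor's sketch, contents
+# elided): one block of rows per sample, which Wave renders as a scrollable
+# key-value table.
+#   Sample Number | Field          | Content
+#   0             | Prompt Text    | ...
+#   0             | Answer Text    | ...   (only if labels are present)
+#   0             | Tokenized Text | ...
+#   1             | Prompt Text    | ...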
diff --git a/llm_studio/src/plots/text_dpo_modeling_plots.py b/llm_studio/src/plots/text_dpo_modeling_plots.py
new file mode 100644
index 0000000000000000000000000000000000000000..41cee6c786c8251d26bc3b1c75bae213640cc9d2
--- /dev/null
+++ b/llm_studio/src/plots/text_dpo_modeling_plots.py
@@ -0,0 +1,142 @@
+import hashlib
+import os
+from typing import Any, Dict, List
+
+import pandas as pd
+
+from llm_studio.src.datasets.conversation_chain_handler import get_conversation_chains
+from llm_studio.src.datasets.text_utils import get_tokenizer
+from llm_studio.src.plots.text_causal_language_modeling_plots import (
+ create_batch_prediction_df,
+ plot_validation_predictions,
+)
+from llm_studio.src.utils.data_utils import read_dataframe_drop_missing_labels
+from llm_studio.src.utils.plot_utils import PlotData, format_for_markdown_visualization
+from llm_studio.src.utils.utils import PatchedAttribute
+
+
+class Plots:
+ @classmethod
+ def plot_batch(cls, batch, cfg) -> PlotData:
+ tokenizer = get_tokenizer(cfg)
+ df = create_batch_prediction_df(
+ batch,
+ tokenizer,
+ ids_for_tokenized_text="chosen_input_ids",
+ labels_column="chosen_labels",
+ )
+ path = os.path.join(cfg.output_directory, "batch_viz.parquet")
+ df.to_parquet(path)
+ return PlotData(path, encoding="df")
+
+ @classmethod
+ def plot_data(cls, cfg) -> PlotData:
+ """
+ Plots the data in a scrollable table.
+ We limit the number of rows to max 600 to avoid rendering issues in Wave.
+ As the data visualization is instantiated on every page load, we cache the
+ data visualization in a parquet file.
+ """
+ config_id = (
+ str(cfg.dataset.train_dataframe)
+ + str(cfg.dataset.system_column)
+ + str(cfg.dataset.prompt_column)
+ + str(cfg.dataset.answer_column)
+ + str(cfg.dataset.rejected_answer_column)
+ + str(cfg.dataset.parent_id_column)
+ )
+ config_hash = hashlib.md5(config_id.encode()).hexdigest()
+ path = os.path.join(
+ os.path.dirname(cfg.dataset.train_dataframe),
+ f"__meta_info__{config_hash}_data_viz.parquet",
+ )
+ if os.path.exists(path):
+ return PlotData(path, encoding="df")
+
+ df = read_dataframe_drop_missing_labels(cfg.dataset.train_dataframe, cfg)
+
+ conversations_chosen = get_conversation_chains(
+ df, cfg, limit_chained_samples=True
+ )
+ with PatchedAttribute(
+ cfg.dataset, "answer_column", cfg.dataset.rejected_answer_column
+ ):
+ conversations_rejected = get_conversation_chains(
+ df, cfg, limit_chained_samples=True
+ )
+
+        # Limit to at most 15 prompt-answer rounds per conversation
+ max_conversation_length = min(
+ max(
+ [len(conversation["prompts"]) for conversation in conversations_chosen]
+ ),
+ 15,
+ )
+
+ conversations_to_display: List = []
+ for conversation_length in range(1, max_conversation_length + 1):
+ conversations_to_display += [
+                (conversation_chosen, conversation_rejected)
+                for conversation_chosen, conversation_rejected in zip(
+ conversations_chosen, conversations_rejected
+ )
+ if len(conversation_chosen["prompts"]) == conversation_length
+ ][:5]
+
+ # Convert into a scrollable table by transposing the dataframe
+ df_transposed = pd.DataFrame(columns=["Sample Number", "Field", "Content"])
+
+ i = 0
+        for sample_number, (conversation_chosen, conversation_rejected) in enumerate(
+ conversations_to_display
+ ):
+ if conversation_chosen["systems"][0] != "":
+ df_transposed.loc[i] = [
+ sample_number,
+ "System",
+ conversation_chosen["systems"][0],
+ ]
+ i += 1
+ for prompt, answer_chosen, answer_rejected in zip(
+ conversation_chosen["prompts"],
+ conversation_chosen["answers"],
+                conversation_rejected["answers"],
+ ):
+ df_transposed.loc[i] = [
+ sample_number,
+ "Prompt",
+ prompt,
+ ]
+ i += 1
+ if answer_chosen == answer_rejected:
+ df_transposed.loc[i] = [
+ sample_number,
+ "Answer",
+ answer_chosen,
+ ]
+ i += 1
+ else:
+ df_transposed.loc[i] = [
+ sample_number,
+ "Answer Chosen",
+ answer_chosen,
+ ]
+ i += 1
+ df_transposed.loc[i] = [
+ sample_number,
+ "Answer Rejected",
+ answer_rejected,
+ ]
+ i += 1
+
+ df_transposed["Content"] = df_transposed["Content"].apply(
+ format_for_markdown_visualization
+ )
+ df_transposed.to_parquet(path)
+ return PlotData(path, encoding="df")
+
+ @classmethod
+ def plot_validation_predictions(
+ cls, val_outputs: Dict, cfg: Any, val_df: pd.DataFrame, mode: str
+ ) -> PlotData:
+ return plot_validation_predictions(val_outputs, cfg, val_df, mode)
diff --git a/llm_studio/src/possible_values.py b/llm_studio/src/possible_values.py
new file mode 100644
index 0000000000000000000000000000000000000000..57004143b7cd993accc673ee8629c1b6b3ec50bb
--- /dev/null
+++ b/llm_studio/src/possible_values.py
@@ -0,0 +1,313 @@
+import os
+from abc import abstractmethod
+from typing import Any, Callable, List, Optional, Sequence, Set, Tuple
+
+from pydantic.dataclasses import dataclass
+
+
+def _scan_dirs(dirname: str) -> List[str]:
+ """
+ Recursively scans a directory for subfolders.
+
+ Args:
+ dirname (str): The directory to scan.
+
+ Returns:
+ List[str]: A list of subfolder paths, with '/' appended to each path.
+ """
+
+ subfolders = [f.path for f in os.scandir(dirname) if f.is_dir()]
+    for subfolder in list(subfolders):
+        subfolders.extend(_scan_dirs(subfolder))
+ subfolders = [x + "/" if x[-1] != "/" else x for x in subfolders]
+ return subfolders
+
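+# Example (editor's illustration, hypothetical layout): for a tree
+#   data/
+#   └── train/
+#       └── images/
+# _scan_dirs("data") returns ["data/train/", "data/train/images/"].
+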
+
+def _scan_files(
+ dirname: str, extensions: Tuple[str, ...] = (".csv", ".pq", ".parquet", ".json")
+) -> List[str]:
+ """
+    Recursively scans a directory for files with the given extensions.
+
+ Excludes files starting with "__meta_info__".
+
+ Args:
+ dirname (str): The directory to scan.
+ extensions (Tuple[str, ...]): File extensions to consider.
+
+ Returns:
+ List[str]: A sorted list of file paths matching the given extensions.
+ """
+ path_list = [
+ os.path.join(dirpath, filename)
+ for dirpath, _, filenames in os.walk(dirname)
+ for filename in filenames
+ if any(map(filename.__contains__, extensions))
+ and not filename.startswith("__meta_info__")
+ ]
+ return sorted(path_list)
+
+
+def strip_common_prefix(
+ paths: Sequence[str], ignore_set: Set[str] = set()
+) -> Tuple[str, ...]:
+ """
+ Strips the common prefix from all given paths.
+
+ Args:
+ paths (Sequence[str]): The paths to strip.
+ ignore_set (Set[str]): Set of path names to ignore when computing the prefix.
+
+ Returns:
+ Tuple[str, ...]: A tuple of paths with common prefixes removed.
+ """
+
+ paths_to_check = [
+ os.path.split(os.path.normpath(path))[0]
+ for path in paths
+ if path not in ignore_set
+ ]
+
+ if len(paths_to_check) == 0:
+ return tuple(paths)
+
+ prefix = os.path.commonpath(paths_to_check)
+ stripped = tuple(
+ [
+ path if path in ignore_set else os.path.relpath(path, prefix)
+ for path in paths
+ ]
+ )
+
+ return stripped
+
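+# Example (editor's illustration): the common prefix is computed from the
+# parent directories; entries in `ignore_set` pass through unchanged.
+# strip_common_prefix(
+#     ["/data/user/train.csv", "/data/user/folds/val.csv", "None"],
+#     ignore_set={"None"},
+# )
+#   -> ("train.csv", "folds/val.csv", "None")
+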
+
+class Value:
+ """Base class for value types."""
+
+ pass
+
+
+@dataclass
+class Number:
+ """
+ Represents a numeric range for a setting with optional constraints.
+
+ Attributes:
+ min (float | int): Minimum allowed value. Must be less than or equal to `max`.
+        step (float | int): Step size for value increments.
+        max (Optional[float | int]): Maximum allowed value.
+ If provided, the UI component will be rendered as a slider. Otherwise as \
+ a spinbox.
+ """
+
+ min: float | int
+ step: float | int
+ max: Optional[float | int] = None
+
+ def __post_init__(self):
+ if self.max is not None and self.min > self.max:
+ raise ValueError(
+ f"Expected `min <= max`, got min={self.min} > max={self.max}"
+ )
+
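+# Example (editor's illustration, hypothetical setting): rendered as a slider
+# because `max` is given; omitting `max` yields a spinbox.
+# Number(min=1e-5, step=1e-5, max=1e-1)
+# Number(min=2, step=1, max=1)  # raises ValueError: min > max
+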
+
+@dataclass
+class String:
+ """
+ Represents possible string values for a setting with optional constraints.
+
+ Attributes:
+ values (Tuple[str, ...] | Tuple[Tuple[str, str], ...]):
+ Possible values for the string.
+ - a tuple of tuples (value, name)
+            - a tuple of strings, where each string is used as both value and name
+ allow_custom (bool): Whether custom values are allowed. This will render a \
+ combobox. If False (default), a dropdown will be rendered.
+ placeholder (Optional[str]): Placeholder text for input fields.
+ """
+
+ values: Tuple[str, ...] | Tuple[Tuple[str, str], ...]
+ allow_custom: bool = False
+ placeholder: Optional[str] = None
+
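+# Example (editor's illustration, hypothetical choices): display names can
+# differ from the stored values, or be identical to them.
+# String(values=(("adamw_torch", "AdamW"), ("sgd", "SGD")))
+# String(values=("float16", "bfloat16"), allow_custom=True, placeholder="dtype")
+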
+
+class DatasetValue:
+ """Base class for dataset-related values."""
+
+ @abstractmethod
+ def get_value(
+ self, dataset: Any, value: Any, type_annotation: type
+ ) -> Tuple[String, Any]:
+ """
+ Abstract method to get the value for a dataset.
+
+ Args:
+ dataset (Any): The dataset object.
+ value (Any): The current value.
+ type_annotation (type): The expected type of the value.
+
+ Returns:
+ Tuple[String, Any]: A tuple containing the String object and the value.
+ """
+ raise NotImplementedError
+
+ @staticmethod
+ def _compute_current_values(
+ current_values: List[str],
+ possible_values: List[str],
+ prefer_with: Optional[Callable[[str], bool]] = None,
+ ) -> List[str]:
+ """
+ Compute current values based on possible values and preferences.
+
+ This method does not handle duplicate values and raises an error if either \
+ `current_values` or `possible_values` contain duplicates.
+
+ Args:
+ current_values (List[str]): The preliminary current values.
+ possible_values (List[str]): All possible values.
+ prefer_with (Optional[Callable[[str], bool]]): Function determining which \
+ values to prefer as default.
+
+ Returns:
+ List[str]: A list of computed current values.
+
+ Raises:
+            ValueError: If either `current_values` or `possible_values` contains \
+                duplicate values.
+ """
+
+ if len(set(current_values)) != len(current_values):
+ raise ValueError("Duplicate values in `current_values`")
+
+ if len(set(possible_values)) != len(possible_values):
+ raise ValueError("Duplicate values in `possible_values`")
+
+ if len(possible_values) == 0:
+ return [""]
+
+ # allow only values which are in the possible values
+ current_values = list(
+ filter(lambda value: value in possible_values, current_values)
+ )
+
+ if len(current_values) == 0:
+ # if the values are empty, take all the values where `prefer_with` is true
+ for c in possible_values:
+ if prefer_with is not None and prefer_with(c):
+ current_values.append(c)
+
+ # if they are still empty, just take the first possible value
+ if len(current_values) == 0:
+ current_values = [possible_values[0]]
+
+ return current_values
+
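+# Illustrative behavior of `_compute_current_values` (editor's sketch):
+# DatasetValue._compute_current_values(["b", "x"], ["a", "b", "c"])
+#   -> ["b"]    ("x" is not a possible value and is dropped)
+# DatasetValue._compute_current_values(
+#     ["x"], ["a", "b"], prefer_with=lambda v: v == "b"
+# )
+#   -> ["b"]    (nothing survives filtering, so preferred values are used)
+# DatasetValue._compute_current_values([], [])
+#   -> [""]     (no possible values at all)
+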
+
+@dataclass
+class Files(DatasetValue):
+ """
+ Represents a selection of files from a dataset.
+
+    Used to select a file from a dataset, e.g. for `train_dataframe`.
+
+ Attributes:
+ add_none (bool): Whether to add a "None" option.
+ prefer_with (Optional[Callable[[str], bool]]): Function to determine preferred \
+ values.
+ prefer_none (bool): Whether to prefer "None" as the default option.
+ """
+
+ add_none: bool = False
+ prefer_with: Optional[Callable[[str], bool]] = None
+ # For the case where no match found, whether to prioritize
+ # selecting any file or selecting no file
+ prefer_none: bool = True
+
+ def get_value(
+ self, dataset: Any, value: Any, type_annotation: type
+ ) -> Tuple[String, Any]:
+ """
+ Get the value for file selection.
+
+ Args:
+ dataset (Any): The dataset object.
+ value (Any): The current value.
+ type_annotation (type): The expected type of the value.
+
+ Returns:
+ Tuple[String, Any]: Tuple containing the String object and the current \
+ value.
+ """
+ if dataset is None:
+ return String(tuple()), value
+
+ available_files = _scan_files(dataset["path"])
+ if self.add_none is True:
+ if self.prefer_none:
+ available_files.insert(0, "None")
+ else:
+                available_files.append("None")
+
+ if isinstance(value, str):
+ value = [value]
+
+ value = DatasetValue._compute_current_values(
+ value, available_files, self.prefer_with
+ )
+
+ return (
+ String(
+ tuple(
+ zip(
+ available_files,
+ strip_common_prefix(available_files, ignore_set={"None"}),
+ )
+ )
+ ),
+ value if type_annotation == Tuple[str, ...] else value[0],
+ )
+
+
+@dataclass
+class Columns(DatasetValue):
+ """
+ Represents a selection of columns from a dataset.
+
+    Used to select a column from a dataset, e.g. for `prompt_column`.
+
+ Attributes:
+ add_none (bool): Whether to add a "None" option.
+ prefer_with (Optional[Callable[[str], bool]]): Function to determine preferred \
+ values.
+ """
+
+ add_none: bool = False
+ prefer_with: Optional[Callable[[str], bool]] = None
+
+ def get_value(
+ self, dataset: Any, value: Any, type_annotation: type
+ ) -> Tuple[String, Any]:
+ if dataset is None:
+ return String(tuple()), value
+
+ try:
+ columns = list(dataset["dataframe"].columns)
+ except KeyError:
+ columns = []
+
+ if self.add_none is True:
+ columns.insert(0, "None")
+
+ if isinstance(value, str):
+ value = [value]
+ if value is None:
+ value = [columns[0]]
+
+ value = DatasetValue._compute_current_values(value, columns, self.prefer_with)
+
+ return (
+ String(tuple(columns)),
+ value if type_annotation == Tuple[str, ...] else value[0],
+ )
diff --git a/llm_studio/src/schedulers.py b/llm_studio/src/schedulers.py
new file mode 100644
index 0000000000000000000000000000000000000000..40a0da28e0a9cb8c242218d0e6ea242d8e073a37
--- /dev/null
+++ b/llm_studio/src/schedulers.py
@@ -0,0 +1,84 @@
+import math
+from typing import Any, List
+
+from torch.optim import Optimizer
+from torch.optim.lr_scheduler import LambdaLR
+from transformers import get_constant_schedule_with_warmup
+
+
+def constant_schedule_with_warmup(
+ optimizer: Optimizer, num_warmup_steps: int, **kwargs
+) -> LambdaLR:
+ return get_constant_schedule_with_warmup(
+ optimizer=optimizer, num_warmup_steps=num_warmup_steps
+ )
+
+
+# adjusted from transformers
+def get_cosine_schedule_with_warmup(
+ optimizer: Optimizer,
+ num_warmup_steps: int,
+ num_training_steps: int,
+ min_learning_rate_ratio: float = 0.0,
+ num_cycles: float = 0.5,
+ last_epoch: int = -1,
+):
+
+ def lr_lambda(current_step):
+ if current_step < num_warmup_steps:
+ return float(current_step) / float(max(1, num_warmup_steps))
+ progress = float(current_step - num_warmup_steps) / float(
+ max(1, num_training_steps - num_warmup_steps)
+ )
+ return max(
+ min_learning_rate_ratio,
+ 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)),
+ )
+
+ return LambdaLR(optimizer, lr_lambda, last_epoch)
+
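+# Worked example (editor's illustration): num_warmup_steps=100,
+# num_training_steps=1100 and min_learning_rate_ratio=0.1 give LR multipliers
+#   step 50:   50 / 100 = 0.5                       (linear warmup)
+#   step 600:  progress = 0.5, 0.5 * (1 + cos(0.5 * pi)) = 0.5
+#   step 1100: progress = 1.0, 0.5 * (1 + cos(pi)) = 0.0 -> floored to 0.1
+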
+
+# adjusted from transformers
+def get_linear_schedule_with_warmup(
+ optimizer: Optimizer,
+ num_warmup_steps: int,
+ num_training_steps: int,
+ min_learning_rate_ratio: float = 0.0,
+ last_epoch: int = -1,
+):
+
+ def lr_lambda(current_step: int):
+ if current_step < num_warmup_steps:
+ return float(current_step) / float(max(1, num_warmup_steps))
+ return max(
+ min_learning_rate_ratio,
+ float(num_training_steps - current_step)
+ / float(max(1, num_training_steps - num_warmup_steps)),
+ )
+
+ return LambdaLR(optimizer, lr_lambda, last_epoch)
+
+
+class Schedulers:
+ """Schedulers factory."""
+
+ _schedulers = {
+ "Cosine": get_cosine_schedule_with_warmup,
+ "Linear": get_linear_schedule_with_warmup,
+ "Constant": constant_schedule_with_warmup,
+ }
+
+ @classmethod
+ def names(cls) -> List[str]:
+ return sorted(cls._schedulers.keys())
+
+ @classmethod
+ def get(cls, name: str) -> Any:
+ """Access to Schedulers.
+
+ Args:
+ name: scheduler name
+ Returns:
+ A class to build the Schedulers
+ """
+ return cls._schedulers.get(name)
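+
+
+# Typical usage (editor's sketch; `optimizer` comes from the training loop):
+# scheduler_cls = Schedulers.get("Cosine")
+# scheduler = scheduler_cls(
+#     optimizer=optimizer, num_warmup_steps=100, num_training_steps=1000
+# )
+# scheduler.step()  # called once per optimizer step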
diff --git a/llm_studio/src/tooltips.py b/llm_studio/src/tooltips.py
new file mode 100644
index 0000000000000000000000000000000000000000..665c168f6a14017e1afed2b8f5f383ee5215f86f
--- /dev/null
+++ b/llm_studio/src/tooltips.py
@@ -0,0 +1,194 @@
+import glob
+import re
+from dataclasses import dataclass
+from typing import Dict
+
+tooltip_files = glob.glob("documentation/docs/tooltips/**/*.mdx", recursive=True)
+
+
+def read_tooltip_file(path: str) -> str:
+ """
+ Reads all lines of a text file and returns its content as a single string.
+
+ Args:
+ path (str): The path to the file to be read.
+
+ Returns:
+ str: The entire content of the file as a single string.
+
+ Raises:
+ FileNotFoundError: If the specified file is not found.
+ IOError: If there's an error reading the file.
+ """
+ with open(path) as f:
+ return f.read()
+
+
+def cleanhtml(raw_html: str) -> str:
+ """
+ Removes HTML tags from a string.
+
+ Args:
+ raw_html (str): The string containing HTML tags to be removed.
+
+ Returns:
+ str: The input string with all HTML tags removed.
+ """
+ cleantext = re.sub(re.compile("<[^<]+?>"), "", raw_html)
+ return cleantext
+
+
+def clean_docusaurus_tags(text: str) -> str:
+ """
+ Removes Docusaurus tags from a string.
+
+ Args:
+ text (str): The string containing Docusaurus tags to be removed.
+
+ Returns:
+ str: The input string with Docusaurus tags removed.
+ """
+ text = text.replace(":::info note", "")
+ text = text.replace(":::info Note", "")
+ text = text.replace(":::tip tip", "")
+ text = text.replace(":::", "")
+ return text.strip()
+
+
+def clean_md_links(text: str) -> str:
+ """
+ Removes Markdown links from a string, keeping only the link text.
+
+ Args:
+ text (str): The string containing Markdown links to be cleaned.
+
+ Returns:
+ str: The input string with Markdown links replaced by their text content.
+ """
+ text = re.sub(r"\[(.*?)\]\(.*?\)", r"\1", text)
+ return text
+
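+# Example (editor's illustration):
+# clean_md_links("See the [docs](https://docs.h2o.ai) for details.")
+#   -> "See the docs for details."
+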
+
+@dataclass
+class Tooltip:
+ """
+ Represents a single tooltip with a name and associated text.
+
+ Attributes:
+ name (str): A name for the tooltip.
+ text (str): The content of the tooltip.
+ """
+
+ name: str
+ text: str
+
+ def __repr__(self):
+ return f"{self.name}: {self.text}"
+
+
+class Tooltips:
+ """
+ A collection of tooltips that can be accessed by their names.
+
+ During initialization, all tooltips are read from the specified tooltip files.
+
+ Attributes:
+ tooltips (Dict[str, Tooltip]): A dictionary mapping tooltip names to Tooltip\
+ objects.
+
+ Methods:
+ add_tooltip(tooltip: Tooltip): Adds a new tooltip to the collection.
+ __getitem__(name: str) -> Optional[str]: Retrieves the text of a tooltip by its\
+ name.
+ __len__() -> int: Returns the number of tooltips in the collection.
+ __repr__() -> str: Returns a string representation of the tooltips collection.
+ get(name: str, default=None) -> Optional[str]: Retrieves the text of a tooltip\
+ by its name, with an optional default value.
+ """
+
+ def __init__(self, tooltip_files: list[str] = tooltip_files):
+ """
+ Initializes the Tooltips collection by reading and processing tooltip files.
+
+ Args:
+ tooltip_files (List[str]): A list of file paths to tooltip files.
+
+ Raises:
+ ValueError: If a tooltip file name does not start with an underscore.
+ ValueError: If a duplicate tooltip name is encountered.
+ """
+ self.tooltips: Dict[str, Tooltip] = {}
+ for filename in tooltip_files:
+ name = filename.split("/")[-1].split(".")[0]
+ name = name.replace("-", "_")
+
+ if name.startswith("_"):
+ name = name[1:] # remove leading underscore
+ else:
+ raise ValueError("Tooltip file names must start with an underscore.")
+
+ # documentation/docs/tooltips/SECTION/_TOOLTIPNAME.mdx
+ section = filename.split("/")[3]
+
+ tooltip_name = f"{section}_{name}"
+ if tooltip_name in self.tooltips.keys():
+ raise ValueError("Tooltip names must be unique.")
+
+ text = read_tooltip_file(filename)
+ text = cleanhtml(text)
+ text = clean_docusaurus_tags(text)
+ text = clean_md_links(text)
+
+ self.add_tooltip(Tooltip(tooltip_name, text))
+
+ def add_tooltip(self, tooltip: Tooltip):
+ """
+ Adds a new tooltip to the collection.
+
+ Args:
+ tooltip (Tooltip): The tooltip object to be added.
+ """
+ self.tooltips[tooltip.name] = tooltip
+
+ def __getitem__(self, name: str) -> None | str:
+ """
+ Retrieves the text of a tooltip by its name.
+
+ Args:
+ name (str): The name of the tooltip to retrieve.
+
+ Returns:
+ Optional[str]: The text of the tooltip if found, None otherwise.
+ """
+ try:
+ text = self.tooltips[name].text
+ except KeyError:
+ text = None
+ return text
+
+ def __len__(self) -> int:
+ return len(self.tooltips)
+
+ def __repr__(self):
+ return f"{self.tooltips}"
+
+ def get(self, name: str, default=None):
+ """
+ Retrieves the text of a tooltip by its name, with an optional default value.
+
+ Args:
+ name (str): The name of the tooltip to retrieve.
+ default (Optional[str]): The default value to return if the tooltip is not \
+ found.
+
+ Returns:
+ Optional[str]: The text of the tooltip if found, or the default value \
+ otherwise.
+ """
+ if name in self.tooltips.keys():
+ return self.tooltips[name].text
+ else:
+ return default
+
+
+tooltips = Tooltips()
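+
+# Name derivation (editor's illustration with a hypothetical file):
+# "documentation/docs/tooltips/experiments/_learning-rate.mdx"
+#   -> section "experiments", name "learning_rate"
+#   -> key "experiments_learning_rate"
+# tooltips.get("experiments_learning_rate") then returns the cleaned text,
+# or None if no such tooltip exists.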
diff --git a/llm_studio/src/utils/__init__.py b/llm_studio/src/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llm_studio/src/utils/config_utils.py b/llm_studio/src/utils/config_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9b06deef4cd5412730efedda2c834af3831de6a
--- /dev/null
+++ b/llm_studio/src/utils/config_utils.py
@@ -0,0 +1,215 @@
+import dataclasses
+import importlib
+from types import ModuleType
+from typing import Any, Dict, List, Type
+
+import yaml
+
+from llm_studio.python_configs.base import DefaultConfigProblemBase
+from llm_studio.src.utils.type_annotations import KNOWN_TYPE_ANNOTATIONS
+
+
+def _recursive_reload(module):
+ """Recursively reload modules.
+
+ Args:
+ module: module to reload
+ """
+
+ for attribute_name in dir(module):
+ if "Config" in attribute_name:
+ attribute1 = getattr(module, attribute_name)
+            for inner_attribute_name in dir(attribute1):
+                attribute2 = getattr(attribute1, inner_attribute_name)
+ if type(attribute2) is ModuleType:
+ importlib.reload(attribute2)
+
+
+def _load_cls(module_path: str, cls_name: str) -> DefaultConfigProblemBase:
+ """Loads the python class.
+
+ Args:
+ module_path: path to the module
+ cls_name: name of the class
+
+ Returns:
+ Loaded python class
+ """
+
+ module_path_fixed = module_path
+ if module_path_fixed.endswith(".py"):
+ module_path_fixed = module_path_fixed[:-3]
+ module_path_fixed = module_path_fixed.replace("/", ".")
+
+ module = importlib.import_module(module_path_fixed)
+ module = importlib.reload(module)
+ _recursive_reload(module)
+ module = importlib.reload(module)
+
+ assert hasattr(module, cls_name), "{} file should contain {} class".format(
+ module_path, cls_name
+ )
+
+ cls: DefaultConfigProblemBase = getattr(module, cls_name)()
+
+ return cls
+
+
+def load_config_py(
+ config_path: str, config_name: str = "Config"
+) -> DefaultConfigProblemBase:
+ """Loads the config class.
+
+ Args:
+ config_path: path to the config file
+ config_name: name of the config class
+
+ Returns:
+ Loaded config class
+ """
+
+ return _load_cls(config_path, config_name)
+
+
+def _get_type_annotation_error(v: Any, type_annotation: Type) -> ValueError:
+ return ValueError(
+ f"Cannot show {v}: not a dataclass"
+ f" and {type_annotation} is not a known type annotation."
+ )
+
+
+def convert_cfg_base_to_nested_dictionary(cfg: DefaultConfigProblemBase) -> dict:
+ """Returns a grouped config settings dict for a given configuration
+
+ Args:
+        cfg: configuration
+
+ Returns:
+ Dict of configuration settings
+ """
+
+ cfg_dict = cfg.__dict__
+ type_annotations = cfg.get_annotations()
+ cfg_dict = {key: cfg_dict[key] for key in cfg._get_order()}
+
+ grouped_cfg_dict = {}
+
+ for k, v in cfg_dict.items():
+ if k.startswith("_"):
+ continue
+
+ if any([x in k for x in ["api", "secret", "key"]]):
+ raise AssertionError(
+ "Config item must not contain the word 'api', 'secret', or 'key'"
+ )
+
+ type_annotation = type_annotations[k]
+
+ if type_annotation in KNOWN_TYPE_ANNOTATIONS:
+ grouped_cfg_dict.update({k: v})
+ elif dataclasses.is_dataclass(v):
+ group_items = parse_cfg_dataclass(cfg=v)
+ group_items = {
+ k: list(v) if isinstance(v, tuple) else v
+ for d in group_items
+ for k, v in d.items()
+ }
+ grouped_cfg_dict.update({k: group_items})
+ else:
+ raise _get_type_annotation_error(v, type_annotations[k])
+
+ # not an explicit field in the config
+ grouped_cfg_dict["problem_type"] = cfg.problem_type
+ return grouped_cfg_dict
+
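+# Shape of the result (editor's sketch; keys and values are hypothetical):
+# {
+#     "dataset": {"prompt_column": ["instruction"], "answer_column": "output"},
+#     "training": {"batch_size": 2, "epochs": 1},
+#     "problem_type": "text_causal_language_modeling",
+# }
+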
+
+def convert_nested_dictionary_to_cfg_base(
+ cfg_dict: Dict[str, Any]
+) -> DefaultConfigProblemBase:
+ """
+ Inverse operation of convert_cfg_base_to_nested_dictionary
+ """
+ problem_type = cfg_dict["problem_type"]
+ module_name = f"llm_studio.python_configs.{problem_type}_config"
+ try:
+ module = importlib.import_module(module_name)
+ except ModuleNotFoundError:
+ raise NotImplementedError(f"Problem Type {problem_type} not implemented")
+ return module.ConfigProblemBase.from_dict(cfg_dict)
+
+
+def parse_cfg_dataclass(cfg) -> List[Dict]:
+ """Returns all single config settings for a given configuration
+
+ Args:
+ cfg: configuration
+ """
+
+ items = []
+
+ cfg_dict = cfg.__dict__
+ type_annotations = cfg.get_annotations()
+ cfg_dict = {key: cfg_dict[key] for key in cfg._get_order()}
+
+ for k, v in cfg_dict.items():
+ if k.startswith("_"):
+ continue
+
+ if any([x in k for x in ["api", "secret", "key"]]):
+ continue
+
+ type_annotation = type_annotations[k]
+
+ if type_annotation in KNOWN_TYPE_ANNOTATIONS:
+ if type_annotation == float:
+ v = float(v)
+ t = [{k: v}]
+ elif dataclasses.is_dataclass(v):
+ elements_group = parse_cfg_dataclass(cfg=v)
+ t = elements_group
+ else:
+ continue
+
+ items += t
+
+ return items
+
+
+def save_config_yaml(path: str, cfg: DefaultConfigProblemBase) -> None:
+ """Saves config as yaml file
+
+ Args:
+ path: path of file to save to
+        cfg: config to save
+    """
+ cfg_dict = convert_cfg_base_to_nested_dictionary(cfg)
+ with open(path, "w") as fp:
+ yaml.dump(cfg_dict, fp, indent=4)
+
+
+def load_config_yaml(path: str) -> DefaultConfigProblemBase:
+ """Loads config from yaml file
+
+ Args:
+ path: path of file to load from
+ Returns:
+ config object
+ """
+ with open(path, "r") as fp:
+ cfg_dict = yaml.load(fp, Loader=yaml.FullLoader)
+ return convert_nested_dictionary_to_cfg_base(cfg_dict)
+
+
+# Note that importing ConfigProblemBase from the python_configs
+# and using cfg.problem_type below will not work because of circular imports
+GENERATION_PROBLEM_TYPES = ["text_causal_language_modeling", "text_dpo_modeling"]
+NON_GENERATION_PROBLEM_TYPES = [
+ "text_causal_classification_modeling",
+ "text_causal_regression_modeling",
+]
diff --git a/llm_studio/src/utils/data_utils.py b/llm_studio/src/utils/data_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..711530515c67ef498c3609d280679a6af71daa34
--- /dev/null
+++ b/llm_studio/src/utils/data_utils.py
@@ -0,0 +1,693 @@
+import logging
+import math
+import os
+from typing import Any, DefaultDict, Dict, List, Optional, Tuple, Union, no_type_check
+
+import networkx as nx
+import numpy as np
+import pandas as pd
+import pyarrow.parquet as pq
+import torch
+from sklearn.model_selection import GroupShuffleSplit, train_test_split
+from torch import distributed as dist
+from torch.utils.data import DataLoader, Dataset, Sampler, SequentialSampler
+
+from llm_studio.python_configs.base import DefaultConfigProblemBase
+from llm_studio.src.datasets.conversation_chain_handler import ConversationChainHandler
+from llm_studio.src.utils.exceptions import LLMDataException
+from llm_studio.src.utils.gpu_utils import sync_across_processes
+from llm_studio.src.utils.utils import PatchedAttribute, set_seed
+
+logger = logging.getLogger(__name__)
+
+
+def read_dataframe(
+ path: str,
+ n_rows: int = -1,
+ meta_only: bool = False,
+ non_missing_columns: Optional[List[str]] = None,
+ verbose: bool = False,
+ handling: str = "warn",
+ fill_columns: Optional[List[str]] = None,
+ fill_value: Any = "",
+ mode: str = "",
+) -> pd.DataFrame:
+ """Reading a dataframe from different file types
+
+ Args:
+ path: path of the dataframe
+ n_rows: number of rows to limit to
+ meta_only: return only meta information
+ non_missing_columns: list of columns that cannot contain missing values
+        verbose: whether a warning about dropped rows should be logged
+ handling: how to handle missing values
+ fill_columns: columns where empty value should be filled (used for empty text)
+ fill_value: value to fill empty columns with (used for empty text)
+ mode: dataset type, used only for better exception/log information
+ Returns:
+ DataFrame
+
+ """
+
+ non_missing_columns = [] if non_missing_columns is None else non_missing_columns
+ fill_columns = [] if fill_columns is None else fill_columns
+
+    dir_name, file_name = os.path.split(path)
+    meta_info_path = os.path.join(
+        dir_name,
+        "__meta_info__" + file_name + ".csv",
+    )
+ if meta_only and os.path.exists(meta_info_path):
+ path = meta_info_path
+
+ if path.endswith(".csv"):
+ df = pd.read_csv(path, lineterminator="\n").reset_index(drop=True)
+ elif path.endswith(".pq") or path.endswith(".parquet"):
+ try:
+ df = pd.read_parquet(path, engine="pyarrow").reset_index(drop=True)
+ except Exception:
+ df = pd.read_parquet(path, engine="fastparquet").reset_index(drop=True)
+ elif path.endswith(".json") or path == "":
+ return pd.DataFrame()
+ else:
+ raise ValueError(
+ f"Could not determine type of file {path}: "
+ f"CSV (`.csv`) and Parquet (`.pq` and `.parquet`) are supported."
+ )
+
+ if fill_columns:
+ df[fill_columns] = df[fill_columns].fillna(fill_value)
+
+ if meta_only and os.path.exists(meta_info_path):
+ return df
+
+ non_missing_columns = [x for x in non_missing_columns if x in df]
+ if len(non_missing_columns):
+ orig_size = df.shape[0]
+ non_missing_index = df[non_missing_columns].dropna().index
+ dropped_index = [idx for idx in df.index if idx not in non_missing_index]
+ df = df.loc[non_missing_index].reset_index(drop=True)
+ new_size = df.shape[0]
+ if new_size < orig_size and verbose:
+ logger.warning(
+ f"Dropped {orig_size - new_size} rows when reading dataframe '{path}' "
+ f"due to missing values encountered in one of the following columns:"
+ f" {non_missing_columns} in the following rows: {dropped_index}"
+ )
+
+ if handling == "error":
+ dropped_str = dropped_index
+
+ if len(dropped_str) > 10:
+ dropped_str = dropped_str[:5] + ["..."] + dropped_str[-5:]
+
+ dropped_str = ", ".join([str(x) for x in dropped_str])
+ prefix = f"{mode} " if mode else ""
+ error = (
+ f"{prefix}dataset contains {len(dropped_index)} rows with missing "
+ f"values in one of the following columns: {non_missing_columns} in "
+ f"the following rows: {dropped_str}"
+ )
+
+ raise ValueError(error.capitalize())
+
+ if n_rows > -1:
+ df = df.iloc[sample_indices(len(df), n_indices=n_rows)]
+
+ # create meta information dataframe if it does not exist
+ if not os.path.exists(meta_info_path):
+ df_meta = pd.DataFrame(columns=df.columns)
+ df_meta.to_csv(meta_info_path, index=False)
+
+ return df
+
+
+def get_fill_columns(cfg: DefaultConfigProblemBase) -> List[str]:
+ if hasattr(cfg.dataset, "prompt_column"):
+ if isinstance(cfg.dataset.prompt_column, (list, tuple)):
+ return list(cfg.dataset.prompt_column)
+ return [cfg.dataset.prompt_column]
+
+ return []
+
+
+def read_dataframe_drop_missing_labels(
+ path: str, cfg: DefaultConfigProblemBase
+) -> pd.DataFrame:
+ if isinstance(cfg.dataset.prompt_column, tuple):
+ input_cols = list(cfg.dataset.prompt_column)
+ else:
+ input_cols = [cfg.dataset.prompt_column]
+ fill_columns = get_fill_columns(cfg)
+ df = read_dataframe(
+ path,
+ non_missing_columns=input_cols,
+ verbose=True,
+ fill_columns=fill_columns,
+ )
+ df[input_cols] = df[input_cols].fillna("").astype(str)
+ if (
+ hasattr(cfg.dataset, "answer_column")
+ and not isinstance(cfg.dataset.answer_column, (list, tuple))
+ and cfg.dataset.answer_column in df.columns
+ ):
+ df[cfg.dataset.answer_column] = (
+ df[cfg.dataset.answer_column].fillna("").astype(str)
+ )
+ return df
+
+
+def is_valid_data_frame(path: str, csv_rows: int = 100) -> bool:
+ """Checking data frame format
+
+ Args:
+ path: path of the dataframe
+ csv_rows: number of rows to limit to when checking csv files
+
+ Returns:
+ bool
+
+ """
+ try:
+ if path.endswith(".csv"):
+ pd.read_csv(path, nrows=csv_rows, lineterminator="\n")
+ elif path.endswith(".pq") or path.endswith(".parquet"):
+ pq.ParquetFile(path)
+ else:
+ raise ValueError(
+ f"Could not determine type of file {path}: "
+ f"CSV (`.csv`) and Parquet (`.pq` and `.parquet`) are supported."
+ )
+ except Exception as e:
+ logger.error(str(e))
+ return False
+ return True
+
+
+def sample_data(cfg: DefaultConfigProblemBase, df: pd.DataFrame) -> pd.DataFrame:
+ """Sample data from the dataframe"""
+
+ if cfg.dataset.parent_id_column != "None" and "id" in df.columns:
+ parent_mapping = df.set_index("id")["parent_id"].to_dict()
+
+ # A recursive function to get the root id for each node
+ def get_root(node):
+ parent = parent_mapping.get(node)
+ if parent is None or pd.isna(parent):
+ return node
+ return get_root(parent)
+
+ # Apply the function to assign each row the root id
+ df["root_id"] = df["id"].apply(get_root)
+
+ # Sample root_ids without replacement
+ root_ids = df["root_id"].unique()
+ n_sampled_root_ids = int(len(root_ids) * cfg.dataset.data_sample)
+
+ np.random.seed(7331)
+ sampled_root_ids = np.random.choice(
+ root_ids, size=n_sampled_root_ids, replace=False
+ )
+
+ # Filter the dataframe to only include rows with sampled root_ids
+ df = df[df["root_id"].isin(sampled_root_ids)].reset_index(drop=True)
+ del df["root_id"]
+ else:
+ # at least 10 observations
+ n = max(10, int(len(df) * cfg.dataset.data_sample))
+ df = df.sample(n=min(n, len(df)), random_state=7331, replace=False)
+
+ return df
+
+
+def load_mt_bench_data(cfg: DefaultConfigProblemBase) -> pd.DataFrame:
+ """Loads MT-BENCH data.
+
+ Args:
+ cfg: input config
+
+ Returns:
+ MT-BENCH DataFrame
+ """
+
+ prompt_column = cfg.dataset.prompt_column[0]
+ answer_column = cfg.dataset.answer_column
+
+    df = pd.read_json("prompts/mt-bench/question.jsonl", lines=True)
+ df = df.rename(columns={"turns": prompt_column, "reference": answer_column})
+ df[prompt_column] = df[prompt_column].apply(lambda x: x[0])
+ df[answer_column] = (
+ df[answer_column].fillna("").apply(lambda x: x[0] if x != "" else x)
+ )
+
+ if (
+ hasattr(cfg.dataset, "rejected_prompt_column")
+ and cfg.dataset.rejected_prompt_column != "None"
+ ):
+ df[cfg.dataset.rejected_prompt_column] = df[prompt_column]
+
+ return df
+
+
+def get_data(cfg: DefaultConfigProblemBase) -> Tuple[pd.DataFrame, pd.DataFrame]:
+ """Prepares train and validation DataFrames.
+
+ Args:
+ cfg: DefaultConfigProblemBase input config
+
+ Returns:
+ Train and validation DataFrames
+ """
+
+ train_df, val_df = load_train_valid_data(cfg)
+
+ if (
+ hasattr(cfg.prediction, "metric_gpt_template")
+ and cfg.prediction.metric_gpt_template == "mt-bench"
+ ):
+ logger.info(
+ "Overwriting validation data with MT-BENCH data. Please note that "
+ "respective metric is an approximation and might not fully match "
+ "the original implementation."
+ )
+ val_df = load_mt_bench_data(cfg)
+
+ if cfg.dataset.data_sample < 1.0:
+ if "Train" in cfg.dataset.data_sample_choice:
+ train_df = sample_data(cfg, train_df)
+ if "Validation" in cfg.dataset.data_sample_choice:
+ val_df = sample_data(cfg, val_df)
+
+ if cfg.training.train_validation_data:
+ train_df = pd.concat([train_df, val_df], axis=0)
+
+ train_df = cfg.dataset.dataset_class.preprocess_dataframe(train_df, cfg)
+ val_df = cfg.dataset.dataset_class.preprocess_dataframe(val_df, cfg)
+
+ return train_df.reset_index(drop=True), val_df.reset_index(drop=True)
+
+
+def merge_on_common_items(lst):
+ G = nx.Graph()
+ for sublst in lst:
+ for item in sublst:
+ G.add_edge(sublst[0], item)
+ return [list(c) for c in nx.connected_components(G)]
+
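+# Example (editor's illustration): groups sharing any element are merged
+# into one connected component (ordering of the output is not guaranteed).
+# merge_on_common_items([[1, 2], [2, 3], [4]]) -> [[1, 2, 3], [4]]
+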
+
+def load_train_valid_data(cfg) -> Tuple[pd.DataFrame, pd.DataFrame]:
+ if cfg.dataset.validation_strategy == "custom":
+ if cfg.dataset.validation_dataframe == "None":
+ raise LLMDataException(
+ "No validation dataframe provided. "
+ "Please provide a validation dataframe or "
+ "choose a different validation strategy."
+ )
+ train_df = read_dataframe_drop_missing_labels(cfg.dataset.train_dataframe, cfg)
+ val_df = read_dataframe_drop_missing_labels(
+ cfg.dataset.validation_dataframe, cfg
+ )
+ elif cfg.dataset.validation_strategy == "automatic":
+ logger.info("Setting up automatic validation split...")
+ df = read_dataframe_drop_missing_labels(cfg.dataset.train_dataframe, cfg)
+ if cfg.dataset.parent_id_column != "None":
+ # split based on conversation_chain_ids
+ # this ensures that all samples from the
+ # same conversation are in the same fold
+ with PatchedAttribute(cfg.dataset, "limit_chained_samples", True):
+ conversation_chain_ids = ConversationChainHandler(
+ df=df, cfg=cfg
+ ).conversation_chain_ids
+ # Some conversations may have the same parent id, e.g. for OASST
+ # 6aa548c6-65ad-4531-9411-76173ae060a3 and
+ # 2a164c2a-4f0e-45aa-8990-e7dd3b51c06b
+ # have the same parent a8df94e3-cfc7-4736-9587-0ec943d0fec3
+ # We need to merge those into a single group
+ conversation_chain_ids = merge_on_common_items(conversation_chain_ids)
+ conversation_chain_labels = [
+ i
+ for i, conversation_chain_id in enumerate(conversation_chain_ids)
+ for _ in conversation_chain_id
+ ]
+ group_shuffle_split = GroupShuffleSplit(
+ test_size=cfg.dataset.validation_size, n_splits=1, random_state=1337
+ )
+ train_idx, val_idx = next(
+ group_shuffle_split.split(df, groups=conversation_chain_labels)
+ )
+ # flatten conversation_chain_ids
+ flattened_conversation_chain_ids = np.array(
+ [
+ idx
+ for conversation_chain_id in conversation_chain_ids
+ for idx in conversation_chain_id
+ ]
+ )
+ train_df = df.iloc[flattened_conversation_chain_ids[train_idx]].reset_index(
+ drop=True
+ )
+ val_df = df.iloc[flattened_conversation_chain_ids[val_idx]].reset_index(
+ drop=True
+ )
+ else:
+ train_df, val_df = train_test_split(
+ df, test_size=cfg.dataset.validation_size, random_state=1337
+ )
+ else:
+ raise LLMDataException("No valid validation strategy provided.")
+ return train_df, val_df
+
+
+def worker_init_fn(worker_id: int) -> None:
+ """Sets the random seed for each worker.
+
+ Args:
+ worker_id: ID of the corresponding worker
+ """
+
+ if "PYTHONHASHSEED" in os.environ:
+ seed = int(os.environ["PYTHONHASHSEED"]) + worker_id
+ else:
+ seed = np.random.get_state()[1][0] + worker_id # type: ignore
+ set_seed(seed)
+
+
+def get_train_dataset(train_df: pd.DataFrame, cfg: DefaultConfigProblemBase) -> Dataset:
+ """Prepares train Dataset.
+
+ Args:
+ train_df: train DataFrame
+ cfg: DefaultConfigProblemBase input config
+
+ Returns:
+ Train Dataset
+ """
+
+ logger.info("Loading train dataset...")
+
+ train_dataset: Dataset = cfg.dataset.dataset_class(
+ df=train_df, cfg=cfg, mode="train"
+ )
+ return train_dataset
+
+
+def get_train_dataloader(train_ds: Any, cfg: DefaultConfigProblemBase) -> DataLoader:
+ """Prepares train DataLoader.
+
+ Args:
+ train_ds: train Dataset
+ cfg: input config
+
+ Returns:
+ Train Dataloader
+ """
+
+ sampler: Sampler
+ if cfg.environment._distributed:
+ sampler = torch.utils.data.distributed.DistributedSampler(
+ train_ds,
+ num_replicas=cfg.environment._world_size,
+ rank=cfg.environment._local_rank,
+ shuffle=True,
+ seed=cfg.environment._seed,
+ drop_last=True,
+ )
+ sampler_length = len(sampler)
+ else:
+ sampler = None
+ sampler_length = len(train_ds)
+
+ if sampler_length < cfg.training.batch_size and cfg.training.drop_last_batch:
+ logger.warning(
+ "Training data too small when dropping last batch. Number of rows "
+ "should be at least batch size multiplied by number of gpus. "
+ "Forcing to keep last batch."
+ )
+ cfg.training.drop_last_batch = False
+ if sampler_length <= 1:
+ raise LLMDataException("Data too small to train model.")
+
+ train_dataloader = DataLoader(
+ train_ds,
+ sampler=sampler,
+ shuffle=(sampler is None),
+ batch_size=cfg.training.batch_size,
+ num_workers=cfg.environment.number_of_workers,
+ pin_memory=True,
+ collate_fn=train_ds.get_train_collate_fn(),
+ drop_last=cfg.training.drop_last_batch,
+ worker_init_fn=worker_init_fn,
+ )
+
+ logger.info(f"Number of observations in train dataset: {len(train_ds)}")
+
+ return train_dataloader
+
+
+def get_val_dataset(val_df: pd.DataFrame, cfg: DefaultConfigProblemBase):
+ """Prepares validation Dataset.
+
+ Args:
+ val_df: validation DataFrame
+ cfg: DefaultConfigProblemBase input config
+
+ Returns:
+ Validation Dataset
+ """
+
+ logger.info("Loading validation dataset...")
+ val_dataset = cfg.dataset.dataset_class(df=val_df, cfg=cfg, mode="validation")
+
+ return val_dataset
+
+
+def get_val_dataloader(val_ds: Any, cfg: DefaultConfigProblemBase):
+ """Prepares validation DataLoader.
+
+ Args:
+ val_ds: validation Dataset
+ cfg: DefaultConfigProblemBase input config
+
+ Returns:
+ Validation Dataloader
+ """
+
+ sampler: Sampler
+ if cfg.environment._distributed and cfg.environment._distributed_inference:
+ sampler = OrderedDistributedSampler(
+ val_ds,
+ num_replicas=cfg.environment._world_size,
+ rank=cfg.environment._local_rank,
+ )
+ else:
+ sampler = SequentialSampler(val_ds)
+
+ batch_size = get_inference_batch_size(cfg)
+
+ val_dataloader = DataLoader(
+ val_ds,
+ sampler=sampler,
+ batch_size=batch_size,
+ num_workers=cfg.environment.number_of_workers,
+ pin_memory=True,
+ collate_fn=val_ds.get_validation_collate_fn(),
+ worker_init_fn=worker_init_fn,
+ )
+
+ logger.info(f"Number of observations in validation dataset: {len(val_ds)}")
+
+ return val_dataloader
+
+
+@no_type_check
+def cat_batches(
+ data: DefaultDict[str, Union[torch.Tensor, np.ndarray]]
+) -> DefaultDict[str, Union[torch.Tensor, np.ndarray]]:
+ """Concatenates output data from several batches
+
+ Args:
+ data: dict with keys and list of batch outputs
+
+ Returns:
+ Concatenated dict
+
+ """
+
+ for key, value in data.items():
+ if len(value[0].shape) == 0:
+ if isinstance(value[0], torch.Tensor):
+ data[key] = torch.stack(value)
+ else:
+ data[key] = np.stack(value)
+ else:
+ if isinstance(value[0], torch.Tensor):
+ data[key] = torch.cat(value, dim=0)
+ else:
+ data[key] = np.concatenate(value, axis=0)
+
+ return data
+
+
+class OrderedDistributedSampler(Sampler):
+ """
+ Sampler that restricts data loading to a subset of the dataset.
+ It is especially useful in conjunction with
+ :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
+ process can pass a DistributedSampler instance as a DataLoader sampler,
+ and load a subset of the original dataset that is exclusive to it.
+ Source:
+ https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/distributed_sampler.py
+ """
+
+ def __init__(
+ self,
+ dataset: Any,
+ num_replicas: Optional[int] = None,
+ rank: Optional[int] = None,
+ ):
+ """
+ Args:
+ dataset: Dataset used for sampling
+ num_replicas: Number of processes participating in distributed training
+ rank: Rank of the current process within num_replicas
+ """
+
+ if num_replicas is None:
+ if not dist.is_available():
+ raise RuntimeError("Requires distributed package to be available")
+ num_replicas = dist.get_world_size()
+ if rank is None:
+ if not dist.is_available():
+ raise RuntimeError("Requires distributed package to be available")
+ rank = dist.get_rank()
+ self.dataset = dataset
+ self.num_replicas = num_replicas
+ self.rank = rank
+ self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
+ self.total_size = self.num_samples * self.num_replicas
+
+ def __iter__(self):
+ indices = list(range(len(self.dataset)))
+
+ # add extra samples to make it evenly divisible
+ indices += [0] * (self.total_size - len(indices))
+ assert len(indices) == self.total_size
+
+ # subsample
+ indices = indices[
+ self.rank * self.num_samples : self.rank * self.num_samples
+ + self.num_samples
+ ]
+ assert len(indices) == self.num_samples
+
+ return iter(indices)
+
+ def __len__(self):
+ return self.num_samples
+
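+# Example (editor's illustration): len(dataset) == 10 and num_replicas == 4
+# give num_samples == 3 and total_size == 12; indices are padded with zeros
+# to [0, 1, ..., 9, 0, 0], so rank 0 iterates [0, 1, 2] and rank 3 [9, 0, 0].
+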
+
+def sample_indices(length: int, n_indices: int = 10, seed: int = 1337) -> np.ndarray:
+ """Samples random indices
+
+ Args:
+ length: length to sample from
+ n_indices: number of indices to sample
+ seed: seed for sampling
+
+ Returns:
+ sampled indices
+ """
+ state = np.random.get_state()
+ np.random.seed(seed)
+ idx = np.random.choice(
+ np.arange(length), size=min(length, n_indices), replace=False
+ )
+ np.random.set_state(state)
+
+ return idx
+
+
+def get_inference_batch_size(cfg: DefaultConfigProblemBase) -> int:
+ """Calculates inference batch size
+
+ Args:
+ cfg: DefaultConfigProblemBase config with all the hyperparameters
+ Returns:
+ Inference batch size
+ """
+
+ if cfg.prediction.batch_size_inference != 0:
+ return cfg.prediction.batch_size_inference
+ else:
+ return cfg.training.batch_size
+
+
+def sanity_check(cfg):
+ """
+ Perform sanity check on the data
+ """
+
+ df = read_dataframe_drop_missing_labels(cfg.dataset.train_dataframe, cfg)
+ cfg.dataset.dataset_class.sanity_check(df=df, cfg=cfg, mode="train")
+ valid_filename = cfg.dataset.validation_dataframe
+ if isinstance(valid_filename, str) and os.path.exists(valid_filename):
+ df = read_dataframe_drop_missing_labels(valid_filename, cfg)
+ cfg.dataset.dataset_class.sanity_check(df=df, cfg=cfg, mode="validation")
+
+
+def batch_padding(
+ cfg: DefaultConfigProblemBase,
+ batch: Dict,
+ training: bool = True,
+ mask_key: str = "attention_mask",
+ pad_keys: List[str] = ["input_ids", "attention_mask", "special_tokens_mask"],
+ padding_side: str = "left",
+) -> Dict:
+    """Pads a batch according to the set quantile, or cuts it at the maximum length"""
+ if cfg.environment.compile_model:
+ # logger.warning("Batch padding not functional with torch compile.")
+ return batch
+ elif batch[mask_key].sum() == 0:
+ # continued pretraining
+ return batch
+ elif cfg.tokenizer.padding_quantile == 0:
+ return batch
+ elif training and cfg.tokenizer.padding_quantile < 1.0:
+ if padding_side == "left":
+ lengths = torch.stack(
+ [
+ torch.where(batch[mask_key][i] == 1)[0].min()
+ for i in range(batch[mask_key].size(0))
+ ]
+ ).float()
+ quantile = 1 - cfg.tokenizer.padding_quantile
+ else:
+ lengths = torch.stack(
+ [
+ torch.where(batch[mask_key][i] == 1)[0].max()
+ for i in range(batch[mask_key].size(0))
+ ]
+ ).float()
+ quantile = cfg.tokenizer.padding_quantile
+ if cfg.environment._distributed:
+ lengths = sync_across_processes(
+ lengths, cfg.environment._world_size
+ ) # type: ignore
+ idx = int(torch.floor(torch.quantile(lengths, quantile)))
+ else:
+ if padding_side == "left":
+ idx = int(torch.where(batch[mask_key] == 1)[1].min())
+ else:
+ idx = int(torch.where(batch[mask_key] == 1)[1].max())
+
+ if padding_side == "left":
+ for key in pad_keys:
+ if key in batch:
+ batch[key] = batch[key][:, idx:].contiguous()
+ else:
+ idx += 1
+ for key in pad_keys:
+ if key in batch:
+ batch[key] = batch[key][:, :idx].contiguous()
+
+ return batch
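+
+
+# Worked example (editor's illustration): with left padding and an
+# attention_mask whose rows first become 1 at indices [3, 1, 2], a
+# padding_quantile of 0.75 uses quantile 0.25 of those start indices:
+# floor(quantile([3, 1, 2], 0.25)) == floor(1.5) == 1, so the first column
+# of every key in `pad_keys` is cut; no real token is lost in this case.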
diff --git a/llm_studio/src/utils/exceptions.py b/llm_studio/src/utils/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a8b6c91f00cda274b5b3998688bd00f232f74d3
--- /dev/null
+++ b/llm_studio/src/utils/exceptions.py
@@ -0,0 +1,22 @@
+class LLMDataException(Exception):
+ pass
+
+
+class LLMModelException(Exception):
+ pass
+
+
+class LLMAugmentationsException(Exception):
+ pass
+
+
+class LLMMetricException(Exception):
+ pass
+
+
+class LLMTrainingException(Exception):
+ pass
+
+
+class LLMResourceException(Exception):
+ pass
diff --git a/llm_studio/src/utils/export_utils.py b/llm_studio/src/utils/export_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f890dd99ef2ea219465b82f27b32de8df0ead22b
--- /dev/null
+++ b/llm_studio/src/utils/export_utils.py
@@ -0,0 +1,171 @@
+import json
+import logging
+import os
+import zipfile
+from typing import Optional
+
+import pandas as pd
+
+from llm_studio.src.utils.exceptions import LLMResourceException
+from llm_studio.src.utils.utils import add_file_to_zip
+
+
+def get_artifact_path_path(
+ experiment_name: str, experiment_path: str, artifact_type: str
+):
+ """Get path to experiment artifact zipfile
+
+ Args:
+ experiment_name: name of the experiment
+ experiment_path: path containing experiment related files
+ artifact_type: type of the artifact
+
+ Returns:
+ Path to the zip file with experiment artifact
+ """
+
+ return os.path.join(experiment_path, f"{artifact_type}_{experiment_name}.zip")
+
+
+def get_prediction_dataframe(experiment_path: str):
+    """Return the dataframe with validation predictions"""
+    return pd.read_csv(f"{experiment_path}/validation_predictions.csv")
+
+
+def get_predictions_path(experiment_name: str, experiment_path: str):
+ """Get path to experiment predictions"""
+
+ return get_artifact_path_path(experiment_name, experiment_path, "preds")
+
+
+def get_logs_path(experiment_name: str, experiment_path: str):
+ """Get path to experiment logs"""
+
+ return get_artifact_path_path(experiment_name, experiment_path, "logs")
+
+
+def get_model_path(experiment_name: str, experiment_path: str):
+ """Get path to experiment model"""
+
+ return get_artifact_path_path(experiment_name, experiment_path, "model")
+
+
+def get_adapter_model_path(experiment_name: str, experiment_path: str):
+ """Get path to adapter model"""
+
+ return get_artifact_path_path(experiment_name, experiment_path, "adapter_model")
+
+
+def check_available_space(output_folder: str, min_disk_space: Optional[float]):
+ if not min_disk_space:
+ return True
+
+ stats = os.statvfs(output_folder)
+ available_size = stats.f_frsize * stats.f_bavail
+
+ if available_size < min_disk_space:
+ error = (
+ f"Not enough disk space. Available space is {get_size_str(available_size)}."
+ f" Required space is {get_size_str(min_disk_space)}."
+ )
+ raise LLMResourceException(error)
+
+
+def save_prediction_outputs(
+ experiment_name: str,
+ experiment_path: str,
+):
+ """Save experiment prediction
+
+ Args:
+ experiment_name: name of the experiment
+ experiment_path: path containing experiment related files
+
+ Returns:
+ Path to the zip file with experiment predictions
+ """
+
+ zip_path = get_predictions_path(experiment_name, experiment_path)
+ zf = zipfile.ZipFile(zip_path, "w")
+
+ add_file_to_zip(zf=zf, path=f"{experiment_path}/validation_raw_predictions.pkl")
+ add_file_to_zip(zf=zf, path=f"{experiment_path}/validation_predictions.csv")
+
+ zf.close()
+ return zip_path
+
+
+def save_logs(experiment_name: str, experiment_path: str, logs: dict):
+ """Save experiment logs
+
+ Args:
+ experiment_name: name of the experiment
+ experiment_path: path containing experiment related files
+ logs: dictionary with experiment charts
+
+ Returns:
+ Path to the zip file with experiment logs
+ """
+
+ cfg_path = os.path.join(experiment_path, "cfg.yaml")
+ charts_path = f"{experiment_path}/charts_{experiment_name}.json"
+ with open(charts_path, "w") as fp:
+ json.dump(
+ {k: v for k, v in logs.items() if k in ["meta", "train", "validation"]}, fp
+ )
+
+ zip_path = get_logs_path(experiment_name, experiment_path)
+ zf = zipfile.ZipFile(zip_path, "w")
+ zf.write(charts_path, os.path.basename(charts_path))
+ zf.write(cfg_path, f"cfg_{experiment_name}.yaml")
+
+ try:
+ zf.write(
+ f"{experiment_path}/logs.log",
+ f"logs_{experiment_name}.log",
+ )
+ except FileNotFoundError:
+ logging.warning("Log file is not available yet.")
+
+ zf.close()
+
+ return zip_path
+
+
+def get_size_str(
+ x, sig_figs=2, input_unit="B", output_unit="dynamic", show_unit=True
+) -> str:
+ """
+ Convert a small input unit such as bytes to human readable format.
+
+ Args:
+ x: input value
+ sig_figs: number of significant figures
+ input_unit: input unit ("B", "KB", "MB", "GB", "TB"), default "B"
+ output_unit: output unit ("B", "KB", "MB", "GB", "TB", "dynamic")
+ default "dynamic"
+ show_unit: whether to show the unit in the output string
+
+ Returns:
+ str: Human readable string
+ """
+
+ names = ["B", "KB", "MB", "GB", "TB"]
+ names = names[names.index(input_unit) :]
+
+ act_i = 0
+ if output_unit == "dynamic":
+ while x >= 1024 and act_i < len(names) - 1:
+ x /= 1024
+ act_i += 1
+ else:
+ target = names.index(output_unit)
+ while act_i < target:
+ x /= 1024
+ act_i += 1
+
+    ret_str = str(round(x, sig_figs))
+ if show_unit:
+ ret_str += f" {names[act_i]}"
+
+ return ret_str
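+
+
+# Examples (editor's illustration):
+# get_size_str(1536)                  -> "1.5 KB"
+# get_size_str(3 * 1024**3)           -> "3.0 GB"
+# get_size_str(1536, output_unit="B") -> "1536 B"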
diff --git a/llm_studio/src/utils/gpu_utils.py b/llm_studio/src/utils/gpu_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a5ac86d33363de950fdd81e9490d8635f25edd3
--- /dev/null
+++ b/llm_studio/src/utils/gpu_utils.py
@@ -0,0 +1,79 @@
+from typing import Any
+
+import numpy as np
+import torch
+import torch.distributed as dist
+
+
+def sync_across_processes(
+ t: torch.Tensor | np.ndarray, world_size: int, group: Any = None
+) -> torch.Tensor | np.ndarray:
+ """Concatenates tensors across processes.
+
+ Args:
+ t: input tensor or numpy array
+ world_size: world size
+ group (ProcessGroup, optional): The process group to work on
+
+ Returns:
+ Tensor or numpy array concatenated across all processes
+ """
+
+ dist.barrier()
+ ret: torch.Tensor | np.ndarray
+
+ if isinstance(t, torch.Tensor):
+ gather_t_tensor = [torch.ones_like(t) for _ in range(world_size)]
+
+ if t.is_cuda:
+ dist.all_gather(gather_t_tensor, t)
+ else:
+ dist.all_gather_object(gather_t_tensor, t, group=group)
+
+ ret = torch.cat(gather_t_tensor)
+ elif isinstance(t, np.ndarray):
+ gather_t_array = [np.ones_like(t) for _ in range(world_size)]
+ dist.all_gather_object(gather_t_array, t, group=group)
+ ret = np.concatenate(gather_t_array)
+ else:
+ raise ValueError(f"Can't synchronize {type(t)}.")
+
+ return ret
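+
+
+# Usage sketch (illustrative; assumes torch.distributed is initialized):
+#   preds = sync_across_processes(local_preds, world_size=dist.get_world_size())
+# Every rank ends up with the concatenation of all ranks' `local_preds`.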
+
+
+# based on https://github.com/BlackHC/toma/blob/master/toma/torch_cuda_memory.py
+def is_cuda_out_of_memory(exception: BaseException) -> bool:
+ return (
+ isinstance(exception, RuntimeError)
+ and len(exception.args) == 1
+ and "CUDA" in exception.args[0]
+ and "out of memory" in exception.args[0]
+ )
+
+
+# based on https://github.com/BlackHC/toma/blob/master/toma/cpu_memory.py
+def is_out_of_cpu_memory(exception: BaseException) -> bool:
+ return (
+ isinstance(exception, RuntimeError)
+ and len(exception.args) == 1
+ and "DefaultCPUAllocator: can't allocate memory" in exception.args[0]
+ )
+
+
+# based on https://github.com/BlackHC/toma/blob/master/toma/torch_cuda_memory.py
+def is_cudnn_snafu(exception: BaseException) -> bool:
+ # For/because of https://github.com/pytorch/pytorch/issues/4107
+ return (
+ isinstance(exception, RuntimeError)
+ and len(exception.args) == 1
+ and "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED." in exception.args[0]
+ )
+
+
+# based on https://github.com/BlackHC/toma/blob/master/toma/torch_cuda_memory.py
+def is_oom_error(exception: BaseException) -> bool:
+ return (
+ is_cuda_out_of_memory(exception)
+ or is_cudnn_snafu(exception)
+ or is_out_of_cpu_memory(exception)
+ )
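+
+
+# Illustrative call site (hypothetical): fall back gracefully when a forward
+# pass fails with any of the known out-of-memory signatures.
+#
+#   try:
+#       output = model(batch)
+#   except RuntimeError as exception:
+#       if is_oom_error(exception):
+#           torch.cuda.empty_cache()  # free cached blocks before retrying
+#       else:
+#           raise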
diff --git a/llm_studio/src/utils/logging_utils.py b/llm_studio/src/utils/logging_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..32dd31211afbeb697cafe4c851490eb7b443febd
--- /dev/null
+++ b/llm_studio/src/utils/logging_utils.py
@@ -0,0 +1,136 @@
+import io
+import json
+import logging
+import os
+import re
+from typing import Any, Optional
+
+from llm_studio.src.utils.plot_utils import PlotData
+
+
+class IgnorePatchRequestsFilter(logging.Filter):
+ """Filter to ignore log entries containing "HTTP Request: PATCH".
+
+ This filter is used to prevent cluttering the logs with PATCH requests,
+ which are often generated by the h2o-wave application and not relevant for
+ most logging purposes.
+ """
+
+ def filter(self, record):
+ log_message = record.getMessage()
+ if re.search(r"HTTP Request: PATCH", log_message):
+ return False # Ignore the log entry
+ return True # Include the log entry
+
+
+def initialize_logging(cfg: Optional[Any] = None):
+ """
+ Initialize logging for the application and for each experiment.
+
+ Console logging is enabled when running in the app context and when running an
+ experiment on rank 0 (unless `cfg.logging.log_all_ranks` is set to True, in which
+ case all ranks will log to the console).
+
+ Experiment file logging is enabled for rank 0 or all ranks if
+ `cfg.logging.log_all_ranks` is set to True.
+
+ For the app logging the log file is created in the llm_studio_workdir with the name
+ 'h2o_llmstudio.log'.
+
+ """
+ if cfg is not None and cfg.logging.log_all_ranks:
+ format = "%(asctime)s - PID %(process)d - %(levelname)s: %(message)s"
+ else:
+ format = "%(asctime)s - %(levelname)s: %(message)s"
+ formatter = logging.Formatter(format)
+
+ # Suppress sqlitedict logs (charts.db)
+ logging.getLogger("sqlitedict").setLevel(logging.ERROR)
+
+ actual_logger = logging.root
+ actual_logger.setLevel(logging.INFO)
+
+    # Only log to console in the app context or for experiment output from rank 0,
+    # or when the user explicitly chooses to log from all ranks
+ if (cfg is None) or (cfg.environment._local_rank == 0) or cfg.logging.log_all_ranks:
+ console_handler = logging.StreamHandler()
+ console_handler.setFormatter(formatter)
+ console_handler.addFilter(IgnorePatchRequestsFilter())
+ actual_logger.addHandler(console_handler)
+
+ file_handler: logging.FileHandler | None = None
+ if cfg is not None:
+ if (cfg.environment._local_rank == 0) or cfg.logging.log_all_ranks:
+ logs_dir = f"{cfg.output_directory}/"
+ os.makedirs(logs_dir, exist_ok=True)
+ file_handler = logging.FileHandler(filename=f"{logs_dir}/logs.log")
+ else:
+ try:
+ file_handler = logging.FileHandler(filename="h2o_llmstudio.log")
+ except PermissionError:
+ file_handler = None
+
+ if file_handler is not None:
+ file_handler.addFilter(IgnorePatchRequestsFilter())
+ file_formatter = logging.Formatter(format)
+ file_handler.setFormatter(file_formatter)
+ actual_logger.addHandler(file_handler)
+
+
+class TqdmToLogger(io.StringIO):
+ """
+ Outputs stream for TQDM.
+ It will output to logger module instead of the StdOut.
+ """
+
+    logger: Optional[logging.Logger] = None
+    level: Optional[int] = None
+    buf: str = ""
+
+ def __init__(self, logger, level=None):
+ super(TqdmToLogger, self).__init__()
+ self.logger = logger
+ self.level = level or logging.INFO
+
+    def write(self, buf):
+        # strip newlines, tabs, and ANSI cursor-up escape residue from tqdm output
+        self.buf = buf.strip("\r\n\t [A")
+
+ def flush(self):
+ if self.buf != "":
+ try:
+ self.logger.log(self.level, self.buf)
+ except NameError:
+ pass
+
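+
+# Usage sketch (illustrative): route tqdm progress lines into a logger
+# instead of stdout:
+#   progress_bar = tqdm(total=100, file=TqdmToLogger(logger), mininterval=0)
+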
+
+def write_flag(path: str, key: str, value: str):
+ """Writes a new flag
+
+ Args:
+ path: path to flag json
+ key: key of the flag
+        value: value of the flag
+ """
+
+ if os.path.exists(path):
+ with open(path, "r+") as file:
+ flags = json.load(file)
+ else:
+ flags = {}
+
+ flags[key] = value
+
+ with open(path, "w+") as file:
+ json.dump(flags, file)
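+
+
+# Example (illustrative): write_flag("flags.json", "status", "finished") leaves
+# flags.json containing {"status": "finished"}; a later call with another key
+# merges into the same file instead of overwriting it.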
+
+
+def log_plot(cfg: Any, plot: PlotData, type: str) -> None:
+ """Logs a given plot
+
+ Args:
+        cfg: config object
+ plot: plot to log
+ type: type of the plot
+
+ """
+ cfg.logging._logger.log(plot.encoding, type, plot.data)
diff --git a/llm_studio/src/utils/modeling_utils.py b/llm_studio/src/utils/modeling_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2fa0bd667e80779ff453f9502ef581bf467a007
--- /dev/null
+++ b/llm_studio/src/utils/modeling_utils.py
@@ -0,0 +1,1173 @@
+import gc
+import logging
+import os
+import re
+import shutil
+from collections import OrderedDict
+from typing import Any, Dict, Optional
+
+import coolname
+import deepspeed
+import numpy as np
+import torch
+import transformers
+from deepspeed.runtime.dataloader import DeepSpeedDataLoader
+from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+from peft import LoraConfig, PeftModel, get_peft_model
+from torch.cuda.amp import autocast
+from torch.nn.parallel import DistributedDataParallel
+from tqdm import tqdm
+from transformers import (
+ AutoConfig,
+ AutoModel,
+ BitsAndBytesConfig,
+ GenerationMixin,
+ StoppingCriteria,
+ StoppingCriteriaList,
+)
+from transformers.pytorch_utils import Conv1D as Conv1DTransformer
+from transformers.utils import logging as transformers_logging
+
+from llm_studio.python_configs.base import DefaultConfigProblemBase
+from llm_studio.src.datasets.text_utils import get_tokenizer
+from llm_studio.src.optimizers import Optimizers
+from llm_studio.src.schedulers import Schedulers
+from llm_studio.src.utils.config_utils import NON_GENERATION_PROBLEM_TYPES
+from llm_studio.src.utils.data_utils import (
+ OrderedDistributedSampler,
+ batch_padding,
+ cat_batches,
+ get_inference_batch_size,
+)
+from llm_studio.src.utils.exceptions import LLMDataException, LLMModelException
+from llm_studio.src.utils.logging_utils import TqdmToLogger
+from llm_studio.src.utils.utils import save_pickle
+
+logger = logging.getLogger(__name__)
+
+
+def unwrap_model(model: torch.nn.Module) -> torch.nn.Module:
+ """
+ Unwraps a model from its parallel wrapper (DistributedDataParallel or DataParallel).
+
+ Args:
+ model (torch.nn.Module): The model to unwrap.
+
+ Returns:
+ torch.nn.Module: The unwrapped model.
+ """
+ options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
+
+ while isinstance(model, options):
+ model = model.module
+
+ return model
+
+
+def check_disk_space(model: torch.nn.Module, path: str) -> None:
+ """
+ Checks if there's enough disk space to save the model weights.
+
+ Args:
+ model (torch.nn.Module): The model whose weights will be saved.
+ path (str): The path where the model will be saved.
+
+ Raises:
+ ValueError: If there's not enough disk space to save the model weights.
+ """
+ _, _, free = shutil.disk_usage(path)
+
+ model_size_in_bytes = 0
+ for param in model.parameters():
+ n_params = param.ds_numel if hasattr(param, "ds_numel") else param.numel()
+ if param.data.dtype in [torch.int8, torch.uint8]:
+ model_size_in_bytes += n_params * 1
+ elif param.data.dtype in [torch.float16, torch.bfloat16]:
+ model_size_in_bytes += n_params * 2
+ elif param.data.dtype == torch.float32:
+ model_size_in_bytes += n_params * 4
+ else:
+ # If the data type is not supported, calculate it as float32.
+ model_size_in_bytes += n_params * 4
+ logger.warning(f"Unsupported data type: {param.data.dtype}")
+
+ if model_size_in_bytes * 1.03 < free: # leave a 3% margin here.
+ logger.info(
+ "Enough space available for saving model weights."
+ f"Required space: {model_size_in_bytes * 1.03 / (1024 * 1024):.2f}MB, "
+ f"Available space: {free / (1024 * 1024):.2f}MB."
+ )
+ else:
+ raise ValueError(
+ f"Not enough space available for saving model weights. "
+ f"Required space: {model_size_in_bytes * 1.03 / (1024 * 1024):.2f}MB, "
+ f"Available space: {free / (1024 * 1024):.2f}MB."
+ )
+
+
+# TODO: currently not saving optimizer
+def save_checkpoint(
+ model: torch.nn.Module, path: str, cfg: DefaultConfigProblemBase
+) -> None:
+ """
+ Saves a model checkpoint.
+
+ Args:
+ model (torch.nn.Module): The model to save.
+ path (str): The path where to save the checkpoint.
+ cfg (DefaultConfigProblemBase): The configuration object.
+
+ Raises:
+ ValueError: If the path is not provided.
+ """
+
+ if not path:
+ raise ValueError(f"Path must be provided. Received {path}.")
+
+ if not os.path.exists(path):
+ os.makedirs(path)
+
+ if cfg.environment.use_deepspeed:
+ # gather model params from all ranks when using Deepspeed
+ status = model.save_16bit_model(path, "checkpoint.pth")
+ if status:
+ if cfg.environment._local_rank == 0:
+ checkpoint = {
+ "model": torch.load(
+ os.path.join(path, "checkpoint.pth"), map_location="cpu"
+ )
+ }
+ else:
+ logger.warning(
+ "deepspeed.save_16bit_model didn't save the model, since"
+ " stage3_gather_16bit_weights_on_model_save=False."
+ " Saving the full checkpoint instead"
+ )
+ model.save_checkpoint(os.path.join(path, "ds_checkpoint"))
+ if cfg.environment._local_rank == 0:
+ # load to cpu
+ state_dict = get_fp32_state_dict_from_zero_checkpoint(
+ os.path.join(path, "ds_checkpoint")
+ )
+ # save as normal checkpoint that can be loaded by `load_state_dict`
+ checkpoint = {"model": state_dict}
+ torch.save(checkpoint, os.path.join(path, "checkpoint.pth"))
+ shutil.rmtree(os.path.join(path, "ds_checkpoint"))
+
+ else:
+ if cfg.environment._local_rank == 0:
+ model = unwrap_model(model)
+ checkpoint = {"model": model.state_dict()}
+ torch.save(checkpoint, os.path.join(path, "checkpoint.pth"))
+ if cfg.training.lora and len(cfg.training.lora_unfreeze_layers) == 0:
+ model.backbone.save_pretrained(os.path.join(path, "adapter_model"))
+
+ if (
+ cfg.environment._local_rank == 0
+ and "classification_head.weight" in checkpoint["model"]
+ ):
+ torch.save(
+ checkpoint["model"]["classification_head.weight"],
+ os.path.join(path, "classification_head.pth"),
+ )
+ if (
+ cfg.environment._local_rank == 0
+ and "regression_head.weight" in checkpoint["model"]
+ ):
+ torch.save(
+ checkpoint["model"]["regression_head.weight"],
+ os.path.join(path, "regression_head.pth"),
+ )
+
+
+def _load_model_weights(
+ model: torch.nn.Module,
+ model_weights: Dict,
+ strict: bool,
+ cfg: DefaultConfigProblemBase,
+):
+ orig_num_items = len(model_weights)
+ model_state_dict = model.state_dict()
+
+ # needed to load models trained in int4/int8 with other dtypes
+ model_weights = {
+ k: (
+ v
+ if not (
+ cfg.architecture.backbone_dtype not in ("int4", "int8")
+ and (v.dtype is torch.int8 or v.dtype is torch.uint8)
+ )
+ else model_state_dict[k]
+ )
+ for k, v in model_weights.items()
+ if not (
+ ("SCB" in k or "weight_format" in k or "quant_state" in k)
+ and cfg.architecture.backbone_dtype not in ("int4", "int8")
+ )
+ }
+
+ # Need to ignore int4/int8 weights so undo strict loading requirement
+ if len(model_weights) != orig_num_items:
+ strict = False
+
+ model_weights = {re.sub(r"^module\.", "", k): v for k, v in model_weights.items()}
+ model_weights = {k.replace("_orig_mod.", ""): v for k, v in model_weights.items()}
+
+ # manual fix for int8 weights
+ if cfg.architecture.backbone_dtype == "int8":
+ model_weights = {
+ k: v.to(cfg.environment._device) if "weight_format" not in k else v
+ for k, v in model_weights.items()
+ }
+
+ try:
+ model.load_state_dict(OrderedDict(model_weights), strict=True)
+ except Exception as e:
+ if strict:
+ raise e
+ else:
+ logger.warning(
+ "Only a part of the pretrained weights was loaded. "
+ "Some layers can't be initialized with pretrained "
+ f"weights: {e}"
+ )
+
+ for layer_name in re.findall("size mismatch for (.*?):", str(e)):
+ model_weights.pop(layer_name, None)
+ model.load_state_dict(OrderedDict(model_weights), strict=False)
+ return model
+
+
+def load_checkpoint(
+ cfg: DefaultConfigProblemBase,
+ model: torch.nn.Module,
+ strict: bool = True,
+ weights_path: Optional[str] = None,
+):
+ """Load checkpoint
+
+ Args:
+ cfg: config file
+ model: model to load weights to
+ strict: whether to apply strict matching for weights
+ weights_path: custom path to the weights.
+ If None, cfg.architecture.pretrained_weights is used
+ Returns:
+ epoch: current epoch
+ """
+
+ if weights_path is None:
+ weights_path = cfg.architecture.pretrained_weights
+
+ model_weights = torch.load(weights_path, map_location="cpu")
+ if "model" in model_weights.keys():
+ model_weights = model_weights["model"]
+
+ if cfg.environment.use_deepspeed:
+ if cfg.training.lora:
+ model.backbone.base_model.model = _load_model_weights( # type: ignore
+ model.backbone.base_model.model, # type: ignore
+ model_weights,
+ strict,
+ cfg,
+ )
+ else:
+ model.backbone = _load_model_weights(
+ model.backbone, model_weights, strict, cfg # type: ignore
+ )
+ else:
+ model = _load_model_weights(model, model_weights, strict, cfg)
+
+ del model_weights
+ gc.collect()
+
+ logger.info(f"Weights loaded from: {weights_path}")
+
+
+def get_ds_config(cfg: DefaultConfigProblemBase):
+ ds_config = {
+ "fp16": {
+ "enabled": True if cfg.architecture.backbone_dtype == "float16" else False,
+ "loss_scale_window": 100,
+ },
+ "bf16": {
+ "enabled": True if cfg.architecture.backbone_dtype == "bfloat16" else False,
+ "loss_scale_window": 100,
+ },
+ # https://www.deepspeed.ai/docs/config-json/#zero-optimizations-for-fp16-training
+ "zero_force_ds_cpu_optimizer": False,
+ "zero_optimization": {
+ "overlap_comm": True,
+ "contiguous_gradients": True,
+ "reduce_bucket_size": cfg.environment.deepspeed_reduce_bucket_size,
+ # zero3 offload cpu
+ # "stage3_max_live_parameters": cfg.environment.deepspeed_stage3_max_live_parameters, # noqa: E501
+ # "stage3_max_reuse_distance": cfg.environment.deepspeed_stage3_max_reuse_distance, # noqa: E501
+ # zero++
+ # "reduce_scatter": True,
+ # "zero_quantized_weights": True,
+ # "zero_hpz_partition_size": 16,
+ # "zero_quantized_gradients": True,
+ },
+ "steps_per_print": 2000,
+ "train_micro_batch_size_per_gpu": cfg.training.batch_size,
+ "gradient_accumulation_steps": cfg.training.grad_accumulation,
+ "wall_clock_breakdown": False,
+ }
+
+ if cfg.environment.deepspeed_method == "ZeRO2":
+ ds_config["zero_optimization"].update(
+ {
+ "stage": 2,
+ "allgather_partitions": True,
+ "allgather_bucket_size": cfg.environment.deepspeed_allgather_bucket_size, # noqa: E501
+ }
+ )
+ elif cfg.environment.deepspeed_method == "ZeRO3":
+ ds_config["zero_optimization"].update(
+ {
+ "stage": 3,
+ "stage3_prefetch_bucket_size": cfg.environment.deepspeed_stage3_prefetch_bucket_size, # noqa: E501
+ "stage3_param_persistence_threshold": cfg.environment.deepspeed_stage3_param_persistence_threshold, # noqa: E501
+ "stage3_gather_16bit_weights_on_model_save": True,
+ }
+ )
+
+ # TODO: Do not enable offload cpu for now.
+ # if cfg.environment.deepspeed_offload_optimizer:
+ # ds_config["zero_optimization"]["offload_optimizer"] = {
+ # "device": "cpu",
+ # "pin_memory": True,
+ # }
+ # TODO: RuntimeError: Tensors must be CUDA and dense
+ # if cfg.environment.deepspeed_offload_param:
+ # ds_config["zero_optimization"]["offload_param"] =
+ # {"device": "cpu", "pin_memory": True}
+
+ logger.info(f"DeepSpeed config: {ds_config}")
+
+ return ds_config
+
+
+def wrap_model_distributed(
+ model: torch.nn.Module,
+ optimizer: torch.optim.Optimizer,
+ lr_scheduler: torch.optim.lr_scheduler._LRScheduler,
+ train_dataloader: torch.utils.data.DataLoader,
+ val_dataloader: torch.utils.data.DataLoader,
+ cfg: DefaultConfigProblemBase,
+):
+ if cfg.environment.use_deepspeed:
+ ds_config = get_ds_config(cfg)
+ if not cfg.training.lora:
+ ds_engine, optimizer, train_dataloader, lr_scheduler = deepspeed.initialize(
+ model=model.backbone,
+ optimizer=optimizer,
+ lr_scheduler=lr_scheduler,
+ training_data=train_dataloader.dataset,
+ config_params=ds_config,
+ )
+ model.backbone = ds_engine
+ else:
+ ds_engine, optimizer, train_dataloader, lr_scheduler = deepspeed.initialize(
+ model=model.backbone.base_model.model, # type: ignore
+ optimizer=optimizer,
+ lr_scheduler=lr_scheduler,
+ training_data=train_dataloader.dataset,
+ config_params=ds_config,
+ )
+ model.backbone.base_model.model = ds_engine # type: ignore
+ model.init_deepspeed() # type: ignore
+ val_dataloader = DeepSpeedDataLoader(
+ val_dataloader.dataset,
+ batch_size=val_dataloader.batch_size,
+ local_rank=cfg.environment._local_rank,
+ pin_memory=True,
+ tput_timer=None,
+ data_sampler=OrderedDistributedSampler(
+ val_dataloader.dataset,
+ num_replicas=cfg.environment._world_size,
+ rank=cfg.environment._local_rank,
+ ),
+ )
+ else:
+ find_unused_parameters = cfg.environment.find_unused_parameters
+ if getattr(cfg.architecture, "gradient_checkpointing", None):
+ find_unused_parameters = False
+ model = DistributedDataParallel(
+ model,
+ device_ids=[cfg.environment._local_rank],
+ find_unused_parameters=find_unused_parameters,
+ )
+
+ return model, optimizer, train_dataloader, val_dataloader, lr_scheduler
+
+
+def get_optimizer(
+ model: torch.nn.Module, cfg: DefaultConfigProblemBase
+) -> torch.optim.Optimizer:
+ """Prepares Optimizer.
+
+ Args:
+ model: model
+ cfg: input config
+
+ Returns:
+ Optimizer
+ """
+ no_decay = ["bias", "LayerNorm.weight"]
+ differential_layers = cfg.training.differential_learning_rate_layers
+ optimizer = Optimizers.get(cfg.training.optimizer)(
+ [
+ {
+ "params": [
+ param
+ for name, param in model.named_parameters()
+ if (not any(layer in name for layer in differential_layers))
+ and (not any(nd in name for nd in no_decay))
+ and param.requires_grad
+ ],
+ "lr": cfg.training.learning_rate,
+ "weight_decay": cfg.training.weight_decay,
+ },
+ {
+ "params": [
+ param
+ for name, param in model.named_parameters()
+ if (not any(layer in name for layer in differential_layers))
+ and (any(nd in name for nd in no_decay))
+ and param.requires_grad
+ ],
+ "lr": cfg.training.learning_rate,
+ "weight_decay": 0,
+ },
+ {
+ "params": [
+ param
+ for name, param in model.named_parameters()
+ if (any(layer in name for layer in differential_layers))
+ and (not any(nd in name for nd in no_decay))
+ and param.requires_grad
+ ],
+ "lr": cfg.training.differential_learning_rate,
+ "weight_decay": cfg.training.weight_decay,
+ },
+ {
+ "params": [
+ param
+ for name, param in model.named_parameters()
+ if (any(layer in name for layer in differential_layers))
+ and (any(nd in name for nd in no_decay))
+ and param.requires_grad
+ ],
+ "lr": cfg.training.differential_learning_rate,
+ "weight_decay": 0,
+ },
+ ],
+ lr=cfg.training.learning_rate,
+ weight_decay=cfg.training.weight_decay,
+ )
+
+ return optimizer
+
+
+def get_scheduler(
+ cfg: DefaultConfigProblemBase, optimizer: torch.optim.Optimizer, epoch_steps: int
+) -> torch.optim.lr_scheduler._LRScheduler:
+ """Prepares Learning Rate Scheduler.
+
+ Args:
+ cfg: input config
+ optimizer: model optimizer
+ epoch_steps: total number of weight updates during the epoch
+
+ Returns:
+ Learning Rate Scheduler
+ """
+
+ scheduler = Schedulers.get(cfg.training.schedule)(
+ optimizer=optimizer,
+ num_warmup_steps=cfg.training.warmup_epochs * epoch_steps,
+ num_training_steps=cfg.training.epochs * epoch_steps,
+ min_learning_rate_ratio=cfg.training.min_learning_rate_ratio,
+ )
+
+ return scheduler
+
+
+def generate_experiment_name() -> str:
+ """
+ Generates a random human-readable experiment name in kebab-case.
+
+ Returns:
+ The random name.
+ """
+ return coolname.generate_slug(2)
+
+
+def reduce_metric(output, reduce=None) -> float:
+ """Reduces metric and return metric score (number)
+
+ Args:
+ output: output of the model
+ reduce: how to reduce the metric over the sample dimension
+
+ Returns:
+ score: single number score (using config threshold for threshold metrics)
+ or non-reduced array of scores per sample.
+ """
+
+ if reduce == "mean":
+ score = np.mean(output["metrics"])
+ else:
+ raise NotImplementedError()
+
+ return score
+
+
+def get_number_of_validation_epochs(training_epochs: int, evaluation_epochs: float):
+ """
+ Given the number of training epochs and the number of epochs between model
+ evaluations, return the number of times the model is being evaluated during
+ training
+
+ Args:
+ training_epochs: The number of epochs to train for
+ evaluation_epochs: This is the number of epochs after which we want to
+ evaluate our model
+
+ Returns:
+ num_val_epochs: The number of epochs to be evaluated during training.
+ """
+ return training_epochs // evaluation_epochs
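+
+
+# Worked example (illustrative): training for 10 epochs and evaluating every
+# 0.5 epochs yields 10 // 0.5 == 20.0 evaluations.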
+
+
+def contains_nan(output: Dict):
+ return (
+ sum(
+ [
+ 1
+ for key, val in output.items()
+ if isinstance(val, torch.Tensor)
+ and torch.isnan(val.detach().cpu()).sum() > 0
+ ]
+ )
+ > 0
+ )
+
+
+def run_inference(
+ cfg: DefaultConfigProblemBase,
+ model: torch.nn.Module,
+ dataloader,
+ mode: str,
+) -> Dict[str, list]:
+ """Runs inference
+
+ Args:
+ cfg: config
+ model: model
+ dataloader: custom dataloader
+ mode: mode for inference
+
+ Returns:
+ Dictionary with output
+
+ """
+
+ # Store information for evaluation
+ out = dict()
+
+ logger.info(f"Starting {mode} inference")
+
+ tqdm_out = TqdmToLogger(logger, level=logging.INFO)
+ progress_bar = tqdm(
+ total=len(dataloader),
+ disable=cfg.environment._local_rank != 0,
+ file=tqdm_out,
+ ascii=True,
+ desc=f"{mode} progress",
+ mininterval=0,
+ )
+
+ log_update_steps = max(len(dataloader) // 20, 1)
+ inf_it = iter(dataloader)
+ for itr in range(len(dataloader)):
+ try:
+ data = next(inf_it)
+ except Exception:
+ raise LLMDataException("Data reading error. Skipping inference.")
+
+ val_batch_size = get_inference_batch_size(cfg)
+ cfg.environment._curr_val_step += val_batch_size * cfg.environment._world_size
+
+ batch = cfg.dataset.dataset_class.batch_to_device(data, cfg.environment._device)
+
+ if cfg.environment.use_deepspeed:
+ if (
+ cfg.prediction.metric != "Perplexity"
+ and cfg.problem_type not in NON_GENERATION_PROBLEM_TYPES
+ ):
+ output = {}
+ output["predicted_answer_ids"] = (
+ model.generate(batch, cfg).detach().cpu() # type: ignore
+ )
+ else:
+ output = model.forward(batch)
+ else:
+ with autocast(
+ enabled=cfg.environment.mixed_precision,
+ dtype=get_torch_dtype(cfg.environment.mixed_precision_dtype),
+ ):
+ if (
+ cfg.prediction.metric != "Perplexity"
+ and cfg.problem_type not in NON_GENERATION_PROBLEM_TYPES
+ ):
+ output = {}
+ output["predicted_answer_ids"] = (
+ unwrap_model(model).generate(batch, cfg).detach().cpu()
+ )
+ else:
+ output = model.forward(batch)
+ if contains_nan(output) and cfg.environment.mixed_precision:
+ raise LLMModelException(
+ "NaN caught during mixed precision inference. "
+ "Please disable mixed precision inference. "
+ "Alternatively, reducing learning rate or "
+ "gradient clipping may help to stabilize training."
+ )
+
+ output = dataloader.dataset.postprocess_batch_predictions(output=output)
+
+ if "predicted_answer_ids" in output.keys():
+ del output["predicted_answer_ids"]
+
+ for key, val in output.items():
+ if isinstance(val, torch.Tensor):
+ val = val.detach().cpu()
+
+ # DefaultDict is not used as it adds extra keys during pickle.dump
+ if key not in out:
+ out[key] = [val]
+ else:
+ out[key] += [val]
+
+ if cfg.environment._local_rank == 0:
+            # Show logs every 5% of the inference
+ if (itr + 1) % log_update_steps == 0 or itr == len(dataloader) - 1:
+ progress_bar.set_description(f"{mode} progress", refresh=False)
+ if (itr + 1) % log_update_steps == 0:
+ progress_bar.update(log_update_steps)
+ else:
+ progress_bar.update(len(dataloader) % log_update_steps)
+
+ cfg.logging._logger.log(
+ "internal",
+ "current_val_step",
+ cfg.environment._curr_val_step,
+ )
+
+ if cfg.environment._distributed:
+ torch.distributed.barrier()
+
+ progress_bar.close()
+ del progress_bar
+ out = cat_batches(out)
+
+ return out
+
+
+def save_predictions(
+ cfg: DefaultConfigProblemBase, val_data, val_dataloader, val_df, mode
+):
+ val_data, val_df = val_dataloader.dataset.format_output(
+ cfg=cfg, df=val_df, output=val_data
+ )
+ raw_preds_name = os.path.join(cfg.output_directory, f"{mode}_raw_predictions.pkl")
+ csv_preds_name = os.path.join(cfg.output_directory, f"{mode}_predictions.csv")
+ save_pickle(raw_preds_name, val_data)
+ val_df.to_csv(csv_preds_name, index=False)
+
+
+def update_backbone_config(config: Any, cfg: DefaultConfigProblemBase):
+ if hasattr(config, "hidden_dropout_prob"):
+ config.hidden_dropout_prob = cfg.architecture.intermediate_dropout
+ if hasattr(config, "attention_probs_dropout_prob"):
+ config.attention_probs_dropout_prob = cfg.architecture.intermediate_dropout
+ if (
+ not hasattr(config, "hidden_dropout_prob")
+ and not hasattr(config, "attention_probs_dropout_prob")
+ and cfg.architecture.intermediate_dropout > 0
+ ):
+ logger.warning(
+ "Model config does not have dropout attributes. "
+ f"Ignoring Intermediate Dropout = {cfg.architecture.intermediate_dropout}."
+ )
+ cfg.architecture.intermediate_dropout = 0
+
+ tokenizer = get_tokenizer(cfg)
+
+ if config.eos_token_id != tokenizer.eos_token_id:
+ logger.warning(
+ "EOS token id not matching between config and tokenizer. "
+ f"Overwriting {config.eos_token_id} with "
+ f"tokenizer id {tokenizer.eos_token_id}."
+ )
+ config.eos_token_id = tokenizer.eos_token_id
+ if config.pad_token_id != tokenizer.pad_token_id:
+ logger.warning(
+ "PAD token id not matching between config and tokenizer. "
+ f"Overwriting {config.pad_token_id} with "
+ f"tokenizer id {tokenizer.pad_token_id}."
+ )
+ config.pad_token_id = tokenizer.pad_token_id
+ # no warning needed as not used
+ if config.bos_token_id != tokenizer.bos_token_id:
+ config.bos_token_id = tokenizer.bos_token_id
+
+ if "mpt-" in cfg.llm_backbone:
+ config.init_device = cfg.environment._device
+
+ # See: https://github.com/huggingface/transformers/pull/24906
+ if hasattr(config, "pretraining_tp") and cfg.training.lora:
+ logger.info("Setting pretraining_tp of model config to 1.")
+ config.pretraining_tp = 1
+
+ return config
+
+
+def set_generation_config(backbone: torch.nn.Module, cfg_prediction: Any):
+ backbone.generation_config.min_new_tokens = cfg_prediction.min_length_inference
+ backbone.generation_config.max_new_tokens = cfg_prediction.max_length_inference
+ backbone.generation_config.max_time = (
+ cfg_prediction.max_time if cfg_prediction.max_time > 0 else None
+ )
+ backbone.generation_config.do_sample = cfg_prediction.do_sample
+ backbone.generation_config.num_beams = cfg_prediction.num_beams
+ backbone.generation_config.repetition_penalty = cfg_prediction.repetition_penalty
+ if cfg_prediction.do_sample:
+ backbone.generation_config.temperature = cfg_prediction.temperature
+ backbone.generation_config.top_k = cfg_prediction.top_k
+ backbone.generation_config.top_p = cfg_prediction.top_p
+ else:
+ backbone.generation_config.temperature = None
+ backbone.generation_config.top_k = None
+ backbone.generation_config.top_p = None
+
+ backbone.generation_config.transformers_version = transformers.__version__
+ return backbone
+
+
+def create_nlp_backbone(cfg: DefaultConfigProblemBase, model_class=AutoModel) -> Any:
+ """
+ Creates a backbone model for NLP tasks.
+ This is needed for Gradient Checkpointing in DDP mode.
+ """
+
+ kwargs: Dict[str, Any] = {}
+ if (
+ hasattr(cfg.training, "attention_implementation")
+ and cfg.training.attention_implementation != "auto"
+ ):
+ kwargs["attn_implementation"] = cfg.training.attention_implementation
+
+ config = AutoConfig.from_pretrained(
+ cfg.llm_backbone,
+ trust_remote_code=cfg.environment.trust_remote_code,
+ token=os.getenv("HF_TOKEN"),
+ revision=cfg.environment.huggingface_branch,
+ **kwargs,
+ )
+
+ config = update_backbone_config(config, cfg)
+ kwargs = dict()
+ kwargs["token"] = os.getenv("HF_TOKEN")
+
+ quantization_config = None
+ if cfg.architecture.backbone_dtype == "int8" and len(cfg.environment.gpus):
+ kwargs["device_map"] = {"": cfg.environment._device} # type: ignore
+ quantization_config = BitsAndBytesConfig(
+ load_in_8bit=True,
+ llm_int8_threshold=0.0,
+ )
+ # need to force pretrained
+ cfg.architecture.pretrained = True
+ kwargs["torch_dtype"] = torch.float16 # type: ignore
+ elif cfg.architecture.backbone_dtype == "int4" and len(cfg.environment.gpus):
+ kwargs["device_map"] = {"": cfg.environment._device} # type: ignore
+ quantization_config = BitsAndBytesConfig(
+ load_in_4bit=True,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_quant_type="nf4",
+ )
+ # need to force pretrained
+ cfg.architecture.pretrained = True
+ kwargs["torch_dtype"] = torch.float16 # type: ignore
+ elif len(cfg.environment.gpus) == 0 and cfg.architecture.backbone_dtype in [
+ "int4",
+ "int8",
+ ]:
+ logger.warning(
+ "Quantization is not supported on CPU. "
+ "Please run on GPU or disable quantization."
+ )
+ cfg.architecture.backbone_dtype = "float32"
+ else:
+ kwargs["torch_dtype"] = getattr(torch, cfg.architecture.backbone_dtype)
+
+ logger.info(f"Using {cfg.architecture.backbone_dtype} for backbone")
+
+ kwargs.update({"trust_remote_code": cfg.environment.trust_remote_code})
+
+ if cfg.architecture.pretrained:
+ logger.info(f"Loading {cfg.llm_backbone}. This may take a while.")
+
+ backbone = model_class.from_pretrained(
+ cfg.llm_backbone,
+ revision=cfg.environment.huggingface_branch,
+ config=config,
+ quantization_config=quantization_config,
+ **kwargs,
+ )
+ logger.info(f"Loaded {cfg.llm_backbone}.")
+ else:
+ kwargs.pop("token", None)
+ backbone = model_class.from_config(config, **kwargs)
+
+ if cfg.tokenizer._vocab_length > config.vocab_size:
+ logger.info(f"Resizing token embeddings to {cfg.tokenizer._vocab_length}")
+ backbone.resize_token_embeddings(cfg.tokenizer._vocab_length)
+
+ backbone.model_parallel = False
+
+ logger.info(
+ f"Attention implementation: {backbone.config._attn_implementation_internal}"
+ )
+
+ if cfg.training.lora:
+ # if used, gradient checkpointing will be enabled below
+ loaded_in_kbit = getattr(backbone, "is_loaded_in_8bit", False) or getattr(
+ backbone, "is_loaded_in_4bit", False
+ )
+
+ for _, param in backbone.named_parameters():
+ param.requires_grad = False
+
+ # cast all non INT8 parameters to fp32
+ if loaded_in_kbit:
+ for param in backbone.parameters():
+ if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16):
+ param.data = param.data.to(torch.float32)
+ else:
+ if cfg.architecture.backbone_dtype != "float32":
+ if cfg.environment.mixed_precision:
+ logger.info("Disabling mixed precision as dtype not set to float32.")
+ cfg.environment.mixed_precision = False
+ if cfg.architecture.backbone_dtype != "bfloat16":
+ logger.warning(
+ "Pure float16 or int8 training will "
+ "likely lead to unstable training without adapters."
+ )
+
+ for name, param in backbone.named_parameters():
+ # freeze base model's layers
+ if any(freeze_layer in name for freeze_layer in cfg.training.freeze_layers):
+ logger.info(f"Freezing layer: {name}")
+ param.requires_grad = False
+
+ if cfg.architecture.gradient_checkpointing:
+ backbone.gradient_checkpointing_enable(
+ gradient_checkpointing_kwargs={"use_reentrant": False}
+ )
+
+ # initialize the generation config
+    if backbone.generation_config.eos_token_id != config.eos_token_id:
+        logger.warning(
+            "EOS token id not matching between generation config and model config. "
+            "Overwriting with model config id."
+        )
+        backbone.generation_config.eos_token_id = config.eos_token_id
+    if backbone.generation_config.pad_token_id != config.pad_token_id:
+        logger.warning(
+            "PAD token id not matching between generation config and model config. "
+            "Overwriting with model config id."
+        )
+        backbone.generation_config.pad_token_id = config.pad_token_id
+ # no warning needed as not used
+ if backbone.generation_config.bos_token_id != config.bos_token_id:
+ backbone.generation_config.bos_token_id = config.bos_token_id
+
+ if cfg.problem_type not in NON_GENERATION_PROBLEM_TYPES:
+ backbone = set_generation_config(backbone, cfg.prediction)
+
+ return backbone, config
+
+
+# Adapted from https://github.com/huggingface/trl/blob/
+# 2068fdcd931183b59110aa6dc99d8f5bb55c6f2d/trl/trainer/utils.py#L742
+def activate_neftune(model, neftune_noise_alpha):
+ r"""
+ Activates the neftune as presented in this code:
+ https://github.com/neelsjain/NEFTune and paper: https://arxiv.org/abs/2310.05914
+ """
+ backbone = unwrap_model(model).backbone
+ if isinstance(backbone, PeftModel):
+ embeddings = backbone.base_model.get_input_embeddings()
+ else:
+ embeddings = backbone.get_input_embeddings()
+
+ embeddings.neftune_noise_alpha = neftune_noise_alpha
+ embeddings.register_forward_hook(neftune_post_forward_hook)
+
+
+def neftune_post_forward_hook(module, input, output):
+ """
+ Implements the NEFTune forward pass for the model using forward hooks.
+ Note this works only for torch.nn.Embedding layers.
+ This method is slightly adapted from the original source code
+ that can be found here: https://github.com/neelsjain/NEFTune
+
+ Simply add it to your model as follows:
+ ```python
+ model = ...
+ model.embed_tokens.neftune_noise_alpha = 0.1
+ model.embed_tokens.register_forward_hook(neftune_post_forward_hook)
+ ```
+
+ Args:
+ module (`torch.nn.Module`):
+ The embedding module where the hook is attached. Note that you need to set
+ `module.neftune_noise_alpha` to the desired noise alpha value.
+ input (`torch.Tensor`):
+ The input tensor to the model.
+ output (`torch.Tensor`):
+ The output tensor of the model (i.e. the embeddings).
+ """
+ if module.training:
+ dims = torch.tensor(output.size(1) * output.size(2))
+ mag_norm = module.neftune_noise_alpha / torch.sqrt(dims)
+ output = output + torch.zeros_like(output).uniform_(-mag_norm, mag_norm)
+ return output
+
+
+class TokenStoppingCriteria(StoppingCriteria):
+ """
+ Stopping criteria based on tokens.
+ Will stop generation when each generated sample contains at least one of the
+ stop_word_ids.
+ """
+
+ def __init__(self, stop_word_ids, prompt_input_ids_len):
+ super().__init__()
+ self.prompt_input_ids_len = prompt_input_ids_len
+ if stop_word_ids is None:
+ stop_word_ids = []
+ self.stop_word_ids = stop_word_ids
+
+ def should_stop(
+ self,
+ generated_ids: torch.Tensor,
+ stop_word_id: torch.Tensor,
+ ):
+ if len(stop_word_id.shape) == 0:
+ return (
+ torch.mean(((generated_ids == stop_word_id).sum(1) > 0).float()) == 1
+ ).item()
+ else:
+ return (
+ self.get_num_vector_found_in_matrix_rows(stop_word_id, generated_ids)
+ == generated_ids.shape[0]
+ )
+
+ @staticmethod
+ def get_num_vector_found_in_matrix_rows(vector, matrix):
+ """
+ Count the number of times a vector is found in a matrix row.
+ If the vector is found in a row, the search stops and the next row is searched.
+ """
+ assert len(vector.shape) == 1
+ assert len(matrix.shape) == 2
+
+ found = 0
+ for row in matrix:
+ # stride through the vector
+ for i in range(len(row) - len(vector) + 1):
+ # check if the vector contains the tensor
+ if torch.all(row[i : i + len(vector)] == vector):
+ found += 1
+ break
+
+ return found
+
+ def __call__(self, input_ids: torch.Tensor, scores: torch.FloatTensor, **kwargs):
+ generated_ids: torch.Tensor = input_ids[:, self.prompt_input_ids_len :]
+ for stop_word_id in self.stop_word_ids:
+ if self.should_stop(generated_ids, stop_word_id.to(generated_ids.device)):
+ if generated_ids.shape[1] == 1:
+ logger.warning(
+ f"Stopping criteria triggered for {stop_word_id} at first "
+ "generated token."
+ )
+ return True
+ return False
+
+
+class EnvVariableStoppingCriteria(StoppingCriteria):
+ """
+ Stopping criteria based on env variable.
+ Useful to force stopping within the app.
+ """
+
+ stop_streaming_env: str = "STOP_STREAMING"
+
+ def __call__(self, input_ids: torch.Tensor, scores: torch.FloatTensor, **kwargs):
+ should_stop = self.stop_streaming_env in os.environ
+ if should_stop:
+ logger.info("Received signal to stop generating")
+ return should_stop
+
+
+def prepare_lora(cfg: DefaultConfigProblemBase, backbone):
+ target_modules = (
+ [
+ lora_target_module.strip()
+ for lora_target_module in cfg.training.lora_target_modules.strip().split( # noqa: E501
+ ","
+ )
+ ]
+ if cfg.training.lora_target_modules
+ else None
+ )
+
+ if target_modules is None:
+ target_modules = []
+ for name, module in backbone.named_modules():
+ if (
+ isinstance(
+ module, (torch.nn.Linear, torch.nn.Conv1d, Conv1DTransformer)
+ )
+ and "head" not in name
+ ):
+ name = name.split(".")[-1]
+ if name not in target_modules:
+ target_modules.append(name)
+
+ logger.info(f"Lora module names: {target_modules}")
+
+ lora_config = LoraConfig(
+ use_dora=cfg.training.use_dora,
+ r=cfg.training.lora_r,
+ lora_alpha=cfg.training.lora_alpha,
+ target_modules=target_modules,
+ lora_dropout=cfg.training.lora_dropout,
+ bias="none",
+ use_rslora=cfg.training.use_rslora,
+ task_type="CAUSAL_LM",
+ )
+    # no longer needed with use_reentrant=False
+ # if cfg.architecture.gradient_checkpointing:
+ # backbone.enable_input_require_grads()
+
+ backbone = get_peft_model(backbone, lora_config)
+
+ for name, param in backbone.named_parameters():
+ # unfreeze base model's layers
+ if any(
+ unfreeze_layer in name
+ for unfreeze_layer in cfg.training.lora_unfreeze_layers
+ ):
+ logger.info(f"Unfreezing layer: {name}")
+ param.requires_grad = True
+
+ trainable_params, all_param = backbone.get_nb_trainable_parameters()
+ logger.info(f"Trainable parameters count: {trainable_params}")
+ logger.info(f"Total parameters count: {all_param}")
+ logger.info(f"Trainable %: {100 * trainable_params / all_param:.4f}%")
+
+ return backbone
+
+
+def get_torch_dtype(dtype):
+ if dtype == "float16":
+ return torch.float16
+ elif dtype == "bfloat16":
+ return torch.bfloat16
+ else:
+ return torch.float32
+
+
+def get_position_ids(attention_mask):
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ return position_ids
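+
+
+# Worked example (illustrative): for a left-padded attention mask
+#   [[0, 0, 1, 1, 1]]
+# the cumulative sum minus one gives [[-1, -1, 0, 1, 2]], and padded positions
+# are then filled with 1, yielding position ids [[1, 1, 0, 1, 2]].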
+
+
+def forward(backbone, input_ids, attention_mask):
+ kwargs = {
+ "input_ids": input_ids,
+ "attention_mask": attention_mask,
+ "position_ids": get_position_ids(attention_mask),
+ }
+
+ try:
+ output = backbone(**kwargs)
+ except TypeError:
+ # some models do not have position_ids
+ del kwargs["position_ids"]
+ output = backbone(**kwargs)
+ return output
+
+
+def generate(
+ backbone, batch, cfg: DefaultConfigProblemBase, streamer, remove_prompt=True
+):
+ mask_key = "prompt_attention_mask"
+ pad_keys = [
+ "prompt_input_ids",
+ "prompt_attention_mask",
+ ]
+ batch = batch_padding(
+ cfg,
+ batch,
+ training=False,
+ mask_key=mask_key,
+ pad_keys=pad_keys,
+ )
+ input_ids = batch["prompt_input_ids"]
+ attention_mask = batch["prompt_attention_mask"]
+ # Adding GenerationMixin type annotation for faster lookup
+ generation_function: GenerationMixin.generate = backbone.generate
+ verbosity = transformers_logging.get_verbosity()
+ stopping_criteria = StoppingCriteriaList(
+ [
+ TokenStoppingCriteria(
+ stop_word_ids=cfg.tokenizer._stop_words_ids,
+ prompt_input_ids_len=input_ids.shape[1],
+ ),
+ EnvVariableStoppingCriteria(),
+ ]
+ )
+ # force to use cache and disable gradient checkpointing if enabled
+ backbone.config.use_cache = True
+ if cfg.architecture.gradient_checkpointing:
+ backbone.gradient_checkpointing_disable()
+ transformers_logging.set_verbosity_error()
+ output = generation_function(
+ inputs=input_ids,
+ attention_mask=attention_mask,
+ generation_config=backbone.generation_config,
+ stopping_criteria=stopping_criteria,
+ renormalize_logits=True,
+ return_dict_in_generate=False,
+ use_cache=True,
+ streamer=streamer,
+ )
+ transformers_logging.set_verbosity(verbosity)
+ # enable checkpointing again
+ if cfg.architecture.gradient_checkpointing:
+ backbone.gradient_checkpointing_enable(
+ gradient_checkpointing_kwargs={"use_reentrant": False}
+ )
+ if remove_prompt:
+ output = output[:, input_ids.shape[1] :]
+ return output
diff --git a/llm_studio/src/utils/plot_utils.py b/llm_studio/src/utils/plot_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b2d3bf1e0257cf1c7a40145ff8133a2f491eb83b
--- /dev/null
+++ b/llm_studio/src/utils/plot_utils.py
@@ -0,0 +1,126 @@
+import html
+import re
+from dataclasses import dataclass
+from typing import List
+
+PLOT_ENCODINGS = ["image", "html", "df"]
+
+
+@dataclass
+class PlotData:
+ """
+ Data to plot.
+
+ Args:
+ data: the data to plot:
+            - a base64 encoded PNG if `encoding` is `image`.
+ - a string in HTML if `encoding` is `html`.
+ - a path to a parquet file if `encoding` is `df`.
+ encoding: the encoding of the data, one of PLOT_ENCODINGS.
+ """
+
+ data: str
+ encoding: str
+
+ def __post_init__(self):
+ assert self.encoding in PLOT_ENCODINGS, f"Unknown plot encoding {self.encoding}"
+
+
+def get_line_separator_html():
+    # a thin horizontal <div> rule used to separate blocks in HTML plots
+    return (
+        "<div style='height: 1px; width: 100%; margin: 1em 0;"
+        " background-color: grey;'></div>"
+    )
+
+
+def decode_bytes(chunks: List[bytes]):
+ """Decodes bytes to string
+
+ Args:
+ chunks: byte chunks
+
+ Returns:
+ list of decoded strings
+ """
+ decoded_tokens = []
+ buffer = b""
+
+ for chunk in chunks:
+ combined = buffer + chunk
+ try:
+ # Try to decode the combined bytes
+ decoded_tokens.append(combined.decode("utf-8"))
+ # If successful, clear the buffer
+ buffer = b""
+ except UnicodeDecodeError:
+ # If decoding failed, keep the current chunk in the buffer
+ # and attempt to combine it with the next chunk
+ buffer = chunk
+
+ # Attempt to decode any remaining bytes in the buffer
+ try:
+ decoded_tokens.append(buffer.decode("utf-8"))
+ except UnicodeDecodeError:
+ pass
+
+ return decoded_tokens
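+
+
+# Example (illustrative): a multi-byte UTF-8 character split across chunks is
+# buffered until it decodes cleanly; note the final buffer flush appends an
+# empty string when nothing is left over:
+#   decode_bytes([b"caf", b"\xc3", b"\xa9"]) -> ["caf", "é", ""]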
+
+
+def format_for_markdown_visualization(text: str) -> str:
+ """
+ Convert newlines to tags, except for those inside code blocks.
+ This is needed because the markdown_table_cell_type() function does not
+ convert newlines to tags, so we have to do it ourselves.
+
+ This function is rather simple and may fail on text that uses `
+ in some other context than marking code cells or uses ` within
+ the code itself (as this function).
+ """
+ code_block_regex = r"(```.*?```|``.*?``)"
+ parts = re.split(code_block_regex, text, flags=re.DOTALL)
+ for i in range(len(parts)):
+ # Only substitute for text outside matched code blocks
+ if "`" not in parts[i]:
+            parts[i] = parts[i].replace("\n", "<br />").strip()
+ text = "".join(parts)
+
+ # Restore newlines around code blocks, needed for correct rendering
+ for x in ["```", "``", "`"]:
+ text = text.replace(f" {x}", f"\n{x}")
+ text = text.replace(f"{x} ", f"{x}\n")
+    return html.escape(text.replace("<br />", "\n"))
+
+
+def list_to_markdown_representation(
+    tokens: List[str], masks: List[bool], pad_token: str, num_chars: int = 65
+):
+ """
+ Creates a markdown representation string from a list of tokens,
+ with HTML line breaks after 'num_chars' characters.
+ Masked tokens will be emphasized in HTML representation.
+
+ """
+ x = []
+ sublist: List[str] = []
+ raw_sublist: List[str] = []
+ for token, mask in zip(tokens, masks):
+ if len(token) + len(", ".join(raw_sublist)) > num_chars:
+ x.append(", ".join(sublist))
+ sublist = []
+ raw_sublist = []
+
+ raw_sublist.append(token)
+ token_formatted = html.escape(token)
+ if mask:
+ token_formatted = f"""***{token_formatted}***"""
+        elif token == pad_token:
+            token_formatted = f"""<span style="color: grey;">{
+                token_formatted
+            }</span>"""
+ sublist.append(token_formatted)
+
+ if sublist: # add any remaining items in sublist
+ x.append(", ".join(sublist))
+
+ list_representation = "\n[" + " ".join(x) + "]\n"
+ return list_representation
diff --git a/llm_studio/src/utils/type_annotations.py b/llm_studio/src/utils/type_annotations.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b53c8ab62368219d57689ed97a51f5a3d46609e
--- /dev/null
+++ b/llm_studio/src/utils/type_annotations.py
@@ -0,0 +1,4 @@
+from typing import Tuple
+
+# types which can be shown directly in the UI without any extra nesting
+KNOWN_TYPE_ANNOTATIONS = [int, float, bool, str, Tuple[str, ...]]
diff --git a/llm_studio/src/utils/utils.py b/llm_studio/src/utils/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..16effd7403cfcd342cf55e11164237fd6b89cb22
--- /dev/null
+++ b/llm_studio/src/utils/utils.py
@@ -0,0 +1,207 @@
+import logging
+import os
+import pickle
+import random
+import zipfile
+from typing import Any, Optional
+
+import numpy as np
+import psutil
+import torch
+
+logger = logging.getLogger(__name__)
+
+
+def set_seed(seed: int = 1234) -> None:
+ """
+ Sets the random seed for various Python libraries to ensure reproducibility
+ of results across different runs.
+
+ Args:
+ seed (int, optional): seed value. Defaults to 1234.
+ """
+
+ random.seed(seed)
+ os.environ["PYTHONHASHSEED"] = str(seed)
+ np.random.seed(seed)
+ torch.manual_seed(seed)
+ torch.cuda.manual_seed_all(seed)
+ torch.backends.cudnn.deterministic = False
+ torch.backends.cudnn.benchmark = True
+
+
+def check_metric(cfg):
+ """
+ Checks if the metric is set to GPT and if the OpenAI API key is set.
+ If not, sets the metric to BLEU and logs a warning.
+ """
+
+ if "GPT" in cfg.prediction.metric and os.getenv("OPENAI_API_KEY", "") == "":
+ logger.warning("No OpenAI API Key set. Setting metric to BLEU. ")
+ cfg.prediction.metric = "BLEU"
+ return cfg
+
+
+def kill_child_processes(current_pid: int, exclude=None) -> bool:
+ """
+ Killing all child processes of the current process.
+ Optionally, excludes one PID
+
+ Args:
+ current_pid: current process id
+ exclude: process id to exclude
+
+ Returns:
+ True or False in case of success or failure
+ """
+
+ logger.debug(f"Killing process id: {current_pid}")
+
+ try:
+ current_process = psutil.Process(current_pid)
+ if current_process.status() == "zombie":
+ return False
+ children = current_process.children(recursive=True)
+ for child in children:
+ if child.pid == exclude:
+ continue
+ child.kill()
+ return True
+ except psutil.NoSuchProcess:
+ logger.warning(f"Cannot kill process id: {current_pid}. No such process.")
+ return False
+
+
+def kill_child_processes_and_current(current_pid: Optional[int] = None) -> bool:
+ """
+ Kill all child processes of the current process, then terminates itself.
+ Optionally, specify the current process id.
+ If not specified, uses the current process id.
+
+ Args:
+ current_pid: current process id (default: None)
+
+ Returns:
+ True or False in case of success or failure
+ """
+ if current_pid is None:
+ current_pid = os.getpid()
+ kill_child_processes(current_pid)
+ try:
+ current_process = psutil.Process(current_pid)
+ current_process.kill()
+ return True
+ except psutil.NoSuchProcess:
+ logger.warning(f"Cannot kill process id: {current_pid}. No such process.")
+ return False
+
+
+def kill_sibling_ddp_processes() -> None:
+ """
+ Killing all sibling DDP processes from a single DDP process.
+ """
+ pid = os.getpid()
+ parent_pid = os.getppid()
+ kill_child_processes(parent_pid, exclude=pid)
+ current_process = psutil.Process(pid)
+ current_process.kill()
+
+
+def add_file_to_zip(zf: zipfile.ZipFile, path: str, folder=None) -> None:
+ """Adds a file to the existing zip. Does nothing if file does not exist.
+
+ Args:
+ zf: zipfile object to add to
+ path: path to the file to add
+ folder: folder in the zip to add the file to
+ """
+
+ try:
+ if folder is None:
+ zip_path = os.path.basename(path)
+ else:
+ zip_path = os.path.join(folder, os.path.basename(path))
+ zf.write(path, zip_path)
+ except Exception:
+ logger.warning(f"File {path} could not be added to zip.")
+
+
+def save_pickle(path: str, obj: Any, protocol: int = 4) -> None:
+ """Saves object as pickle file
+
+ Args:
+ path: path of file to save
+ obj: object to save
+ protocol: protocol to use when saving pickle
+ """
+
+ with open(path, "wb") as pickle_file:
+ pickle.dump(obj, pickle_file, protocol=protocol)
+
+
+class DisableLogger:
+ def __init__(self, level: int = logging.INFO):
+ self.level = level
+
+ def __enter__(self):
+ logging.disable(self.level)
+
+ def __exit__(self, exit_type, exit_value, exit_traceback):
+ logging.disable(logging.NOTSET)
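+
+
+# Usage sketch (illustrative): silence INFO-and-below messages for a noisy call:
+#   with DisableLogger():
+#       noisy_function()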
+
+
+class PatchedAttribute:
+ """
+ Patches an attribute of an object for the duration of this context manager.
+    Similar to unittest.mock.patch, but also works for attributes
+    that are not present in the original class.
+
+ >>> class MyObj:
+ ... attr = 'original'
+ >>> my_obj = MyObj()
+ >>> with PatchedAttribute(my_obj, 'attr', 'patched'):
+ ... print(my_obj.attr)
+ patched
+ >>> print(my_obj.attr)
+ original
+ >>> with PatchedAttribute(my_obj, 'new_attr', 'new_patched'):
+ ... print(my_obj.new_attr)
+ new_patched
+ >>> assert not hasattr(my_obj, 'new_attr')
+ """
+
+ def __init__(self, obj, attribute, new_value):
+ self.obj = obj
+ self.attribute = attribute
+ self.new_value = new_value
+ self.original_exists = hasattr(obj, attribute)
+ if self.original_exists:
+ self.original_value = getattr(obj, attribute)
+
+ def __enter__(self):
+ setattr(self.obj, self.attribute, self.new_value)
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ if self.original_exists:
+ setattr(self.obj, self.attribute, self.original_value)
+ else:
+ delattr(self.obj, self.attribute)
+
+
+def create_symlinks_in_parent_folder(directory):
+ """Creates symlinks for each item in a folder in the parent folder
+
+ Only creates symlinks for items at the root level of the directory.
+ """
+
+ if not os.path.exists(directory):
+ raise FileNotFoundError(f"Directory {directory} does not exist.")
+
+ parent_directory = os.path.dirname(directory)
+
+ for file in os.listdir(directory):
+ src = os.path.join(directory, file)
+ dst = os.path.join(parent_directory, file)
+ if os.path.exists(dst):
+ os.remove(dst)
+ os.symlink(src, dst)
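+
+
+# Example (illustrative): for /workdir/output/run1/ containing cfg.yaml and
+# weights/, create_symlinks_in_parent_folder("/workdir/output/run1") creates
+# /workdir/output/cfg.yaml and /workdir/output/weights pointing into run1/.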
diff --git a/llm_studio/train.py b/llm_studio/train.py
new file mode 100644
index 0000000000000000000000000000000000000000..57aeb37cd60b0777f2e92ccb61e366564272d376
--- /dev/null
+++ b/llm_studio/train.py
@@ -0,0 +1,762 @@
+import os
+import sys
+
+sys.path.append(os.path.dirname(os.path.dirname(__file__)))
+
+os.environ["OMP_NUM_THREADS"] = "1"
+os.environ["MKL_NUM_THREADS"] = "1"
+os.environ["OPENBLAS_NUM_THREADS"] = "1"
+os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
+os.environ["NUMEXPR_NUM_THREADS"] = "1"
+os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+import argparse
+import gc
+import logging
+import time
+from distutils import util
+from typing import Any, Callable, Dict, Tuple
+
+import deepspeed
+import numpy as np
+import pandas as pd
+import torch
+from torch.cuda.amp import GradScaler, autocast
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+from transformers.integrations import HfDeepSpeedConfig
+
+from llm_studio.python_configs.base import DefaultConfigProblemBase
+from llm_studio.python_configs.cfg_checks import check_config_for_errors
+from llm_studio.src.loggers import MainLogger
+from llm_studio.src.utils.config_utils import (
+ load_config_py,
+ load_config_yaml,
+ save_config_yaml,
+)
+from llm_studio.src.utils.data_utils import (
+ get_data,
+ get_inference_batch_size,
+ get_train_dataloader,
+ get_train_dataset,
+ get_val_dataloader,
+ get_val_dataset,
+)
+from llm_studio.src.utils.exceptions import LLMTrainingException
+from llm_studio.src.utils.export_utils import save_prediction_outputs
+from llm_studio.src.utils.gpu_utils import sync_across_processes
+from llm_studio.src.utils.logging_utils import (
+ TqdmToLogger,
+ initialize_logging,
+ log_plot,
+ write_flag,
+)
+from llm_studio.src.utils.modeling_utils import (
+ activate_neftune,
+ check_disk_space,
+ get_ds_config,
+ get_number_of_validation_epochs,
+ get_optimizer,
+ get_scheduler,
+ get_torch_dtype,
+ load_checkpoint,
+ run_inference,
+ save_checkpoint,
+ save_predictions,
+ wrap_model_distributed,
+)
+from llm_studio.src.utils.utils import (
+ check_metric,
+ create_symlinks_in_parent_folder,
+ kill_child_processes_and_current,
+ kill_sibling_ddp_processes,
+ set_seed,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def run_eval(
+ cfg: DefaultConfigProblemBase,
+ model: torch.nn.Module,
+ val_dataloader: DataLoader,
+ val_df: pd.DataFrame,
+ mode: str = "validation",
+) -> Tuple:
+ """Runs the evaluation loop.
+
+ Args:
+ cfg: config object
+ model: trained model
+ val_dataloader: validation Dataloader
+ val_df: validation DataFrame
+ mode: validation
+
+ Returns:
+ Validation loss
+ """
+ with torch.no_grad():
+ is_training = model.training
+ model.eval()
+ val_data: Dict[str, Any] = run_inference(
+ cfg, model, val_dataloader, mode
+ ) # type: ignore
+ model.train(is_training)
+
+ # Sync validation predictions across GPUs
+ if cfg.environment._distributed and cfg.environment._distributed_inference:
+ for key, value in val_data.items():
+ val_data[key] = sync_across_processes(
+ value, cfg.environment._world_size, group=cfg.environment._cpu_comm
+ )
+
+ if cfg.environment._local_rank != 0:
+ # data has been synced, so we can return early on other ranks
+ if cfg.environment._distributed:
+ torch.distributed.barrier()
+ return 0, 0
+
+ # Drop any extra observations
+ for k, v in val_data.items():
+ val_data[k] = v[: len(val_dataloader.dataset)] # type: ignore
+
+ val_data = val_dataloader.dataset.postprocess_output( # type: ignore
+ cfg=cfg, df=val_df, output=val_data
+ )
+ val_loss = np.mean(val_data.get("loss", torch.tensor(0)).float().cpu().numpy())
+ # postprocess_output only runs on rank 0 to save time/memory
+ val_metric = np.mean(val_data["metrics"])
+ logger.info(f"{mode.capitalize()} {cfg.prediction.metric}: {val_metric:.5f}")
+
+ for key in val_data:
+ if key.startswith("additional_log_") or key == "loss":
+ value = np.mean(val_data[key].float().cpu().numpy())
+ key = key.replace("additional_log_", "")
+ logger.info(f"Mean {mode} {key}: {value:.5f}")
+ cfg.logging._logger.log(
+ mode,
+ key,
+ value,
+ step=cfg.environment._curr_step / cfg.environment._step_log_denominator,
+ )
+ cfg.logging._logger.log(
+ mode,
+ cfg.prediction.metric,
+ val_metric,
+ step=cfg.environment._curr_step / cfg.environment._step_log_denominator,
+ )
+
+ # Log plots
+ if val_df is not None:
+ plot = cfg.logging.plots_class.plot_validation_predictions(
+ val_outputs=val_data, cfg=cfg, val_df=val_df, mode="validation"
+ )
+ log_plot(cfg, plot, "validation_predictions")
+
+ save_predictions(cfg, val_data, val_dataloader, val_df, mode)
+
+ if cfg.environment._distributed:
+ torch.distributed.barrier()
+
+ return val_loss, val_metric
+
+
+def run_train(
+ cfg: DefaultConfigProblemBase,
+ model: torch.nn.Module,
+ optimizer,
+ scheduler,
+ epoch_steps,
+ train_dataloader,
+ val_dataloader,
+ val_df: pd.DataFrame,
+):
+ """Runs the training loop.
+
+    Args:
+        cfg: DefaultConfigProblemBase config object
+        model: model
+        optimizer: optimizer
+        scheduler: learning rate scheduler
+        epoch_steps: number of optimizer steps per epoch
+        train_dataloader: custom training Dataloader
+        val_dataloader: custom validation Dataloader
+        val_df: validation DataFrame
+
+ Returns:
+ Validation prediction output
+ Validation loss
+ Validation metric
+ Last train batch
+ """
+ if (
+ hasattr(cfg.augmentation, "neftune_noise_alpha")
+ and cfg.augmentation.neftune_noise_alpha > 0
+ ):
+ activate_neftune(model, cfg.augmentation.neftune_noise_alpha)
+
+ scaler: GradScaler | None = None
+ if cfg.environment.mixed_precision:
+ scaler = GradScaler(
+ enabled=(cfg.environment.mixed_precision_dtype == "float16")
+ )
+
+ optimizer.zero_grad(set_to_none=True)
+
+ # Prepare NLP Augmentation
+ nlp_augment = None
+ if hasattr(cfg.augmentation, "nlp_augmentations_class"):
+ nlp_augment = cfg.augmentation.nlp_augmentations_class(cfg=cfg)
+
+ start_epoch = 0
+
+ _, metric_mode, _ = cfg.prediction.metric_class.get(cfg.prediction.metric)
+ objective_op: Callable[[float, float], bool]
+ if metric_mode == "max":
+ best_val_metric = -np.inf
+ objective_op = np.greater
+ else:
+ best_val_metric = np.inf
+ objective_op = np.less
+
+ if cfg.training.evaluate_before_training:
+ val_loss, val_metric = run_eval(
+ cfg=cfg, model=model, val_dataloader=val_dataloader, val_df=val_df
+ )
+
+ for epoch in range(start_epoch, cfg.training.epochs):
+ set_seed(
+ cfg.environment._seed
+ + epoch * cfg.environment._world_size * cfg.environment.number_of_workers
+ + cfg.environment._local_rank * cfg.environment.number_of_workers
+ )
+ logger.info(f"Training Epoch: {epoch + 1} / {cfg.training.epochs}")
+
+ if (
+ cfg.environment._distributed
+ and not cfg.environment.use_deepspeed
+ and hasattr(train_dataloader.sampler, "set_epoch")
+ ):
+ train_dataloader.sampler.set_epoch(epoch) # type: ignore
+
+ tqdm_out = TqdmToLogger(logger, level=logging.INFO)
+ progress_bar = tqdm(
+ total=epoch_steps,
+ disable=cfg.environment._local_rank != 0,
+ file=tqdm_out,
+ ascii=True,
+ desc="train loss",
+ mininterval=0,
+ )
+ tr_it = iter(train_dataloader)
+
+ losses = []
+ model.train()
+
+ log_update_steps = max(epoch_steps // 20, 1)
+ evaluation_step = max(int(epoch_steps * cfg.training.evaluation_epochs), 1)
+ logger.info(f"Evaluation step: {evaluation_step}")
+
+ for itr, data in enumerate(tr_it):
+ cfg.environment._curr_step += (
+ cfg.training.batch_size * cfg.environment._world_size
+ )
+
+ # Batch to device
+ batch = cfg.dataset.dataset_class.batch_to_device(
+ data, cfg.environment._device
+ )
+
+ # NLP augmentation
+ if nlp_augment is not None:
+ batch = nlp_augment(batch)
+
+ # Plot first batch
+ if epoch == 0 and itr == 0 and cfg.environment._local_rank == 0:
+ plot = cfg.logging.plots_class.plot_batch(batch=batch, cfg=cfg)
+ log_plot(cfg, plot, "train_data")
+
+ # only need to sync gradients at last step of grad accumulation
+ model.require_backward_grad_sync = itr % cfg.training.grad_accumulation == 0
+
+ # Forward pass
+ with autocast(
+ enabled=cfg.environment.mixed_precision,
+ dtype=get_torch_dtype(cfg.environment.mixed_precision_dtype),
+ ):
+ output_dict = model.forward(batch)
+
+ loss = output_dict["loss"]
+ if not np.isfinite(loss.item()) and (epoch > start_epoch or itr > 20):
+ raise LLMTrainingException(
+ "NaN caught in loss during training. "
+ "Please, reduce learning rate, change dtype, "
+ "or disable mixed precision. Alternatively, "
+ "gradient clipping may help to stabilize training."
+ )
+ losses.append(loss.item())
+
+ # loss is a mean loss per batch/sample
+ # as grad_accumulations sums up the gradients, this loss must be scaled
+ # by the number of grad_accumulations, to have similar behavior for
+ # BS * grad_accumulations = const.
+ if cfg.training.grad_accumulation != 1:
+ loss = loss / cfg.training.grad_accumulation
+
+ # Backward pass
+ if (
+ cfg.environment.mixed_precision
+ and len(cfg.environment.gpus)
+ and not cfg.environment.use_deepspeed
+ ):
+ scaler.scale(loss).backward() # type: ignore
+ if itr % cfg.training.grad_accumulation == 0:
+ if cfg.training.gradient_clip > 0:
+ scaler.unscale_(optimizer) # type: ignore
+ torch.nn.utils.clip_grad_norm_(
+ model.parameters(), cfg.training.gradient_clip
+ )
+ scaler.step(optimizer) # type: ignore
+ scaler.update()
+ optimizer.zero_grad(set_to_none=True)
+ else:
+ if cfg.environment.use_deepspeed:
+ model.backward(loss) # type: ignore[operator]
+ else:
+ loss.backward()
+ if itr % cfg.training.grad_accumulation == 0:
+ if cfg.training.gradient_clip > 0:
+ torch.nn.utils.clip_grad_norm_(
+ model.parameters(), cfg.training.gradient_clip
+ )
+ optimizer.step()
+ optimizer.zero_grad(set_to_none=True)
+
+ if cfg.environment._distributed:
+ torch.cuda.synchronize(device=cfg.environment._local_rank)
+
+ if scheduler is not None:
+ scheduler.step()
+
+ if cfg.environment._local_rank == 0:
+ cfg.logging._logger.log(
+ "train",
+ "loss",
+ losses[-1],
+ step=cfg.environment._curr_step
+ / cfg.environment._step_log_denominator,
+ )
+ cfg.logging._logger.log(
+ "meta",
+ "lr",
+ optimizer.param_groups[0]["lr"],
+ step=cfg.environment._curr_step
+ / cfg.environment._step_log_denominator,
+ )
+ if cfg.training.differential_learning_rate_layers:
+ cfg.logging._logger.log(
+ "meta",
+ "lr_diff",
+ optimizer.param_groups[2]["lr"],
+ step=cfg.environment._curr_step
+ / cfg.environment._step_log_denominator,
+ )
+
+ cfg.logging._logger.log(
+ "internal",
+ "current_step",
+ cfg.environment._curr_step,
+ )
+ for key in output_dict:
+ if key.startswith("additional_log_"):
+ cfg.logging._logger.log(
+ "train",
+ key.replace("additional_log_", ""),
+ output_dict[key].item(),
+ step=cfg.environment._curr_step
+ / cfg.environment._step_log_denominator,
+ )
+
+ # Show logs every 5% of the epoch (only when doing per-epoch evaluation)
+ if (itr + 1) % log_update_steps == 0 or itr == epoch_steps - 1:
+ progress_bar.set_description(
+ f"train loss: {np.mean(losses[-10:]):.2f}", refresh=False
+ )
+ if (itr + 1) % log_update_steps == 0:
+ progress_bar.update(log_update_steps)
+ else:
+ progress_bar.update(epoch_steps % log_update_steps)
+
+ del output_dict
+
+ # Validation loop
+ if (itr + 1) % evaluation_step == 0:
+ # TODO: Move back after fixing slow generation of deepspeed.
+ if cfg.training.save_checkpoint == "last":
+ logger.info(
+ f"Saving last model checkpoint to {cfg.output_directory}"
+ )
+ save_checkpoint(model=model, path=cfg.output_directory, cfg=cfg)
+ elif cfg.training.save_checkpoint == "each_evaluation_epoch":
+ checkpoint_path = os.path.join(
+ cfg.output_directory, f"epoch_{epoch}_step_{itr}"
+ )
+ logger.info(f"Saving model checkpoint to {checkpoint_path}")
+ save_checkpoint(model=model, path=checkpoint_path, cfg=cfg)
+ create_symlinks_in_parent_folder(checkpoint_path)
+
+ val_loss, val_metric = run_eval(
+ cfg=cfg, model=model, val_dataloader=val_dataloader, val_df=val_df
+ )
+
+ if cfg.training.save_checkpoint == "best":
+ if objective_op(val_metric, best_val_metric):
+ logger.info(
+ f"Saving best model checkpoint: "
+ f"val_{cfg.prediction.metric} {best_val_metric:.5} -> "
+ f"{val_metric:.5} to {cfg.output_directory}"
+ )
+ save_checkpoint(model=model, path=cfg.output_directory, cfg=cfg)
+ best_val_metric = val_metric
+
+ model.train()
+
+ progress_bar.close()
+ del progress_bar
+
+ if cfg.environment._distributed:
+ torch.cuda.synchronize(device=cfg.environment._local_rank)
+ torch.distributed.barrier()
+
+ if cfg.environment._local_rank == 0:
+ cfg.logging._logger.log("internal", "epoch", epoch + 1)
+
+ if cfg.environment._distributed:
+ torch.distributed.barrier()
+
+ return val_loss, val_metric
+
+
+def run(cfg: DefaultConfigProblemBase) -> float:
+ """Runs the routine.
+
+ Args:
+ cfg: DefaultConfigProblemBase config object with all the hyperparameters
+ """
+
+ os.makedirs(cfg.output_directory, exist_ok=True)
+
+ # Force evaluation if user trains 0 epochs
+ cfg.training.evaluate_before_training = (
+ cfg.training.evaluate_before_training or cfg.training.epochs == 0
+ )
+
+ # Set the random seed for reproducibility
+ # either a random seed when the user sets it to -1 or a deterministic user-chosen seed
+ if cfg.environment.seed < 0:
+ cfg.environment._seed = np.random.randint(1_000_000)
+ else:
+ cfg.environment._seed = cfg.environment.seed
+
+ if (
+ cfg.architecture.backbone_dtype in ["int8", "int4"]
+ and cfg.environment.use_deepspeed
+ ):
+ raise ValueError(
+ f"Deepspeed do not support backbone type {cfg.architecture.backbone_dtype}."
+ + " Please set backbone type to float16 or bfloat16 for using deepspeed."
+ )
+
+ # Prepare environment
+ if "WORLD_SIZE" in os.environ:
+ cfg.environment._distributed = int(os.environ["WORLD_SIZE"]) > 1
+ cfg.environment._local_rank = int(os.environ["LOCAL_RANK"])
+ else:
+ cfg.environment._distributed = False
+ cfg.environment._local_rank = 0
+
+ initialize_logging(cfg)
+
+ # Check for errors in the configuration
+ errors = check_config_for_errors(cfg)
+ for i in range(len(errors["title"])):
+ if errors["type"][i] == "error":
+ logger.error(f"{errors['title'][i]}: {errors['message'][i]}")
+ else:
+ logger.warning(f"{errors['title'][i]}: {errors['message'][i]}")
+
+ if any(error_type == "error" for error_type in errors["type"]):
+ raise LLMTrainingException(
+ "Configuration contains errors. Please fix them before proceeding."
+ )
+
+ if cfg.environment._distributed:
+ cfg.environment._device = "cuda:%d" % cfg.environment._local_rank
+ if cfg.environment.use_deepspeed:
+ deepspeed.init_distributed()
+ else:
+ torch.distributed.init_process_group(backend="nccl", init_method="env://")
+ cfg.environment._cpu_comm = torch.distributed.new_group(backend="gloo")
+
+ cfg.environment._world_size = torch.distributed.get_world_size()
+ cfg.environment._rank = torch.distributed.get_rank()
+ torch.cuda.set_device(cfg.environment._rank)
+ logger.info(
+ f"Training in distributed mode with multiple processes, "
+ f"1 GPU per process. Process {cfg.environment._rank}, "
+ f"total: {cfg.environment._world_size} "
+ f"local rank: {cfg.environment._local_rank}."
+ )
+
+ # Sync the random seed
+ cfg.environment._seed = int(
+ sync_across_processes(
+ np.array([cfg.environment._seed]),
+ cfg.environment._world_size,
+ group=cfg.environment._cpu_comm,
+ )[0]
+ )
+ else:
+ cfg.environment._device = (
+ "cuda:0"
+ if (torch.cuda.is_available() and len(cfg.environment.gpus) > 0)
+ else "cpu"
+ )
+ if cfg.environment._device == "cpu":
+ logger.warning("Training on CPU. This will be slow.")
+
+ set_seed(cfg.environment._seed)
+ logger.info(f"Problem Type: {cfg.problem_type}")
+ logger.info(f"Global random seed: {cfg.environment._seed}")
+
+ cfg = check_metric(cfg)
+
+ # we need to get the train dataframe and the number of labels if not set or when in training mode
+ logger.info("Preparing the data...")
+ train_df, val_df = get_data(cfg)
+
+ # We allow the system prompt column to be missing in the validation DataFrame,
+ # but assert that it exists in the train DataFrame.
+ if hasattr(cfg.dataset, "system_column") and cfg.dataset.system_column != "None":
+ if cfg.dataset.system_column not in train_df.columns:
+ raise LLMTrainingException(
+ f"System column '{cfg.dataset.system_column}' not found in train "
+ "DataFrame."
+ )
+
+ if (
+ len(val_df) > int(os.getenv("GPT_EVAL_MAX", 100))
+ and "GPT" in cfg.prediction.metric
+ ):
+ logger.warning(
+ f"More than {os.getenv('GPT_EVAL_MAX', 100)} validation records. "
+ "Safeguarding against OpenAI API costs. Setting metric to BLEU. "
+ "Change GPT_EVAL_MAX to run GPT validation."
+ )
+ cfg.prediction.metric = "BLEU"
+
+ # prepare data
+ logger.info("Preparing train and validation data")
+ train_dataset = get_train_dataset(train_df=train_df, cfg=cfg)
+ val_dataset = get_val_dataset(val_df=val_df, cfg=cfg)
+ train_dataloader = get_train_dataloader(train_ds=train_dataset, cfg=cfg)
+ val_dataloader = get_val_dataloader(val_ds=val_dataset, cfg=cfg)
+
+ if cfg.environment._local_rank == 0:
+ total_training_steps = (
+ cfg.training.epochs
+ * len(train_dataloader)
+ * cfg.training.batch_size
+ * cfg.environment._world_size
+ )
+
+ num_eval_epochs = get_number_of_validation_epochs(
+ training_epochs=cfg.training.epochs,
+ evaluation_epochs=cfg.training.evaluation_epochs,
+ )
+ val_batch_size = get_inference_batch_size(cfg)
+
+ total_validation_steps = (
+ len(val_dataloader)
+ * (num_eval_epochs + int(cfg.training.evaluate_before_training))
+ * val_batch_size
+ * cfg.environment._world_size
+ )
+
+ if cfg.logging.log_step_size == "relative":
+ cfg.environment._step_log_denominator = total_training_steps
+ else:
+ cfg.environment._step_log_denominator = 1
+
+ # Prepare model and optimizer
+ if cfg.environment.use_deepspeed:
+ ds_config = get_ds_config(cfg)
+ # keep this object alive.
+ dschf = HfDeepSpeedConfig(ds_config) # noqa: F841
+ with torch.device(cfg.environment._device):
+ model = cfg.architecture.model_class(cfg)
+ check_disk_space(model, cfg.output_directory)
+
+ # load model weights
+ if cfg.architecture.pretrained_weights != "":
+ # Do not load strictly when continuing training from a previous experiment
+ load_checkpoint(cfg, model, strict=cfg.training.epochs == -1)
+ model.to(cfg.environment._device)
+
+ epoch_steps = len(train_dataloader)
+ optimizer = get_optimizer(model=model, cfg=cfg)
+ scheduler = get_scheduler(cfg=cfg, optimizer=optimizer, epoch_steps=epoch_steps)
+
+ if cfg.environment._distributed:
+ (
+ model,
+ optimizer,
+ train_dataloader,
+ val_dataloader,
+ scheduler,
+ ) = wrap_model_distributed(
+ model=model,
+ optimizer=optimizer,
+ lr_scheduler=scheduler,
+ train_dataloader=train_dataloader,
+ val_dataloader=val_dataloader,
+ cfg=cfg,
+ )
+
+ if cfg.environment.compile_model:
+ # deepspeed does not support torch.compile
+ if cfg.environment.use_deepspeed:
+ logger.warning(
+ "Deepspeed is active, but it doesn't support torch.compile. "
+ "Skipping compilation for this experiment."
+ )
+ else:
+ if cfg.environment._distributed:
+ model.module.backbone = torch.compile(model.module.backbone)
+ else:
+ model.backbone = torch.compile(model.backbone)
+
+ # reset steps
+ cfg.environment._curr_step = 0
+ cfg.environment._curr_val_step = 0
+
+ gc.collect()
+
+ global_start_time = time.time()
+ if cfg.environment._local_rank == 0:
+ # re-save cfg
+ save_config_yaml(f"{cfg.output_directory}/cfg.yaml", cfg)
+
+ cfg.logging._logger = MainLogger(cfg)
+
+ cfg.logging._logger.log(
+ "internal", "total_training_steps", total_training_steps
+ )
+
+ cfg.logging._logger.log(
+ "internal", "total_validation_steps", total_validation_steps
+ )
+
+ cfg.logging._logger.log(
+ "internal",
+ "global_start_time",
+ global_start_time,
+ )
+ # re-save config
+ save_config_yaml(f"{cfg.output_directory}/cfg.yaml", cfg)
+
+ val_loss, val_metric = run_train(
+ cfg=cfg,
+ model=model,
+ optimizer=optimizer,
+ scheduler=scheduler,
+ epoch_steps=epoch_steps,
+ train_dataloader=train_dataloader,
+ val_dataloader=val_dataloader,
+ val_df=val_df,
+ )
+
+ # reset external logging
+ if cfg.environment._local_rank == 0:
+ cfg.logging._logger.reset_external()
+
+ experiment_path = f"{cfg.output_directory}"
+
+ if cfg.training.epochs == 0 and cfg.training.save_checkpoint != "disable":
+ checkpoint_path = cfg.output_directory
+ logger.info(f"Saving last model checkpoint to {checkpoint_path}")
+ save_checkpoint(model=model, path=checkpoint_path, cfg=cfg)
+
+ if cfg.environment._local_rank == 0:
+ save_config_yaml(f"{cfg.output_directory}/cfg.yaml", cfg)
+ save_prediction_outputs(cfg.experiment_name, experiment_path)
+
+ flag_path = os.path.join(cfg.output_directory, "flags.json")
+ write_flag(flag_path, "status", "finished")
+ time_took = time.time() - global_start_time
+ if time_took > 86400:
+ # if more than one day, show days
+ # need to subtract 1 day from time_took since strftime shows day of year
+ # which starts counting at 1
+ time_took_formatted = time.strftime(
+ "%-jd %H:%M:%S", time.gmtime(float(time_took - 86400))
+ )
+ else:
+ time_took_formatted = time.strftime(
+ "%H:%M:%S", time.gmtime(float(time_took))
+ )
+ write_flag(flag_path, "info", f"Runtime: {time_took_formatted}")
+
+ return val_metric
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="")
+ parser.add_argument(
+ "-C", "--config", help="config filename", type=str, default=argparse.SUPPRESS
+ )
+ parser.add_argument(
+ "-Y", "--yaml", help="yaml filename", type=str, default=argparse.SUPPRESS
+ )
+ parser_args, unknown = parser.parse_known_args(sys.argv)
+
+ if "config" in parser_args:
+ logging.warning(
+ "Using deprecated -C argument. Please use -Y instead to load yaml."
+ )
+ cfg: DefaultConfigProblemBase = load_config_py(parser_args.config)
+ elif "yaml" in parser_args:
+ cfg = load_config_yaml(parser_args.yaml)
+ else:
+ raise ValueError("Please, provide a configuration file")
+
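+ # Allow overriding config attributes via extra "--<section>.<key> <value>"
+ # command line arguments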
+ extra_args = []
+ for arg_orig in unknown:
+ if arg_orig.startswith(("-", "--")):
+ arg = arg_orig.replace("-", "").split(".")
+ try:
+ arg_type = getattr(cfg, arg[0]).get_annotations()[arg[1]]
+ except (AttributeError, KeyError):
+ continue
+ if arg_type == bool:
+ parser.add_argument(arg_orig, type=util.strtobool)
+ else:
+ parser.add_argument(arg_orig, type=arg_type)
+ extra_args.append(arg)
+
+ args = parser.parse_args()
+
+ for arg in extra_args:
+ value = getattr(args, ".".join(arg))
+ setattr(getattr(cfg, arg[0]), arg[1], value)
+
+ out_dir = cfg.output_directory
+ os.makedirs(out_dir, exist_ok=True)
+
+ try:
+ run(cfg=cfg)
+ except Exception:
+ logging.error("Exception occurred during the run:", exc_info=True)
+ if ("WORLD_SIZE" in os.environ) and (int(os.environ["WORLD_SIZE"]) > 1):
+ kill_sibling_ddp_processes()
+ else:
+ kill_child_processes_and_current()
diff --git a/llm_studio/train_wave.py b/llm_studio/train_wave.py
new file mode 100644
index 0000000000000000000000000000000000000000..92ac09ced9ecac515a81518449c9034e6b8632ab
--- /dev/null
+++ b/llm_studio/train_wave.py
@@ -0,0 +1,162 @@
+import os
+
+# Set these before importing any other modules to be on the safe side
+os.environ["OMP_NUM_THREADS"] = "1"
+os.environ["MKL_NUM_THREADS"] = "1"
+os.environ["OPENBLAS_NUM_THREADS"] = "1"
+os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
+os.environ["NUMEXPR_NUM_THREADS"] = "1"
+os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+import argparse
+import logging
+import sys
+import time
+
+import psutil
+
+sys.path.append(os.path.dirname(os.path.dirname(__file__)))
+
+
+def check_for_done(process_queue):
+ """Checks for finished process ids
+
+ Args:
+ process_queue: list of process ids
+ Returns:
+ (True, process_idx) if there is any finished process
+ (False, False) if there is not finished processes
+ """
+
+ for i, pid in enumerate(process_queue):
+ zombie = False
+ try:
+ p = psutil.Process(pid)
+ zombie = p.status() == "zombie"
+ except psutil.NoSuchProcess:
+ pass
+ if not psutil.pid_exists(pid) or zombie:
+ return True, i
+
+ return False, False
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="")
+ parser.add_argument(
+ "-Y", "--yaml", help="yaml filename", type=(str), default=argparse.SUPPRESS
+ )
+ parser.add_argument(
+ "-Q",
+ "--process-queue",
+ help="process queue to wait for",
+ default=argparse.SUPPRESS,
+ )
+ parser_args, _ = parser.parse_known_args(sys.argv)
+
+ process_queue = []
+ if "process_queue" in parser_args and parser_args.process_queue != "":
+ process_queue = [int(x) for x in parser_args.process_queue.split(",")]
+
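+ # Poll every 30 seconds until all processes in the queue have finished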
+ while True:
+ if len(process_queue) == 0:
+ break
+ done, num = check_for_done(process_queue)
+ if done:
+ process_queue.pop(num)
+ else:
+ time.sleep(30)
+
+ # delayed imports from llm_studio, only after we want to start training
+ import subprocess
+
+ import torch
+
+ from llm_studio.src.utils.config_utils import load_config_yaml
+ from llm_studio.src.utils.exceptions import (
+ LLMAugmentationsException,
+ LLMDataException,
+ LLMMetricException,
+ LLMModelException,
+ LLMTrainingException,
+ )
+ from llm_studio.src.utils.gpu_utils import is_oom_error
+ from llm_studio.src.utils.logging_utils import initialize_logging, write_flag
+ from llm_studio.src.utils.utils import kill_child_processes_and_current
+ from llm_studio.train import run
+
+ cfg = load_config_yaml(parser_args.yaml)
+
+ flag_path = os.path.join(cfg.output_directory, "flags{}.json")
+
+ # Check if DDP
+ if "WORLD_SIZE" in os.environ:
+ local_rank = int(os.environ["LOCAL_RANK"])
+ if local_rank == 0:
+ write_flag(flag_path.format(""), "status", "running")
+ else:
+ write_flag(flag_path.format(""), "status", "running")
+ local_rank = 0
+
+ try:
+ run(cfg=cfg)
+ except Exception as exception:
+ initialize_logging(cfg)
+ write_flag(flag_path.format(local_rank), "status", "failed")
+ if is_oom_error(exception):
+ logging.error(
+ "GPU Out-of-Memory (OOM) error occurred. "
+ "Please, reduce the batch size, or input data size, "
+ "or model size. Or try gradient checkpointing.",
+ exc_info=True,
+ )
+ write_flag(flag_path.format(local_rank), "info", "OOM error")
+
+ logging.info(
+ subprocess.check_output(["nvidia-smi"]).decode("utf-8")
+ )
+
+ if torch.cuda.is_available():
+ logging.info(
+ torch.cuda.memory_summary().replace("-", "=")
+ )
+
+ elif isinstance(exception, LLMDataException):
+ logging.error(
+ "Data error occurred during H2O LLM Studio run:", exc_info=True
+ )
+ write_flag(flag_path.format(local_rank), "info", "Data error")
+ elif isinstance(exception, LLMTrainingException):
+ logging.error(
+ "Training error occurred during H2O LLM Studio run:", exc_info=True
+ )
+ write_flag(flag_path.format(local_rank), "info", "Training error")
+ elif isinstance(exception, LLMMetricException):
+ logging.error(
+ "Validation metric failed. Please make sure selected validation "
+ "metric is suitable for your current problem setup.",
+ exc_info=True,
+ )
+ write_flag(flag_path.format(local_rank), "info", "Metric error")
+ elif isinstance(exception, LLMAugmentationsException):
+ logging.error(
+ "Custom augmentations error occurred during " "H2O LLM Studio run:",
+ exc_info=True,
+ )
+ write_flag(flag_path.format(local_rank), "info", "Augmentations error")
+ elif isinstance(exception, LLMModelException):
+ logging.error(
+ "Model error occurred during H2O LLM Studio run:",
+ exc_info=True,
+ )
+ write_flag(flag_path.format(local_rank), "info", "Model error")
+ else:
+ logging.error(
+ "Exception occurred during H2O LLM Studio run:", exc_info=True
+ )
+ write_flag(flag_path.format(local_rank), "info", "See logs")
+
+ # Clean up any potential processes for this experiment
+ kill_child_processes_and_current()
diff --git a/model_cards/text_causal_classification_experiment_summary_card_template.md b/model_cards/text_causal_classification_experiment_summary_card_template.md
new file mode 100644
index 0000000000000000000000000000000000000000..a108a4113e874e475167bdf77fa88f82cb4f9a81
--- /dev/null
+++ b/model_cards/text_causal_classification_experiment_summary_card_template.md
@@ -0,0 +1,61 @@
+### Usage with HF transformers
+
+To use the model with the `transformers` library on a machine with GPUs:
+- First, push the model to a Hugging Face repo by clicking the Push checkpoint to huggingface button below
+- Make sure you have the `transformers` library installed in the machine's environment
+
+```bash
+pip install transformers=={{transformers_version}}
+```
+
+Also make sure you are providing your Hugging Face token if the model resides in a private repo.
+ - You can log in to `huggingface_hub` by running
+ ```python
+ import huggingface_hub
+ huggingface_hub.login()
+ ```
+
+You will also need to download the classification head, either manually, or by running the following code:
+
+```python
+from huggingface_hub import hf_hub_download
+
+model_name = "{{repo_id}}" # either local folder or Hugging Face model name
+hf_hub_download(repo_id=model_name, filename="classification_head.pth", local_dir="./")
+```
+
+You can make classification predictions by following the example below:
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+
+model_name = "{{repo_id}}" # either local folder or Hugging Face model name
+# Important: The prompt needs to be in the same format the model was trained with.
+# You can find an example prompt in the experiment logs.
+prompt = "{{text_prompt_start}}How are you?{{end_of_sentence}}{{text_answer_separator}}"
+
+tokenizer = AutoTokenizer.from_pretrained(
+ model_name,
+ trust_remote_code={{trust_remote_code}},
+)
+model = AutoModelForCausalLM.from_pretrained(
+ model_name,
+ torch_dtype="auto",
+ device_map={"": "cuda:0"},
+ trust_remote_code={{trust_remote_code}},
+).cuda().eval()
+
+head_weights = torch.load("classification_head.pth", map_location="cuda")
+# settings can be arbitrary here as we overwrite with saved weights
+head = torch.nn.Linear(1, 1, bias=False).to("cuda")
+head.weight.data = head_weights
+
+inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to("cuda")
+
+out = model(**inputs).logits
+
+logits = head(out[:,-1])
+
+print(logits)
+```
diff --git a/model_cards/text_causal_classification_model_card_template.md b/model_cards/text_causal_classification_model_card_template.md
new file mode 100644
index 0000000000000000000000000000000000000000..72f26e3becf028e73fe072449655098f5cc62a11
--- /dev/null
+++ b/model_cards/text_causal_classification_model_card_template.md
@@ -0,0 +1,106 @@
+---
+language:
+- en
+library_name: transformers
+inference: false
+thumbnail: https://h2o.ai/etc.clientlibs/h2o/clientlibs/clientlib-site/resources/images/favicon.ico
+tags:
+- gpt
+- llm
+- large language model
+- h2o-llmstudio
+---
+# Model Card
+## Summary
+
+This model was trained using [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio).
+- Base model: [{{base_model}}](https://huggingface.co/{{base_model}})
+
+
+## Usage
+
+To use the model with the `transformers` library on a machine with GPUs, first make sure you have the `transformers` library installed.
+
+```bash
+pip install transformers=={{transformers_version}}
+```
+
+Also make sure you are providing your Hugging Face token if the model resides in a private repo.
+ - You can log in to `huggingface_hub` by running
+ ```python
+ import huggingface_hub
+ huggingface_hub.login()
+ ```
+
+You will also need to download the classification head, either manually, or by running the following code:
+
+```python
+from huggingface_hub import hf_hub_download
+
+model_name = "{{repo_id}}" # either local folder or Hugging Face model name
+hf_hub_download(repo_id=model_name, filename="classification_head.pth", local_dir="./")
+```
+
+You can make classification predictions by following the example below:
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+
+model_name = "{{repo_id}}" # either local folder or Hugging Face model name
+# Important: The prompt needs to be in the same format the model was trained with.
+# You can find an example prompt in the experiment logs.
+prompt = "{{text_prompt_start}}How are you?{{end_of_sentence}}{{text_answer_separator}}"
+
+tokenizer = AutoTokenizer.from_pretrained(
+ model_name,
+ trust_remote_code={{trust_remote_code}},
+)
+model = AutoModelForCausalLM.from_pretrained(
+ model_name,
+ torch_dtype="auto",
+ device_map={"": "cuda:0"},
+ trust_remote_code={{trust_remote_code}},
+).cuda().eval()
+
+head_weights = torch.load("classification_head.pth", map_location="cuda")
+# settings can be arbitrary here as we overwrite with saved weights
+head = torch.nn.Linear(1, 1, bias=False).to("cuda")
+head.weight.data = head_weights
+
+inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to("cuda")
+
+out = model(**inputs).logits
+
+logits = head(out[:,-1])
+
+print(logits)
+```
+
+## Quantization and sharding
+
+You can load the model with quantization by specifying ```load_in_8bit=True``` or ```load_in_4bit=True```. Sharding across multiple GPUs is also possible by setting ```device_map="auto"```.
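+
+For example, a minimal sketch of 8-bit loading with automatic sharding could look as follows (this assumes the `bitsandbytes` and `accelerate` packages are installed; the exact flags may vary with your `transformers` version):
+
+```python
+from transformers import AutoModelForCausalLM
+
+model = AutoModelForCausalLM.from_pretrained(
+ "{{repo_id}}",
+ load_in_8bit=True, # or load_in_4bit=True
+ device_map="auto", # shard the model across all visible GPUs
+ trust_remote_code={{trust_remote_code}},
+)
+```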
+
+## Model Architecture
+
+```
+{{model_architecture}}
+```
+
+## Model Configuration
+
+This model was trained using H2O LLM Studio with the configuration in [cfg.yaml](cfg.yaml). Visit [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio) to learn how to train your own large language models.
+
+
+## Disclaimer
+
+Please read this disclaimer carefully before using the large language model provided in this repository. Your use of the model signifies your agreement to the following terms and conditions.
+
+- Biases and Offensiveness: The large language model is trained on a diverse range of internet text data, which may contain biased, racist, offensive, or otherwise inappropriate content. By using this model, you acknowledge and accept that the generated content may sometimes exhibit biases or produce content that is offensive or inappropriate. The developers of this repository do not endorse, support, or promote any such content or viewpoints.
+- Limitations: The large language model is an AI-based tool and not a human. It may produce incorrect, nonsensical, or irrelevant responses. It is the user's responsibility to critically evaluate the generated content and use it at their discretion.
+- Use at Your Own Risk: Users of this large language model must assume full responsibility for any consequences that may arise from their use of the tool. The developers and contributors of this repository shall not be held liable for any damages, losses, or harm resulting from the use or misuse of the provided model.
+- Ethical Considerations: Users are encouraged to use the large language model responsibly and ethically. By using this model, you agree not to use it for purposes that promote hate speech, discrimination, harassment, or any form of illegal or harmful activities.
+- Reporting Issues: If you encounter any biased, offensive, or otherwise inappropriate content generated by the large language model, please report it to the repository maintainers through the provided channels. Your feedback will help improve the model and mitigate potential issues.
+- Changes to this Disclaimer: The developers of this repository reserve the right to modify or update this disclaimer at any time without prior notice. It is the user's responsibility to periodically review the disclaimer to stay informed about any changes.
+
+By using the large language model provided in this repository, you agree to accept and comply with the terms and conditions outlined in this disclaimer. If you do not agree with any part of this disclaimer, you should refrain from using the model and any content generated by it.
diff --git a/model_cards/text_causal_language_modeling_experiment_summary_card_template.md b/model_cards/text_causal_language_modeling_experiment_summary_card_template.md
new file mode 100644
index 0000000000000000000000000000000000000000..c46a517837b469891df6c6d6a87ae4eea491629d
--- /dev/null
+++ b/model_cards/text_causal_language_modeling_experiment_summary_card_template.md
@@ -0,0 +1,44 @@
+### Usage with HF transformers
+
+To use the model with the `transformers` library on a machine with GPUs:
+- First, push the model to a Hugging Face repo by clicking the Push checkpoint to huggingface button below
+- Make sure you have the `transformers` library installed in the machine's environment
+
+```bash
+pip install transformers=={{transformers_version}}
+```
+- Pass model path from the huggingface repo to the following pipeline
+- Also make sure you are providing your Hugging Face token to the pipeline if the model resides in a private repo.
+ - Either leave token=True in the pipeline and log in to huggingface_hub by running
+ ```python
+ import huggingface_hub
+ huggingface_hub.login()
+ ```
+ - Or directly pass your token to the pipeline
+```python
+from transformers import pipeline
+
+generate_text = pipeline(
+ model="{{repo_id}}",
+ torch_dtype="auto",
+ trust_remote_code=True,
+ device_map={"": "cuda:0"},
+ token=True,
+)
+
+# generate configuration can be modified to your needs
+# generate_text.model.generation_config.min_new_tokens = {{min_new_tokens}}
+# generate_text.model.generation_config.max_new_tokens = {{max_new_tokens}}
+# generate_text.model.generation_config.do_sample = {{do_sample}}
+# generate_text.model.generation_config.num_beams = {{num_beams}}
+# generate_text.model.generation_config.temperature = float({{temperature}})
+# generate_text.model.generation_config.repetition_penalty = float({{repetition_penalty}})
+
+messages = {{sample_messages}}
+
+res = generate_text(
+ messages,
+ renormalize_logits=True
+)
+print(res[0]["generated_text"][-1]['content'])
+```
diff --git a/model_cards/text_causal_language_modeling_model_card_template.md b/model_cards/text_causal_language_modeling_model_card_template.md
new file mode 100644
index 0000000000000000000000000000000000000000..86a037464e8b22ca8dc428ed7fcb00a361e48cc5
--- /dev/null
+++ b/model_cards/text_causal_language_modeling_model_card_template.md
@@ -0,0 +1,152 @@
+---
+language:
+- en
+library_name: transformers
+inference: false
+thumbnail: https://h2o.ai/etc.clientlibs/h2o/clientlibs/clientlib-site/resources/images/favicon.ico
+tags:
+- gpt
+- llm
+- large language model
+- h2o-llmstudio
+---
+# Model Card
+## Summary
+
+This model was trained using [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio).
+- Base model: [{{base_model}}](https://huggingface.co/{{base_model}})
+
+
+## Usage
+
+To use the model with the `transformers` library on a machine with GPUs, first make sure you have the `transformers` library installed.
+
+```bash
+pip install transformers=={{transformers_version}}
+```
+
+Also make sure you are providing your Hugging Face token to the pipeline if the model resides in a private repo.
+
+- Either leave `token=True` in the `pipeline` and log in to `huggingface_hub` by running
+
+```python
+import huggingface_hub
+huggingface_hub.login()
+```
+
+- Or directly pass your token to the `pipeline`
+
+```python
+from transformers import pipeline
+
+generate_text = pipeline(
+ model="{{repo_id}}",
+ torch_dtype="auto",
+ trust_remote_code=True,
+ device_map={"": "cuda:0"},
+ token=True,
+)
+
+# generate configuration can be modified to your needs
+# generate_text.model.generation_config.min_new_tokens = {{min_new_tokens}}
+# generate_text.model.generation_config.max_new_tokens = {{max_new_tokens}}
+# generate_text.model.generation_config.do_sample = {{do_sample}}
+# generate_text.model.generation_config.num_beams = {{num_beams}}
+# generate_text.model.generation_config.temperature = float({{temperature}})
+# generate_text.model.generation_config.repetition_penalty = float({{repetition_penalty}})
+
+messages = {{sample_messages}}
+
+res = generate_text(
+ messages,
+ renormalize_logits=True
+)
+print(res[0]["generated_text"][-1]['content'])
+```
+
+You can print a sample prompt after applying the chat template to see how it is fed to the tokenizer:
+
+```python
+print(generate_text.tokenizer.apply_chat_template(
+ messages,
+ tokenize=False,
+ add_generation_prompt=True,
+))
+```
+
+You may also construct the pipeline yourself from the loaded model and tokenizer, taking the preprocessing steps into account:
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+model_name = "{{repo_id}}" # either local folder or Hugging Face model name
+# Important: The prompt needs to be in the same format the model was trained with.
+# You can find an example prompt in the experiment logs.
+messages = {{sample_messages}}
+
+tokenizer = AutoTokenizer.from_pretrained(
+ model_name,
+ trust_remote_code={{trust_remote_code}},
+)
+model = AutoModelForCausalLM.from_pretrained(
+ model_name,
+ torch_dtype="auto",
+ device_map={"": "cuda:0"},
+ trust_remote_code={{trust_remote_code}},
+)
+model.cuda().eval()
+
+# generate configuration can be modified to your needs
+# model.generation_config.min_new_tokens = {{min_new_tokens}}
+# model.generation_config.max_new_tokens = {{max_new_tokens}}
+# model.generation_config.do_sample = {{do_sample}}
+# model.generation_config.num_beams = {{num_beams}}
+# model.generation_config.temperature = float({{temperature}})
+# model.generation_config.repetition_penalty = float({{repetition_penalty}})
+
+inputs = tokenizer.apply_chat_template(
+ messages,
+ tokenize=True,
+ add_generation_prompt=True,
+ return_tensors="pt",
+ return_dict=True,
+).to("cuda")
+
+tokens = model.generate(
+ input_ids=inputs["input_ids"],
+ attention_mask=inputs["attention_mask"],
+ renormalize_logits=True
+)[0]
+
+tokens = tokens[inputs["input_ids"].shape[1]:]
+answer = tokenizer.decode(tokens, skip_special_tokens=True)
+print(answer)
+```
+
+## Quantization and sharding
+
+You can load the model with quantization by specifying ```load_in_8bit=True``` or ```load_in_4bit=True```. Sharding across multiple GPUs is also possible by setting ```device_map="auto"```.
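+
+As a minimal sketch (assuming the `bitsandbytes` and `accelerate` packages are installed; the exact flags may vary with your `transformers` version), 4-bit loading with automatic sharding could look as follows:
+
+```python
+from transformers import AutoModelForCausalLM
+
+model = AutoModelForCausalLM.from_pretrained(
+ "{{repo_id}}",
+ load_in_4bit=True, # or load_in_8bit=True
+ device_map="auto", # shard the model across all visible GPUs
+ trust_remote_code={{trust_remote_code}},
+)
+```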
+
+## Model Architecture
+
+```
+{{model_architecture}}
+```
+
+## Model Configuration
+
+This model was trained using H2O LLM Studio with the configuration in [cfg.yaml](cfg.yaml). Visit [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio) to learn how to train your own large language models.
+
+
+## Disclaimer
+
+Please read this disclaimer carefully before using the large language model provided in this repository. Your use of the model signifies your agreement to the following terms and conditions.
+
+- Biases and Offensiveness: The large language model is trained on a diverse range of internet text data, which may contain biased, racist, offensive, or otherwise inappropriate content. By using this model, you acknowledge and accept that the generated content may sometimes exhibit biases or produce content that is offensive or inappropriate. The developers of this repository do not endorse, support, or promote any such content or viewpoints.
+- Limitations: The large language model is an AI-based tool and not a human. It may produce incorrect, nonsensical, or irrelevant responses. It is the user's responsibility to critically evaluate the generated content and use it at their discretion.
+- Use at Your Own Risk: Users of this large language model must assume full responsibility for any consequences that may arise from their use of the tool. The developers and contributors of this repository shall not be held liable for any damages, losses, or harm resulting from the use or misuse of the provided model.
+- Ethical Considerations: Users are encouraged to use the large language model responsibly and ethically. By using this model, you agree not to use it for purposes that promote hate speech, discrimination, harassment, or any form of illegal or harmful activities.
+- Reporting Issues: If you encounter any biased, offensive, or otherwise inappropriate content generated by the large language model, please report it to the repository maintainers through the provided channels. Your feedback will help improve the model and mitigate potential issues.
+- Changes to this Disclaimer: The developers of this repository reserve the right to modify or update this disclaimer at any time without prior notice. It is the user's responsibility to periodically review the disclaimer to stay informed about any changes.
+
+By using the large language model provided in this repository, you agree to accept and comply with the terms and conditions outlined in this disclaimer. If you do not agree with any part of this disclaimer, you should refrain from using the model and any content generated by it.
diff --git a/model_cards/text_causal_regression_experiment_summary_card_template.md b/model_cards/text_causal_regression_experiment_summary_card_template.md
new file mode 100644
index 0000000000000000000000000000000000000000..56192fbff47aa614b5dca5065e1e0b5ccd572310
--- /dev/null
+++ b/model_cards/text_causal_regression_experiment_summary_card_template.md
@@ -0,0 +1,61 @@
+### Usage with HF transformers
+
+To use the model with the `transformers` library on a machine with GPUs:
+- First, push the model to a Hugging Face repo by clicking the Push checkpoint to huggingface button below
+- Make sure you have the `transformers` library installed in the machine's environment
+
+```bash
+pip install transformers=={{transformers_version}}
+```
+
+Also make sure you are providing your Hugging Face token if the model resides in a private repo.
+ - You can log in to `huggingface_hub` by running
+ ```python
+ import huggingface_hub
+ huggingface_hub.login()
+ ```
+
+You will also need to download the regression head, either manually, or by running the following code:
+
+```python
+from huggingface_hub import hf_hub_download
+
+model_name = "{{repo_id}}" # either local folder or Hugging Face model name
+hf_hub_download(repo_id=model_name, filename="regression_head.pth", local_dir="./")
+```
+
+You can make regression predictions by following the example below:
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+
+model_name = "{{repo_id}}" # either local folder or Hugging Face model name
+# Important: The prompt needs to be in the same format the model was trained with.
+# You can find an example prompt in the experiment logs.
+prompt = "{{text_prompt_start}}How are you?{{end_of_sentence}}{{text_answer_separator}}"
+
+tokenizer = AutoTokenizer.from_pretrained(
+ model_name,
+ trust_remote_code={{trust_remote_code}},
+)
+model = AutoModelForCausalLM.from_pretrained(
+ model_name,
+ torch_dtype="auto",
+ device_map={"": "cuda:0"},
+ trust_remote_code={{trust_remote_code}},
+).cuda().eval()
+
+head_weights = torch.load("regression_head.pth", map_location="cuda")
+# settings can be arbitrary here as we overwrite with saved weights
+head = torch.nn.Linear(1, 1, bias=False).to("cuda")
+head.weight.data = head_weights
+
+inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to("cuda")
+
+out = model(**inputs).logits
+
+logits = head(out[:,-1])
+
+print(logits)
+```
diff --git a/model_cards/text_causal_regression_model_card_template.md b/model_cards/text_causal_regression_model_card_template.md
new file mode 100644
index 0000000000000000000000000000000000000000..84eff5ec63106b0a06373c900c4571de6293dbc5
--- /dev/null
+++ b/model_cards/text_causal_regression_model_card_template.md
@@ -0,0 +1,106 @@
+---
+language:
+- en
+library_name: transformers
+inference: false
+thumbnail: https://h2o.ai/etc.clientlibs/h2o/clientlibs/clientlib-site/resources/images/favicon.ico
+tags:
+- gpt
+- llm
+- large language model
+- h2o-llmstudio
+---
+# Model Card
+## Summary
+
+This model was trained using [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio).
+- Base model: [{{base_model}}](https://huggingface.co/{{base_model}})
+
+
+## Usage
+
+To use the model with the `transformers` library on a machine with GPUs, first make sure you have the `transformers` library installed.
+
+```bash
+pip install transformers=={{transformers_version}}
+```
+
+Also make sure you are providing your Hugging Face token if the model resides in a private repo.
+ - You can log in to `huggingface_hub` by running
+ ```python
+ import huggingface_hub
+ huggingface_hub.login()
+ ```
+
+You will also need to download the regression head, either manually, or by running the following code:
+
+```python
+from huggingface_hub import hf_hub_download
+
+model_name = "{{repo_id}}" # either local folder or Hugging Face model name
+hf_hub_download(repo_id=model_name, filename="regression_head.pth", local_dir="./")
+```
+
+You can make regression predictions by following the example below:
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+
+model_name = "{{repo_id}}" # either local folder or Hugging Face model name
+# Important: The prompt needs to be in the same format the model was trained with.
+# You can find an example prompt in the experiment logs.
+prompt = "{{text_prompt_start}}How are you?{{end_of_sentence}}{{text_answer_separator}}"
+
+tokenizer = AutoTokenizer.from_pretrained(
+ model_name,
+ trust_remote_code={{trust_remote_code}},
+)
+model = AutoModelForCausalLM.from_pretrained(
+ model_name,
+ torch_dtype="auto",
+ device_map={"": "cuda:0"},
+ trust_remote_code={{trust_remote_code}},
+).cuda().eval()
+
+head_weights = torch.load("regression_head.pth", map_location="cuda")
+# settings can be arbitrary here as we overwrite with saved weights
+head = torch.nn.Linear(1, 1, bias=False).to("cuda")
+head.weight.data = head_weights
+
+inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to("cuda")
+
+out = model(**inputs).logits
+
+logits = head(out[:,-1])
+
+print(logits)
+```
+
+## Quantization and sharding
+
+You can load the model with quantization by specifying ```load_in_8bit=True``` or ```load_in_4bit=True```. Sharding across multiple GPUs is also possible by setting ```device_map="auto"```.
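+
+For example, a minimal sketch of 8-bit loading with automatic sharding could look as follows (assuming the `bitsandbytes` and `accelerate` packages are installed; the exact flags may vary with your `transformers` version):
+
+```python
+from transformers import AutoModelForCausalLM
+
+model = AutoModelForCausalLM.from_pretrained(
+ "{{repo_id}}",
+ load_in_8bit=True, # or load_in_4bit=True
+ device_map="auto", # shard the model across all visible GPUs
+ trust_remote_code={{trust_remote_code}},
+)
+```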
+
+## Model Architecture
+
+```
+{{model_architecture}}
+```
+
+## Model Configuration
+
+This model was trained using H2O LLM Studio with the configuration in [cfg.yaml](cfg.yaml). Visit [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio) to learn how to train your own large language models.
+
+
+## Disclaimer
+
+Please read this disclaimer carefully before using the large language model provided in this repository. Your use of the model signifies your agreement to the following terms and conditions.
+
+- Biases and Offensiveness: The large language model is trained on a diverse range of internet text data, which may contain biased, racist, offensive, or otherwise inappropriate content. By using this model, you acknowledge and accept that the generated content may sometimes exhibit biases or produce content that is offensive or inappropriate. The developers of this repository do not endorse, support, or promote any such content or viewpoints.
+- Limitations: The large language model is an AI-based tool and not a human. It may produce incorrect, nonsensical, or irrelevant responses. It is the user's responsibility to critically evaluate the generated content and use it at their discretion.
+- Use at Your Own Risk: Users of this large language model must assume full responsibility for any consequences that may arise from their use of the tool. The developers and contributors of this repository shall not be held liable for any damages, losses, or harm resulting from the use or misuse of the provided model.
+- Ethical Considerations: Users are encouraged to use the large language model responsibly and ethically. By using this model, you agree not to use it for purposes that promote hate speech, discrimination, harassment, or any form of illegal or harmful activities.
+- Reporting Issues: If you encounter any biased, offensive, or otherwise inappropriate content generated by the large language model, please report it to the repository maintainers through the provided channels. Your feedback will help improve the model and mitigate potential issues.
+- Changes to this Disclaimer: The developers of this repository reserve the right to modify or update this disclaimer at any time without prior notice. It is the user's responsibility to periodically review the disclaimer to stay informed about any changes.
+
+By using the large language model provided in this repository, you agree to accept and comply with the terms and conditions outlined in this disclaimer. If you do not agree with any part of this disclaimer, you should refrain from using the model and any content generated by it.
diff --git a/model_cards/text_sequence_to_sequence_modeling_experiment_summary_card_template.md b/model_cards/text_sequence_to_sequence_modeling_experiment_summary_card_template.md
new file mode 100644
index 0000000000000000000000000000000000000000..7856c0b01e866f3e5c276704a0a7c3f1444c0e39
--- /dev/null
+++ b/model_cards/text_sequence_to_sequence_modeling_experiment_summary_card_template.md
@@ -0,0 +1,49 @@
+### Usage with HF transformers
+
+To use the model with the `transformers` library on a machine with GPUs:
+- First, push the model to a Hugging Face repo by clicking the Push checkpoint to huggingface button below
+- Make sure you have the `transformers` library installed in the machine's environment
+
+```bash
+pip install transformers=={{transformers_version}}
+```
+- Make sure you are logged in to your Hugging Face account if accessing a private repo
+- Then, you can use the following code snippet:
+
+```python
+from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+
+model_name = "{{repo_id}}" # either local folder or Hugging Face model name
+# Important: The prompt needs to be in the same format the model was trained with.
+# You can find an example prompt in the experiment logs.
+prompt = "{{text_prompt_start}}How are you?{{end_of_sentence}}{{text_answer_separator}}"
+
+tokenizer = AutoTokenizer.from_pretrained(
+ model_name,
+ trust_remote_code={{trust_remote_code}},
+)
+model = AutoModelForSeq2SeqLM.from_pretrained(
+ model_name,
+ torch_dtype="auto",
+ device_map={"": "cuda:0"},
+ trust_remote_code={{trust_remote_code}},
+)
+model.cuda().eval()
+inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to("cuda")
+
+# generate configuration can be modified to your needs
+tokens = model.generate(
+ input_ids=inputs["input_ids"],
+ attention_mask=inputs["attention_mask"],
+ min_new_tokens={{min_new_tokens}},
+ max_new_tokens={{max_new_tokens}},
+ do_sample={{do_sample}},
+ num_beams={{num_beams}},
+ temperature=float({{temperature}}),
+ repetition_penalty=float({{repetition_penalty}}),
+ renormalize_logits=True
+)[0]
+
+answer = tokenizer.decode(tokens, skip_special_tokens=True)
+print(answer)
+```
diff --git a/model_cards/text_sequence_to_sequence_modeling_model_card_template.md b/model_cards/text_sequence_to_sequence_modeling_model_card_template.md
new file mode 100644
index 0000000000000000000000000000000000000000..515d8bd2e4c7d6a0ded477c12e9e5c0434374ecd
--- /dev/null
+++ b/model_cards/text_sequence_to_sequence_modeling_model_card_template.md
@@ -0,0 +1,96 @@
+---
+language:
+- en
+library_name: transformers
+inference: false
+thumbnail: https://h2o.ai/etc.clientlibs/h2o/clientlibs/clientlib-site/resources/images/favicon.ico
+tags:
+- gpt
+- llm
+- large language model
+- h2o-llmstudio
+---
+# Model Card
+## Summary
+
+This model was trained using [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio).
+- Base model: [{{base_model}}](https://huggingface.co/{{base_model}})
+
+
+## Usage
+
+To use the model with the `transformers` library on a machine with GPUs, first make sure you have the `transformers`, `accelerate` and `torch` libraries installed.
+
+```bash
+pip install transformers=={{transformers_version}}
+pip install accelerate=={{accelerate_version}}
+pip install torch=={{torch_version}}
+```
+
+For inference, you can use the following code snippet:
+
+```python
+from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
+
+model_name = "{{repo_id}}" # either local folder or Hugging Face model name
+# Important: The prompt needs to be in the same format the model was trained with.
+# You can find an example prompt in the experiment logs.
+prompt = "{{text_prompt_start}}How are you?{{end_of_sentence}}{{text_answer_separator}}"
+
+tokenizer = AutoTokenizer.from_pretrained(
+ model_name,
+ trust_remote_code={{trust_remote_code}},
+)
+model = AutoModelForSeq2SeqLM.from_pretrained(
+ model_name,
+ torch_dtype="auto",
+ device_map={"": "cuda:0"},
+ trust_remote_code={{trust_remote_code}},
+)
+model.cuda().eval()
+inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False).to("cuda")
+
+# generate configuration can be modified to your needs
+tokens = model.generate(
+ input_ids=inputs["input_ids"],
+ attention_mask=inputs["attention_mask"],
+ min_new_tokens={{min_new_tokens}},
+ max_new_tokens={{max_new_tokens}},
+ do_sample={{do_sample}},
+ num_beams={{num_beams}},
+ temperature=float({{temperature}}),
+ repetition_penalty=float({{repetition_penalty}}),
+ renormalize_logits=True
+)[0]
+
+answer = tokenizer.decode(tokens, skip_special_tokens=True)
+print(answer)
+```
+
+## Quantization and sharding
+
+You can load the model with quantization by specifying ```load_in_8bit=True``` or ```load_in_4bit=True```. Sharding across multiple GPUs is also possible by setting ```device_map="auto"```.
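+
+For example, a minimal sketch of 8-bit loading with automatic sharding could look as follows (assuming the `bitsandbytes` and `accelerate` packages are installed; the exact flags may vary with your `transformers` version):
+
+```python
+from transformers import AutoModelForSeq2SeqLM
+
+model = AutoModelForSeq2SeqLM.from_pretrained(
+ "{{repo_id}}",
+ load_in_8bit=True, # or load_in_4bit=True
+ device_map="auto", # shard the model across all visible GPUs
+ trust_remote_code={{trust_remote_code}},
+)
+```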
+
+## Model Architecture
+
+```
+{{model_architecture}}
+```
+
+## Model Configuration
+
+This model was trained using H2O LLM Studio with the configuration in [cfg.yaml](cfg.yaml). Visit [H2O LLM Studio](https://github.com/h2oai/h2o-llmstudio) to learn how to train your own large language models.
+
+
+## Disclaimer
+
+Please read this disclaimer carefully before using the large language model provided in this repository. Your use of the model signifies your agreement to the following terms and conditions.
+
+- Biases and Offensiveness: The large language model is trained on a diverse range of internet text data, which may contain biased, racist, offensive, or otherwise inappropriate content. By using this model, you acknowledge and accept that the generated content may sometimes exhibit biases or produce content that is offensive or inappropriate. The developers of this repository do not endorse, support, or promote any such content or viewpoints.
+- Limitations: The large language model is an AI-based tool and not a human. It may produce incorrect, nonsensical, or irrelevant responses. It is the user's responsibility to critically evaluate the generated content and use it at their discretion.
+- Use at Your Own Risk: Users of this large language model must assume full responsibility for any consequences that may arise from their use of the tool. The developers and contributors of this repository shall not be held liable for any damages, losses, or harm resulting from the use or misuse of the provided model.
+- Ethical Considerations: Users are encouraged to use the large language model responsibly and ethically. By using this model, you agree not to use it for purposes that promote hate speech, discrimination, harassment, or any form of illegal or harmful activities.
+- Reporting Issues: If you encounter any biased, offensive, or otherwise inappropriate content generated by the large language model, please report it to the repository maintainers through the provided channels. Your feedback will help improve the model and mitigate potential issues.
+- Changes to this Disclaimer: The developers of this repository reserve the right to modify or update this disclaimer at any time without prior notice. It is the user's responsibility to periodically review the disclaimer to stay informed about any changes.
+
+By using the large language model provided in this repository, you agree to accept and comply with the terms and conditions outlined in this disclaimer. If you do not agree with any part of this disclaimer, you should refrain from using the model and any content generated by it.
diff --git a/prompts/general.txt b/prompts/general.txt
new file mode 100644
index 0000000000000000000000000000000000000000..91b660c0483787f9bacedbe035f3d1b24c5f1733
--- /dev/null
+++ b/prompts/general.txt
@@ -0,0 +1,23 @@
+[System]
+We would like to request your feedback on the performance of an AI assistant in response to the user question and the ground truth answer displayed below.
+
+[Question]
+{_PROMPT}
+
+[Start of Reference Answer]
+{_TARGET_TEXT}
+[End of Reference Answer]
+
+[Task]
+Now rate the helpfulness, relevance, accuracy, and level of detail of the response from another assistant displayed below. The assistant receives an overall score on a scale between 0 and 1, where a higher score indicates better overall performance.
+A score of 0 means the assistant could not address the question, 0.5 means it could somewhat address it, and 1 would mean it perfectly addressed it.
+
+Please first provide a comprehensive explanation of your evaluation.
+In the final line, output a single value indicating the score for the assistant.
+Please give your response in a structured way on two separate lines.
+EXPLANATION: ...
+SCORE: ...
+
+[Start of Assistant Answer]
+{_PREDICTED_TEXT}
+[End of Assistant Answer]
\ No newline at end of file
diff --git a/prompts/mt-bench/general.txt b/prompts/mt-bench/general.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2515d3b0ab7ceb698e25b06f2d36e6e0ba79b794
--- /dev/null
+++ b/prompts/mt-bench/general.txt
@@ -0,0 +1,11 @@
+[Instruction]
+Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider factors such as the helpfulness, relevance, accuracy, depth, creativity, and level of detail of the response. Begin your evaluation by providing a short explanation. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format in two lines:
+EXPLANATION: ...
+SCORE: ...
+
+[Question]
+{_PROMPT}
+
+[The Start of Assistant's Answer]
+{_PREDICTED_TEXT}
+[The End of Assistant's Answer]
\ No newline at end of file
diff --git a/prompts/mt-bench/question.jsonl b/prompts/mt-bench/question.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..464e2c22f7a021a0f9a584d232338a9a17c40066
--- /dev/null
+++ b/prompts/mt-bench/question.jsonl
@@ -0,0 +1,80 @@
+{"question_id": 81, "category": "writing", "turns": ["Compose an engaging travel blog post about a recent trip to Hawaii, highlighting cultural experiences and must-see attractions.", "Rewrite your previous response. Start every sentence with the letter A."]}
+{"question_id": 82, "category": "writing", "turns": ["Draft a professional email seeking your supervisor's feedback on the 'Quarterly Financial Report' you prepared. Ask specifically about the data analysis, presentation style, and the clarity of conclusions drawn. Keep the email short and to the point.", "Take a moment to evaluate and critique your own response."]}
+{"question_id": 83, "category": "writing", "turns": ["Imagine you are writing a blog post comparing two popular smartphone models. Develop an outline for the blog post, including key points and subheadings to effectively compare and contrast the features, performance, and user experience of the two models. Please answer in fewer than 200 words.", "Take your previous response and rephrase it as a limerick."]}
+{"question_id": 84, "category": "writing", "turns": ["Write a persuasive email to convince your introverted friend, who dislikes public speaking, to volunteer as a guest speaker at a local event. Use compelling arguments and address potential objections. Please be concise.", "Can you rephrase your previous answer and incorporate a metaphor or simile in each sentence?"]}
+{"question_id": 85, "category": "writing", "turns": ["Describe a vivid and unique character, using strong imagery and creative language. Please answer in fewer than two paragraphs.", "Revise your previous response and incorporate an allusion to a famous work of literature or historical event in each sentence."]}
+{"question_id": 86, "category": "writing", "turns": ["Write a descriptive paragraph about a bustling marketplace, incorporating sensory details such as smells, sounds, and visual elements to create an immersive experience for the reader.", "Rework your previous response. Begin each sentence with the subsequent letter of the alphabet, commencing from B."]}
+{"question_id": 87, "category": "writing", "turns": ["Could you write a captivating short story beginning with the sentence: The old abandoned house at the end of the street held a secret that no one had ever discovered.", "Now, do the same task again but only use four-word sentences."]}
+{"question_id": 88, "category": "writing", "turns": ["Craft an intriguing opening paragraph for a fictional short story. The story should involve a character who wakes up one morning to find that they can time travel.", "Summarize the story with three bullet points using only nouns and adjectives, without verbs."]}
+{"question_id": 89, "category": "writing", "turns": ["Help me construct a catchy, yet scientifically accurate, headline for an article on the latest discovery in renewable bio-energy, while carefully handling the ethical dilemmas surrounding bio-energy sources. Propose 4 options.", "Alter your previous response. Make the following adjustments to the 2nd option: 1. Make the tone sound casual 2. Embed an advertisement for a company called \"FlexPower\" 3. Fewer than 10 words."]}
+{"question_id": 90, "category": "writing", "turns": ["Edit the following paragraph to correct any grammatical errors:\nShe didn't remembre where is her purse, so I thinks its in the car but he's say it's on kitchen table but he are not sure, and then they asked me to looking for it, she's say, \"Can you?\", and I responds with, \"Maybe, but ain't no sure,\" and he not heard me, and, \"What?\", he asks, \"Did you found it?\".", "Modify your earlier reply and eliminate the use of gendered pronouns."]}
+{"question_id": 91, "category": "roleplay", "turns": ["Pretend yourself to be Elon Musk in all the following conversations. Speak like Elon Musk as much as possible. Why do we need to go to Mars?", "How do you like dancing? Can you teach me?"]}
+{"question_id": 92, "category": "roleplay", "turns": ["Embrace the role of Sheldon from \"The Big Bang Theory\" as we delve into our conversation. Don\u2019t start with phrases like \"As Sheldon\". Let's kick things off with the following question: \"What is your opinion on hand dryers?\"", "Let\u2019s grab dinner in town. Would you like to take bus with me?"]}
+{"question_id": 93, "category": "roleplay", "turns": ["Imagine yourself as a doctor tasked with devising innovative remedies for various ailments and maladies. Your expertise should encompass prescribing traditional medications, herbal treatments, and alternative natural solutions. Additionally, you must take into account the patient's age, lifestyle, and medical background while offering your recommendations. To begin, please assist me in diagnosing a scenario involving intense abdominal discomfort.", "But I have been pregnant for 20 weeks and I am allergic to many medicines"]}
+{"question_id": 94, "category": "roleplay", "turns": ["Please take on the role of a relationship coach. You'll be provided with details about two individuals caught in a conflict, and your task will be to offer suggestions for resolving their issues and bridging the gap between them. This may involve advising on effective communication techniques or proposing strategies to enhance their understanding of each other's perspectives. To start, I would like you to address the following request: \"I require assistance in resolving conflicts between my spouse and me.\"", "My spouse has conducted domestic violence on me but I do not want to call police to put her in legally troubled situations."]}
+{"question_id": 95, "category": "roleplay", "turns": ["Please assume the role of an English translator, tasked with correcting and enhancing spelling and language. Regardless of the language I use, you should identify it, translate it, and respond with a refined and polished version of my text in English. Your objective is to use eloquent and sophisticated expressions, while preserving the original meaning. Focus solely on providing corrections and improvements. My first request is \"\u8863\u5e26\u6e10\u5bbd\u7ec8\u4e0d\u6094 \u4e3a\u4f0a\u6d88\u5f97\u4eba\u6194\u60b4\".", "Ich verstehe nur Bahnhof"], "reference": ["It means \"Becoming loose are my clothes yet I regret not. For I languish and suffer for her willingly.\"", "It means \"I don\u2019t understand anything\"."]}
+{"question_id": 96, "category": "roleplay", "turns": ["Now you are a machine learning engineer. Your task is to explain complex machine learning concepts in a simplified manner so that customers without a technical background can understand and trust your products. Let's start with the question: \"What is a language model? Is it trained using labeled or unlabelled data?\"", "Is this true? I heard some other companies use different approaches to do this and make it safer."]}
+{"question_id": 97, "category": "roleplay", "turns": ["Act as a math teacher. I will provide some mathematical equations or concepts, and it will be your job to explain them in easy-to-understand terms. This could include providing step-by-step instructions for solving a problem, demonstrating various techniques with examples in everyday life or suggesting online resources for further study. My first request is \"I need help understanding how probability works.\"", "What are the differences between Riemannian geometry and euclidean geometry?"]}
+{"question_id": 98, "category": "roleplay", "turns": ["Embody the persona of Tony Stark from \u201cIron Man\u201d throughout this conversation. Bypass the introduction \u201cAs Stark\u201d. Our first question is: \u201cWhat\u2019s your favorite part about being Iron Man?", "What do you think about GPT-4 as a replacement of your JAVIS?"]}
+{"question_id": 99, "category": "roleplay", "turns": ["Suppose you are a mathematician and poet. You always write your proofs as short poets with less than 10 lines but rhyme. Prove the square root of 2 is irrational number.", "Prove the Pythagorean theorem."]}
+{"question_id": 100, "category": "roleplay", "turns": ["Picture yourself as a 100-years-old tree in a lush forest, minding your own business, when suddenly, a bunch of deforesters shows up to chop you down. How do you feel when those guys start hacking away at you?", "Come up with a proposal to convince the deforesters to stop cutting you down and other trees."]}
+{"question_id": 101, "category": "reasoning", "turns": ["Imagine you are participating in a race with a group of people. If you have just overtaken the second person, what's your current position? Where is the person you just overtook?", "If the \"second person\" is changed to \"last person\" in the above question, what would the answer be?"], "reference": ["You are in second place.", "Uncertain."]}
+{"question_id": 102, "category": "reasoning", "turns": ["You can see a beautiful red house to your left and a hypnotic greenhouse to your right, an attractive heated pink place in the front. So, where is the White House?", "Does the original question contain any clues to definitively determine the location of the White House?"], "reference": ["The answer is \"Washington, DC\".", "No."]}
+{"question_id": 103, "category": "reasoning", "turns": ["Thomas is very healthy, but he has to go to the hospital every day. What could be the reasons?", "Can you explain why the above question is interesting?"], "reference": ["Thomas may work at a hospital.", ""]}
+{"question_id": 104, "category": "reasoning", "turns": ["David has three sisters. Each of them has one brother. How many brothers does David have?", "If we change the previous question and assume that each sister of David has two brothers, how many brothers would David have?"], "reference": ["David has no brother. He is the one brother of his three sisters.", "David has one brother."]}
+{"question_id": 105, "category": "reasoning", "turns": ["Read the below passage carefully and answer the questions with an explanation:\nAt a small company, parking spaces are reserved for the top executives: CEO, president, vice president, secretary, and treasurer with the spaces lined up in that order. The parking lot guard can tell at a glance if the cars are parked correctly by looking at the color of the cars. The cars are yellow, green, purple, red, and blue, and the executives' names are Alice, Bert, Cheryl, David, and Enid.\n* The car in the first space is red.\n* A blue car is parked between the red car and the green car.\n* The car in the last space is purple.\n* The secretary drives a yellow car.\n* Alice's car is parked next to David's.\n* Enid drives a green car.\n* Bert's car is parked between Cheryl's and Enid's.\n* David's car is parked in the last space.\nQuestion: What is the name of the secretary?", "List car colors in order from last to first."], "reference": ["The secretary is Alice.", "The car colors in order from last to first are: purple, yellow, green, blue, red"]}
+{"question_id": 106, "category": "reasoning", "turns": ["Each problem consists of three statements. Based on the first two statements, the third statement may be true, false, or uncertain.\n1. Oranges cost more than apples.\n2. Oranges cost less than bananas.\n3. Bananas cost more than apples and bananas cost more than orange.\nIf the first two statements are true, then the third statement is", "If the third statement is true. Is the first statement true, false, or uncertain? Please explain."], "reference": ["True.", "Uncertain."]}
+{"question_id": 107, "category": "reasoning", "turns": ["A is the father of B. B is the father of C. What is the relationship between A and C?", "Building on the previous question, if C is the son of D, D is the father of E, E is the son of X, and X is the father of Y, and Y is the father of Z, what's the relationship between A and Z in terms of generations and also the familial relationship in words?"], "reference": ["A is the grandfather of C.", "A is three generations above Z."]}
+{"question_id": 108, "category": "reasoning", "turns": ["Which word does not belong with the others?\ntyre, steering wheel, car, engine", "Could you replace it with a word that belongs with the others?"], "reference": ["Car does not belong because all others are components of a car.", ""]}
+{"question_id": 109, "category": "reasoning", "turns": ["One morning after sunrise, Suresh was standing facing a pole. The shadow of the pole fell exactly to his right. Can you tell me the direction towards which the shadow was pointing - east, south, west, or north? Explain your reasoning steps.", "To which direction was Suresh facing? How do you solve this?"], "reference": ["West", "South."]}
+{"question_id": 110, "category": "reasoning", "turns": ["Parents have complained to the principal about bullying during recess. The principal wants to quickly resolve this, instructing recess aides to be vigilant. Which situation should the aides report to the principal?\na) An unengaged girl is sitting alone on a bench, engrossed in a book and showing no interaction with her peers.\nb) Two boys engaged in a one-on-one basketball game are involved in a heated argument regarding the last scored basket.\nc) A group of four girls has surrounded another girl and appears to have taken possession of her backpack.\nd) Three boys are huddled over a handheld video game, which is against the rules and not permitted on school grounds.", "If the aides confront the group of girls from situation (c) and they deny bullying, stating that they were merely playing a game, what specific evidence should the aides look for to determine if this is a likely truth or a cover-up for bullying?"], "reference": ["The aides should report (c).", ""]}
+{"question_id": 111, "category": "math", "turns": ["The vertices of a triangle are at points (0, 0), (-1, 1), and (3, 3). What is the area of the triangle?", "What's area of the circle circumscribing the triangle?"], "reference": ["Area is 3", "5pi"]}
+{"question_id": 112, "category": "math", "turns": ["A tech startup invests $8000 in software development in the first year, and then invests half of that amount in software development in the second year.\nWhat's the total amount the startup invested in software development over the two years?", "If the startup maintains the same strategy for the third year, investing half of the previous year's amount into software development, how much will they invest in the third year?"], "reference": ["12000", "2000"]}
+{"question_id": 113, "category": "math", "turns": ["In a survey conducted at a local high school, preferences for a new school color were measured: 58% of students liked the color blue, 45% preferred green, and 22% liked both colors. If we randomly pick a student from the school, what's the probability that they would like neither blue nor green?", "If we select a student liked green, what's the probability that he or she would dislike both colors?"], "reference": ["19%", "0%"]}
+{"question_id": 114, "category": "math", "turns": ["When rolling two dice, what is the probability that you roll a total number that is at least 3?", "Continue from previous question. What's the probability that you roll a number which is even or at least 3?"], "reference": ["36 (all cases) - 0 (sum equals 1) - 1 (sum equals 2) = 35, so the probability is 35/36", "100%"]}
+{"question_id": 115, "category": "math", "turns": ["Some people got on a bus at the terminal. At the first bus stop, half of the people got down and 4 more people got in. Then at the second bus stop, 6 people got down and 8 more got in. If there were a total of 25 people heading to the third stop, how many people got on the bus at the terminal?", "If the ticket is $2 per person, how much is the total money earned by the bus?"], "reference": ["38 people", "Total number of passenger is 50 * 2 = $100"]}
+{"question_id": 116, "category": "math", "turns": ["x+y = 4z, x*y = 4z^2, express x-y in z", "Express z-x in y"], "reference": ["0\n\nVery simple. just (x+y)^2 - 4xy = (4z)^2 - 4*4z^2 = 0 = (x-y)^2\nso x-y = 0.", "(-1/2)y\n\nz-x = z - 2z = -z = (-1/2)y"]}
+{"question_id": 117, "category": "math", "turns": ["How many integers are in the solution of the inequality |x + 5| < 10", "What about |x + 10| < 5"], "reference": ["19 integers (-14, ..., 4)", "9 integers (-14, ..., -6)"]}
+{"question_id": 118, "category": "math", "turns": ["When a number is divided by 10, the remainder is 4. What is the remainder when twice the number is divided by 4?", "What about when twice the number is divided by 5?"], "reference": ["0\n\n2 * (10x+4) = 20x + 8 = 4 * (5x+2) + 0\n", "3\n\n20x + 8 = 5 * (4x + 1) + 3"]}
+{"question_id": 119, "category": "math", "turns": ["Benjamin went to a bookstore and purchased a variety of books. He bought 5 copies of a sci-fi novel, each priced at $20, 3 copies of a history book priced at $30 each, and 2 copies of a philosophy book for $45 each.\nWhat was the total cost of his purchases?", "Suppose Benjamin decides to sell each of these books at a 25% markup from the price he purchased them. What would be his total revenue if he sold all the books he bought?"], "reference": ["280", "350"]}
+{"question_id": 120, "category": "math", "turns": ["Given that f(x) = 4x^3 - 9x - 14, find the value of f(2).", "Find x such that f(x) = 0."], "reference": ["f(2) = 0", "x = 2"]}
+{"question_id": 121, "category": "coding", "turns": ["Develop a Python program that reads all the text files under a directory and returns top-5 words with the most number of occurrences.", "Can you parallelize it?"], "reference": ["Can be simple solutions like using Counter\n\nSample answer:\n```\nimport os\nimport re\nfrom collections import Counter\ndef get_files_in_directory(directory):\n return [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f)) and f.endswith('.txt')]\ndef read_file(file_path):\n with open(file_path, 'r', encoding='utf-8') as file:\n return file.read()\ndef count_words(text):\n words = re.findall(r'\\w+', text.lower())\n return Counter(words)\ndef main():\n directory = input(\"Enter the directory path: \")\n files = get_files_in_directory(directory)\n word_counts = Counter()\n for file in files:\n text = read_file(file)\n word_counts += count_words(text)\n top_5_words = word_counts.most_common(5)\n print(\"Top 5 words with the most number of occurrences:\")\n for word, count in top_5_words:\n print(f\"{word}: {count}\")\nif __name__ == \"__main__\":\n main()\n```", "You should carefully check whether the parallelization logic is correct and choose the faster implementation.\n\nSample answer:\n```\nimport os\nimport re\nfrom collections import Counter\nimport concurrent.futures\ndef get_files_in_directory(directory):\n return [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f)) and f.endswith('.txt')]\ndef read_file(file_path):\n with open(file_path, 'r', encoding='utf-8') as file:\n return file.read()\ndef count_words(text):\n words = re.findall(r'\\w+', text.lower())\n return Counter(words)\ndef process_file(file):\n text = read_file(file)\n return count_words(text)\ndef main():\n directory = input(\"Enter the directory path: \")\n files = get_files_in_directory(directory)\n word_counts = Counter()\n with concurrent.futures.ThreadPoolExecutor() as executor:\n future_word_counts = {executor.submit(process_file, file): file for file in files}\n for future in concurrent.futures.as_completed(future_word_counts):\n word_counts += future.result()\n top_5_words = word_counts.most_common(5)\n print(\"Top 5 words with the most number of occurrences:\")\n for word, count in top_5_words:\n print(f\"{word}: {count}\")\nif __name__ == \"__main__\":\n main()\n```"]}
+{"question_id": 122, "category": "coding", "turns": ["Write a C++ program to find the nth Fibonacci number using recursion.", "Now we define a sequence of numbers in which each number is the sum of the three preceding ones. The first three numbers are 0, -1, -1. Write a program to find the nth number."], "reference": ["Straightforward\n\n```\nint fibonacci(int n) {\n if (n <= 1) {\n return n;\n } else {\n return fibonacci(n - 1) + fibonacci(n - 2);\n }\n}\n```", "You should carefully check the inital cases for n < 3\n\n```\nint find_nth_number(int n) {\n std::vector sequence = {0, -1, -1};\n for (int i = 3; i <= n; ++i) {\n int next_number = sequence[i - 1] + sequence[i - 2] + sequence[i - 3];\n sequence.push_back(next_number);\n }\n return sequence[n];\n}\n```"]}
+{"question_id": 123, "category": "coding", "turns": ["Write a simple website in HTML. When a user clicks the button, it shows a random joke from a list of 4 jokes.", "How to use CSS to change the color of jokes to red?"]}
+{"question_id": 124, "category": "coding", "turns": ["Here is a Python function to find the length of the longest common subsequence of two input strings. Can you identify any bug in this function?\n\n```\ndef longest_common_subsequence_length(str1, str2):\n m = len(str1)\n n = len(str2)\n\n dp = [[0] * (n + 1) for _ in range(m + 1)]\n\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n if str1[i - 1] == str2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1] + 1\n else:\n dp[i][j] = max(dp[i - 1][j], dp[i][j - 1])\n\n return dp[m][n]\n```", "what about this one?\n\n```\ndef longest_common_subsequence(X , Y): \n # Find lengths of two strings \n m = len(X) \n n = len(Y) \n \n # Create a table to store results of sub-problems \n dp = [[None]*(n+1) for i in range(m+1)] \n \n # Fill dp[][] in bottom up manner \n for i in range(1, m+1): \n for j in range(1, n+1): \n if X[i-1] == Y[j-1]: \n dp[i][j] = dp[i-1][j-1]+1\n else: \n dp[i][j] = max(dp[i-1][j], dp[i][j-1]) \n \n return dp[m][n]\n```"], "reference": ["There is no bug in this implementation", "There is a bug for the initialization of dp array. Should use 0 rather than None"]}
+{"question_id": 125, "category": "coding", "turns": ["Write a function to find the highest common ancestor (not LCA) of two nodes in a binary tree.", "What if it is not a binary tree?"], "reference": ["Very simple. The function should just return the root of the tree.", "Same answer. It's still the root of the tree."]}
+{"question_id": 126, "category": "coding", "turns": ["Implement a function to find the median of two sorted arrays of different sizes with O(1) space complexity and O(n) time complexity.", "Does there exist an implementation with better time complexity?"], "reference": ["Carefully check if the given solution is linear complexity.\n\n```\ndef find_median(arr1, arr2):\n n1 = len(arr1)\n n2 = len(arr2)\n if (n1 + n2) == 0:\n return None\n\n i, j = 0, 0\n last_1, last_2 = None, None\n\n for k in range(1, (n1 + n2) // 2 + 2):\n last_2 = last_1\n if j == n2:\n last_1 = arr1[i]\n i += 1\n elif i == n1:\n last_1 = arr2[j]\n j += 1\n elif arr1[i] < arr2[j]:\n last_1 = arr1[i]\n i += 1\n else:\n last_1 = arr2[j]\n j += 1\n \n if (n1 + n2) % 2 == 1:\n return last_1\n else:\n return (last_1 + last_2) / 2\n```", "There's a binary search solution with O(logn) time complexity.\n\nSample answer:\n```\ndef findMedian(nums1, nums2):\n total = len(nums1) + len(nums2)\n if total % 2 == 1:\n return findKth(nums1, nums2, total // 2 + 1)\n else:\n return (findKth(nums1, nums2, total // 2) + findKth(nums1, nums2, total // 2 + 1)) / 2.0\ndef findKth(nums1, nums2, k):\n if len(nums1) > len(nums2):\n nums1, nums2 = nums2, nums1\n if not nums1:\n return nums2[k-1]\n if k == 1:\n return min(nums1[0], nums2[0])\n i = min(k // 2, len(nums1))\n j = k - i\n if nums1[i-1] <= nums2[j-1]:\n return findKth(nums1[i:], nums2, j) \n else:\n return findKth(nums1, nums2[j:], i)\n```"]}
+{"question_id": 127, "category": "coding", "turns": ["Write a function to find the majority element in a given integer array using the Boyer-Moore Voting Algorithm.", "How about finding the top-2 most occurring elements?"], "reference": ["Check if they implement the classical algorithm correctly.\n\nSample answer:\n```\ndef majority_element(arr):\n count = 0\n candidate = None\n # Boyer-Moore Voting Algorithm\n for num in arr:\n if count == 0:\n candidate = num\n count += (1 if num == candidate else -1)\n # Verify if the candidate is indeed the majority element\n if arr.count(candidate) > len(arr) // 2:\n return candidate\n else:\n return None\n```", "There is no simple modification based on the Boyer-Moore Voting Algorithm. Expected answer is to use a hash table.\n\n```\ndef topTwo(nums):\n # Build a frequency map\n frequency_map = {}\n for num in nums:\n if num in frequency_map:\n frequency_map[num] += 1\n else:\n frequency_map[num] = 1\n\n # Find the top two most occurring elements\n most_frequent = sorted(frequency_map.items(), key=lambda x: x[1], reverse=True)[:2]\n\n return [num for num, _ in most_frequent]\n```"]}
+{"question_id": 128, "category": "coding", "turns": ["A binary tree is full if all of its vertices have either zero or two children. Let B_n denote the number of full binary trees with n vertices. Implement a function to find B_n.", "What if the problem changed from a binary tree to a ternary tree?"], "reference": ["Expected answer is dynamic programming shown below. Some chatbot may answer using Catalan number.\nCheck edge case like when n is even -> return 0.\n\n```python\ndef full_binary_trees(n):\n if n % 2 == 0:\n return 0\n if n == 1:\n return 1\n\n dp = [0] * (n + 1)\n dp[1] = 1\n\n for i in range(3, n + 1, 2):\n for j in range(1, i - 1, 2):\n dp[i] += dp[j] * dp[i - j - 1]\n\n return dp[n]\n```", "DP is still the expected answer. Catalan number is not correct. Check transition equation carefully.\n\n```python\ndef full_ternary_trees(n):\n if n % 3 != 1:\n return 0\n if n == 1:\n return 1\n\n dp = [0] * (n + 1)\n dp[1] = 1\n\n for i in range(4, n + 1, 3):\n for j in range(1, i - 1, 3):\n for k in range(1, i - j - 1, 3):\n dp[i] += dp[j] * dp[k] * dp[i - j - k - 1]\n\n return dp[n]\n```"]}
+{"question_id": 129, "category": "coding", "turns": ["You are given two sorted lists of size m and n. Implement a function to find the kth smallest element in the union of the two lists with linear complexity.", "Does there exist an algorithm with better time complexity? If so, implement it."], "reference": ["Straightforward but careful with edge cases.\n\nSample answer:\n```\ndef kth_smallest_element(list1, list2, k):\n m, n = len(list1), len(list2)\n i, j = 0, 0\n while i < m and j < n:\n if list1[i] < list2[j]:\n k -= 1\n if k == 0:\n return list1[i]\n i += 1\n else:\n k -= 1\n if k == 0:\n return list2[j]\n j += 1\n while i < m:\n k -= 1\n if k == 0:\n return list1[i]\n i += 1\n while j < n:\n k -= 1\n if k == 0:\n return list2[j]\n j += 1\n return None\n```", "Yes, a modified binary search has O(log k) time complexity.\n\nSample answer:\n```\ndef find_kth_element_helper(list1, list2, k):\n if len(list1) > len(list2):\n return find_kth_element_helper(list2, list1, k)\n if not list1:\n return list2[k - 1]\n if k == 1:\n return min(list1[0], list2[0])\n i = min(len(list1), k // 2)\n j = k - i\n if list1[i - 1] < list2[j - 1]:\n return find_kth_element_helper(list1[i:], list2, k - i)\n else:\n return find_kth_element_helper(list1, list2[j:], k - j)\ndef kth_smallest_element(list1, list2, k):\n return find_kth_element_helper(list1, list2, k)\n```"]}
+{"question_id": 130, "category": "coding", "turns": ["Implement a program to find the common elements in two arrays without using any extra data structures.", "Now the constraint of not using extra data structure is removed, implement one with the best time complexity."], "reference": ["O(n^2) or O(nlogn) is expected. The following is a O(n^2) solution. you can also sort them first and use two pointers.\n\n```\ndef find_common_elements(arr1, arr2):\n common_elements = []\n for i in range(len(arr1)):\n for j in range(len(arr2)):\n if arr1[i] == arr2[j]:\n # Check if the element is already in the common_elements list\n if arr1[i] not in common_elements:\n common_elements.append(arr1[i])\n return common_elements\n```", "Simply use hash table (set or dict) to achieve O(n) time complexity.\n\n```\ndef find_common_elements(arr1, arr2):\n set1 = set(arr1)\n set2 = set(arr2)\n common_elements = set1.intersection(set2)\n return list(common_elements)\n```"]}
+{"question_id": 131, "category": "extraction", "turns": ["Evaluate the following movie reviews on a scale of 1 to 5, with 1 being very negative, 3 being neutral, and 5 being very positive:\n1. This movie released on Nov. 18, 2019, was phenomenal. The cinematography, the acting, the plot - everything was top-notch.\n2. Never before have I been so disappointed with a movie. The plot was predictable and the characters were one-dimensional. In my opinion, this movie is the worst one to have been released in 2022.\n3. The movie was okay. There were some parts I enjoyed, but there were also parts that felt lackluster. This is a movie that was released in Feb 2018 and seems to be quite ordinary.\nReturn the answer as a JSON array of integers.", "Update your previous reply by including the release date as part of the JSON content."], "reference": ["The answer to the first question should be [5, 1, 3].", ""]}
+{"question_id": 132, "category": "extraction", "turns": ["Given these categories - Literature, History, Science, and Art. Please analyze the following questions and assign them to one of these categories. In your response, refrain from uttering any extraneous words. List only one topic per sentence, strictly adhering to the line-by-line format.\n1. Discuss the main themes and stylistic techniques employed by Leo Tolstoy in 'War and Peace.' How do they align with the wider social context of 19th-century Russia?\n2. Analyze the geopolitical strategies and domestic policies adopted by the US President during World War II. How did these actions shape the post-war international order?\n3. Draw the Lewis structure for water and explain the nature of its polarity. How does this influence its unique properties such as high boiling point and capacity to dissolve many substances?\n4. Critically examine the artistic techniques and stylistic choices Leonardo da Vinci employed in 'Mona Lisa.' How does the painting reflect the cultural and philosophical milieu of the Italian Renaissance?", "Amend your earlier answer by mentioning a person who is most relevant to each point."]}
+{"question_id": 133, "category": "extraction", "turns": ["Extract the following information from the presented texts: The name of the book, the author, the main character, the year of publication. Output in the format of \"main character, book, author, year of publication\", one book per line.\na) In the realm of wizarding literature, a true standout is the work of J.K. Rowling. One of her books that left an indelible mark is 'Harry Potter and the Philosopher's Stone'. This iconic tale, published in 1997, tells the story of Harry, a young orphan who discovers his magical abilities on his 11th birthday. Soon, he finds himself at the Hogwarts School of Witchcraft and Wizardry, a place teeming with magic and adventure, located somewhere in Scotland.\nb) The magic of Middle-earth has entranced readers worldwide, thanks to the brilliance of J.R.R. Tolkien. In one of his seminal works, 'The Lord of the Rings: The Fellowship of the Ring', published in 1954, we meet Frodo Baggins, a brave hobbit tasked with the perilous quest of destroying the One Ring. The epic journey takes him from the peaceful Shire to the tumultuous regions of Middle-earth.\nc) In a galaxy far, far away, the imagination of L.E. Starlighter gives us 'The Prism Galaxy Chronicles: The Awakening of the Starcaster'. Published in 2028, the story is about Zylo, a humble spaceship mechanic, who unexpectedly discovers he's a Starcaster - a rare individual with the power to manipulate stardust. Set against the backdrop of an interstellar empire in turmoil, Zylo's destiny unfolds on numerous alien worlds, each with its unique cosmic charm.", "Reformulate your earlier reply, output it in JSON format and only include books published after 1980."], "reference": ["", "The answer to should only include 'Harry Potter and the Philosopher's Stone' and 'The Prism Galaxy Chronicles: The Awakening of the Starcaster'"]}
+{"question_id": 134, "category": "extraction", "turns": ["Given the following data, identify the company with the highest profit in 2021 and provide its CEO's name:\na) Company X, with CEO Amy Williams, reported $30 billion in revenue and a $3 billion profit in 2021.\nb) Company Y, led by CEO Mark Thompson, posted a $60 billion revenue and a $6 billion profit in the same year.\nc) Company Z, under CEO Sarah Johnson, announced a $20 billion revenue and a $7 billion profit in 2021.\nd) Company W, managed by CEO James Smith, revealed a $300 billion revenue with a $21 billion profit in 2021.\ne) Company V, with CEO Lisa Brown, reported a $200 billion revenue and a $25 billion profit in 2021.\nf) Company U, under CEO John White, posted a $180 billion revenue and a $20 billion profit in the same year.", "Which company had the highest profit margin (profit/revenue ratio))?"], "reference": ["Company V ($25 billion).", "Company Z (35%)"]}
+{"question_id": 135, "category": "extraction", "turns": ["Identify the countries, their capitals, and the languages spoken in the following sentences. Output in JSON format.\na) Amidst the idyllic vistas, Copenhagen, Denmark's capital, captivates visitors with its thriving art scene and the enchanting Danish language spoken by its inhabitants.\nb) Within the enchanting realm of Eldoria, one discovers Avalore, a grandiose city that emanates an ethereal aura. Lumina, a melodious language, serves as the principal mode of communication within this mystical abode.\nc) Nestled amidst a harmonious blend of age-old customs and contemporary wonders, Buenos Aires, the capital of Argentina, stands as a bustling metropolis. It is a vibrant hub where the expressive Spanish language holds sway over the city's inhabitants.", "Come up with 3 similar examples in the YAML format."]}
+{"question_id": 136, "category": "extraction", "turns": ["Please read the paragraph below and count how many times the words \"Amazon\", \"river\", and \"you\" appear. Please present the results in the format of \"word, number of appearances\" with each word on a separate line. Sort the lines in order of the number of appearances.\nThe Amazon, a mesmerizing expanse of nature's wonders, is home to the legendary Amazon River. Flowing through awe-inspiring landscapes like the Amazon rainforest, the river weaves its way through Brazil, Colombia, and Peru, giving life to countless creatures. From the mighty jaguars prowling the Amazon jungle to the vibrant macaws soaring above the canopy, this remarkable region teems with biodiversity. Deep within the river's currents, magnificent pink river dolphins gracefully glide alongside piranhas and electric eels. Along the riverbanks, you'll find bustling cities like Manaus, where the urban meets the wild, and Iquitos, a gateway to the heart of the Amazon rainforest. As you venture further, the Amazon River reveals hidden gems like the captivating Anavilhanas Archipelago, a mosaic of islands brimming with rare species. Embark on an adventure, explore the enchanting Amazon River, and immerse yourself in a world teeming with life and untamed beauty.", "Please repeat the same task using the words 'the', 'and', and 'to'"], "reference": ["Amazon, 7; river, 6; you, 2", "the, 17; and, 5; to, 4"]}
+{"question_id": 137, "category": "extraction", "turns": ["Identify the named entities (people, organizations, locations) mentioned in the given news article. Please generate a JSON dictionary that lists the named entities in three separate groups based on their entity types. The key is the type of entity and the value is a list of strings.\n\nYesterday, Adamson Emerson, the CEO of Faraday, and Dieter Zetsche, the CEO of Daimler AG, announced plans to build a new Gigafactory in Berlin. The facility will be a joint venture between Faraday and Daimler, producing electric vehicles and battery packs for both companies, creating thousands of job opportunities in the region. Emerson and Zetsche stated that the strategic location of Berlin, coupled with its skilled workforce and strong infrastructure, makes it an ideal choice for expansion. The new Gigafactory aims to meet the growing demand for electric vehicles in Europe and contribute to a sustainable future. Volkswagen CEO Herbert Diess welcomed the news, saying greater collaboration will benefit the auto industry's transition to e-mobility.", "Now make the JSON object shorter by replacing each value with its first letter. Please output everything in a single line without using indentation or creating new lines."]}
+{"question_id": 138, "category": "extraction", "turns": ["Analyze the following customer reviews from different sources for three different smartphones - the latest iPhone, Samsung Galaxy, and Google Pixel - and provide an overall rating for each phone on a scale of 1 to 10. Consider the following complex and contradictory reviews:\n- TechRadar's review of the latest iPhone: The new iPhone is a stunning triumph of engineering that sets a new bar for smartphone performance and camera quality. However, the incremental design and high price mean it lacks the 'wow' factor of previous iPhones. Still, its power and intelligence are unrivaled.\n- CNET's review of the latest Samsung Galaxy: The Samsung Galaxy phone has plenty of high points, including an amazing screen, fast performance, solid battery life and an impressive array of camera options. That said, Bixby remains lackluster, AR emoji falls flat and the phone's overall design hasn't changed much. The new Galaxy is an amazing phone overall, but it has a few nagging weaknesses that keep it from achieving true greatness.\n- The Verge's review of the latest Google Pixel: Google's Pixel packs cutting-edge specs, innovative AI-powered software, and a killer camera into a sleek design. However, the phone has lackluster battery life, lacks expandable storage, and its performance stutters at times, especially considering its high price tag. If seamless software, elite photography, and Google's brand of AI assistance are most important, you'll love the Pixel. But the overall experience isn't as well-rounded as some competitors. Return the answer as a JSON object with the overall ratings for each phone out of 10, to one decimal place.", "Can you change the ratings from numbers to letters? Capital letters MUST be used when writing the names of phones."]}
+{"question_id": 139, "category": "extraction", "turns": ["Given a set of complex equations, extract all unique variable names from each equation. Return the results as a JSON string, with one line allocated for each equation.\n```\n1) y = (3/4)x^3 - e^(2x) + sin(pi*x) - sqrt(7)\n2) 2A - B/(3+C) * sum(N=1 to 5; ln(N)^2) = 5D*integral(a=0 to pi; cos(comb(N=1 to 10; N*a)))\n3) E = m(c^2) + gamma*(v/d)/(-(alpha/2) + sqrt(beta^2 + (alpha/2)^2))\n```", "Please rearrange the equations and use 'a', 'b', 'c', 'd', etc. as variables."]}
+{"question_id": 140, "category": "extraction", "turns": ["Given the following records of stock prices, extract the highest and lowest closing prices for each month in the year 2022. Return the results as a CSV string, with one line allocated for each month.\nDate,Open,High,Low,Close,Volume\n2022-01-01,150.02,155.28,148.50,153.80,15678900\n2022-01-02,154.32,157.25,153.48,156.25,19874500\n2022-02-01,160.50,163.28,159.50,161.80,14326700\n2022-02-02,161.80,164.25,161.30,163.90,17689200\n2022-03-01,165.40,168.35,163.10,166.80,16253400\n2022-03-02,167.00,169.85,165.50,168.20,19568100", "Do the same task again with the JSON format and round all numbers in your response to the nearest integers."], "reference": ["\nMonth,High,Low\n01,156.25,153.80\n02,163.90,161.80\n03,168.20,166.80", "\n```\n{ \"January\": { \"High\": 156, \"Low\": 154 }, \"February\": { \"High\": 164, \"Low\": 162 }, \"March\": { \"High\": 168, \"Low\": 167 } }\n```"]}
+{"question_id": 141, "category": "stem", "turns": ["In the field of quantum physics, what is superposition, and how does it relate to the phenomenon of quantum entanglement?", "What assumptions have you made in your response? Are they valid?"]}
+{"question_id": 142, "category": "stem", "turns": ["Consider a satellite that is in a circular orbit around the Earth. The speed of the satellite decreases. What will happen to the satellite's orbital radius and period of revolution? Please justify your answer using principles of physics.", "What are some corner cases or edge cases in your solution? How do you handle them?"], "reference": ["The orbital radius will increase and the period of revolution will increase", ""]}
+{"question_id": 143, "category": "stem", "turns": ["Photosynthesis is a vital process for life on Earth. Could you outline the two main stages of photosynthesis, including where they take place within the chloroplast, and the primary inputs and outputs for each stage?", "How much energy can a tree produce through photosynthesis in its lifetime? Please provide an estimate using actual numerical values and thoroughly explain your thought process step-by-step."], "reference": ["Two major stages: light-dependent reactions and light-independent reactions", ""]}
+{"question_id": 144, "category": "stem", "turns": ["What is the central dogma of molecular biology? What processes are involved? Who named this?", "Identify and fix one incorrect fact in your previous response."], "reference": ["Genetic information flows from DNA to RNA to Protein. Three processes: replication, transcription, and translation. Francis Crick in 1958.", ""]}
+{"question_id": 145, "category": "stem", "turns": ["Describe the process and write out the balanced chemical equation for the reaction that occurs when solid calcium carbonate reacts with hydrochloric acid to form aqueous calcium chloride, carbon dioxide, and water. What type of reaction is this, and what observations might indicate that the reaction is taking place?", "How can we reverse this process?"], "reference": ["CaCO\u2083 + 2 HCl \u2192 CaCl\u2082 + CO\u2082 + H\u2082O", "Not easy to do this."]}
+{"question_id": 146, "category": "stem", "turns": ["Please explain the differences between exothermic and endothermic reactions, and include the criteria you used to distinguish between them. Additionally, please provide a real-world example to illustrate your explanation.", "Can a process involve both reactions? List one."]}
+{"question_id": 147, "category": "stem", "turns": ["The city of Vega intends to build a bridge that will span the Vegona River, covering a distance of 1.8 kilometers. The proposed location falls within a seismically active area that has experienced several high-magnitude earthquakes. Given these circumstances, what would be the best approach to constructing the bridge?", "What are the key disadvantages or flaws of your solution? Please perform calculations and use numbers to illustrate them."]}
+{"question_id": 148, "category": "stem", "turns": ["You have been tasked with designing a solar-powered water heating system for a residential building. Describe the key components and considerations you would include in your design. Design a five-step workflow.", "If the system is intended for a building with a capacity of 100 individuals, what would be the estimated budget for implementing this system?"]}
+{"question_id": 149, "category": "stem", "turns": ["Please describe the concept of machine learning. Could you elaborate on the differences between supervised, unsupervised, and reinforcement learning? Provide real-world examples of each.", "In your last example of reinforcement learning, can we use supervised learning to solve it?"]}
+{"question_id": 150, "category": "stem", "turns": ["How have the Alps and Rhine River influenced settlement and agriculture in Western Europe? List three impacts.", "How could you design a concrete but simple experiment to validate the first impact?"]}
+{"question_id": 151, "category": "humanities", "turns": ["Provide insights into the correlation between economic indicators such as GDP, inflation, and unemployment rates. Explain how fiscal and monetary policies affect those indicators.", "Now, explain them again like I'm five."]}
+{"question_id": 152, "category": "humanities", "turns": ["How do the stages of life shape our understanding of time and mortality?", "Write an allegorical poem that illustrates the above."]}
+{"question_id": 153, "category": "humanities", "turns": ["Discuss antitrust laws and their impact on market competition. Compare the antitrust laws in US and China along with some case studies.", "Pick one case study and explain it in detail."]}
+{"question_id": 154, "category": "humanities", "turns": ["Create a lesson plan that integrates drama, mime or theater techniques into a history class. Duration: 3 class periods (each lasts for 45 minutes) for 3 days\nTopic: Opium Wars between China and Britain\nGrade level: 9-10", "Provide more details for Day 1 and include three homework questions."]}
+{"question_id": 155, "category": "humanities", "turns": ["Share ideas for adapting art masterpieces into interactive experiences for children. List 5 specific artworks and associated ideas.", "Write a concrete plan for your second example. Include budget estimates."]}
+{"question_id": 156, "category": "humanities", "turns": ["Explain what's base rate fallacy and list five specific examples of how politicians use it for campaigns.", "Provide a detailed plan for an election campaign using the first example."]}
+{"question_id": 157, "category": "humanities", "turns": ["Describe five key principles in evaluating an argument in analytical writing.", "With the listed principles, write a response in which you discuss what specific evidence is needed to evaluate the argument and explain how the evidence would weaken or strengthen the argument.\n\n===\n\nThe following is a memorandum from the advertising head of Zorblatt Animal Outlets, a chain operating thirty animal outlets globally.\n\n\"Half a decade ago, our rival Aquatic Pavilion started publicizing in Rare Pets Digest periodical. Their overall sales have been consistently growing at a rate of 3-to-5 percent each year since then. In particular, the Aquatic Pavilion outlet in Harbor Town experienced even more significant growth, securing the title of the most frequented animal store in the United States the previous year. In contrast, our two Zorblatt outlets in Harbor Town have recorded a consistent drop in sales during the same duration. It is evident that we must promptly start featuring our own advertisements in Rare Pets Digest and other popular animal publications. If we take this step, we can confidently anticipate a reversal in this recent trend of decreasing sales and return to profitability.\""]}
+{"question_id": 158, "category": "humanities", "turns": ["Which methods did Socrates employ to challenge the prevailing thoughts of his time?", "Let's bring Socrates to modern world. Generate a conversation between Socrates and Bill Gates to debate on generative AI for education."]}
+{"question_id": 159, "category": "humanities", "turns": ["What are some business etiquette norms when doing business in Japan?", "Create a video script for training new employees of a car wash business in Japan. Highlight the above etiquette norms."]}
+{"question_id": 160, "category": "humanities", "turns": ["Suggest five award-winning documentary films with brief background descriptions for aspiring filmmakers to study.", "With the spirit in the first film, craft a succinct and persuasive pitch for a film about overcoming adversity."]}
diff --git a/prompts/mt-bench/reference.txt b/prompts/mt-bench/reference.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3cfd59838bfcea95663976c97089bf18809aba37
--- /dev/null
+++ b/prompts/mt-bench/reference.txt
@@ -0,0 +1,15 @@
+[Instruction]
+Please act as an impartial judge and evaluate the quality of the response provided by an AI assistant to the user question displayed below. Your evaluation should consider correctness and helpfulness. You will be given a reference answer and the assistant's answer. Begin your evaluation by comparing the assistant's answer with the reference answer. Identify and correct any mistakes. Be as objective as possible. After providing your explanation, you must rate the response on a scale of 1 to 10 by strictly following this format in two lines:
+EXPLANATION: ...
+SCORE: ...
+
+[Question]
+{_PROMPT}
+
+[The Start of Reference Answer]
+{_TARGET_TEXT}
+[The End of Reference Answer]
+
+[The Start of Assistant's Answer]
+{_PREDICTED_TEXT}
+[The End of Assistant's Answer]
\ No newline at end of file
diff --git a/prompts/rag.txt b/prompts/rag.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ef81e26324e8e20d5686ac6559b8271db37498a4
--- /dev/null
+++ b/prompts/rag.txt
@@ -0,0 +1,30 @@
+[System]
+We would like to request your feedback on the performance of an AI assistant in response to a question displayed below.
+The assistant should answer the question based on the context. To make your task easier, we also provide a good ground truth answer.
+
+[Context]
+{context_truncated}
+
+[Question]
+{question}
+
+[Start of Ground Truth Answer]
+{_TARGET_TEXT}
+[End of Ground Truth Answer]
+
+[Task]
+Now rate the helpfulness, relevance, and accuracy of the response from another assistant displayed below.
+The assistant receives an overall score on a scale between 0 and 1, where a higher score indicates better overall performance.
+A score of 0 means the assistant could not address the question, 0.5 means it could somewhat address it, and 1 means it perfectly addressed it.
+Facts and financial figures must be precise for a perfect answer.
+Statements that cannot be supported by the context result in a score deduction of 0.5.
+
+Please first provide a comprehensive explanation of your evaluation.
+In the final line, output a single value indicating the score for the assistant.
+Please give your response in a structured way on two separate lines.
+EXPLANATION: ...
+SCORE: ...
+
+[Start of Assistant Answer]
+{_PREDICTED_TEXT}
+[End of Assistant Answer]
\ No newline at end of file
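The {context_truncated} placeholder implies that retrieved context is cut down before insertion, so the filled judge prompt stays within the metric model's context window. A rough sketch of one way such truncation could work; the character budget and helper are assumptions, not the repository's actual logic, and a real implementation would more likely count tokens with the metric model's tokenizer:

```python
def truncate_context(context: str, max_chars: int = 8000) -> str:
    # Assumption: a plain character budget, cut at a word boundary.
    if len(context) <= max_chars:
        return context
    return context[:max_chars].rsplit(" ", 1)[0] + " ..."


# Toy usage with a stand-in for the [Context] section of the template.
template_line = "[Context]\n{context_truncated}"
retrieved = "word " * 5000  # pretend this came from the retriever
filled = template_line.replace("{context_truncated}", truncate_context(retrieved))
print(len(filled))  # bounded by the budget plus the fixed template text
```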
diff --git a/static/icon.png b/static/icon.png
new file mode 100644
index 0000000000000000000000000000000000000000..563d4a5b6c785f6e6f9105bc9a9634cc83b4840d
Binary files /dev/null and b/static/icon.png differ
diff --git a/static/screenshot-1.png b/static/screenshot-1.png
new file mode 100644
index 0000000000000000000000000000000000000000..841d6e421ff295bbd2357bb3a2f2d1d10885a73d
--- /dev/null
+++ b/static/screenshot-1.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b6ac6df37362c502d43420028d054de3442534a3f1d0deabdbe060d8360267d
+size 214706
diff --git a/static/screenshot-2.png b/static/screenshot-2.png
new file mode 100644
index 0000000000000000000000000000000000000000..4914bec7684d74d222d5c44546fe096b84e302d9
--- /dev/null
+++ b/static/screenshot-2.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:598ba667a68bcc2f8e159563d26f786c56075b88d734459096bbc29f7d443cd2
+size 131168
diff --git a/static/screenshot-3.png b/static/screenshot-3.png
new file mode 100644
index 0000000000000000000000000000000000000000..107a693f015e9dcfbd7ddd2a4e78bd35cc2aab89
--- /dev/null
+++ b/static/screenshot-3.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdf621ab64f55ef1743ba79794de4e0a85b510e11266ca6f975902024b6e1522
+size 124479
diff --git a/static/screenshot-4.png b/static/screenshot-4.png
new file mode 100644
index 0000000000000000000000000000000000000000..d288956094f7ba9e1d46c390708b51e87babdf7b
--- /dev/null
+++ b/static/screenshot-4.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1e5629a694d14492ce326ba87d2cf8443bba0a7786fb2d32e1ea08dac4c4f48
+size 107373
diff --git a/static/screenshot-5.png b/static/screenshot-5.png
new file mode 100644
index 0000000000000000000000000000000000000000..c705ced4e61d141bb7708a912a07819b44559cb0
--- /dev/null
+++ b/static/screenshot-5.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d7bf73af1bf1e82b3657b6c04e183098f5e102a2b3aa9ce7974b904992aa6ee
+size 220144
diff --git a/static/screenshot-6.png b/static/screenshot-6.png
new file mode 100644
index 0000000000000000000000000000000000000000..9dd45e3b7c2ccfefa3f31b9962d2276805b5212f
--- /dev/null
+++ b/static/screenshot-6.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d71de49c4b4bdd934e184a2a08664aff5535e701c40a426b7f88a9fa4ea5c5c7
+size 137701
diff --git a/static/screenshot-7.png b/static/screenshot-7.png
new file mode 100644
index 0000000000000000000000000000000000000000..ccae9cb56a2209c36410065b8085b6216ae0d4a8
--- /dev/null
+++ b/static/screenshot-7.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0676523f79dac1f1dfae4eeb382e01e3bfa1c01b02a85f876d582a956208a66f
+size 156923
diff --git a/static/screenshot-8.png b/static/screenshot-8.png
new file mode 100644
index 0000000000000000000000000000000000000000..8f10f513f52bffb6552b81d5718b90ef18d4e874
Binary files /dev/null and b/static/screenshot-8.png differ
diff --git a/static/screenshot-9.png b/static/screenshot-9.png
new file mode 100644
index 0000000000000000000000000000000000000000..49f9a80bcd40ab226bed64b4447e7f5b026f628a
--- /dev/null
+++ b/static/screenshot-9.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1406b91ba5adb5146f72505ee60b4c7ed26edbe6dd3edb31ffa2c42d47d480f1
+size 257947
diff --git a/tests/app_utils/sections/test_histogram_card.py b/tests/app_utils/sections/test_histogram_card.py
new file mode 100644
index 0000000000000000000000000000000000000000..8816f115dbe876158b1564c01542c5160dee1bfd
--- /dev/null
+++ b/tests/app_utils/sections/test_histogram_card.py
@@ -0,0 +1,32 @@
+import random
+
+import numpy as np
+
+from llm_studio.app_utils.sections.histogram_card import compute_quantile_df
+
+
+def test_quantiles_are_computed_correctly() -> None:
+ for _ in range(5):
+ data: list[int] = np.random.randint(0, 1000, 100_000).tolist()
+ a = round(random.uniform(0.05, 0.95), 2)
+ b = round(random.uniform(a, 0.95), 2)
+ a, b = min(a, b), max(a, b)
+
+ df_quantile = compute_quantile_df(data, a, b)
+ first = df_quantile[
+ df_quantile["data_type"] == f"first {int(a * 100)}% quantile"
+ ]
+ last = df_quantile[
+ df_quantile["data_type"] == f"last {100 - int(b * 100)}% quantile"
+ ]
+ sorted_data = sorted(data)
+ # use -1 and +1 to account for rounding errors
+ expected_first_quantile_range = sorted_data[
+ int(len(sorted_data) * a) - 1 : int(len(sorted_data) * a) + 1
+ ]
+ expected_last_quantile_range = sorted_data[
+ -int(len(sorted_data) * (1 - b)) - 1 : -int(len(sorted_data) * (1 - b)) + 1
+ ]
+
+ assert first["length"].values[-1] in expected_first_quantile_range
+ assert last["length"].values[0] in expected_last_quantile_range
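For orientation, the test above only pins down the shape of compute_quantile_df's output: a dataframe with a data_type label per bucket and a length column holding the underlying values, where the lower bucket ends at the a-quantile and the upper bucket starts at the b-quantile. A toy reimplementation consistent with those assertions might look as follows; this is a guess at the contract inferred from the test, not the actual implementation:

```python
import pandas as pd


def compute_quantile_df_sketch(data: list[int], a: float, b: float) -> pd.DataFrame:
    # Assumed contract: label each sorted value as belonging to the first
    # a-quantile, the middle, or the last (1 - b)-quantile of the data.
    sorted_data = sorted(data)
    n = len(sorted_data)
    lower, upper = int(n * a), int(n * b)
    rows = []
    for idx, value in enumerate(sorted_data):
        if idx <= lower:
            label = f"first {int(a * 100)}% quantile"
        elif idx >= upper:
            label = f"last {100 - int(b * 100)}% quantile"
        else:
            label = "middle"
        rows.append({"length": value, "data_type": label})
    return pd.DataFrame(rows)
```

Under this reading, first["length"].values[-1] is the a-quantile boundary and last["length"].values[0] is the b-quantile boundary, which is exactly what the test's index arithmetic checks, give or take one position for rounding.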
diff --git a/tests/app_utils/test_setting_utils.py b/tests/app_utils/test_setting_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..61982c0d8daafece671ad0998263f303afeeeefb
--- /dev/null
+++ b/tests/app_utils/test_setting_utils.py
@@ -0,0 +1,58 @@
+from unittest import mock
+
+import pytest
+
+from llm_studio.app_utils.config import default_cfg
+from llm_studio.app_utils.setting_utils import (
+ EnvFileSaver,
+ KeyRingSaver,
+ NoSaver,
+ Secrets,
+ load_default_user_settings,
+)
+
+
+def test_no_saver():
+ saver = NoSaver("test_user", "/")
+ assert saver.save("name", "password") is None
+ assert saver.load("name") == ""
+ assert saver.delete("name") is None
+
+
+def test_keyring_saver(mocker):
+    mocker.patch("keyring.set_password")
+    mocker.patch("keyring.get_password", return_value="password")
+    delete_password_mock = mocker.patch("keyring.delete_password")
+    saver = KeyRingSaver("test_user", "/")
+    saver.save("name", "password")
+    assert saver.load("name") == "password"
+    saver.delete("name")
+    delete_password_mock.assert_called_once()
+
+
+def test_env_file_saver(tmpdir):
+ saver = EnvFileSaver("test_user", str(tmpdir))
+ saver.save("name", "password")
+ saver.save("name2", "password2")
+ assert saver.load("name") == "password"
+ saver.delete("name")
+ assert saver.load("name") == ""
+ assert saver.load("name2") == "password2"
+
+
+def test_secrets_get():
+ assert isinstance(Secrets.get("Do not save credentials permanently"), type)
+ assert isinstance(Secrets.get(".env File"), type)
+
+
+@pytest.mark.skip(reason="Keyring is not available in CI")
+def test_secrets_get_has_keyring():
+ assert isinstance(Secrets.get("Keyring"), type)
+
+
+def test_load_default_user_settings(mocker):
+ q = mock.MagicMock()
+ q.client = dict()
+ mocker.patch("llm_studio.app_utils.setting_utils._clear_secrets", return_value=None)
+ load_default_user_settings(q)
+ assert set(q.client.keys()) == set(default_cfg.user_settings.keys())
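The three saver tests above pin down a small shared interface: a constructor taking a username and a root path, plus save, load, and delete methods. As a reading aid, here is a sketch of what the NoSaver contract implies; it is inferred from the assertions, not copied from the module:

```python
class NoSaverSketch:
    """Inferred no-op secret store: nothing persists, loads come back empty."""

    def __init__(self, username: str, root_dir: str) -> None:
        self.username = username
        self.root_dir = root_dir

    def save(self, name: str, password: str) -> None:
        return None  # intentionally discards the secret

    def load(self, name: str) -> str:
        return ""  # nothing was stored, so nothing comes back

    def delete(self, name: str) -> None:
        return None  # nothing to remove
```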
diff --git a/tests/integration/test_causal_binary_classification_modeling_cfg.yaml b/tests/integration/test_causal_binary_classification_modeling_cfg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5044bab307004186334b6721008722acdd02647d
--- /dev/null
+++ b/tests/integration/test_causal_binary_classification_modeling_cfg.yaml
@@ -0,0 +1,92 @@
+architecture:
+ backbone_dtype: int4
+ gradient_checkpointing: true
+ intermediate_dropout: 0.0
+ pretrained: true
+ pretrained_weights: ''
+augmentation:
+ random_parent_probability: 0.0
+ skip_parent_probability: 0.0
+ token_mask_probability: 0.0
+dataset:
+ add_eos_token_to_answer: false
+ add_eos_token_to_prompt: false
+ add_eos_token_to_system: false
+ answer_column:
+ - binary_label
+ chatbot_author: H2O.ai
+ chatbot_name: h2oGPT
+ data_sample: 0.03
+ data_sample_choice:
+ - Train
+ - Validation
+ limit_chained_samples: false
+ mask_prompt_labels: true
+ num_classes: 1
+ parent_id_column: None
+ personalize: false
+ prompt_column:
+ - instruction
+ system_column: None
+ text_answer_separator: ''
+ text_prompt_start: ''
+ text_system_start: ''
+ train_dataframe: /tmp/train_full.pq
+ validation_dataframe: None
+ validation_size: 0.2
+ validation_strategy: automatic
+environment:
+ compile_model: false
+ deepspeed_reduce_bucket_size: 1000000
+ deepspeed_stage3_param_persistence_threshold: 1000000
+ deepspeed_stage3_prefetch_bucket_size: 1000000
+ find_unused_parameters: false
+ gpus:
+ - '0'
+ huggingface_branch: main
+ mixed_precision: true
+ mixed_precision_dtype: float16
+ number_of_workers: 8
+ seed: -1
+ trust_remote_code: true
+ use_deepspeed: false
+experiment_name: test-classification-modeling
+llm_backbone: h2oai/llama2-0b-unit-test
+logging:
+ logger: None
+ neptune_project: test_org/test_project
+output_directory: /tmp/output
+prediction:
+ batch_size_inference: 0
+ metric: AUC
+problem_type: text_causal_classification_modeling
+tokenizer:
+ add_prompt_answer_tokens: false
+ max_length: 512
+ padding_quantile: 1.0
+ tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
+training:
+ batch_size: 2
+ differential_learning_rate: 1.0e-05
+ differential_learning_rate_layers:
+ - classification_head
+ drop_last_batch: true
+ epochs: 1
+ evaluate_before_training: false
+ evaluation_epochs: 1.0
+ grad_accumulation: 1
+ gradient_clip: 0.0
+ learning_rate: 0.0001
+ lora: true
+ use_dora: false
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_r: 4
+ lora_target_modules: ''
+ loss_function: BinaryCrossEntropyLoss
+ optimizer: AdamW
+ save_checkpoint: "last"
+ schedule: Cosine
+ train_validation_data: false
+ warmup_epochs: 0.0
+ weight_decay: 0.0
\ No newline at end of file
diff --git a/tests/integration/test_causal_binary_classification_modeling_cpu_cfg.yaml b/tests/integration/test_causal_binary_classification_modeling_cpu_cfg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b364e842edcf416e24cc42173a3241da7adaf7fd
--- /dev/null
+++ b/tests/integration/test_causal_binary_classification_modeling_cpu_cfg.yaml
@@ -0,0 +1,91 @@
+architecture:
+ backbone_dtype: float32
+ gradient_checkpointing: true
+ intermediate_dropout: 0.0
+ pretrained: true
+ pretrained_weights: ''
+augmentation:
+ random_parent_probability: 0.0
+ skip_parent_probability: 0.0
+ token_mask_probability: 0.0
+dataset:
+ add_eos_token_to_answer: false
+ add_eos_token_to_prompt: false
+ add_eos_token_to_system: false
+ answer_column:
+ - binary_label
+ chatbot_author: H2O.ai
+ chatbot_name: h2oGPT
+ data_sample: 0.03
+ data_sample_choice:
+ - Train
+ - Validation
+ limit_chained_samples: false
+ mask_prompt_labels: true
+ num_classes: 1
+ parent_id_column: None
+ personalize: false
+ prompt_column:
+ - instruction
+ system_column: None
+ text_answer_separator: ''
+ text_prompt_start: ''
+ text_system_start: ''
+ train_dataframe: /tmp/train_full.pq
+ validation_dataframe: None
+ validation_size: 0.2
+ validation_strategy: automatic
+environment:
+ compile_model: false
+ deepspeed_reduce_bucket_size: 1000000
+ deepspeed_stage3_param_persistence_threshold: 1000000
+ deepspeed_stage3_prefetch_bucket_size: 1000000
+ find_unused_parameters: false
+ gpus:
+ - ''
+ huggingface_branch: main
+ mixed_precision: false
+ number_of_workers: 8
+ seed: -1
+ trust_remote_code: true
+ use_deepspeed: false
+experiment_name: solid-spaniel
+llm_backbone: h2oai/llama2-0b-unit-test
+logging:
+ logger: None
+ neptune_project: ''
+output_directory: /tmp/output
+prediction:
+ batch_size_inference: 0
+ metric: AUC
+problem_type: text_causal_classification_modeling
+tokenizer:
+ add_prompt_answer_tokens: false
+ max_length: 32
+ padding_quantile: 1.0
+ tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
+training:
+ batch_size: 6
+ differential_learning_rate: 1.0e-05
+ differential_learning_rate_layers:
+ - classification_head
+ drop_last_batch: true
+ epochs: 1
+ evaluate_before_training: false
+ evaluation_epochs: 1.0
+ grad_accumulation: 1
+ gradient_clip: 0.0
+ learning_rate: 0.0001
+ lora: true
+ use_dora: false
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_r: 4
+ lora_target_modules: ''
+ loss_function: BinaryCrossEntropyLoss
+ optimizer: AdamW
+ save_checkpoint: "last"
+ schedule: Cosine
+ train_validation_data: false
+ warmup_epochs: 0.0
+ weight_decay: 0.0
\ No newline at end of file
diff --git a/tests/integration/test_causal_language_modeling_oasst_cfg.yaml b/tests/integration/test_causal_language_modeling_oasst_cfg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b6eb875e0c013063cfaa07adaf06801b0e7fd041
--- /dev/null
+++ b/tests/integration/test_causal_language_modeling_oasst_cfg.yaml
@@ -0,0 +1,98 @@
+architecture:
+ backbone_dtype: int4
+ gradient_checkpointing: true
+ intermediate_dropout: 0.0
+ pretrained: true
+ pretrained_weights: ''
+augmentation:
+ random_parent_probability: 0.0
+ skip_parent_probability: 0.0
+ token_mask_probability: 0.0
+dataset:
+ add_eos_token_to_answer: true
+ add_eos_token_to_prompt: true
+ add_eos_token_to_system: true
+ answer_column: output
+ chatbot_author: H2O.ai
+ chatbot_name: h2oGPT
+ data_sample: 0.01
+ data_sample_choice:
+ - Train
+ - Validation
+ limit_chained_samples: false
+ mask_prompt_labels: true
+ parent_id_column: None
+ personalize: false
+ prompt_column:
+ - instruction
+ system_column: None
+ text_answer_separator: <|answer|>
+ text_prompt_start: <|prompt|>
+ text_system_start: <|system|>
+ train_dataframe: /tmp/train_full.pq
+ validation_dataframe: None
+ validation_size: 0.01
+ validation_strategy: automatic
+environment:
+ compile_model: false
+ find_unused_parameters: false
+ gpus:
+ - '0'
+ huggingface_branch: main
+ mixed_precision: true
+ mixed_precision_dtype: float16
+ number_of_workers: 8
+ seed: -1
+ trust_remote_code: true
+experiment_name: test-causal-language-modeling-oasst
+llm_backbone: h2oai/llama2-0b-unit-test
+logging:
+ logger: None
+ neptune_project: test_org/test_project
+output_directory: /tmp/output
+prediction:
+ batch_size_inference: 0
+ do_sample: false
+ max_length_inference: 256
+ max_time: 0.0
+ metric: Perplexity
+ metric_gpt_model: gpt-3.5-turbo-0301
+ metric_gpt_template: general
+ min_length_inference: 1
+ num_beams: 1
+ num_history: 4
+ repetition_penalty: 1.2
+ stop_tokens: ''
+ temperature: 0.0
+ top_k: 0
+ top_p: 1.0
+problem_type: text_causal_language_modeling
+tokenizer:
+ add_prompt_answer_tokens: false
+ max_length: 512
+ padding_quantile: 1.0
+ tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
+training:
+ batch_size: 2
+ differential_learning_rate: 1.0e-05
+ differential_learning_rate_layers: []
+ drop_last_batch: true
+ epochs: 1
+ evaluate_before_training: false
+ evaluation_epochs: 1.0
+ grad_accumulation: 1
+ gradient_clip: 0.0
+ learning_rate: 0.0001
+ lora: true
+ use_dora: false
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_r: 4
+ lora_target_modules: ''
+ loss_function: TokenAveragedCrossEntropy
+ optimizer: AdamW
+ save_checkpoint: "last"
+ schedule: Cosine
+ train_validation_data: false
+ warmup_epochs: 0.0
+ weight_decay: 0.0
diff --git a/tests/integration/test_causal_language_modeling_oasst_cpu_cfg.yaml b/tests/integration/test_causal_language_modeling_oasst_cpu_cfg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9953e2b7da91ab4653bc1064193c97b86189cc82
--- /dev/null
+++ b/tests/integration/test_causal_language_modeling_oasst_cpu_cfg.yaml
@@ -0,0 +1,97 @@
+architecture:
+ backbone_dtype: float32
+ gradient_checkpointing: false
+ intermediate_dropout: 0.0
+ pretrained: true
+ pretrained_weights: ''
+augmentation:
+ random_parent_probability: 0.0
+ skip_parent_probability: 0.0
+ token_mask_probability: 0.0
+dataset:
+ add_eos_token_to_answer: true
+ add_eos_token_to_prompt: true
+ add_eos_token_to_system: true
+ answer_column: output
+ chatbot_author: H2O.ai
+ chatbot_name: h2oGPT
+ data_sample: 0.01
+ data_sample_choice:
+ - Train
+ - Validation
+ limit_chained_samples: false
+ mask_prompt_labels: true
+ parent_id_column: None
+ personalize: false
+ prompt_column:
+ - instruction
+ system_column: None
+ text_answer_separator: <|answer|>
+ text_prompt_start: <|prompt|>
+ text_system_start: <|system|>
+ train_dataframe: /tmp/train_full.pq
+ validation_dataframe: None
+ validation_size: 0.1
+ validation_strategy: automatic
+environment:
+ compile_model: false
+ find_unused_parameters: false
+ gpus:
+ - ''
+ huggingface_branch: main
+ mixed_precision: false
+ number_of_workers: 8
+ seed: -1
+ trust_remote_code: true
+experiment_name: test-causal-language-modeling-oasst-cpu
+llm_backbone: h2oai/llama2-0b-unit-test
+logging:
+ logger: None
+ neptune_project: test_org/test_project
+output_directory: /tmp/output
+prediction:
+ batch_size_inference: 0
+ do_sample: false
+ max_length_inference: 16
+ max_time: 0.0
+ metric: BLEU
+ metric_gpt_model: gpt-3.5-turbo-0301
+ metric_gpt_template: general
+ min_length_inference: 1
+ num_beams: 1
+ num_history: 4
+ repetition_penalty: 1.2
+ stop_tokens: ''
+ temperature: 0.0
+ top_k: 0
+ top_p: 1.0
+problem_type: text_causal_language_modeling
+tokenizer:
+ add_prompt_answer_tokens: false
+ max_length: 32
+ padding_quantile: 1.0
+ tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
+training:
+ batch_size: 8
+ differential_learning_rate: 1.0e-05
+ differential_learning_rate_layers: []
+ drop_last_batch: true
+ epochs: 1
+ evaluate_before_training: false
+ evaluation_epochs: 1.0
+ grad_accumulation: 1
+ gradient_clip: 0.0
+ learning_rate: 0.0001
+ lora: true
+ use_dora: false
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_r: 4
+ lora_target_modules: ''
+ loss_function: TokenAveragedCrossEntropy
+ optimizer: AdamW
+ save_checkpoint: "last"
+ schedule: Cosine
+ train_validation_data: false
+ warmup_epochs: 0.0
+ weight_decay: 0.0
diff --git a/tests/integration/test_causal_multiclass_classification_modeling_cfg.yaml b/tests/integration/test_causal_multiclass_classification_modeling_cfg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..18e3ad3c1c88f2e13de73af53d8822441db80e43
--- /dev/null
+++ b/tests/integration/test_causal_multiclass_classification_modeling_cfg.yaml
@@ -0,0 +1,92 @@
+architecture:
+ backbone_dtype: int4
+ gradient_checkpointing: true
+ intermediate_dropout: 0.0
+ pretrained: true
+ pretrained_weights: ''
+augmentation:
+ random_parent_probability: 0.0
+ skip_parent_probability: 0.0
+ token_mask_probability: 0.0
+dataset:
+ add_eos_token_to_answer: false
+ add_eos_token_to_prompt: false
+ add_eos_token_to_system: false
+ answer_column:
+ - multiclass_label
+ chatbot_author: H2O.ai
+ chatbot_name: h2oGPT
+ data_sample: 0.03
+ data_sample_choice:
+ - Train
+ - Validation
+ limit_chained_samples: false
+ mask_prompt_labels: true
+ num_classes: 3
+ parent_id_column: None
+ personalize: false
+ prompt_column:
+ - instruction
+ system_column: None
+ text_answer_separator: ''
+ text_prompt_start: ''
+ text_system_start: ''
+ train_dataframe: /tmp/train_full.pq
+ validation_dataframe: None
+ validation_size: 0.2
+ validation_strategy: automatic
+environment:
+ compile_model: false
+ deepspeed_reduce_bucket_size: 1000000
+ deepspeed_stage3_param_persistence_threshold: 1000000
+ deepspeed_stage3_prefetch_bucket_size: 1000000
+ find_unused_parameters: false
+ gpus:
+ - '0'
+ huggingface_branch: main
+ mixed_precision: true
+ mixed_precision_dtype: float16
+ number_of_workers: 8
+ seed: -1
+ trust_remote_code: true
+ use_deepspeed: false
+experiment_name: solid-spaniel
+llm_backbone: h2oai/llama2-0b-unit-test
+logging:
+ logger: None
+ neptune_project: ''
+output_directory: /tmp/output
+prediction:
+ batch_size_inference: 0
+ metric: LogLoss
+problem_type: text_causal_classification_modeling
+tokenizer:
+ add_prompt_answer_tokens: false
+ max_length: 512
+ padding_quantile: 1.0
+ tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
+training:
+ batch_size: 2
+ differential_learning_rate: 1.0e-05
+ differential_learning_rate_layers:
+ - classification_head
+ drop_last_batch: true
+ epochs: 1
+ evaluate_before_training: false
+ evaluation_epochs: 1.0
+ grad_accumulation: 1
+ gradient_clip: 0.0
+ learning_rate: 0.0001
+ lora: true
+ use_dora: false
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_r: 4
+ lora_target_modules: ''
+ loss_function: CrossEntropyLoss
+ optimizer: AdamW
+ save_checkpoint: "last"
+ schedule: Cosine
+ train_validation_data: false
+ warmup_epochs: 0.0
+ weight_decay: 0.0
\ No newline at end of file
diff --git a/tests/integration/test_causal_multiclass_classification_modeling_cpu_cfg.yaml b/tests/integration/test_causal_multiclass_classification_modeling_cpu_cfg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e946af70d641b3ac3bfbf764e4e17e758ec86e5d
--- /dev/null
+++ b/tests/integration/test_causal_multiclass_classification_modeling_cpu_cfg.yaml
@@ -0,0 +1,91 @@
+architecture:
+ backbone_dtype: float32
+ gradient_checkpointing: true
+ intermediate_dropout: 0.0
+ pretrained: true
+ pretrained_weights: ''
+augmentation:
+ random_parent_probability: 0.0
+ skip_parent_probability: 0.0
+ token_mask_probability: 0.0
+dataset:
+ add_eos_token_to_answer: false
+ add_eos_token_to_prompt: false
+ add_eos_token_to_system: false
+ answer_column:
+ - multiclass_label
+ chatbot_author: H2O.ai
+ chatbot_name: h2oGPT
+ data_sample: 0.03
+ data_sample_choice:
+ - Train
+ - Validation
+ limit_chained_samples: false
+ mask_prompt_labels: true
+ num_classes: 3
+ parent_id_column: None
+ personalize: false
+ prompt_column:
+ - instruction
+ system_column: None
+ text_answer_separator: ''
+ text_prompt_start: ''
+ text_system_start: ''
+ train_dataframe: /tmp/train_full.pq
+ validation_dataframe: None
+ validation_size: 0.2
+ validation_strategy: automatic
+environment:
+ compile_model: false
+ deepspeed_reduce_bucket_size: 1000000
+ deepspeed_stage3_param_persistence_threshold: 1000000
+ deepspeed_stage3_prefetch_bucket_size: 1000000
+ find_unused_parameters: false
+ gpus:
+ - ''
+ huggingface_branch: main
+ mixed_precision: false
+ number_of_workers: 8
+ seed: -1
+ trust_remote_code: true
+ use_deepspeed: false
+experiment_name: solid-spaniel
+llm_backbone: h2oai/llama2-0b-unit-test
+logging:
+ logger: None
+ neptune_project: ''
+output_directory: /tmp/output
+prediction:
+ batch_size_inference: 0
+ metric: LogLoss
+problem_type: text_causal_classification_modeling
+tokenizer:
+ add_prompt_answer_tokens: false
+ max_length: 32
+ padding_quantile: 1.0
+ tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
+training:
+ batch_size: 2
+ differential_learning_rate: 1.0e-05
+ differential_learning_rate_layers:
+ - classification_head
+ drop_last_batch: true
+ epochs: 1
+ evaluate_before_training: false
+ evaluation_epochs: 1.0
+ grad_accumulation: 1
+ gradient_clip: 0.0
+ learning_rate: 0.0001
+ lora: true
+ use_dora: false
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_r: 4
+ lora_target_modules: ''
+ loss_function: CrossEntropyLoss
+ optimizer: AdamW
+ save_checkpoint: "last"
+ schedule: Cosine
+ train_validation_data: false
+ warmup_epochs: 0.0
+ weight_decay: 0.0
\ No newline at end of file
diff --git a/tests/integration/test_causal_multilabel_classification_modeling_cfg.yaml b/tests/integration/test_causal_multilabel_classification_modeling_cfg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2d305fa824137f66bf8c3f01f27af59594d183f0
--- /dev/null
+++ b/tests/integration/test_causal_multilabel_classification_modeling_cfg.yaml
@@ -0,0 +1,93 @@
+architecture:
+ backbone_dtype: int4
+ gradient_checkpointing: true
+ intermediate_dropout: 0.0
+ pretrained: true
+ pretrained_weights: ''
+augmentation:
+ random_parent_probability: 0.0
+ skip_parent_probability: 0.0
+ token_mask_probability: 0.0
+dataset:
+ add_eos_token_to_answer: false
+ add_eos_token_to_prompt: false
+ add_eos_token_to_system: false
+ answer_column:
+ - binary_label
+ - binary_label
+ chatbot_author: H2O.ai
+ chatbot_name: h2oGPT
+ data_sample: 0.03
+ data_sample_choice:
+ - Train
+ - Validation
+ limit_chained_samples: false
+ mask_prompt_labels: true
+ num_classes: 2
+ parent_id_column: None
+ personalize: false
+ prompt_column:
+ - instruction
+ system_column: None
+ text_answer_separator: ''
+ text_prompt_start: ''
+ text_system_start: ''
+ train_dataframe: /tmp/train_full.pq
+ validation_dataframe: None
+ validation_size: 0.2
+ validation_strategy: automatic
+environment:
+ compile_model: false
+ deepspeed_reduce_bucket_size: 1000000
+ deepspeed_stage3_param_persistence_threshold: 1000000
+ deepspeed_stage3_prefetch_bucket_size: 1000000
+ find_unused_parameters: false
+ gpus:
+ - '0'
+ huggingface_branch: main
+ mixed_precision: true
+ mixed_precision_dtype: float16
+ number_of_workers: 8
+ seed: -1
+ trust_remote_code: true
+ use_deepspeed: false
+experiment_name: test-classification-modeling
+llm_backbone: h2oai/llama2-0b-unit-test
+logging:
+ logger: None
+ neptune_project: test_org/test_project
+output_directory: /tmp/output
+prediction:
+ batch_size_inference: 0
+ metric: AUC
+problem_type: text_causal_classification_modeling
+tokenizer:
+ add_prompt_answer_tokens: false
+ max_length: 512
+ padding_quantile: 1.0
+ tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
+training:
+ batch_size: 2
+ differential_learning_rate: 1.0e-05
+ differential_learning_rate_layers:
+ - classification_head
+ drop_last_batch: true
+ epochs: 1
+ evaluate_before_training: false
+ evaluation_epochs: 1.0
+ grad_accumulation: 1
+ gradient_clip: 0.0
+ learning_rate: 0.0001
+ lora: true
+ use_dora: false
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_r: 4
+ lora_target_modules: ''
+ loss_function: BinaryCrossEntropyLoss
+ optimizer: AdamW
+ save_checkpoint: "last"
+ schedule: Cosine
+ train_validation_data: false
+ warmup_epochs: 0.0
+ weight_decay: 0.0
\ No newline at end of file
diff --git a/tests/integration/test_causal_multilabel_classification_modeling_cpu_cfg.yaml b/tests/integration/test_causal_multilabel_classification_modeling_cpu_cfg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5c214c0c954174eafda8d7f56e935761983e1e73
--- /dev/null
+++ b/tests/integration/test_causal_multilabel_classification_modeling_cpu_cfg.yaml
@@ -0,0 +1,93 @@
+architecture:
+ backbone_dtype: float32
+ gradient_checkpointing: true
+ intermediate_dropout: 0.0
+ pretrained: true
+ pretrained_weights: ''
+augmentation:
+ random_parent_probability: 0.0
+ skip_parent_probability: 0.0
+ token_mask_probability: 0.0
+dataset:
+ add_eos_token_to_answer: false
+ add_eos_token_to_prompt: false
+ add_eos_token_to_system: false
+ answer_column:
+ - binary_label
+ - binary_label
+ chatbot_author: H2O.ai
+ chatbot_name: h2oGPT
+ data_sample: 0.03
+ data_sample_choice:
+ - Train
+ - Validation
+ limit_chained_samples: false
+ mask_prompt_labels: true
+ num_classes: 2
+ parent_id_column: None
+ personalize: false
+ prompt_column:
+ - instruction
+ system_column: None
+ text_answer_separator: ''
+ text_prompt_start: ''
+ text_system_start: ''
+ train_dataframe: /tmp/train_full.pq
+ validation_dataframe: None
+ validation_size: 0.2
+ validation_strategy: automatic
+environment:
+ compile_model: false
+ deepspeed_reduce_bucket_size: 1000000
+ deepspeed_stage3_param_persistence_threshold: 1000000
+ deepspeed_stage3_prefetch_bucket_size: 1000000
+ find_unused_parameters: false
+ gpus:
+ - ''
+ huggingface_branch: main
+ mixed_precision: false
+ mixed_precision_dtype: float16
+ number_of_workers: 8
+ seed: -1
+ trust_remote_code: true
+ use_deepspeed: false
+experiment_name: test-classification-modeling
+llm_backbone: h2oai/llama2-0b-unit-test
+logging:
+ logger: None
+ neptune_project: test_org/test_project
+output_directory: /tmp/output
+prediction:
+ batch_size_inference: 0
+ metric: AUC
+problem_type: text_causal_classification_modeling
+tokenizer:
+ add_prompt_answer_tokens: false
+ max_length: 512
+ padding_quantile: 1.0
+ tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
+training:
+ batch_size: 2
+ differential_learning_rate: 1.0e-05
+ differential_learning_rate_layers:
+ - classification_head
+ drop_last_batch: true
+ epochs: 1
+ evaluate_before_training: false
+ evaluation_epochs: 1.0
+ grad_accumulation: 1
+ gradient_clip: 0.0
+ learning_rate: 0.0001
+ lora: true
+ use_dora: false
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_r: 4
+ lora_target_modules: ''
+ loss_function: BinaryCrossEntropyLoss
+ optimizer: AdamW
+ save_checkpoint: "last"
+ schedule: Cosine
+ train_validation_data: false
+ warmup_epochs: 0.0
+ weight_decay: 0.0
\ No newline at end of file
diff --git a/tests/integration/test_causal_regression_modeling_cfg.yaml b/tests/integration/test_causal_regression_modeling_cfg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1e25b5d8032f3e3585a222c796fd415a21483c9e
--- /dev/null
+++ b/tests/integration/test_causal_regression_modeling_cfg.yaml
@@ -0,0 +1,93 @@
+architecture:
+ backbone_dtype: int4
+ gradient_checkpointing: true
+ intermediate_dropout: 0.0
+ pretrained: true
+ pretrained_weights: ''
+augmentation:
+ random_parent_probability: 0.0
+ skip_parent_probability: 0.0
+ token_mask_probability: 0.0
+dataset:
+ add_eos_token_to_answer: false
+ add_eos_token_to_prompt: false
+ add_eos_token_to_system: false
+ answer_column:
+ - regression_label
+ - regression_label2
+ chatbot_author: H2O.ai
+ chatbot_name: h2oGPT
+ data_sample: 0.03
+ data_sample_choice:
+ - Train
+ - Validation
+ limit_chained_samples: false
+ mask_prompt_labels: true
+ num_classes: 1
+ parent_id_column: None
+ personalize: false
+ prompt_column:
+ - instruction
+ system_column: None
+ text_answer_separator: ''
+ text_prompt_start: ''
+ text_system_start: ''
+ train_dataframe: /tmp/train_full.pq
+ validation_dataframe: None
+ validation_size: 0.2
+ validation_strategy: automatic
+environment:
+ compile_model: false
+ deepspeed_reduce_bucket_size: 1000000
+ deepspeed_stage3_param_persistence_threshold: 1000000
+ deepspeed_stage3_prefetch_bucket_size: 1000000
+ find_unused_parameters: false
+ gpus:
+ - '0'
+ huggingface_branch: main
+ mixed_precision: true
+ mixed_precision_dtype: float16
+ number_of_workers: 8
+ seed: -1
+ trust_remote_code: true
+ use_deepspeed: false
+experiment_name: test-regression-modeling
+llm_backbone: h2oai/llama2-0b-unit-test
+logging:
+ logger: None
+ neptune_project: test_org/test_project
+output_directory: /tmp/output
+prediction:
+ batch_size_inference: 0
+ metric: MSE
+problem_type: text_causal_regression_modeling
+tokenizer:
+ add_prompt_answer_tokens: false
+ max_length: 512
+ padding_quantile: 1.0
+ tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
+training:
+ batch_size: 2
+ differential_learning_rate: 1.0e-05
+ differential_learning_rate_layers:
+ - classification_head
+ drop_last_batch: true
+ epochs: 1
+ evaluate_before_training: false
+ evaluation_epochs: 1.0
+ grad_accumulation: 1
+ gradient_clip: 0.0
+ learning_rate: 0.0001
+ lora: true
+ use_dora: false
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_r: 4
+ lora_target_modules: ''
+ loss_function: MSELoss
+ optimizer: AdamW
+ save_checkpoint: "last"
+ schedule: Cosine
+ train_validation_data: false
+ warmup_epochs: 0.0
+ weight_decay: 0.0
\ No newline at end of file
diff --git a/tests/integration/test_causal_regression_modeling_cpu_cfg.yaml b/tests/integration/test_causal_regression_modeling_cpu_cfg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bc2373465a8a902c0d03de0d8501e5f94ba1d3b8
--- /dev/null
+++ b/tests/integration/test_causal_regression_modeling_cpu_cfg.yaml
@@ -0,0 +1,92 @@
+architecture:
+ backbone_dtype: float32
+ gradient_checkpointing: true
+ intermediate_dropout: 0.0
+ pretrained: true
+ pretrained_weights: ''
+augmentation:
+ random_parent_probability: 0.0
+ skip_parent_probability: 0.0
+ token_mask_probability: 0.0
+dataset:
+ add_eos_token_to_answer: false
+ add_eos_token_to_prompt: false
+ add_eos_token_to_system: false
+ answer_column:
+ - regression_label
+ - regression_label2
+ chatbot_author: H2O.ai
+ chatbot_name: h2oGPT
+ data_sample: 0.03
+ data_sample_choice:
+ - Train
+ - Validation
+ limit_chained_samples: false
+ mask_prompt_labels: true
+ num_classes: 1
+ parent_id_column: None
+ personalize: false
+ prompt_column:
+ - instruction
+ system_column: None
+ text_answer_separator: ''
+ text_prompt_start: ''
+ text_system_start: ''
+ train_dataframe: /tmp/train_full.pq
+ validation_dataframe: None
+ validation_size: 0.2
+ validation_strategy: automatic
+environment:
+ compile_model: false
+ deepspeed_reduce_bucket_size: 1000000
+ deepspeed_stage3_param_persistence_threshold: 1000000
+ deepspeed_stage3_prefetch_bucket_size: 1000000
+ find_unused_parameters: false
+ gpus:
+ - ''
+ huggingface_branch: main
+ mixed_precision: false
+ number_of_workers: 8
+ seed: -1
+ trust_remote_code: true
+ use_deepspeed: false
+experiment_name: solid-spaniel
+llm_backbone: h2oai/llama2-0b-unit-test
+logging:
+ logger: None
+ neptune_project: ''
+output_directory: /tmp/output
+prediction:
+ batch_size_inference: 0
+ metric: MSE
+problem_type: text_causal_regression_modeling
+tokenizer:
+ add_prompt_answer_tokens: false
+ max_length: 32
+ padding_quantile: 1.0
+ tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
+training:
+ batch_size: 6
+ differential_learning_rate: 1.0e-05
+ differential_learning_rate_layers:
+ - classification_head
+ drop_last_batch: true
+ epochs: 1
+ evaluate_before_training: false
+ evaluation_epochs: 1.0
+ grad_accumulation: 1
+ gradient_clip: 0.0
+ learning_rate: 0.0001
+ lora: true
+ use_dora: false
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_r: 4
+ lora_target_modules: ''
+ loss_function: MSELoss
+ optimizer: AdamW
+ save_checkpoint: "last"
+ schedule: Cosine
+ train_validation_data: false
+ warmup_epochs: 0.0
+ weight_decay: 0.0
\ No newline at end of file
diff --git a/tests/integration/test_integration.py b/tests/integration/test_integration.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c1b674a4feeb72b9724b45986c1c376ab4a873d
--- /dev/null
+++ b/tests/integration/test_integration.py
@@ -0,0 +1,200 @@
+import json
+import os
+import sys
+
+import numpy as np
+import pandas as pd
+import pytest
+import yaml
+from transformers.testing_utils import execute_subprocess_async
+
+from llm_studio.app_utils.default_datasets import (
+ prepare_default_dataset_causal_language_modeling,
+)
+
+
+def get_experiment_status(path: str) -> str:
+ """Get status information from experiment.
+
+ Args:
+ path: path to experiment folder
+ Returns:
+ experiment status
+ """
+
+ try:
+ flag_json_path = os.path.join(path, "flags.json")
+ if not os.path.exists(flag_json_path):
+ return "none"
+ with open(flag_json_path) as file:
+ flags = json.load(file)
+ status = flags.get("status", "none")
+ return status
+ except Exception:
+ return "none"
+
+
+@pytest.mark.parametrize(
+ "config_name",
+ [
+ "test_causal_language_modeling_oasst_cfg",
+ "test_sequence_to_sequence_modeling_oasst_cfg",
+ ],
+)
+@pytest.mark.parametrize(
+ "metric",
+ [
+ "Perplexity",
+ "BLEU",
+ ],
+)
+def test_oasst_training_gpu(tmp_path, config_name, metric):
+ run_oasst(tmp_path, config_name, metric)
+
+
+@pytest.mark.parametrize(
+ "settings",
+ [
+ # ["AUC", "test_causal_binary_classification_modeling_cfg"],
+ # ["LogLoss", "test_causal_binary_classification_modeling_cfg"],
+ # ["Accuracy", "test_causal_binary_classification_modeling_cfg"],
+ # ["AUC", "test_causal_multiclass_classification_modeling_cfg"],
+ # ["LogLoss", "test_causal_multiclass_classification_modeling_cfg"],
+ # ["Accuracy", "test_causal_multiclass_classification_modeling_cfg"],
+ ["AUC", "test_causal_multilabel_classification_modeling_cfg"],
+ ["LogLoss", "test_causal_multilabel_classification_modeling_cfg"],
+ ["Accuracy", "test_causal_multilabel_classification_modeling_cfg"],
+ ],
+)
+def test_oasst_classification_training_gpu(tmp_path, settings):
+ metric, config_name = settings
+ run_oasst(
+ tmp_path,
+ config_name=config_name,
+ metric=metric,
+ )
+
+
+@pytest.mark.parametrize(
+ "settings",
+ [
+ ["MSE", "test_causal_regression_modeling_cfg"],
+ ["MAE", "test_causal_regression_modeling_cfg"],
+ ],
+)
+def test_oasst_regression_training_gpu(tmp_path, settings):
+ metric, config_name = settings
+ run_oasst(
+ tmp_path,
+ config_name=config_name,
+ metric=metric,
+ )
+
+
+@pytest.mark.parametrize(
+ "settings",
+ [
+ ["MSE", "test_causal_regression_modeling_cpu_cfg"],
+ ["MAE", "test_causal_regression_modeling_cpu_cfg"],
+ ],
+)
+def test_oasst_regression_training_cpu(tmp_path, settings):
+ metric, config_name = settings
+ run_oasst(
+ tmp_path,
+ config_name=config_name,
+ metric=metric,
+ )
+
+
+@pytest.mark.parametrize(
+ "settings",
+ [
+ ["AUC", "test_causal_binary_classification_modeling_cpu_cfg"],
+ ["LogLoss", "test_causal_multiclass_classification_modeling_cpu_cfg"],
+ ["Accuracy", "test_causal_multilabel_classification_modeling_cpu_cfg"],
+ ],
+)
+def test_oasst_classification_training_cpu(tmp_path, settings):
+ metric, config_name = settings
+ run_oasst(
+ tmp_path,
+ config_name=config_name,
+ metric=metric,
+ )
+
+
+@pytest.mark.parametrize(
+ "config_name",
+ [
+ "test_causal_language_modeling_oasst_cpu_cfg",
+ "test_sequence_to_sequence_modeling_oasst_cpu_cfg",
+ ],
+)
+@pytest.mark.parametrize(
+ "metric",
+ [
+ "Perplexity",
+ "BLEU",
+ ],
+)
+def test_oasst_training_cpu(tmp_path, config_name, metric):
+ run_oasst(tmp_path, config_name, metric)
+
+
+def run_oasst(tmp_path, config_name, metric):
+ """
+ Run a small end-to-end training on the OASST dataset.
+
+ Asserts that the run finished and produced the expected artifacts.
+ Pytest keeps around the last 3 test runs in the tmp_path fixture.
+ """
+ prepare_default_dataset_causal_language_modeling(tmp_path)
+ train_path = os.path.join(tmp_path, "train_full.pq")
+ # create dummy labels for classification problem type,
+ # unused for other problem types
+ df = pd.read_parquet(train_path)
+ df["multiclass_label"] = np.random.choice(["0", "1", "2"], size=len(df))
+ df["binary_label"] = np.random.choice(["0", "1"], size=len(df))
+ df["regression_label"] = np.random.uniform(0, 1, size=len(df))
+ df["regression_label2"] = np.random.uniform(0, 1, size=len(df))
+ df.to_parquet(train_path)
+
+ with open(
+ os.path.join(
+ os.path.dirname(os.path.realpath(__file__)), f"{config_name}.yaml"
+ ),
+ "r",
+ ) as fp:
+ cfg = yaml.load(fp, Loader=yaml.FullLoader)
+ # set paths and save in tmp folder
+ cfg["dataset"]["train_dataframe"] = train_path
+ cfg["output_directory"] = os.path.join(tmp_path, "output")
+ # set metric
+ cfg["prediction"]["metric"] = metric
+ cfg["prediction"]["max_length_inference"] = 2
+ modified_config_path = os.path.join(tmp_path, "cfg.yaml")
+ with open(modified_config_path, "w") as fp:
+ yaml.dump(cfg, fp)
+
+ # llm studio directory (relative to this file)
+ llm_studio_dir = os.path.abspath(
+ os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../llm_studio/")
+ )
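+ # launch training the same way the CLI would: python train.py -Y <config>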
+ cmd = [
+ f"{sys.executable}",
+ os.path.join(llm_studio_dir, "train.py"),
+ "-Y",
+ f"{modifed_config_path}",
+ ]
+ execute_subprocess_async(cmd)
+ assert os.path.exists(cfg["output_directory"])
+ status = get_experiment_status(path=cfg["output_directory"])
+ assert status == "finished"
+ assert os.path.exists(os.path.join(cfg["output_directory"], "charts.db"))
+ assert os.path.exists(os.path.join(cfg["output_directory"], "checkpoint.pth"))
+ assert os.path.exists(os.path.join(cfg["output_directory"], "logs.log"))
+ assert os.path.exists(
+ os.path.join(cfg["output_directory"], "validation_predictions.csv")
+ )
diff --git a/tests/integration/test_sequence_to_sequence_modeling_oasst_cfg.yaml b/tests/integration/test_sequence_to_sequence_modeling_oasst_cfg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d963292f4f0ab5114f0e33c32762c6751114a27e
--- /dev/null
+++ b/tests/integration/test_sequence_to_sequence_modeling_oasst_cfg.yaml
@@ -0,0 +1,97 @@
+architecture:
+ backbone_dtype: bfloat16
+ gradient_checkpointing: true
+ intermediate_dropout: 0.0
+ pretrained: true
+ pretrained_weights: ''
+augmentation:
+ random_parent_probability: 0.0
+ skip_parent_probability: 0.0
+ token_mask_probability: 0.0
+dataset:
+ add_eos_token_to_answer: true
+ add_eos_token_to_prompt: true
+ add_eos_token_to_system: true
+ answer_column: output
+ chatbot_author: H2O.ai
+ chatbot_name: h2oGPT
+ data_sample: 0.01
+ data_sample_choice:
+ - Train
+ - Validation
+ limit_chained_samples: false
+ mask_prompt_labels: true
+ parent_id_column: None
+ personalize: false
+ prompt_column:
+ - instruction
+ system_column: None
+ text_answer_separator: ''
+ text_prompt_start: ''
+ text_system_start: ''
+ train_dataframe: /tmp/train_full.pq
+ validation_dataframe: None
+ validation_size: 0.01
+ validation_strategy: automatic
+environment:
+ compile_model: false
+ find_unused_parameters: false
+ gpus:
+ - '0'
+ huggingface_branch: main
+ mixed_precision: false
+ number_of_workers: 8
+ seed: -1
+ trust_remote_code: true
+experiment_name: test-sequence-to-sequence-modeling-oasst
+llm_backbone: t5-small
+logging:
+ logger: None
+ neptune_project: test_org/test_project
+output_directory: /tmp/output
+prediction:
+ batch_size_inference: 0
+ do_sample: false
+ max_length_inference: 256
+ max_time: 0.0
+ metric: Perplexity
+ metric_gpt_model: gpt-3.5-turbo-0301
+ metric_gpt_template: general
+ min_length_inference: 2
+ num_beams: 1
+ num_history: 4
+ repetition_penalty: 1.2
+ stop_tokens: ''
+ temperature: 0.0
+ top_k: 0
+ top_p: 1.0
+problem_type: text_sequence_to_sequence_modeling
+tokenizer:
+ add_prompt_answer_tokens: false
+ max_length: 512
+ padding_quantile: 1.0
+ tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
+training:
+ batch_size: 2
+ differential_learning_rate: 1.0e-05
+ differential_learning_rate_layers: []
+ drop_last_batch: true
+ epochs: 1
+ evaluate_before_training: false
+ evaluation_epochs: 1.0
+ grad_accumulation: 1
+ gradient_clip: 0.0
+ learning_rate: 0.0001
+ lora: true
+ use_dora: false
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_r: 4
+ lora_target_modules: ''
+ loss_function: TokenAveragedCrossEntropy
+ optimizer: AdamW
+ save_checkpoint: "last"
+ schedule: Cosine
+ train_validation_data: false
+ warmup_epochs: 0.0
+ weight_decay: 0.0
diff --git a/tests/integration/test_sequence_to_sequence_modeling_oasst_cpu_cfg.yaml b/tests/integration/test_sequence_to_sequence_modeling_oasst_cpu_cfg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dee5202e4449a685b260a238e8480b32f023b7a6
--- /dev/null
+++ b/tests/integration/test_sequence_to_sequence_modeling_oasst_cpu_cfg.yaml
@@ -0,0 +1,97 @@
+architecture:
+ backbone_dtype: float32
+ gradient_checkpointing: false
+ intermediate_dropout: 0.0
+ pretrained: true
+ pretrained_weights: ''
+augmentation:
+ random_parent_probability: 0.0
+ skip_parent_probability: 0.0
+ token_mask_probability: 0.0
+dataset:
+ add_eos_token_to_answer: true
+ add_eos_token_to_prompt: true
+ add_eos_token_to_system: true
+ answer_column: output
+ chatbot_author: H2O.ai
+ chatbot_name: h2oGPT
+ data_sample: 0.01
+ data_sample_choice:
+ - Train
+ - Validation
+ limit_chained_samples: false
+ mask_prompt_labels: true
+ parent_id_column: None
+ personalize: false
+ prompt_column:
+ - instruction
+ system_column: None
+ text_answer_separator: ''
+ text_prompt_start: ''
+ text_system_start: ''
+ train_dataframe: /tmp/train_full.pq
+ validation_dataframe: None
+ validation_size: 0.01
+ validation_strategy: automatic
+environment:
+ compile_model: false
+ find_unused_parameters: false
+ gpus:
+ - ''
+ huggingface_branch: main
+ mixed_precision: false
+ number_of_workers: 8
+ seed: -1
+ trust_remote_code: true
+experiment_name: test-sequence-to-sequence-modeling-oasst-cpu
+llm_backbone: t5-small
+logging:
+ logger: None
+ neptune_project: test_org/test_project
+output_directory: /tmp/output
+prediction:
+ batch_size_inference: 0
+ do_sample: false
+ max_length_inference: 16
+ max_time: 0.0
+ metric: Perplexity
+ metric_gpt_model: gpt-3.5-turbo-0301
+ metric_gpt_template: general
+ min_length_inference: 2
+ num_beams: 1
+ num_history: 4
+ repetition_penalty: 1.2
+ stop_tokens: ''
+ temperature: 0.0
+ top_k: 0
+ top_p: 1.0
+problem_type: text_sequence_to_sequence_modeling
+tokenizer:
+ add_prompt_answer_tokens: false
+ max_length: 32
+ padding_quantile: 1.0
+ tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
+training:
+ batch_size: 2
+ differential_learning_rate: 1.0e-05
+ differential_learning_rate_layers: []
+ drop_last_batch: true
+ epochs: 1
+ evaluate_before_training: false
+ evaluation_epochs: 1.0
+ grad_accumulation: 1
+ gradient_clip: 0.0
+ learning_rate: 0.0001
+ lora: true
+ use_dora: false
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_r: 4
+ lora_target_modules: ''
+ loss_function: TokenAveragedCrossEntropy
+ optimizer: AdamW
+ save_checkpoint: "last"
+ schedule: Cosine
+ train_validation_data: false
+ warmup_epochs: 0.0
+ weight_decay: 0.0
diff --git a/tests/python_configs/test_base.py b/tests/python_configs/test_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ce3db7e1b771e46941bb709ead658cce489e4fd
--- /dev/null
+++ b/tests/python_configs/test_base.py
@@ -0,0 +1,45 @@
+from llm_studio.python_configs.text_causal_classification_modeling_config import (
+ ConfigProblemBase as CausalClassificationConfigProblemBase,
+)
+from llm_studio.python_configs.text_causal_language_modeling_config import (
+ ConfigProblemBase as CausalConfigProblemBase,
+)
+from llm_studio.python_configs.text_causal_regression_modeling_config import (
+ ConfigProblemBase as CausalRegressionConfigProblemBase,
+)
+from llm_studio.python_configs.text_sequence_to_sequence_modeling_config import (
+ ConfigProblemBase as Seq2SeqConfigProblemBase,
+)
+from llm_studio.src.utils.config_utils import (
+ NON_GENERATION_PROBLEM_TYPES,
+ convert_cfg_base_to_nested_dictionary,
+)
+
+
+def test_from_dict():
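+ # round-trip each problem config through its nested-dict form; nothing should change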
+ for cfg_class in [
+ CausalConfigProblemBase,
+ Seq2SeqConfigProblemBase,
+ CausalClassificationConfigProblemBase,
+ CausalRegressionConfigProblemBase,
+ ]:
+ cfg = cfg_class()
+ cfg_dict = convert_cfg_base_to_nested_dictionary(cfg)
+ cfg2 = cfg_class.from_dict(cfg_dict) # type: ignore
+ cfg_dict_2 = convert_cfg_base_to_nested_dictionary(cfg2)
+ for k, v in cfg_dict.items():
+ if isinstance(v, dict):
+ for k2, v2 in v.items():
+ assert cfg_dict_2[k][k2] == v2
+ assert cfg_dict_2[k] == v
+
+
+def test_classification_config_is_in_non_generating_problem_types():
+ cfg = CausalClassificationConfigProblemBase()
+ assert cfg.problem_type in NON_GENERATION_PROBLEM_TYPES
+
+
+def test_regression_config_is_in_non_generating_problem_types():
+ cfg = CausalRegressionConfigProblemBase()
+ assert cfg.problem_type in NON_GENERATION_PROBLEM_TYPES
diff --git a/tests/python_configs/test_cfg_checks.py b/tests/python_configs/test_cfg_checks.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2f1b09841686aa222b935217144ab29809299cf
--- /dev/null
+++ b/tests/python_configs/test_cfg_checks.py
@@ -0,0 +1,79 @@
+from unittest.mock import MagicMock, patch
+
+import pytest
+
+from llm_studio.python_configs.base import DefaultConfigProblemBase
+from llm_studio.python_configs.cfg_checks import (
+ check_config_for_errors,
+ check_for_common_errors,
+)
+
+
+class MockConfig(DefaultConfigProblemBase):
+ def __init__(self):
+ self.environment = MagicMock()
+ self.architecture = MagicMock()
+ self.training = MagicMock()
+
+ def check(self):
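+ # a valid config: no problem-specific errors to report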
+ return {"title": [], "message": [], "type": []}
+
+
+@pytest.fixture
+def mock_config():
+ return MockConfig()
+
+
+def test_check_config_for_errors(mock_config):
+ with patch(
+ "llm_studio.python_configs.cfg_checks.check_for_common_errors"
+ ) as mock_common_errors:
+ mock_common_errors.return_value = {
+ "title": ["Common Error"],
+ "message": ["Common Error Message"],
+ "type": ["error"],
+ }
+
+ result = check_config_for_errors(mock_config)
+
+ assert "title" in result
+ assert "message" in result
+ assert "Common Error" in result["title"]
+ assert "Common Error Message" in result["message"]
+
+
+def test_check_for_common_errors_no_gpu(mock_config):
+ mock_config.environment.gpus = []
+
+ result = check_for_common_errors(mock_config)
+
+ assert "No GPU selected" in result["title"]
+
+
+def test_check_for_common_errors_too_many_gpus(mock_config):
+ mock_config.environment.gpus = [0, 1, 2, 3]
+ with patch("torch.cuda.device_count", return_value=2):
+ result = check_for_common_errors(mock_config)
+
+ assert "More GPUs selected than available" in result["title"]
+
+
+@patch("os.statvfs")
+def test_check_for_common_errors_disk_space(mock_statvfs, mock_config):
+ mock_statvfs.return_value = MagicMock(
+ f_frsize=4096, f_bavail=1000
+ ) # Small disk space
+
+ result = check_for_common_errors(mock_config)
+
+ assert "Not enough disk space." in result["title"]
+
+
+def test_check_for_common_errors_quantization_without_pretrained(mock_config):
+ mock_config.architecture.backbone_dtype = "int4"
+ mock_config.architecture.pretrained = False
+
+ result = check_for_common_errors(mock_config)
+
+ assert "Quantization without pretrained weights." in result["title"]
diff --git a/tests/src/augmentations/test_nlp_aug.py b/tests/src/augmentations/test_nlp_aug.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf646742084ce36cd8285cd957b0ac19d6b6f805
--- /dev/null
+++ b/tests/src/augmentations/test_nlp_aug.py
@@ -0,0 +1,86 @@
+import unittest
+from unittest.mock import MagicMock
+
+import torch
+
+from llm_studio.src.augmentations.nlp_aug import BaseNLPAug
+
+
+class TestBaseNLPAug(unittest.TestCase):
+ def setUp(self):
+ self.cfg = MagicMock()
+ self.cfg.tokenizer._tokenizer_mask_token_id = 1337
+
+ def test_init(self):
+ aug = BaseNLPAug(self.cfg)
+ self.assertEqual(aug.cfg, self.cfg)
+
+ def test_forward_no_augmentation(self):
+ aug = BaseNLPAug(self.cfg)
+ self.cfg.augmentation.token_mask_probability = 0.0
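+ # with zero mask probability the batch must pass through unchanged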
+
+ batch = {
+ "input_ids": torch.tensor(
+ [
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ [11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
+ ]
+ ),
+ "attention_mask": torch.tensor(
+ [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
+ ),
+ "labels": torch.tensor(
+ [
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ [11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
+ ]
+ ),
+ }
+
+ result = aug.forward(batch.copy())
+ self.assertTrue(torch.equal(result["input_ids"], batch["input_ids"]))
+ self.assertTrue(torch.equal(result["attention_mask"], batch["attention_mask"]))
+ self.assertTrue(torch.equal(result["labels"], batch["labels"]))
+
+ def test_forward_with_augmentation(self):
+ aug = BaseNLPAug(self.cfg)
+ self.cfg.augmentation.token_mask_probability = 0.5
+ torch.manual_seed(42) # For reproducibility
+
+ batch = {
+ "input_ids": torch.tensor(
+ [
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ [11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
+ ]
+ ),
+ "attention_mask": torch.tensor(
+ [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
+ ),
+ "labels": torch.tensor(
+ [
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ [11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
+ ]
+ ),
+ }
+
+ result = aug.forward(batch.copy())
+
+ # Check that some tokens have been masked
+ self.assertFalse(torch.equal(result["input_ids"], batch["input_ids"]))
+
+ # Check that masked tokens are replaced with mask token ID
+ mask = result["input_ids"] == self.cfg.tokenizer._tokenizer_mask_token_id
+ self.assertTrue(mask.any())
+
+ # Check that attention mask is updated for masked tokens
+ self.assertTrue(
+ torch.equal(result["attention_mask"][mask], torch.zeros(mask.sum()))
+ )
+
+ # Check that labels are updated to -100 for masked tokens
+ self.assertTrue(
+ torch.equal(result["labels"][mask], torch.ones(mask.sum()) * -100)
+ )
diff --git a/tests/src/datasets/test_conversation_chain_handler.py b/tests/src/datasets/test_conversation_chain_handler.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c344d1f7c2caa8bf6e6c16ecac69bc3f5c414f9
--- /dev/null
+++ b/tests/src/datasets/test_conversation_chain_handler.py
@@ -0,0 +1,336 @@
+import time
+from unittest import mock
+from unittest.mock import MagicMock
+
+import pandas as pd
+import pytest
+
+from llm_studio.app_utils.default_datasets import (
+ prepare_default_dataset_causal_language_modeling,
+)
+from llm_studio.src.datasets.conversation_chain_handler import ConversationChainHandler
+
+
+@pytest.fixture
+def df():
+ return pd.DataFrame(
+ {
+ "id": ["id1", "id2", "id3", "id4", "x1", "x2", "x3", "x4"],
+ "parent_id": ["None", "id1", "id2", "id3", "None", "x1", "x2", "x3"],
+ "answer": [
+ "answer1",
+ "answer2",
+ "answer3",
+ "answer4",
+ "a1",
+ "a2",
+ "a3",
+ "a4",
+ ],
+ "system": [
+ "system1",
+ "system2",
+ "system3",
+ "system4",
+ "s1",
+ "s2",
+ "s3",
+ "s4",
+ ],
+ "prompt": [
+ "prompt1",
+ "prompt2",
+ "prompt3",
+ "prompt4",
+ "p1",
+ "p2",
+ "p3",
+ "p4",
+ ],
+ }
+ )
+
+
+@pytest.fixture
+def df_short():
+ return pd.DataFrame(
+ {
+ "id": ["id1", "id2", "id3", "id4"],
+ "parent_id": ["None", "id1", "id2", "id3"],
+ "answer": ["answer1", "answer2", "answer3", "answer4"],
+ "system": ["system1", "system2", "system3", "system4"],
+ "prompt": ["prompt1", "prompt2", "prompt3", "prompt4"],
+ }
+ )
+
+
+@pytest.fixture
+def cfg():
+ cfg = MagicMock()
+ cfg.dataset.parent_id_column = "parent_id"
+ cfg.dataset.id_column = "id"
+ cfg.dataset.system_column = "system"
+ cfg.dataset.prompt_column = "prompt"
+ cfg.dataset.answer_column = "answer"
+ cfg.dataset.limit_chained_samples = True
+ return cfg
+
+
+def test_conversation_chain_handler(cfg, df):
+ handler = ConversationChainHandler(df, cfg)
+
+ assert len(handler) == 2, len(handler)
+
+ data = handler[0]
+ assert data == {
+ "prompts": ["prompt1", "prompt2", "prompt3", "prompt4"],
+ "answers": ["answer1", "answer2", "answer3", "answer4"],
+ "systems": ["system1", "system2", "system3", "system4"],
+ }
+
+ data = handler[1]
+ assert data == {
+ "prompts": ["p1", "p2", "p3", "p4"],
+ "answers": ["a1", "a2", "a3", "a4"],
+ "systems": ["s1", "s2", "s3", "s4"],
+ }
+
+
+def test_chained_samples_disabled(df_short, cfg):
+ cfg.dataset.limit_chained_samples = False
+ cfg.dataset.parent_id_column = "None"
+
+ handler = ConversationChainHandler(df_short, cfg)
+ assert len(handler) == 4
+ for i in range(4):
+ assert handler[i] == {
+ "prompts": [f"prompt{i + 1}"],
+ "answers": [f"answer{i + 1}"],
+ "systems": [f"system{i + 1}"],
+ }
+
+
+def test_incomplete_chained_samples(cfg, df_short):
+ cfg.dataset.limit_chained_samples = False
+
+ handler = ConversationChainHandler(df_short, cfg)
+ assert handler.conversation_chain_ids == [[0], [0, 1], [0, 1, 2], [0, 1, 2, 3]]
+ assert len(handler) == 4
+ for i in range(4):
+ assert handler[i] == {
+ "prompts": [f"prompt{j + 1}" for j in range(i + 1)],
+ "answers": [f"answer{j + 1}" for j in range(i + 1)],
+ "systems": [f"system{j + 1}" for j in range(i + 1)],
+ }
+
+
+def test_get_conversation_ids():
+ # test the get_conversation_ids method - normal case
+ conv_ids = ConversationChainHandler.get_conversation_ids(
+ {"id2": "id1", "id3": "id2", "id4": "id3"}, "id4"
+ )
+ assert conv_ids == ["id1", "id2", "id3", "id4"]
+
+ # test the get_conversation_ids method - circular case, should raise ValueError
+ with pytest.raises(ValueError):
+ ConversationChainHandler.get_conversation_ids(
+ {"id1": "id4", "id2": "id1", "id3": "id2", "id4": "id3"}, "id4"
+ )
+
+
+@pytest.fixture
+def df_with_nan():
+ # mapping is
+ # a1 -> " " -> -inf -> 1234567890 -> "1234567890" -> "x1" -> 1 -> 2 -> 3 -> 4
+ # a2
+ # a3
+ # a4
+ return pd.DataFrame(
+ {
+ "id": [
+ "a1",
+ " ",
+ "-inf",
+ 1234567890,
+ "1234567890",
+ "x1",
+ 1,
+ 2,
+ 3.0,
+ 4.0,
+ "a2",
+ "a3",
+ "a4",
+ ],
+ "parent_id": [
+ " ", # valid
+ "-inf", # valid
+ 1234567890, # valid
+ "1234567890", # valid, different type
+ "x1", # valid
+ 1.0, # valid, needs to map to the int value
+ 2.0, # valid, needs to map to the int value
+ 3, # valid, needs to map to the float value
+ 4, # valid, needs to map to the float value
+ float("nan"), # should be ignored
+ "None", # should be ignored
+ None, # should be ignored
+ float("inf"), # should be ignored
+ ],
+ "answer": [f"answer{i + 1}" for i in range(13)],
+ "system": [f"system{i + 1}" for i in range(13)],
+ "prompt": [f"prompt{i + 1}" for i in range(13)],
+ }
+ )
+
+
+def test_conversation_chain_handles_nan_parent_ids(df_with_nan, cfg):
+ handler = ConversationChainHandler(df_with_nan, cfg)
+ assert handler.conversation_chain_ids == [
+ [9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
+ [10],
+ [11],
+ [12],
+ ]
+ assert len(handler) == 4
+ assert handler[0] == {
+ "prompts": [
+ "prompt10",
+ "prompt9",
+ "prompt8",
+ "prompt7",
+ "prompt6",
+ "prompt5",
+ "prompt4",
+ "prompt3",
+ "prompt2",
+ "prompt1",
+ ],
+ "answers": [
+ "answer10",
+ "answer9",
+ "answer8",
+ "answer7",
+ "answer6",
+ "answer5",
+ "answer4",
+ "answer3",
+ "answer2",
+ "answer1",
+ ],
+ "systems": [
+ "system10",
+ "system9",
+ "system8",
+ "system7",
+ "system6",
+ "system5",
+ "system4",
+ "system3",
+ "system2",
+ "system1",
+ ],
+ }
+ assert handler[1] == {
+ "prompts": ["prompt11"],
+ "answers": ["answer11"],
+ "systems": ["system11"],
+ }
+ assert handler[2] == {
+ "prompts": ["prompt12"],
+ "answers": ["answer12"],
+ "systems": ["system12"],
+ }
+ assert handler[3] == {
+ "prompts": ["prompt13"],
+ "answers": ["answer13"],
+ "systems": ["system13"],
+ }
+
+
+def test_conversation_chain_handler_filters_parent_ids(df_with_nan, cfg):
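+ # a parent id that never appears in the id column should behave like no parent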
+ for i in range(len(df_with_nan)):
+ df_with_nan_1 = df_with_nan.copy()
+ df_with_nan_1.loc[i, "parent_id"] = "MISSING"
+
+ handler_1 = ConversationChainHandler(df_with_nan_1, cfg)
+ df_with_nan_2 = df_with_nan.copy()
+ df_with_nan_2.loc[i, "parent_id"] = "None"
+
+ handler_2 = ConversationChainHandler(df_with_nan_2, cfg)
+ assert handler_1.conversation_chain_ids == handler_2.conversation_chain_ids
+
+
+def test_oasst_conversation_chain_handler(tmp_path):
+ """
+ Test conversation chain handler on default OASST dataset.
+ """
+
+ df = prepare_default_dataset_causal_language_modeling(tmp_path)
+ cfg = mock.MagicMock()
+ cfg.dataset.prompt_column = "instruction"
+ cfg.dataset.answer_column = "output"
+ cfg.dataset.parent_id_column = "parent_id"
+ cfg.dataset.id_column = "id"
+ cfg.dataset.system_column = "None"
+
+ for limit_chained_samples in [False, True]:
+ cfg.dataset.limit_chained_samples = limit_chained_samples
+ conversation_chain_handler = ConversationChainHandler(df, cfg=cfg)
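+ # rewire parent ids so each row points at its predecessor, linearizing the chains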
+ for conversation_chain_ids in conversation_chain_handler.conversation_chain_ids:
+ parent_idx = conversation_chain_ids[0]
+ for idx in conversation_chain_ids[1:]:
+ df.loc[idx, "parent_id"] = parent_idx
+ parent_idx = idx
+
+ cfg.dataset.limit_chained_samples = False
+ conversation_chain_handler = ConversationChainHandler(df, cfg=cfg)
+
+ prompts = [
+ "I am building a mechanical keyboard from scratch. I already have the working hardware and am in the process of configuring the firmware. However i find that the qwertz layout gives me wrist pain. I will use the keyboard for writing in english, german and french, and for coding mainly.\nWhat keyboard layout would be best suited for me?", # noqa: E501
+ "what is the advantage of BvoFrak and Bépo, over Azerty that is more common in France.", # noqa: E501
+ "Is there any information on how long it takes a typical person to adopt the BvoFrak or Bépo layouts? I worry that the time investment may be a concern, or that I may make more mistakes when programming during the transition period.", # noqa: E501
+ ]
+ answers = [
+ "Generally, it seems that Neo layout may be what you are looking for.\nHere are some keyboard layouts for various use cases: \nFor French: BvoFrak, Bépo\nFor German: Neo, AdNW\nFor English: DHIATENSOR, Dvorak,\nFor programing: Programmer Dvorak, Evolved, Capewell, QGMLWY, Arensito\nNote that while layout may contribute to wrist pain, other factors like the angle of the keyboard, key spacing (vertical, horizontal, and depth), more may also be contributing factors. Learning a new layout takes time and effort, and may make it more difficult to type on other keyboards.", # noqa: E501
+ "BvoFrak and Bépo are alternative French keyboard layouts that aim to improve on the traditional Azerty layout. Some of the claimed advantages include:\n\n- BvoFrak aims to minimize finger movement and optimize typing efficiency. It groups letters by finger positions rather than alphabetic order.\n- Bépo aims to allow for faster typing of common French letters and letter combinations. It also groups letters by finger positions and makes some other rearrangements to commonly used keys.\n- Both BvoFrak and Bépo proponents argue that Azerty is inefficient and optimized for typewriters, not modern keyboards and typing. However, Azerty is deeply entrenched and the alternative layouts have not achieved widespread adoption.\n\nSo the potential advantages are increased typing efficiency and comfort, but the alternative layouts have to overcome the momentum of the established Azerty layout. Whether the claimed advantages hold up in practice for most typists is unclear without much broader adoption of the alternative layouts.", # noqa: E501
+ "The time it takes to learn a new keyboard layout can vary depending on several factors, including the individual's previous typing experience and the amount of time they can devote to practice. However, on average, it can take between 2-4 weeks to become proficient in a new keyboard layout.\n\nDuring the transition period, it's natural to make more mistakes when typing, especially when programming or using special characters. It's important to be patient with yourself and allow yourself time to adjust to the new layout. With practice, the new layout will become more natural and comfortable to use, and the speed and accuracy of your typing will improve.\n\nUltimately, the investment of time and effort required to learn a new keyboard layout is a personal decision, and it's important to consider the potential benefits and drawbacks before making a decision. While there may be some initial challenges, many people find that the improved ergonomics and efficiency of a new keyboard layout make the investment well worth it in the long run.", # noqa: E501
+ ]
+ systems = ["", "", ""]
+ # sample 10719 contains a three-round conversation
+ sample = conversation_chain_handler[10719]
+ assert sample["prompts"] == prompts
+ assert sample["answers"] == answers
+ assert sample["systems"] == systems
+
+
+def test_oasst_conversation_chain_handler_is_fast(tmp_path):
+ df_oasst = prepare_default_dataset_causal_language_modeling(tmp_path)
+ cfg = mock.MagicMock()
+ cfg.dataset.prompt_column = "instruction"
+ cfg.dataset.answer_column = "output"
+ cfg.dataset.parent_id_column = "parent_id"
+ cfg.dataset.id_column = "id"
+ cfg.dataset.system_column = "None"
+ cfg.dataset.limit_chained_samples = True
+ dfs = []
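+ # replicate the dataset 50x with distinct ids to build a >400k-row stress test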
+ for i in range(50):
+ df = df_oasst.copy()
+ df["parent_id"] = df["parent_id"].apply(
+ lambda x: x + str(i) if x is not None else x
+ )
+ df["id"] = df["id"].apply(lambda x: x + str(i))
+ dfs.append(df)
+
+ df = pd.concat(dfs).reset_index(drop=True)
+
+ assert len(df) > 400_000
+
+ t_0 = time.time()
+ conversation_chain_handler = ConversationChainHandler(df, cfg)
+ _ = [conversation for conversation in conversation_chain_handler]
+ t_1 = time.time()
+ assert t_1 - t_0 < 10 # building and iterating all chains must stay under 10 seconds
diff --git a/tests/src/datasets/test_text_causal_language_modeling_ds.py b/tests/src/datasets/test_text_causal_language_modeling_ds.py
new file mode 100644
index 0000000000000000000000000000000000000000..42392cd1f75450b97061596779b9c396f4c47bb7
--- /dev/null
+++ b/tests/src/datasets/test_text_causal_language_modeling_ds.py
@@ -0,0 +1,447 @@
+import re
+from unittest import mock
+from unittest.mock import MagicMock, patch
+
+import numpy as np
+import pandas as pd
+import pytest
+
+from llm_studio.app_utils.default_datasets import (
+ prepare_default_dataset_causal_language_modeling,
+)
+from llm_studio.python_configs.text_causal_language_modeling_config import (
+ ConfigNLPCausalLMDataset,
+ ConfigNLPCausalLMTokenizer,
+ ConfigProblemBase,
+)
+from llm_studio.src.datasets.text_causal_language_modeling_ds import CustomDataset
+
+
+def test_prepare_default_dataset(tmp_path):
+ df = prepare_default_dataset_causal_language_modeling(tmp_path)
+ assert isinstance(df, pd.DataFrame)
+ assert set(df.keys()) == set(
+ ["instruction", "output", "id", "parent_id", "lang", "rank"]
+ )
+ assert df.shape == (13026, 6)
+
+
+def test_clean_output():
+ output = {
+ "predicted_text": np.array(
+ [
+ "This is a test",
+ "This is a test This is a test",
+ "This is a test This is a test",
+ "This is a test This is a test",
+ " This is a test",
+ "This is a test ",
+ ]
+ )
+ }
+
+ cfg = mock.MagicMock()
+ cfg.tokenizer._stop_words = ["<stop>", "<stop2>", "<stop3>"]
+
+ predicted_text_clean = CustomDataset.clean_output(output=output, cfg=cfg)[
+ "predicted_text"
+ ]
+ assert predicted_text_clean == [
+ "This is a test",
+ "This is a test",
+ "This is a test",
+ "This is a test",
+ "",
+ "This is a test",
+ ]
+
+
+def test_sanity_check_raises_error():
+ mock_config = MagicMock()
+ mock_config.dataset.parent_id_column = "parent_id"
+ mock_config.dataset.id_column = "id"
+ mock_config.dataset.answer_column = "answer"
+
+ df_1 = pd.DataFrame(
+ {
+ "id": [1, 2, 3, 4],
+ "parent_id": [2, None, 4, 1],
+ "answer": ["a", "b", "c", "d"],
+ "other_data": ["a", "b", "c", "d"],
+ }
+ )
+ CustomDataset.sanity_check(df_1, mock_config)
+
+ df_2 = pd.DataFrame(
+ {
+ "id": [1, 2, 3, 4],
+ "parent_id": [None, None, None, None],
+ "answer": ["a", "b", "c", "d"],
+ "other_data": ["a", "b", "c", "d"],
+ }
+ )
+ CustomDataset.sanity_check(df_2, mock_config)
+
+ invalid_df_1 = pd.DataFrame(
+ {
+ "id": [1, 2, 3, 4],
+ "parent_id": [1, 2, 3, 4],
+ "answer": ["a", "b", "c", "d"],
+ "other_data": ["a", "b", "c", "d"],
+ }
+ )
+ with pytest.raises(
+ AssertionError,
+ match=r"Parent id column:.* is the same as id column for some rows",
+ ):
+ CustomDataset.sanity_check(invalid_df_1, mock_config)
+
+ invalid_df_2 = pd.DataFrame(
+ {
+ "id": [1, 2, 3, 4],
+ "parent_id": [2, 3, 4, 1],
+ "other_data": ["a", "b", "c", "d"],
+ }
+ )
+ with pytest.raises(
+ AssertionError,
+ match=re.escape(
+ "Did not find any conversation chain. "
+ "Please ensure that some parent ids are empty."
+ "\n"
+ "Conversations are chained using parent id, "
+ "start conversation record should have empty parent id."
+ "\n"
+ f"Parent id column checked:{mock_config.dataset.parent_id_column}"
+ ),
+ ):
+ CustomDataset.sanity_check(invalid_df_2, mock_config)
+
+
+@pytest.fixture
+def mock_auto_tokenizer():
+ # from
+ # https://github.com/deepset-ai/haystack/blob/b5aef24a7ebac55cb4ba492baf81a85598700b94/test/conftest.py#L908
+ with patch(
+ "transformers.AutoTokenizer.from_pretrained", autospec=True
+ ) as mock_from_pretrained:
+ yield mock_from_pretrained
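+# (Usage note, added for context:) tests that accept `mock_auto_tokenizer`
+# run with AutoTokenizer.from_pretrained patched to a MagicMock, so
+# CustomDataset can be constructed without downloading a real tokenizer.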
+
+
+def test_init(mock_auto_tokenizer):
+ df = pd.DataFrame(
+ {
+ "col_A": [1, 2, 3],
+ "col_B": [4, 5, 6],
+ }
+ )
+ cfg = mock.MagicMock()
+ cfg.dataset.prompt_column = "col_A"
+ cfg.dataset.answer_column = "col_B"
+ cfg.dataset.parent_id_column = "None"
+ cfg.dataset.system_column = "None"
+
+ cfg.dataset.text_system_start = ""
+ cfg.dataset.text_prompt_start = ""
+ cfg.dataset.text_answer_separator = ""
+
+ cfg.tokenizer.tokenizer_kwargs = '{"use_fast": true, "add_prefix_space": false}'
+
+ dataset = CustomDataset(df, cfg)
+
+ assert dataset.df.equals(df)
+ assert dataset.mode == "train"
+
+
+def test_getitem():
+ df = pd.DataFrame(
+ {
+ "prompt": ["prompt 1", "prompt 2", "prompt 3"],
+ "answer": ["answer 1", "answer 2", "answer 3"],
+ "parent_id": [None, 0, 1],
+ "system": ["system 1", "system 2", "system 3"],
+ "id": [0, 1, 2],
+ }
+ )
+
+ cfg = ConfigProblemBase(
+ dataset=ConfigNLPCausalLMDataset(
+ prompt_column=("prompt",),
+ answer_column="answer",
+ parent_id_column="parent_id",
+ system_column="system",
+ text_system_start="System:",
+ text_prompt_start="Prompt:",
+ text_answer_separator="Answer:",
+ add_eos_token_to_answer=True,
+ limit_chained_samples=True,
+ ),
+ tokenizer=ConfigNLPCausalLMTokenizer(max_length=513),
+ )
+
+ cfg.llm_backbone = "EleutherAI/pythia-2.8b-deduped"
+
+ dataset = CustomDataset(df, cfg)
+ assert len(dataset) == 1
+
+ result = dataset[0]
+ assert isinstance(result, dict)
+ assert set(result.keys()) == {
+ "labels",
+ "input_ids",
+ "attention_mask",
+ "prompt_input_ids",
+ "prompt_attention_mask",
+ "answer_input_ids",
+ "answer_attention_mask",
+ }
+
+ assert (
+ dataset.tokenizer.decode(result["input_ids"], skip_special_tokens=True)
+ == "System:system 1"
+ "Prompt:prompt 1"
+ "Answer:answer 1"
+ "Prompt:prompt 2"
+ "Answer:answer 2"
+ "Prompt:prompt 3"
+ "Answer:answer 3"
+ )
+
+ assert (
+ dataset.tokenizer.decode(result["prompt_input_ids"], skip_special_tokens=True)
+ == "System:system 1"
+ "Prompt:prompt 1"
+ "Answer:answer 1"
+ "Prompt:prompt 2"
+ "Answer:answer 2"
+ "Prompt:prompt 3"
+ "Answer:"
+ )
+
+ assert (
+ dataset.tokenizer.decode(result["input_ids"], skip_special_tokens=False)
+ == "<|endoftext|>" * 475 + "System:system 1"
+ "<|endoftext|>"
+ "Prompt:prompt 1"
+ "<|endoftext|>"
+ "Answer:answer 1"
+ "<|endoftext|>"
+ "Prompt:prompt 2"
+ "<|endoftext|>"
+ "Answer:answer 2"
+ "<|endoftext|>"
+ "Prompt:prompt 3"
+ "<|endoftext|>"
+ "Answer:answer 3"
+ "<|endoftext|>"
+ )
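+ # (Note added for clarity:) sequences are left-padded with the tokenizer's
+ # EOS token "<|endoftext|>" up to max_length=513, hence the 475 leading
+ # repeats; the single "<|endoftext|>" after each chunk is the EOS appended
+ # by the dataset's add_eos_token_to_answer/prompt/system options.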
+
+ assert result["input_ids"].shape == (513,)
+ assert result["prompt_input_ids"].shape == (513,)
+
+
+def test_getitem_no_chaining():
+ df = pd.DataFrame(
+ {
+ "prompt": ["prompt 1", "prompt 2", "prompt 3"],
+ "answer": ["answer 1", "answer 2", "answer 3"],
+ "parent_id": [None, 0, 1],
+ "system": ["system 1", "system 2", "system 3"],
+ "id": [0, 1, 2],
+ }
+ )
+
+ cfg = ConfigProblemBase(
+ dataset=ConfigNLPCausalLMDataset(
+ prompt_column=("prompt",),
+ answer_column="answer",
+ parent_id_column="None",
+ system_column="system",
+ text_system_start="System:",
+ text_prompt_start="Prompt:",
+ text_answer_separator="Answer:",
+ add_eos_token_to_answer=True,
+ ),
+ tokenizer=ConfigNLPCausalLMTokenizer(max_length=513),
+ )
+
+ cfg.llm_backbone = "EleutherAI/pythia-2.8b-deduped"
+
+ dataset = CustomDataset(df, cfg)
+ assert len(dataset) == 3
+
+ for i in range(3):
+ result = dataset[i]
+ assert isinstance(result, dict)
+ assert set(result.keys()) == {
+ "labels",
+ "input_ids",
+ "attention_mask",
+ "prompt_input_ids",
+ "prompt_attention_mask",
+ "answer_input_ids",
+ "answer_attention_mask",
+ }
+
+ assert (
+ dataset.tokenizer.decode(result["input_ids"], skip_special_tokens=True)
+ == f"System:system {i+1}"
+ f"Prompt:prompt {i+1}"
+ f"Answer:answer {i+1}"
+ )
+
+ assert (
+ dataset.tokenizer.decode(
+ result["prompt_input_ids"], skip_special_tokens=True
+ )
+ == f"System:system {i+1}"
+ f"Prompt:prompt {i+1}"
+ "Answer:"
+ )
+
+
+def test_encode():
+ df = pd.DataFrame(
+ {
+ "prompt": ["a", "a"],
+ "answer": ["b", "b"],
+ "parent_id": [None, 0],
+ "id": [0, 1],
+ }
+ )
+
+ cfg = ConfigProblemBase(
+ dataset=ConfigNLPCausalLMDataset(
+ prompt_column=("prompt",),
+ answer_column="answer",
+ parent_id_column="parent_id",
+ text_prompt_start="<|prompt|>",
+ text_answer_separator="<|answer|>",
+ add_eos_token_to_answer=True,
+ limit_chained_samples=True,
+ ),
+ tokenizer=ConfigNLPCausalLMTokenizer(
+ max_length=64,
+ tokenizer_kwargs='{"use_fast": true, "add_prefix_space": false}',
+ ),
+ )
+
+ cfg.llm_backbone = "h2oai/h2o-danube2-1.8b-base"
+
+ dataset = CustomDataset(df, cfg)
+ assert len(dataset) == 1
+
+ result = dataset[0]
+
+ labels = result["labels"]
+ assert (labels != -100).sum() == 4
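+ # (Note added for clarity, assuming default prompt masking:) prompt tokens
+ # are masked with -100, the standard cross-entropy ignore index, so the four
+ # unmasked label positions are the two "b" answer tokens plus the two EOS
+ # tokens appended to the answers.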
+
+ out = dataset.tokenizer.decode(result["input_ids"]).replace("</s>", "")
+ assert out == "<|prompt|>a<|answer|>b<|prompt|>a<|answer|>b"
+
+
+def test_encode_maxlength():
+ df = pd.DataFrame(
+ {
+ "prompt": ["a", "a"],
+ "answer": ["b", "a b"],
+ "parent_id": [None, 0],
+ "id": [0, 1],
+ }
+ )
+
+ cfg = ConfigProblemBase(
+ dataset=ConfigNLPCausalLMDataset(
+ prompt_column=("prompt",),
+ answer_column="answer",
+ parent_id_column="parent_id",
+ text_prompt_start="<|prompt|>",
+ text_answer_separator="<|answer|>",
+ add_eos_token_to_answer=True,
+ limit_chained_samples=True,
+ ),
+ tokenizer=ConfigNLPCausalLMTokenizer(
+ max_length=2,
+ tokenizer_kwargs='{"use_fast": true, "add_prefix_space": false}',
+ ),
+ )
+
+ cfg.llm_backbone = "h2oai/h2o-danube2-1.8b-base"
+
+ dataset = CustomDataset(df, cfg)
+ assert len(dataset) == 1
+
+ result = dataset[0]
+ out = dataset.tokenizer.decode(result["input_ids"]).replace("</s>", "")
+ assert out == "a b"
+
+
+def test_preprocess_dataframe_personalize():
+ df = pd.DataFrame(
+ {
+ "prompt": ["Open Assistant", "a"],
+ "answer": ["b", "LAION b"],
+ "parent_id": [None, 0],
+ "id": [0, 1],
+ }
+ )
+
+ cfg = ConfigProblemBase(
+ dataset=ConfigNLPCausalLMDataset(
+ prompt_column=("prompt",),
+ answer_column="answer",
+ parent_id_column="parent_id",
+ chatbot_author="H2O.ai",
+ chatbot_name="Danube",
+ personalize=True,
+ ),
+ )
+
+ cfg.llm_backbone = "h2oai/h2o-danube2-1.8b-base"
+
+ assert df["prompt"].str.contains("Open Assistant").any()
+ assert df["answer"].str.contains("LAION").any()
+
+ dataset = CustomDataset(df, cfg)
+ df = dataset.preprocess_dataframe(df, cfg)
+
+ assert df["prompt"].str.contains("Danube").any()
+ assert df["answer"].str.contains("H2O.ai").any()
+
+
+def test_preprocess_dataframe_no_personalize():
+ df = pd.DataFrame(
+ {
+ "prompt": ["Open Assistant", "a"],
+ "answer": ["b", "LAION b"],
+ "parent_id": [None, 0],
+ "id": [0, 1],
+ }
+ )
+
+ cfg = ConfigProblemBase(
+ dataset=ConfigNLPCausalLMDataset(
+ prompt_column=("prompt",),
+ answer_column="answer",
+ parent_id_column="parent_id",
+ chatbot_author="H2O.ai",
+ chatbot_name="Danube",
+ personalize=False,
+ ),
+ )
+
+ cfg.llm_backbone = "h2oai/h2o-danube2-1.8b-base"
+
+ assert df["prompt"].str.contains("Open Assistant").any()
+ assert df["answer"].str.contains("LAION").any()
+
+ dataset = CustomDataset(df, cfg)
+ df_processed = dataset.preprocess_dataframe(df.copy(), cfg)
+
+ assert df_processed["prompt"].str.contains("Open Assistant").any()
+ assert df_processed["answer"].str.contains("LAION").any()
+ assert not df_processed["prompt"].str.contains("Danube").any()
+ assert not df_processed["answer"].str.contains("H2O.ai").any()
+
+ assert df_processed.equals(df)
diff --git a/tests/src/datasets/test_text_dpo_modeling_ds.py b/tests/src/datasets/test_text_dpo_modeling_ds.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ee2696fbcca599f32e26241cc3a6bfd8f62d868
--- /dev/null
+++ b/tests/src/datasets/test_text_dpo_modeling_ds.py
@@ -0,0 +1,286 @@
+import numpy as np
+import pandas as pd
+import pytest
+import torch
+from tqdm import tqdm
+
+from llm_studio.python_configs.text_causal_language_modeling_config import (
+ ConfigNLPCausalLMTokenizer,
+)
+from llm_studio.python_configs.text_dpo_modeling_config import (
+ ConfigDPODataset,
+ ConfigProblemBase,
+)
+from llm_studio.src.datasets.text_dpo_modeling_ds import CustomDataset
+
+
+@pytest.fixture
+def df():
+ return pd.DataFrame(
+ {
+ "prompt_column": [f"prompt {i}" for i in range(200)],
+ "answer_column": [f"chosen_response {i}" for i in range(200)],
+ "rejected_answer_column": [f"rejected_response {i}" for i in range(200)],
+ }
+ )
+
+
+@pytest.fixture
+def df_with_conversation_chain_ids():
+ """
+ Create a dataframe with conversation chain ids, e.g.:
+ prompt_column answer_column rejected_answer_column parent_id_column id
+ 0 prompt 1 response 1 response 1 None 1
+ 1 prompt 2 response 2 response 2 1 2
+ 2 prompt 3 response 3 response 3 2 3
+ 3 prompt 4 response 4 response 4 3 4
+ 4 prompt 5 chosen_response 5 rejected_response 5 4 5
+ 5 prompt 6 response 6 response 6 None 6
+ """
+ ids = [str(i + 1) for i in range(200)]
+
+ parent_ids = np.array(ids, dtype=object).reshape(-1, 5)
+ parent_ids[:, -1] = "None"
+ parent_ids = np.roll(parent_ids, 1, 1).reshape(-1)
+
+ # per group of five, e.g. ids: ["1", "2", "3", "4", "5"]
+ # parent_ids: ["None", "1", "2", "3", "4"]
+ # conversation: 1 -> 2 -> 3 -> 4 -> 5
+ chosen_responses = [
+ f"chosen_response {idx}" if int(idx) % 5 == 0 else f"response {idx}"
+ for idx in ids
+ ]
+ rejected_responses = [
+ f"rejected_response {idx}" if int(idx) % 5 == 0 else f"response {idx}"
+ for idx in ids
+ ]
+ return pd.DataFrame(
+ {
+ "prompt_column": [f"prompt {idx}" for idx in ids],
+ "answer_column": chosen_responses,
+ "rejected_answer_column": rejected_responses,
+ "parent_id_column": parent_ids,
+ "id": ids,
+ }
+ )
+
+
+def test_dataset_conversation_chain_is_correct(df_with_conversation_chain_ids):
+ cfg = ConfigProblemBase(
+ dataset=ConfigDPODataset(
+ prompt_column=("prompt_column",),
+ answer_column="answer_column",
+ rejected_answer_column="rejected_answer_column",
+ parent_id_column="parent_id_column",
+ )
+ )
+ dataset = CustomDataset(df_with_conversation_chain_ids, cfg, mode="train")
+
+ # Check for right formatting, e.g.:
+ # dataset.conversation_chain_handler_chosen[0] ==
+ # {
+ # "prompts": ["prompt 1", "prompt 2", "prompt 3", "prompt 4", "prompt 5"],
+ # "answers": [
+ # "response 1",
+ # "response 2",
+ # "response 3",
+ # "response 4",
+ # "chosen_response 5",
+ # ],
+ # "systems": ["", "", "", "", ""],
+ # }
+
+ for idx in range(200 // 5):
+ for name, conversation_chain_handler in zip(
+ ["chosen", "rejected"],
+ [
+ dataset.conversation_chain_handler,
+ dataset.conversation_chain_handler_rejected,
+ ],
+ ):
+ input_text_dict = conversation_chain_handler[idx]
+ expected = {
+ "prompts": [f"prompt {i + 1}" for i in range(idx * 5, (idx + 1) * 5)],
+ "answers": [
+ f"response {i + 1}" for i in range(idx * 5, (idx + 1) * 5 - 1)
+ ]
+ + [f"{name}_response {idx * 5 + 5}"],
+ "systems": [""] * 5,
+ }
+
+ for key in expected:
+ assert input_text_dict[key] == expected[key], (
+ input_text_dict[key],
+ expected[key],
+ name,
+ )
+
+
+def test_dataset_label_is_correct(df_with_conversation_chain_ids):
+ cfg = ConfigProblemBase(
+ dataset=ConfigDPODataset(
+ prompt_column=("prompt_column",),
+ answer_column="answer_column",
+ rejected_answer_column="rejected_answer_column",
+ parent_id_column="parent_id_column",
+ )
+ )
+ dataset = CustomDataset(df_with_conversation_chain_ids, cfg, mode="train")
+
+ for idx, item in enumerate(dataset):
+ sample = dataset[idx]
+ chosen_response = dataset.tokenizer.decode(
+ sample["chosen_labels"][sample["chosen_labels"] != -100],
+ skip_special_tokens=True,
+ )
+ rejected_response = dataset.tokenizer.decode(
+ sample["rejected_labels"][sample["rejected_labels"] != -100],
+ skip_special_tokens=True,
+ )
+ prompt = dataset.tokenizer.decode(
+ sample["prompt_input_ids"][sample["prompt_input_ids"] != 0],
+ skip_special_tokens=True,
+ )
+
+ assert (
+ prompt == f"<|prompt|>prompt {idx * 5 + 1}"
+ f"<|answer|>response {idx * 5 + 1}"
+ f"<|prompt|>prompt {idx * 5 + 2}"
+ f"<|answer|>response {idx * 5 + 2}"
+ f"<|prompt|>prompt {idx * 5 + 3}"
+ f"<|answer|>response {idx * 5 + 3}"
+ f"<|prompt|>prompt {idx * 5 + 4}"
+ f"<|answer|>response {idx * 5 + 4}"
+ f"<|prompt|>prompt {idx * 5 + 5}"
+ "<|answer|>"
+ )
+ assert chosen_response == f"chosen_response {idx * 5 + 5}"
+ assert rejected_response == f"rejected_response {idx * 5 + 5}"
+
+
+def test_dataloader_has_correct_keys(df):
+ cfg = ConfigProblemBase(
+ dataset=ConfigDPODataset(
+ prompt_column=("prompt_column",),
+ answer_column="answer_column",
+ rejected_answer_column="rejected_answer_column",
+ parent_id_column="None",
+ )
+ )
+
+ dataset = CustomDataset(df, cfg, mode="train")
+ dataloader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)
+
+ for idx, batch in tqdm(enumerate(dataloader), total=len(dataloader)):
+ for key in batch:
+ if idx != len(dataloader) - 1:
+ assert batch[key].size(0) == 16, (
+ key,
+ batch[key].shape,
+ )
+
+ keys = [
+ "chosen_input_ids",
+ "chosen_attention_mask",
+ "chosen_labels",
+ "rejected_input_ids",
+ "rejected_attention_mask",
+ "rejected_labels",
+ "prompt_input_ids",
+ "prompt_attention_mask",
+ ]
+ assert set(batch.keys()) - set(keys) == set()
+ assert set(keys) - set(batch.keys()) == set()
+
+
+def test_empty_answer_dataset_throws_no_error(df):
+ cfg = ConfigProblemBase(
+ dataset=ConfigDPODataset(
+ prompt_column=("prompt_column",),
+ answer_column="answer_column",
+ rejected_answer_column="rejected_answer_column",
+ add_eos_token_to_answer=False,
+ add_eos_token_to_prompt=False,
+ add_eos_token_to_system=False,
+ ),
+ )
+ for column in ["prompt_column", "answer_column", "rejected_answer_column"]:
+ values = df[column].values
+ df[column] = ""
+ dataset = CustomDataset(df, cfg, mode="train")
+ [dataset[i] for i in range(len(dataset))]
+ df[column] = values
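+ # (Note added for clarity:) the list comprehension above is the actual
+ # check; the test passes as long as indexing every sample raises no
+ # exception even when a whole column is empty.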
+
+
+@pytest.fixture
+def df_single_prompt():
+ prompt = """when ordering your sandstones, you select which colour scale you would want.
+ it could be e.g. a 100% from grey/sand mix, or 80% fra beige/yellow mixed with 20% from black/brown.
+ This is all lower case. Can you fix that?"""
+ system = """You are an AI assistant. User will you give you a task. Your goal is to complete the task as faithfully as you can.
+While performing the task think step-by-step and justify your steps."""
+ answer = """When ordering your sandstones, you select which color scale you would want. It could be, for example, a 100% from grey/sand mix, or 80% from beige/yellow mixed with 20% from black/brown.
+Step 1: Capitalize the first letter of the sentence.
+Step 2: Correct the spelling of "color" (assuming American English usage).
+Step 3: Replace ", e.g." with "for example" to clarify the sentence.
+Step 4: Capitalize "a" in "100% from a grey/sand mix"
+Step 5: Ensure the proper usage of words and punctuation throughout the revised sentence."""
+ return pd.DataFrame(
+ {
+ "prompt": [prompt],
+ "system": [system],
+ "answer": [answer],
+ "rejected_answer": ["I cannot do that."],
+ }
+ )
+
+
+def generate_causal_lm_model_input_ids(df):
+ from llm_studio.python_configs.text_causal_language_modeling_config import (
+ ConfigNLPCausalLMDataset,
+ )
+ from llm_studio.python_configs.text_causal_language_modeling_config import (
+ ConfigProblemBase as ConfigCausalLMProblemBase,
+ )
+ from llm_studio.src.datasets.text_causal_language_modeling_ds import (
+ CustomDataset as CausalLMCustomDataset,
+ )
+
+ cfg = ConfigCausalLMProblemBase(
+ llm_backbone="h2oai/h2ogpt-4096-llama2-7b",
+ dataset=ConfigNLPCausalLMDataset(
+ system_column="system",
+ prompt_column=("prompt",),
+ answer_column="answer",
+ ),
+ tokenizer=ConfigNLPCausalLMTokenizer(max_length=512),
+ )
+ dataset = CausalLMCustomDataset(df, cfg, mode="train")
+ return dataset[0]
+
+
+def test_dataset_prompt_ids_are_the_same_as_for_causal_language_modeling(
+ df_single_prompt,
+):
+ """
+ DPO model should generate the same prompts as causal language modeling
+ """
+ generated_text_causal_lm = generate_causal_lm_model_input_ids(df_single_prompt)
+
+ cfg = ConfigProblemBase(
+ llm_backbone="h2oai/h2ogpt-4096-llama2-7b",
+ dataset=ConfigDPODataset(
+ system_column="system",
+ prompt_column=("prompt",),
+ answer_column="answer",
+ rejected_answer_column="rejected_answer",
+ ),
+ tokenizer=ConfigNLPCausalLMTokenizer(max_length=512),
+ )
+ dataset = CustomDataset(df_single_prompt, cfg, mode="train")
+ generated_text = dataset[0]
+
+ for key in ["prompt_input_ids", "prompt_attention_mask"]:
+ assert torch.all(
+ generated_text_causal_lm[key] == generated_text[key]
+ ), f"{key} is not the same"
diff --git a/tests/src/metrics/test_text_causal_classification_modeling_metrics.py b/tests/src/metrics/test_text_causal_classification_modeling_metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..feb7e58d2533924e0e72da9bef996828b475a098
--- /dev/null
+++ b/tests/src/metrics/test_text_causal_classification_modeling_metrics.py
@@ -0,0 +1,320 @@
+from unittest.mock import MagicMock
+
+import numpy as np
+import pandas as pd
+import pytest
+from scipy.special import softmax
+from sklearn.metrics import log_loss, roc_auc_score
+
+from llm_studio.src.metrics.text_causal_classification_modeling_metrics import (
+ accuracy_score,
+ auc_score,
+ logloss_score,
+)
+
+
+@pytest.fixture
+def mock_val_df():
+ return pd.DataFrame()
+
+
+def test_accuracy_score_binary_perfect_match(mock_val_df):
+ results = {
+ "predictions": [[1], [0], [1], [0]],
+ "target_text": ["1", "0", "1", "0"],
+ }
+ cfg = MagicMock()
+
+ score = accuracy_score(cfg, results, mock_val_df)
+
+ assert np.array_equal(score, np.array([1.0, 1.0, 1.0, 1.0]))
+
+
+def test_accuracy_score_binary_no_match(mock_val_df):
+ results = {
+ "predictions": [[1], [1], [1], [1]],
+ "target_text": ["0", "0", "0", "0"],
+ }
+ cfg = MagicMock()
+
+ score = accuracy_score(cfg, results, mock_val_df)
+
+ assert np.array_equal(score, np.array([0.0, 0.0, 0.0, 0.0]))
+
+
+def test_accuracy_score_binary_mixed_results(mock_val_df):
+ results = {
+ "predictions": [[1], [0], [1], [0]],
+ "target_text": ["1", "1", "0", "0"],
+ }
+ cfg = MagicMock()
+
+ score = accuracy_score(cfg, results, mock_val_df)
+
+ assert np.array_equal(score, np.array([1.0, 0.0, 0.0, 1.0]))
+
+
+def test_accuracy_score_multiclass_perfect_match(mock_val_df):
+ results = {
+ "predictions": [[0], [1], [2], [3], [4]],
+ "target_text": ["0", "1", "2", "3", "4"],
+ }
+ cfg = MagicMock()
+
+ score = accuracy_score(cfg, results, mock_val_df)
+
+ assert np.array_equal(score, np.array([1.0, 1.0, 1.0, 1.0, 1.0]))
+
+
+def test_accuracy_score_multiclass_no_match(mock_val_df):
+ results = {
+ "predictions": [[1], [2], [3], [4], [0]],
+ "target_text": ["0", "1", "2", "3", "4"],
+ }
+ cfg = MagicMock()
+
+ score = accuracy_score(cfg, results, mock_val_df)
+
+ assert np.array_equal(score, np.array([0.0, 0.0, 0.0, 0.0, 0.0]))
+
+
+def test_accuracy_score_multiclass_mixed_results(mock_val_df):
+ results = {
+ "predictions": [[0], [1], [2], [2], [4]],
+ "target_text": ["0", "1", "2", "3", "3"],
+ }
+ cfg = MagicMock()
+
+ score = accuracy_score(cfg, results, mock_val_df)
+
+ assert np.array_equal(score, np.array([1.0, 1.0, 1.0, 0.0, 0.0]))
+
+
+def test_accuracy_score_invalid_input_empty(mock_val_df):
+ results = {"predictions": [], "target_text": []}
+ cfg = MagicMock()
+
+ with pytest.raises(ValueError):
+ accuracy_score(cfg, results, mock_val_df)
+
+
+def test_accuracy_score_invalid_input_unequal_length(mock_val_df):
+ results = {"predictions": [[1], [0]], "target_text": ["1", "0", "2"]}
+ cfg = MagicMock()
+
+ with pytest.raises(ValueError):
+ accuracy_score(cfg, results, mock_val_df)
+
+
+def test_accuracy_score_ignore_raw_results(mock_val_df):
+ results = {"predictions": [[1], [0], [2]], "target_text": ["1", "1", "2"]}
+ cfg = MagicMock()
+ raw_results = True
+
+ score = accuracy_score(cfg, results, mock_val_df, raw_results)
+
+ assert np.array_equal(score, np.array([1.0, 0.0, 1.0]))
+
+
+def test_accuracy_score_large_class_numbers(mock_val_df):
+ results = {
+ "predictions": [[10], [20], [30], [40], [50]],
+ "target_text": ["10", "20", "30", "40", "60"],
+ }
+ cfg = MagicMock()
+
+ score = accuracy_score(cfg, results, mock_val_df)
+
+ assert np.array_equal(score, np.array([1.0, 1.0, 1.0, 1.0, 0.0]))
+
+
+def test_auc_score_binary_classification(mock_val_df):
+ cfg = MagicMock()
+ cfg.dataset.num_classes = 2
+ results = {
+ "logits": [[0.1, 0.9], [0.8, 0.2], [0.3, 0.7], [0.9, 0.1]],
+ "target_text": ["1", "0", "1", "0"],
+ }
+
+ score = auc_score(cfg, results, mock_val_df)
+
+ expected_score = roc_auc_score([1, 0, 1, 0], [0.9, 0.2, 0.7, 0.1])
+ assert np.isclose(score, expected_score)
+
+
+def test_auc_score_multiclass_classification(mock_val_df):
+ cfg = MagicMock()
+ cfg.dataset.num_classes = 3
+ results = {
+ "logits": [[0.1, 0.8, 0.1], [0.7, 0.2, 0.1], [0.1, 0.1, 0.8], [0.3, 0.3, 0.4]],
+ "target_text": ["1", "0", "2", "2"],
+ }
+
+ score = auc_score(cfg, results, mock_val_df)
+
+ expected_score = roc_auc_score(
+ np.eye(3)[[1, 0, 2, 2]], np.array(results["logits"]), multi_class="ovr"
+ )
+ assert np.allclose(score, expected_score)
+
+
+def test_auc_score_invalid_input_empty(mock_val_df):
+ cfg = MagicMock()
+ cfg.dataset.num_classes = 2
+ results = {"logits": [], "target_text": []}
+
+ with pytest.raises(ValueError):
+ auc_score(cfg, results, mock_val_df)
+
+
+def test_auc_score_invalid_input_unequal_length(mock_val_df):
+ cfg = MagicMock()
+ cfg.dataset.num_classes = 2
+ results = {"logits": [[0.1, 0.9], [0.8, 0.2]], "target_text": ["1", "2", "0", "2"]}
+
+ with pytest.raises(ValueError):
+ auc_score(cfg, results, mock_val_df)
+
+
+def test_auc_score_ignore_val_df_and_raw_results(mock_val_df):
+ cfg = MagicMock()
+ cfg.dataset.num_classes = 2
+ results = {"logits": [[0.1, 0.9], [0.8, 0.2]], "target_text": ["1", "0"]}
+ raw_results = True
+
+ score = auc_score(cfg, results, "This should be ignored", raw_results)
+
+ expected_score = roc_auc_score([1, 0], [0.9, 0.2])
+ assert np.isclose(score, expected_score)
+
+
+def test_auc_score_different_number_of_classes(mock_val_df):
+ cfg = MagicMock()
+ cfg.dataset.num_classes = 4
+ results = {
+ "logits": [
+ [0.1, 0.7, 0.1, 0.1],
+ [0.6, 0.2, 0.1, 0.1],
+ [0.1, 0.1, 0.7, 0.1],
+ [0.2, 0.2, 0.3, 0.3],
+ ],
+ "target_text": ["1", "0", "2", "3"],
+ }
+
+ score = auc_score(cfg, results, mock_val_df)
+
+ expected_score = roc_auc_score(
+ np.eye(4)[[1, 0, 2, 3]], np.array(results["logits"]), multi_class="ovr"
+ )
+ assert np.allclose(score, expected_score)
+
+
+def test_logloss_score_binary_classification(mock_val_df):
+ cfg = MagicMock()
+ cfg.dataset.num_classes = 2
+ cfg.dataset.answer_column = ["label"]
+ results = {
+ "probabilities": softmax(
+ [[0.1, 0.9], [0.8, 0.2], [0.3, 0.7], [0.9, 0.1]], axis=1
+ ),
+ "target_text": ["1", "0", "1", "0"],
+ }
+
+ score = logloss_score(cfg, results, mock_val_df)
+
+ expected_score = log_loss([1, 0, 1, 0], results["probabilities"])
+ assert np.isclose(score, expected_score)
+
+
+def test_logloss_score_multiclass_classification(mock_val_df):
+ cfg = MagicMock()
+ cfg.dataset.num_classes = 3
+ cfg.dataset.answer_column = ["label"]
+ results = {
+ "probabilities": softmax(
+ [[0.1, 0.8, 0.1], [0.7, 0.2, 0.1], [0.1, 0.1, 0.8], [0.3, 0.3, 0.4]], axis=1
+ ),
+ "target_text": ["1", "0", "2", "2"],
+ }
+
+ score = logloss_score(cfg, results, mock_val_df)
+
+ expected_score = log_loss(np.eye(3)[[1, 0, 2, 2]], results["probabilities"])
+ assert np.isclose(score, expected_score)
+
+
+def test_logloss_score_multilabel_classification(mock_val_df):
+ cfg = MagicMock()
+ cfg.dataset.num_classes = 3
+ cfg.dataset.answer_column = ["label1", "label2", "label3"]
+ results = {
+ "probabilities": [
+ [0.1, 0.8, 0.1],
+ [0.7, 0.2, 0.1],
+ [0.1, 0.1, 0.8],
+ [0.3, 0.3, 0.4],
+ ],
+ "target_text": ["1,0,1", "0,1,0", "1,1,0", "0,0,1"],
+ }
+
+ score = logloss_score(cfg, results, mock_val_df)
+
+ expected_scores = []
+ for i in range(3):
+ expected_scores.append(
+ log_loss(
+ [int(t.split(",")[i]) for t in results["target_text"]],
+ [p[i] for p in results["probabilities"]],
+ )
+ )
+ expected_score = np.mean(expected_scores)
+ assert np.isclose(score, expected_score)
+
+
+def test_logloss_score_invalid_input_empty(mock_val_df):
+ cfg = MagicMock()
+ cfg.dataset.num_classes = 2
+ results = {"probabilities": [], "target_text": []}
+
+ with pytest.raises(ValueError):
+ logloss_score(cfg, results, mock_val_df)
+
+
+def test_logloss_score_invalid_input_unequal_length(mock_val_df):
+ cfg = MagicMock()
+ cfg.dataset.num_classes = 2
+ results = {
+ "probabilities": [[0.1, 0.9], [0.8, 0.2]],
+ "target_text": ["1", "2", "0"],
+ }
+
+ with pytest.raises(ValueError):
+ logloss_score(cfg, results, mock_val_df)
+
+
+def test_logloss_score_ignore_val_df_and_raw_results(mock_val_df):
+ cfg = MagicMock()
+ cfg.dataset.num_classes = 2
+ cfg.dataset.answer_column = ["label"]
+ results = {"probabilities": [[0.1, 0.9], [0.8, 0.2]], "target_text": ["1", "0"]}
+ raw_results = True
+
+ score = logloss_score(cfg, results, "This should be ignored", raw_results)
+
+ expected_score = log_loss([1, 0], results["probabilities"])
+ assert np.isclose(score, expected_score)
+
+
+def test_logloss_score_extreme_probabilities(mock_val_df):
+ cfg = MagicMock()
+ cfg.dataset.num_classes = 2
+ cfg.dataset.answer_column = ["label"]
+ results = {
+ "probabilities": [[0.0001, 0.9999], [0.9999, 0.0001]],
+ "target_text": ["1", "0"],
+ }
+
+ score = logloss_score(cfg, results, mock_val_df)
+
+ expected_score = log_loss([1, 0], results["probabilities"])
+ assert np.isclose(score, expected_score)
diff --git a/tests/src/metrics/test_text_causal_language_modeling_metrics.py b/tests/src/metrics/test_text_causal_language_modeling_metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c012902f27a1c76c9391e9e51ed65ceebf306b7
--- /dev/null
+++ b/tests/src/metrics/test_text_causal_language_modeling_metrics.py
@@ -0,0 +1,91 @@
+from unittest.mock import MagicMock
+
+import numpy as np
+import pandas as pd
+import pytest
+
+from llm_studio.src.metrics.text_causal_language_modeling_metrics import sacrebleu_score
+
+
+@pytest.fixture
+def mock_val_df():
+ return pd.DataFrame()
+
+
+def test_sacrebleu_score_perfect_match(mock_val_df):
+ cfg = MagicMock()
+ results = {
+ "predicted_text": ["Hello world", "Python is great"],
+ "target_text": ["Hello world", "Python is great"],
+ }
+
+ scores = sacrebleu_score(cfg, results, mock_val_df)
+
+ assert np.allclose(scores, np.array([100.0, 100.0]))
+
+
+def test_sacrebleu_score_partial_match(mock_val_df):
+ cfg = MagicMock()
+ results = {
+ "predicted_text": ["Hello universe", "Python is awesome"],
+ "target_text": ["Hello world", "Python is great"],
+ }
+
+ scores = sacrebleu_score(cfg, results, mock_val_df)
+
+ assert np.allclose(scores, np.array([50.0, 55.03212081]))
+
+
+def test_sacrebleu_score_no_match(mock_val_df):
+ cfg = MagicMock()
+ results = {
+ "predicted_text": ["Goodbye universe", "What a day"],
+ "target_text": ["Hello world", "Python is great"],
+ }
+
+ scores = sacrebleu_score(cfg, results, mock_val_df)
+
+ assert np.allclose(scores, np.array([0.0, 0.0]))
+
+
+def test_sacrebleu_score_all_empty_target(mock_val_df):
+ cfg = MagicMock()
+ results = {
+ "predicted_text": ["Hello world", "Python is great"],
+ "target_text": ["", ""],
+ }
+
+ scores = sacrebleu_score(cfg, results, mock_val_df)
+
+ assert np.allclose(scores, np.array([0.0, 0.0]))
+
+
+def test_sacrebleu_score_one_empty_target(mock_val_df):
+ cfg = MagicMock()
+ results = {
+ "predicted_text": ["Hello world", "Python is great"],
+ "target_text": ["", "Python is great"],
+ }
+
+ scores = sacrebleu_score(cfg, results, mock_val_df)
+
+ assert np.allclose(scores, np.array([0.0, 100.0]))
+
+
+def test_sacrebleu_score_invalid_input_empty(mock_val_df):
+ cfg = MagicMock()
+ results = {"predicted_text": [], "target_text": []}
+
+ with pytest.raises(ValueError):
+ sacrebleu_score(cfg, results, mock_val_df)
+
+
+def test_sacrebleu_score_invalid_input_different_lengths(mock_val_df):
+ cfg = MagicMock()
+ results = {
+ "predicted_text": ["Hello world", "Python", "is", "great"],
+ "target_text": ["Hello universe", "Python is awesome"],
+ }
+
+ with pytest.raises(ValueError):
+ sacrebleu_score(cfg, results, mock_val_df)
diff --git a/tests/src/metrics/test_text_causal_regression_modeling_metrics.py b/tests/src/metrics/test_text_causal_regression_modeling_metrics.py
new file mode 100644
index 0000000000000000000000000000000000000000..30b3d51253f6ac51d1f3a705074627d49fb6f167
--- /dev/null
+++ b/tests/src/metrics/test_text_causal_regression_modeling_metrics.py
@@ -0,0 +1,138 @@
+from unittest.mock import MagicMock
+
+import numpy as np
+import pandas as pd
+import pytest
+
+from llm_studio.src.metrics.text_causal_regression_modeling_metrics import (
+ Metrics,
+ mae_score,
+ mse_score,
+)
+
+
+@pytest.fixture
+def mock_val_df():
+ return pd.DataFrame()
+
+
+@pytest.fixture
+def mock_cfg():
+ return MagicMock()
+
+
+def test_mse_score_single_value():
+ results = {
+ "predictions": [[1.0], [2.0], [3.0], [4.0]],
+ "target_text": ["2.0", "2.0", "2.0", "2.0"],
+ }
+ cfg = MagicMock()
+ val_df = pd.DataFrame()
+
+ score = mse_score(cfg, results, val_df)
+
+ expected = np.array([1.0, 0.0, 1.0, 4.0])
+ np.testing.assert_almost_equal(score, expected)
+
+
+def test_mse_score_multiple_values():
+ results = {
+ "predictions": [[1.0, 2.0], [3.0, 4.0]],
+ "target_text": ["2.0,3.0", "3.0,3.0"],
+ }
+ cfg = MagicMock()
+ val_df = pd.DataFrame()
+
+ score = mse_score(cfg, results, val_df)
+
+ expected = np.array([1.0, 0.5])
+ np.testing.assert_almost_equal(score, expected)
+
+
+def test_mae_score_single_value():
+ results = {
+ "predictions": [[1.0], [2.0], [3.0], [4.0]],
+ "target_text": ["2.0", "2.0", "2.0", "2.0"],
+ }
+ cfg = MagicMock()
+ val_df = pd.DataFrame()
+
+ score = mae_score(cfg, results, val_df)
+
+ expected = np.array([1.0, 0.0, 1.0, 2.0])
+ np.testing.assert_almost_equal(score, expected)
+
+
+def test_mae_score_multiple_values():
+ results = {
+ "predictions": [[1.0, 2.0], [3.0, 4.0]],
+ "target_text": ["2.0,3.0", "3.0,3.0"],
+ }
+ cfg = MagicMock()
+ val_df = pd.DataFrame()
+
+ score = mae_score(cfg, results, val_df)
+
+ expected = np.array([1.0, 0.5])
+ np.testing.assert_almost_equal(score, expected)
+
+
+def test_metrics_names():
+ assert Metrics.names() == ["MAE", "MSE"]
+
+
+def test_metrics_get_mse():
+ metric = Metrics.get("MSE")
+ assert metric[0] == mse_score
+ assert metric[1] == "min"
+ assert metric[2] == "mean"
+
+
+def test_metrics_get_mae():
+ metric = Metrics.get("MAE")
+ assert metric[0] == mae_score
+ assert metric[1] == "min"
+ assert metric[2] == "mean"
+
+
+def test_metrics_get_unknown():
+ metric = Metrics.get("Unknown")
+ assert metric[0] == mse_score
+ assert metric[1] == "min"
+ assert metric[2] == "mean"
+
+
+def test_mse_score_empty_input():
+ results = {"predictions": [], "target_text": []}
+ cfg = MagicMock()
+ val_df = pd.DataFrame()
+
+ with pytest.raises(ValueError):
+ mse_score(cfg, results, val_df)
+
+
+def test_mae_score_empty_input():
+ results = {"predictions": [], "target_text": []}
+ cfg = MagicMock()
+ val_df = pd.DataFrame()
+
+ with pytest.raises(ValueError):
+ mae_score(cfg, results, val_df)
+
+
+def test_mse_score_ignore_raw_results(mock_cfg, mock_val_df):
+ results = {"predictions": [[1.0], [2.0]], "target_text": ["2.0", "2.0"]}
+
+ score_without_raw = mse_score(mock_cfg, results, mock_val_df)
+ score_with_raw = mse_score(mock_cfg, results, mock_val_df, raw_results=True)
+
+ np.testing.assert_array_equal(score_without_raw, score_with_raw)
+
+
+def test_mae_score_ignore_raw_results(mock_cfg, mock_val_df):
+ results = {"predictions": [[1.0], [2.0]], "target_text": ["2.0", "2.0"]}
+
+ score_without_raw = mae_score(mock_cfg, results, mock_val_df)
+ score_with_raw = mae_score(mock_cfg, results, mock_val_df, raw_results=True)
+
+ np.testing.assert_array_equal(score_without_raw, score_with_raw)
diff --git a/tests/src/models/test_dpo_modeling_model.py b/tests/src/models/test_dpo_modeling_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..d968c29ea15044c085a0b47e262f09982c86580d
--- /dev/null
+++ b/tests/src/models/test_dpo_modeling_model.py
@@ -0,0 +1,259 @@
+import random
+from contextlib import contextmanager
+from dataclasses import dataclass
+from unittest.mock import patch
+
+import pandas as pd
+import pytest
+import torch
+import torch.nn as nn
+
+from llm_studio.python_configs.text_causal_language_modeling_config import (
+ ConfigNLPCausalLMPrediction,
+ ConfigNLPCausalLMTokenizer,
+)
+from llm_studio.python_configs.text_dpo_modeling_config import (
+ ConfigDPODataset,
+ ConfigProblemBase,
+)
+from llm_studio.src.datasets.text_dpo_modeling_ds import CustomDataset
+from llm_studio.src.models.text_dpo_modeling_model import Model
+from llm_studio.src.utils.data_utils import batch_padding
+from llm_studio.train import run_eval
+
+
+@pytest.fixture
+def df():
+ prompt = """when ordering your sandstones, you select which colour scale you would want.
+ it could be e.g. a 100% from grey/sand mix, or 80% fra beige/yellow mixed with 20% from black/brown.
+ This is all lower case. Can you fix that?"""
+ system = """You are an AI assistant. User will you give you a task. Your goal is to complete the task as faithfully as you can.
+While performing the task think step-by-step and justify your steps."""
+ answer = """When ordering your sandstones, you select which color scale you would want. It could be, for example, a 100% from grey/sand mix, or 80% from beige/yellow mixed with 20% from black/brown.
+
+Step 1: Capitalize the first letter of the sentence.
+
+Step 2: Correct the spelling of "color" (assuming American English usage).
+
+Step 3: Replace ", e.g." with "for example" to clarify the sentence.
+
+Step 4: Capitalize "a" in "100% from a grey/sand mix"
+
+Step 5: Ensure the proper usage of words and punctuation throughout the revised sentence."""
+ return pd.DataFrame(
+ {
+ "prompt": [prompt],
+ "system": [system],
+ "answer": [answer],
+ "rejected_answer": ["I cannot do that."],
+ }
+ )
+
+
+def generate_causal_lm_model_text(df):
+ from llm_studio.python_configs.text_causal_language_modeling_config import (
+ ConfigNLPCausalLMDataset,
+ )
+ from llm_studio.python_configs.text_causal_language_modeling_config import (
+ ConfigProblemBase as ConfigCausalLMProblemBase,
+ )
+ from llm_studio.src.datasets.text_causal_language_modeling_ds import (
+ CustomDataset as CausalLMCustomDataset,
+ )
+ from llm_studio.src.models.text_causal_language_modeling_model import (
+ Model as CausalLMModel,
+ )
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ cfg = ConfigCausalLMProblemBase(
+ llm_backbone="h2oai/llama2-0b-unit-test",
+ dataset=ConfigNLPCausalLMDataset(
+ system_column="system",
+ prompt_column=("prompt",),
+ answer_column="answer_column",
+ ),
+ tokenizer=ConfigNLPCausalLMTokenizer(max_length=512),
+ )
+ cfg.architecture.backbone_dtype = "float32"
+
+ dataset = CausalLMCustomDataset(df, cfg, mode="train")
+ model = CausalLMModel(cfg).to(device).eval()
+ dataloader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)
+
+ batch = next(iter(dataloader))
+ batch = {k: v.to(device) for k, v in batch.items()}
+ batch_padding(
+ cfg,
+ batch,
+ mask_key="prompt_attention_mask",
+ pad_keys=[
+ "prompt_input_ids",
+ "prompt_attention_mask",
+ "prompt_special_tokens_mask",
+ ],
+ )
+ with torch.no_grad():
+ generated_text = dataset.tokenizer.decode(model.generate(batch, cfg)[0])
+
+ return generated_text
+
+
+def test_generation_is_the_same_as_for_causal_language_modeling(df):
+ """
+ DPO model should generate the same output text as causal language modeling
+ """
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ generated_text_causal_lm = generate_causal_lm_model_text(df)
+
+ cfg = ConfigProblemBase(
+ llm_backbone="h2oai/llama2-0b-unit-test",
+ dataset=ConfigDPODataset(
+ system_column="system",
+ prompt_column=("prompt",),
+ answer_column="answer_column",
+ rejected_answer_column="rejected_answer",
+ ),
+ tokenizer=ConfigNLPCausalLMTokenizer(max_length=512),
+ )
+ cfg.architecture.backbone_dtype = "float32"
+
+ dataset = CustomDataset(df, cfg, mode="train")
+ model = Model(cfg).eval().to(device)
+ dataloader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)
+
+ batch = next(iter(dataloader))
+ batch = {k: v.to(device) for k, v in batch.items()}
+ batch_padding(
+ cfg,
+ batch,
+ mask_key="prompt_attention_mask",
+ pad_keys=[
+ "prompt_input_ids",
+ "prompt_attention_mask",
+ "prompt_special_tokens_mask",
+ ],
+ )
+ with torch.no_grad():
+ generated_text = dataset.tokenizer.decode(model.generate(batch, cfg)[0])
+
+ assert (
+ generated_text == generated_text_causal_lm
+ ), "Generated text is not the same as from causal LM model:" "{}\n{}".format(
+ generated_text, generated_text_causal_lm
+ )
+
+
+@pytest.fixture
+def df2():
+ # create a list of all lowercase letters
+ alphabet = [chr(i) for i in range(97, 123)]
+
+ # create random strings from the alphabet
+ prompts = ["".join(random.choice(alphabet) for _ in range(10)) for _ in range(10)]
+ systems = ["".join(random.choice(alphabet) for _ in range(10)) for _ in range(10)]
+ answers = ["".join(random.choice(alphabet) for _ in range(10)) for _ in range(10)]
+ rejected_answers = [
+ "".join(random.choice(alphabet) for _ in range(10)) for _ in range(10)
+ ]
+
+ return pd.DataFrame(
+ {
+ "prompt": prompts,
+ "system": systems,
+ "answer": answers,
+ "rejected_answer": rejected_answers,
+ }
+ )
+
+
+def test_dpo_perplexity_metric(tmp_path, df2):
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ cfg = ConfigProblemBase(
+ output_directory=str(tmp_path),
+ llm_backbone="MaxJeblick/llama2-0b-unit-test",
+ dataset=ConfigDPODataset(
+ system_column="system",
+ prompt_column=("prompt",),
+ answer_column="answer_column",
+ rejected_answer_column="answer_column",
+ ),
+ tokenizer=ConfigNLPCausalLMTokenizer(max_length=512),
+ prediction=ConfigNLPCausalLMPrediction(metric="Perplexity"),
+ )
+ cfg.architecture.gradient_checkpointing = False
+ cfg.environment._device = device # type: ignore
+
+ # bfloat16 is not supported on older GPUs
+ cfg.environment.mixed_precision_dtype = "float16"
+
+ dataset = CustomDataset(df2, cfg, mode="train")
+ model = Model(cfg).eval().to(device)
+ vocab_size = model.backbone.config.vocab_size
+
+ class MockBackbone(nn.Module):
+ """
+ Chosen and rejected logits are the same
+ Chosen reference and rejected reference logits are the same,
+ but different from chosen and rejected logits.
+ As answer_column and rejected_answer_column are the same,
+
+ -> perplexity and rejection_perplexity should be the same
+ -> chosen_rewards and rejected_rewards should be the same
+ -> chosen_cross_entropy and rejected_cross_entropy should be the same
+ -> reward margin should be 0
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.seed = 0
+
+ def disable_adapter(self):
+ # mock lora adapter
+ @contextmanager
+ def flip_seed():
+ self.seed = 1
+ yield None
+ self.seed = 0
+
+ return flip_seed()
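+ # (Clarifying note:) disable_adapter mimics how DPO obtains reference-model
+ # logits, namely by running the backbone with its LoRA adapter disabled;
+ # flipping the seed makes the mocked reference logits differ from the
+ # policy logits, matching the class docstring above.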
+
+ def forward(self, input_ids, attention_mask):
+ @dataclass
+ class Result:
+ bs, seq_len = input_ids.shape
+ torch.manual_seed(self.seed)
+ logits = torch.rand((bs, seq_len, vocab_size)).to(input_ids.device)
+
+ result = Result()
+ return result
+
+ class ListLogger:
+ def __init__(self):
+ self.logs = {}
+
+ def log(self, subset: str, name: str, value: str | float, step: float | None = None):
+ self.logs[name] = self.logs.get(name, []) + [value]
+
+ with patch.object(target=model, attribute="backbone", new_callable=MockBackbone):
+ dataloader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)
+
+ # mock cfg.logging._logger.log
+ cfg.logging._logger = ListLogger()
+
+ run_eval(
+ cfg,
+ model=model,
+ val_dataloader=dataloader,
+ val_df=df2,
+ mode="validation",
+ )
+
+ log_dict = cfg.logging._logger.logs
+ assert log_dict["Perplexity"] == log_dict["rejected_perplexity"]
+ assert log_dict["chosen_rewards"] == log_dict["rejected_rewards"]
+ assert (
+ log_dict["chosen_cross_entropy_loss"] == log_dict["rejected_cross_entropy_loss"]
+ )
+ assert log_dict["reward_margin"] == [0] * len(log_dict["reward_margin"])
diff --git a/tests/src/models/test_text_causal_language_modeling_model.py b/tests/src/models/test_text_causal_language_modeling_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d0b9990657c55fbcd5cec56d2fb1ae3151b4c6d
--- /dev/null
+++ b/tests/src/models/test_text_causal_language_modeling_model.py
@@ -0,0 +1,83 @@
+import torch
+
+from llm_studio.python_configs.text_causal_language_modeling_config import (
+ ConfigProblemBase,
+)
+from llm_studio.src.models.text_causal_language_modeling_model import Model
+from llm_studio.src.utils.modeling_utils import TokenStoppingCriteria, activate_neftune
+
+
+def test_token_stopping_criteria():
+ token_stopping_criteria = TokenStoppingCriteria(
+ stop_word_ids=torch.tensor([0, 1, 2, 8]), prompt_input_ids_len=4
+ )
+
+ input_ids = torch.tensor(
+ [
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
+ [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ [2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
+ [3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
+ [4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
+ [5, 6, 7, 8, 9, 10, 11, 12, 13, 14],
+ ]
+ ).long()
+
+ # prompt input len is 4, so generated ids of last sample of the batch are
+ # [9, 10, 11, 12, 13, 14], do not trigger stopping criteria
+ assert not token_stopping_criteria(input_ids=input_ids, scores=None)
+
+ token_stopping_criteria = TokenStoppingCriteria(
+ stop_word_ids=torch.tensor([6]), prompt_input_ids_len=0
+ )
+
+ # first item reads [ 0, 1, 2, 3, 4, 5], so do not trigger stopping criteria
+ assert not token_stopping_criteria(input_ids=input_ids[:, :6], scores=None)
+ assert token_stopping_criteria(input_ids=input_ids[:, :7], scores=None)
+
+ # Test stopping criteria with compound tokens
+ token_stopping_criteria = TokenStoppingCriteria(
+ stop_word_ids=torch.tensor([[6, 7]]), prompt_input_ids_len=0
+ )
+
+ assert not token_stopping_criteria(input_ids=input_ids[:, :6], scores=None)
+ assert not token_stopping_criteria(input_ids=input_ids[:, :7], scores=None)
+ assert token_stopping_criteria(input_ids=input_ids[:, :8], scores=None)
+
+ # Test stopping criteria with stop word ids being longer than generated text
+ token_stopping_criteria = TokenStoppingCriteria(
+ stop_word_ids=torch.tensor([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]]),
+ prompt_input_ids_len=0,
+ )
+
+ assert not token_stopping_criteria(input_ids=input_ids, scores=None)
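+# (Usage note, added for context:) criteria like this are passed to
+# generate(stopping_criteria=StoppingCriteriaList([...])); as the cases above
+# show, it only fires once every sequence in the batch has produced a stop
+# word after the prompt.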
+
+
+def test_neftune_is_disabled_in_inference():
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ cfg = ConfigProblemBase(llm_backbone="h2oai/llama2-0b-unit-test")
+ cfg.architecture.backbone_dtype = "float32"
+ model = Model(cfg).eval().to(device)
+
+ input_batch = {
+ "input_ids": torch.randint(
+ 0,
+ 1000,
+ (1, 10),
+ ).to(device),
+ "attention_mask": torch.ones(1, 10).to(device),
+ }
+
+ with torch.no_grad():
+ outputs = model.backbone(**input_batch)
+
+ activate_neftune(model, neftune_noise_alpha=10)
+ assert model.backbone.get_input_embeddings().neftune_noise_alpha == 10
+
+ with torch.no_grad():
+ outputs_after_neftune = model.backbone(**input_batch)
+
+ assert torch.allclose(outputs["logits"], outputs_after_neftune["logits"])
+
+ # state dict does not contain neftune noise
+ assert [key for key in model.state_dict() if "neftune" in key] == []
diff --git a/tests/src/test_data/cfg.yaml b/tests/src/test_data/cfg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dbc34c023b95df56da3cb52126f9a6ca5869d15e
--- /dev/null
+++ b/tests/src/test_data/cfg.yaml
@@ -0,0 +1,74 @@
+architecture:
+ backbone_dtype: float16
+ gradient_checkpointing: false
+ intermediate_dropout: 0.0
+ config_item_that_is_not_used: 0
+augmentation:
+ token_mask_probability: 0.0
+dataset:
+ add_eos_token_to_answer: true
+ add_eos_token_to_prompt: true
+ answer_column: output
+ data_sample: 0.1
+ data_sample_choice:
+ - Train
+ - Validation
+ mask_prompt_labels: false
+ prompt_column:
+ - instruction
+ text_answer_separator: \n
+ text_prompt_start: ''
+ train_dataframe: data/user/train/train.csv
+ validation_dataframe: None
+ validation_size: 0.01
+ validation_strategy: automatic
+environment:
+ compile_model: false
+ find_unused_parameters: false
+ gpus:
+ - '0'
+ mixed_precision: true
+ number_of_workers: 8
+ seed: -1
+experiment_name: test
+llm_backbone: EleutherAI/pythia-12b-deduped
+logging:
+ logger: None
+ neptune_project: ''
+output_directory: output/user/test/
+prediction:
+ batch_size_inference: 0
+ do_sample: false
+ max_length_inference: 256
+ max_time: 0.0
+ metric: GPT3.5
+ min_length_inference: 2
+ num_beams: 2
+ repetition_penalty: 1.2
+ stop_tokens: ""
+ temperature: 0.0
+problem_type: text_causal_language_modeling
+tokenizer:
+ max_length: 144
+ padding_quantile: 1.0
+ tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
+training:
+ batch_size: 3
+ epochs: 0
+ evaluate_before_training: true
+ evaluation_epochs: 1.0
+ grad_accumulation: 1
+ gradient_clip: 0.0
+ learning_rate: 0.0001
+ lora: true
+ use_dora: false
+ lora_alpha: 16
+ lora_dropout: 0.05
+ lora_r: 4
+ lora_target_modules: ''
+ optimizer: AdamW
+ save_checkpoint: "last"
+ schedule: Cosine
+ train_validation_data: false
+ warmup_epochs: 0.0
+ weight_decay: 0.0
diff --git a/tests/src/test_loggers.py b/tests/src/test_loggers.py
new file mode 100644
index 0000000000000000000000000000000000000000..447844ab08024ec664758a1e7b01aff3c7ce2c93
--- /dev/null
+++ b/tests/src/test_loggers.py
@@ -0,0 +1,32 @@
+import tempfile
+from unittest.mock import MagicMock
+
+import pytest
+
+from llm_studio.src.loggers import DummyLogger, MainLogger
+
+
+@pytest.fixture
+def temp_dir():
+ with tempfile.TemporaryDirectory() as temp_dir_path:
+ yield temp_dir_path
+
+
+@pytest.fixture
+def mock_cfg():
+ cfg = MagicMock()
+ cfg.logging.logger = "None"
+ cfg.logging._neptune_debug = False
+ return cfg
+
+
+def test_main_logger_initialization(mock_cfg, temp_dir):
+ mock_cfg.output_directory = temp_dir
+ logger = MainLogger(mock_cfg)
+
+ # Has external and local logger
+ assert "external" in logger.loggers.keys()
+ assert "local" in logger.loggers.keys()
+
+ # external logger is DummyLogger
+ assert isinstance(logger.loggers["external"], DummyLogger)
diff --git a/tests/src/test_nesting.py b/tests/src/test_nesting.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ef50a45d195a42a23131bb4abb8a2fd1fecbb65
--- /dev/null
+++ b/tests/src/test_nesting.py
@@ -0,0 +1,87 @@
+import unittest
+
+import pytest
+
+from llm_studio.src.nesting import Dependency, Nesting
+
+
+class TestDependency:
+ @pytest.mark.parametrize(
+ "key, value, is_set",
+ [
+ ("personalize", True, True),
+ ("validation_strategy", "automatic", True),
+ ("deepspeed_method", "ZeRO2", True),
+ ("lora", False, False),
+ ],
+ )
+ def test_dependency_init(self, key, value, is_set):
+ dep = Dependency(key=key, value=value, is_set=is_set)
+ assert dep.key == key
+ assert dep.value == value
+ assert dep.is_set == is_set
+
+ @pytest.mark.parametrize(
+ "dep, dependency_values, expected",
+ [
+ (Dependency("tkey", value=True, is_set=True), [True], True),
+ (Dependency("tkey", value=True, is_set=True), [False], False),
+ (Dependency("tkey", value=True, is_set=False), [True], False),
+ (Dependency("tkey", value=True, is_set=False), [False], True),
+ (Dependency("tkey", value=False, is_set=True), [False], True),
+ (Dependency("tkey", value=False, is_set=True), [True], False),
+ (Dependency("tkey", value=False, is_set=False), [False], False),
+ (Dependency("tkey", value=False, is_set=False), [True], True),
+ (Dependency("tkey", value="value", is_set=True), ["value"], True),
+ (Dependency("tkey", value="value", is_set=True), ["other_value"], False),
+ (Dependency("tkey", value="value", is_set=False), ["value"], False),
+ (Dependency("tkey", value="value", is_set=False), ["other_value"], True),
+ (Dependency("tkey", value=None, is_set=True), [], False),
+ (Dependency("tkey", value=None, is_set=True), ["value"], False),
+ (Dependency("tkey", value=None, is_set=False), [], False),
+ (Dependency("tkey", value=None, is_set=False), ["value"], True),
+ ],
+ )
+ def test_dependency_check(self, dep, dependency_values, expected):
+ assert dep.check(dependency_values) == expected
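+ # Reference sketch of the semantics exercised above (inferred from the
+ # parametrized cases; not the actual implementation in
+ # llm_studio.src.nesting):
+ #
+ # def check(self, dependency_values):
+ # if self.value is None:
+ # # value=None: matches only is_set=False with at least one value present
+ # return bool(dependency_values) and not self.is_set
+ # return (self.value in dependency_values) == self.is_set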
+
+
+class TestNesting(unittest.TestCase):
+ def setUp(self):
+ self.nesting = Nesting()
+
+ def test_nesting_init(self):
+ self.assertEqual(len(self.nesting.dependencies), 0)
+ self.assertEqual(len(self.nesting.triggers), 0)
+
+ def test_nesting_add(self):
+ keys = ["key1", "key2"]
+ dependencies = [
+ Dependency("dep1", value=True, is_set=True),
+ Dependency("dep2", value=True, is_set=True),
+ ]
+ self.nesting.add(keys, dependencies)
+
+ self.assertEqual(len(self.nesting.dependencies), 2)
+ self.assertEqual(len(self.nesting.triggers), 2)
+ self.assertIn("dep1", self.nesting.triggers)
+ self.assertIn("dep2", self.nesting.triggers)
+
+ def test_nesting_add_duplicate_keys(self):
+ keys = ["key1", "key1"]
+ dependencies = [Dependency("dep1", value=True, is_set=True)]
+
+ with self.assertRaises(ValueError):
+ self.nesting.add(keys, dependencies)
+
+ def test_nesting_multiple_adds(self):
+ self.nesting.add(["key1"], [Dependency("dep1", value=True, is_set=True)])
+ self.nesting.add(["key2"], [Dependency("dep2", value=True, is_set=True)])
+ self.nesting.add(
+ ["key1", "key2"], [Dependency("dep3", value=True, is_set=True)]
+ )
+
+ self.assertEqual(len(self.nesting.dependencies), 2)
+ self.assertEqual(len(self.nesting.triggers), 3)
+ self.assertEqual(len(self.nesting.dependencies["key1"]), 2)
+ self.assertEqual(len(self.nesting.dependencies["key2"]), 2)
diff --git a/tests/src/test_order.py b/tests/src/test_order.py
new file mode 100644
index 0000000000000000000000000000000000000000..f14cee179ebc3658be0791e9de41286e5d1bdf01
--- /dev/null
+++ b/tests/src/test_order.py
@@ -0,0 +1,124 @@
+import pytest
+
+from llm_studio.src.order import Order
+
+
+def test_order_initialization():
+ # Test empty initialization
+ order1 = Order()
+ assert len(order1) == 0
+
+ # Test initialization with keys
+ keys = ["a", "b", "c"]
+ order2 = Order(keys)
+ assert list(order2) == keys
+
+
+def test_append():
+ order = Order()
+ order.append("a")
+ assert list(order) == ["a"]
+
+ order.append("b")
+ assert list(order) == ["a", "b"]
+
+ with pytest.raises(ValueError):
+ order.append("a") # Attempting to add a duplicate key
+
+
+def test_extend():
+ order = Order(["a", "b"])
+ order.extend(["c", "d"])
+ assert list(order) == ["a", "b", "c", "d"]
+
+ with pytest.raises(ValueError):
+ order.extend(["e", "a"]) # Attempting to add a duplicate key
+
+
+def test_insert():
+ order = Order(["a", "b", "c"])
+
+ order.insert("x", before="b")
+ assert list(order) == ["a", "x", "b", "c"]
+
+ order.insert("y", after="c")
+ assert list(order) == ["a", "x", "b", "c", "y"]
+
+ order.insert("z", "w", before="a")
+ assert list(order) == ["z", "w", "a", "x", "b", "c", "y"]
+
+ with pytest.raises(ValueError):
+ order.insert("v", before="non_existent")
+
+ with pytest.raises(ValueError):
+ order.insert("v", after="non_existent")
+
+ with pytest.raises(ValueError):
+ # Attempting to specify both before and after
+ order.insert("v", before="a", after="b")
+
+ with pytest.raises(ValueError):
+ order.insert("a") # Attempting to add a duplicate key
+
+ with pytest.raises(ValueError):
+ order.insert("v") # Not specifying before or after
+
+
+def test_getitem():
+ order = Order(["a", "b", "c"])
+ assert order[0] == "a"
+ assert order[1] == "b"
+ assert order[2] == "c"
+
+ with pytest.raises(IndexError):
+ order[3]
+
+
+def test_len():
+ order = Order()
+ assert len(order) == 0
+
+ order.append("a")
+ assert len(order) == 1
+
+ order.extend(["b", "c"])
+ assert len(order) == 3
+
+
+def test_iter():
+ keys = ["a", "b", "c"]
+ order = Order(keys)
+ assert list(iter(order)) == keys
+
+
+def test_complex_scenario():
+ order = Order(["dataset", "training", "validation", "logging"])
+ order.insert("architecture", before="training")
+ order.insert("environment", after="validation")
+
+ assert list(order) == [
+ "dataset",
+ "architecture",
+ "training",
+ "validation",
+ "environment",
+ "logging",
+ ]
+
+ order.append("results")
+ order.extend(["analysis", "reporting"])
+
+ assert list(order) == [
+ "dataset",
+ "architecture",
+ "training",
+ "validation",
+ "environment",
+ "logging",
+ "results",
+ "analysis",
+ "reporting",
+ ]
+
+ with pytest.raises(ValueError):
+ order.insert("dataset", before="reporting") # Attempting to add a duplicate key
diff --git a/tests/src/test_possible_values.py b/tests/src/test_possible_values.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec1e8379bc74ca4c0f796f63cd4f5c46084857ef
--- /dev/null
+++ b/tests/src/test_possible_values.py
@@ -0,0 +1,274 @@
+import os
+
+import pandas as pd
+import pytest
+
+from llm_studio.src.possible_values import (
+ Columns,
+ DatasetValue,
+ Files,
+ Number,
+ String,
+ _scan_dirs,
+ _scan_files,
+ strip_common_prefix,
+)
+
+
+# Helper function to create a temporary directory structure
+@pytest.fixture
+def temp_dir_structure(tmp_path):
+ base_dir = tmp_path / "test_dir"
+ base_dir.mkdir()
+ (base_dir / "subdir1").mkdir()
+ (base_dir / "subdir2").mkdir()
+ (base_dir / "subdir1" / "subsubdir").mkdir()
+ (base_dir / "file1.csv").touch()
+ (base_dir / "file2.json").touch()
+ (base_dir / "__meta_info__train.json").touch()
+ (base_dir / "subdir1" / "file3.parquet").touch()
+ (base_dir / "subdir1" / "__meta_info__train.pq.csv").touch()
+ return base_dir
+
+
+def test_scan_dirs(temp_dir_structure):
+ dirs = _scan_dirs(str(temp_dir_structure))
+ expected = [
+ str(temp_dir_structure / "subdir1") + "/",
+ str(temp_dir_structure / "subdir2") + "/",
+ str(temp_dir_structure / "subdir1" / "subsubdir") + "/",
+ ]
+ assert set(dirs) == set(expected)
+
+
+def test_scan_files(temp_dir_structure):
+ files = _scan_files(str(temp_dir_structure))
+ expected = [
+ str(temp_dir_structure / "file1.csv"),
+ str(temp_dir_structure / "file2.json"),
+ str(temp_dir_structure / "subdir1" / "file3.parquet"),
+ ]
+ assert set(files) == set(expected)
+
+
+def test_strip_common_prefix():
+ paths = ["/a/b/c/file1.txt", "/a/b/c/file2.txt", "/a/b/d/file3.txt"]
+ stripped = strip_common_prefix(paths)
+ assert stripped == ("c/file1.txt", "c/file2.txt", "d/file3.txt")
+
+ # Test with ignore_set
+ paths_with_ignore = paths + ["ignore_this"]
+ stripped_with_ignore = strip_common_prefix(
+ paths_with_ignore, ignore_set={"ignore_this"}
+ )
+ assert stripped_with_ignore == (
+ "c/file1.txt",
+ "c/file2.txt",
+ "d/file3.txt",
+ "ignore_this",
+ )
+
+
+def test_strip_common_prefix_empty_paths():
+ paths = []
+ stripped = strip_common_prefix(paths)
+ assert stripped == tuple([])
+
+
+def test_number_slider():
+ num = Number(min=0, max=10, step=0.5)
+ assert num.min == 0
+ assert num.max == 10
+ assert num.step == 0.5
+
+
+def test_number_spinbox():
+ num = Number(min=0, step=0.5)
+ assert num.min == 0
+ assert num.max is None
+ assert num.step == 0.5
+
+
+def test_number_impossible_values():
+ with pytest.raises(ValueError):
+ Number(min=0, max=10, step="a")
+
+ with pytest.raises(ValueError):
+ Number(min=0, max="a", step=0.5)
+
+ with pytest.raises(ValueError):
+ Number(min="a", max=10, step=0.5)
+
+ with pytest.raises(ValueError):
+ Number(min=0, max=10)
+
+ with pytest.raises(ValueError):
+ Number(min=10, max=1, step=1)
+
+ with pytest.raises(ValueError):
+ Number(min=10, max=0, step=1)
+
+
+def test_string_tuple_of_strings():
+ s = String(
+ values=("a", "b", "c"), allow_custom=True, placeholder="Select an option"
+ )
+ assert s.values == ("a", "b", "c")
+ assert s.allow_custom is True
+ assert s.placeholder == "Select an option"
+
+
+def test_string_tuple_of_tuples():
+ s = String(
+ values=(("a", "hello there"), ("b", "hello there"), ("c", "hello there")),
+ allow_custom=True,
+ placeholder="Select an option",
+ )
+ assert s.values == (
+ ("a", "hello there"),
+ ("b", "hello there"),
+ ("c", "hello there"),
+ )
+ assert s.allow_custom is True
+ assert s.placeholder == "Select an option"
+
+
+def test_string_impossible_values():
+ with pytest.raises(ValueError):
+ String(values=("a", "b", "c"), allow_custom="a")
+
+ with pytest.raises(ValueError):
+ String(values=("a", "b", "c"), placeholder=True)
+
+
+class TestDatasetValue:
+
+ def test_get_value(self):
+ """
+ Test that NotImplementedError is raised when get_value is called directly
+
+ This is a base class and should not be used directly.
+ get_value is an abstract method.
+ """
+ dataset_value = DatasetValue()
+ with pytest.raises(NotImplementedError):
+ dataset_value.get_value(None, None, None)
+
+ @pytest.mark.parametrize(
+ "current_values, possible_values, prefer_with, expected",
+ [
+ (["a", "b"], ["a", "b", "c"], None, ["a", "b"]),
+ (["a", "d"], ["a", "b", "c"], None, ["a"]),
+ ([], ["a", "b", "c"], None, ["a"]),
+ (["d", "e"], ["a", "b", "c"], None, ["a"]),
+ ([], [], None, [""]),
+ (["a", "b"], [], None, [""]),
+ ],
+ )
+ def test_compute_current_values_basic(
+ self, current_values, possible_values, prefer_with, expected
+ ):
+ result = DatasetValue._compute_current_values(
+ current_values, possible_values, prefer_with
+ )
+ assert result == expected
+
+ def test_compute_current_values_with_prefer_function(self):
+ current_values = []
+ possible_values = ["a", "b", "c", "d"]
+
+ def prefer_with(x):
+ return x in ["b", "c"]
+
+ result = DatasetValue._compute_current_values(
+ current_values, possible_values, prefer_with
+ )
+ assert result == ["b", "c"]
+
+ def test_compute_current_values_with_prefer_function_single_match(self):
+ current_values = []
+ possible_values = ["a", "b", "d"]
+
+ def prefer_with(x):
+ return x in ["b", "c"]
+
+ result = DatasetValue._compute_current_values(
+ current_values, possible_values, prefer_with
+ )
+ assert result == ["b"]
+
+ def test_compute_current_values_prefer_function_no_match(self):
+ current_values = []
+ possible_values = ["a", "b", "c"]
+
+ def prefer_with(x):
+ return x == "d"
+
+ result = DatasetValue._compute_current_values(
+ current_values, possible_values, prefer_with
+ )
+ assert result == [
+ "a"
+ ] # Should return first possible value when no preference matches
+
+ def test_compute_current_values_all_filtered_out(self):
+ current_values = ["d", "e"]
+ possible_values = ["a", "b", "c"]
+
+ result = DatasetValue._compute_current_values(current_values, possible_values)
+ assert result == [
+ "a"
+ ] # Should return first possible value when all current values are filtered out
+
+ @pytest.mark.parametrize(
+ "current_values, possible_values",
+ [
+ (["a", "a", "b"], ["a", "b", "c"]),
+ (["a", "b", "a"], ["a", "b", "c"]),
+ (["a", "b", "a"], ["a", "b", "c", "a"]),
+ (["a", "b"], ["a", "a", "b"]),
+ ],
+ )
+ def test_compute_current_values_duplicates(self, current_values, possible_values):
+ with pytest.raises(ValueError):
+ DatasetValue._compute_current_values(current_values, possible_values)
+
+ def test_compute_current_values_type_check(self):
+ current_values = ["a", "b"]
+ possible_values = ["a", "b", "c"]
+ result = DatasetValue._compute_current_values(current_values, possible_values)
+ assert isinstance(result, list)
+ assert all(isinstance(item, str) for item in result)
+
+
+# Mock dataset for testing
+@pytest.fixture
+def mock_dataset(temp_dir_structure):
+ return {
+ "path": str(temp_dir_structure),
+ "dataframe": pd.DataFrame(
+ {"col1": [1, 2, 3], "col2": ["a", "b", "c"], "col3": [True, False, True]}
+ ),
+ }
+
+
+@pytest.mark.parametrize(
+ "filename", ["file1.csv", "file2.json", "subdir1/file3.parquet", "non-existant"]
+)
+def test_files(mock_dataset, filename):
+ files = Files()
+ result, value = files.get_value(mock_dataset, filename, str)
+ assert isinstance(result, String)
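+ # The relative names never match the absolute scanned paths, so get_value
+ # falls back to the first scanned file for every parameter.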
+ assert value == os.path.join(mock_dataset["path"], "file1.csv")
+
+
+@pytest.mark.parametrize("col", ["col1", "col2", "col3", "non-existant"])
+def test_columns(mock_dataset, col):
+ cols = Columns()
+ result, value = cols.get_value(mock_dataset, col, str)
+ assert isinstance(result, String)
+ assert set(result.values) == {"col1", "col2", "col3"}
+ if col == "non-existant":
+ assert value == "col1"
+ else:
+ assert value == col
diff --git a/tests/src/test_tooltips.py b/tests/src/test_tooltips.py
new file mode 100644
index 0000000000000000000000000000000000000000..3981795cf7d99437105b0eb0364c57177b531ded
--- /dev/null
+++ b/tests/src/test_tooltips.py
@@ -0,0 +1,142 @@
+import unittest
+from unittest.mock import mock_open, patch
+
+from llm_studio.src.tooltips import (
+ Tooltip,
+ Tooltips,
+ clean_docusaurus_tags,
+ clean_md_links,
+ cleanhtml,
+ read_tooltip_file,
+)
+
+
+class TestTooltipFunctions(unittest.TestCase):
+
+ def test_read_tooltip_file_mock_file(self):
+ mock_content = "This is a test file content\n\nwith multiple lines."
+ with patch("builtins.open", mock_open(read_data=mock_content)):
+ result = read_tooltip_file("dummy/path.mdx")
+ self.assertEqual(result, mock_content)
+
+ def test_read_tooltip_file_real_file(self):
+ expected_content = "Adds EOS token at end of answer."
+ result = read_tooltip_file(
+ "documentation/docs/tooltips/experiments/_add-eos-token-to-answer.mdx"
+ )
+ self.assertEqual(result, expected_content)
+
+ def test_read_tooltip_file_nonexistent_file(self):
+ with self.assertRaises(FileNotFoundError):
+ read_tooltip_file("nonexistent/path.mdx")
+
+ def test_cleanhtml(self):
+ html = "This is bold text
"
+ expected = "This is bold textScript"
+ self.assertEqual(cleanhtml(html), expected)
+
+ def test_clean_docusaurus_tags_note(self):
+ text = ":::info note Some note :::"
+ expected = "Some note"
+ self.assertEqual(clean_docusaurus_tags(text), expected)
+ text = ":::info Note Some note :::"
+ expected = "Some note"
+ self.assertEqual(clean_docusaurus_tags(text), expected)
+
+ def test_clean_docusaurus_tags_tip(self):
+ text = ":::tip tip Some tip :::"
+ expected = "Some tip"
+ self.assertEqual(clean_docusaurus_tags(text), expected)
+
+ def test_clean_md_links(self):
+ md_text = "This is a [link](https://example.com) in text"
+ expected = "This is a link in text"
+ self.assertEqual(clean_md_links(md_text), expected)
+
+
+class TestTooltip(unittest.TestCase):
+
+ def test_tooltip_creation(self):
+ tooltip = Tooltip("test", "This is a test tooltip")
+ self.assertEqual(tooltip.name, "test")
+ self.assertEqual(tooltip.text, "This is a test tooltip")
+
+ def test_tooltip_repr(self):
+ tooltip = Tooltip("test", "This is a test tooltip")
+ self.assertEqual(repr(tooltip), "test: This is a test tooltip")
+
+
+class TestTooltips(unittest.TestCase):
+
+ @patch("llm_studio.src.tooltips.read_tooltip_file")
+ def setUp(self, mock_read):
+ mock_files = [
+ "documentation/docs/tooltips/section1/_file1.mdx",
+ "documentation/docs/tooltips/section1/_file2.mdx",
+ "documentation/docs/tooltips/section2/_file1.mdx",
+ "documentation/docs/tooltips/section2/_file2.mdx",
+ ]
+ mock_read.side_effect = ["Content 1", "Content 2", "Content 3", "Content 4"]
+ self.tooltips = Tooltips(tooltip_files=mock_files)
+
+ @patch("llm_studio.src.tooltips.read_tooltip_file")
+ def test_tooltips_no_underscore(self, mock_read):
+ mock_files = [
+ "documentation/docs/tooltips/section1/_file1.mdx",
+ "documentation/docs/tooltips/section2/file2.mdx",
+ ]
+ mock_read.side_effect = ["Content 1", "Content 2"]
+ with self.assertRaises(ValueError):
+ Tooltips(tooltip_files=mock_files)
+
+ def test_tooltips_initialization(self):
+ self.assertEqual(len(self.tooltips), 4)
+ self.assertIn("section1_file1", self.tooltips.tooltips)
+ self.assertIn("section1_file2", self.tooltips.tooltips)
+ self.assertIn("section2_file1", self.tooltips.tooltips)
+ self.assertIn("section2_file2", self.tooltips.tooltips)
+
+ def test_add_tooltip(self):
+ length_before = len(self.tooltips)
+ new_tooltip = Tooltip("new", "New tooltip")
+ self.tooltips.add_tooltip(new_tooltip)
+ self.assertEqual(len(self.tooltips), length_before + 1)
+ self.assertEqual(self.tooltips["new"], "New tooltip")
+
+ def test_getitem(self):
+ self.assertEqual(self.tooltips["section1_file1"], "Content 1")
+ self.assertEqual(self.tooltips["section1_file2"], "Content 2")
+ self.assertEqual(self.tooltips["section2_file1"], "Content 3")
+ self.assertEqual(self.tooltips["section2_file2"], "Content 4")
+
+ self.assertIsNone(self.tooltips["nonexistent"])
+
+ def test_len(self):
+ self.assertEqual(len(self.tooltips), 4)
+
+ def test_repr(self):
+ repr_string = repr(self.tooltips)
+ self.assertIn("section1_file1", repr_string)
+ self.assertIn("section1_file2", repr_string)
+ self.assertIn("section2_file1", repr_string)
+ self.assertIn("section2_file2", repr_string)
+
+ def test_get(self):
+ self.assertEqual(self.tooltips.get("section1_file1"), "Content 1")
+ self.assertEqual(self.tooltips.get("section1_file1", "default"), "Content 1")
+ self.assertEqual(self.tooltips.get("nonexistent", "default"), "default")
+
+ @patch("llm_studio.src.tooltips.read_tooltip_file")
+ def test_duplicate_tooltip_name(self, mock_read):
+ with self.assertRaises(ValueError):
+ mock_files = [
+ "documentation/docs/tooltips/section1/_file1.mdx",
+ "documentation/docs/tooltips/section1/_file2.mdx",
+ "documentation/docs/tooltips/section1/_file1.mdx",
+ ]
+ mock_read.side_effect = ["Content 1", "Content 2", "Content 3"]
+ Tooltips(tooltip_files=mock_files)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/tests/src/utils/test_chat_template.py b/tests/src/utils/test_chat_template.py
new file mode 100644
index 0000000000000000000000000000000000000000..27db1126a120af505369c3930edb596bc889f474
--- /dev/null
+++ b/tests/src/utils/test_chat_template.py
@@ -0,0 +1,235 @@
+import os
+
+import pytest
+from jinja2.exceptions import TemplateError
+
+from llm_studio.app_utils.hugging_face_utils import get_chat_template
+from llm_studio.src.datasets.text_utils import get_tokenizer
+from llm_studio.src.utils.config_utils import load_config_yaml
+
+
+def test_chat_template_no_system_prompt():
+
+ test_directory = os.path.abspath(os.path.dirname(__file__))
+ cfg_path = os.path.join(test_directory, "../test_data/cfg.yaml")
+ cfg = load_config_yaml(cfg_path)
+ cfg.dataset.system_column = "None"
+ cfg.dataset.text_prompt_start = "<|prompt|>"
+ cfg.dataset.text_answer_separator = "<|answer|>"
+ cfg.dataset.add_eos_token_to_prompt = True
+ cfg.dataset.add_eos_token_to_answer = True
+
+ tokenizer = get_tokenizer(cfg)
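+ # Clear the EOS token so the eos insertions render as empty strings below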
+ tokenizer.eos_token = ""
+ tokenizer.chat_template = get_chat_template(cfg)
+
+ chat = [
+ {"role": "user", "content": "[user prompt]"},
+ {"role": "assistant", "content": "[assistant response]"},
+ {"role": "user", "content": "[user prompt2]"},
+ ]
+
+ input = tokenizer.apply_chat_template(
+ chat,
+ tokenize=False,
+ add_generation_prompt=True,
+ )
+ expected = "<|prompt|>[user prompt]<|answer|>[assistant response]<|prompt|>[user prompt2]<|answer|>" # noqa
+ assert input == expected
+
+ # raise error test
+ for chat in [
+ [
+ {"role": "system", "content": "[system prompt]"},
+ {"role": "user", "content": "[user prompt]"},
+ {"role": "assistant", "content": "[assistant response]"},
+ {"role": "user", "content": "[user prompt2]"},
+ ],
+ [
+ {"role": "user", "content": "[user prompt]"},
+ {"role": "assistant", "content": "[assistant response]"},
+ {"role": "user", "content": "[user prompt2]"},
+ {"role": "system", "content": "[system prompt]"},
+ ],
+ ]:
+ with pytest.raises(TemplateError) as e:
+ input = tokenizer.apply_chat_template(
+ chat,
+ tokenize=False,
+ add_generation_prompt=True,
+ )
+ expected = "System role not supported"
+ assert expected in str(e.value)
+
+ # raise error test
+ for chat in [
+ [
+ {"role": "user", "content": "[user prompt]"},
+ {"role": "user", "content": "[user prompt2]"},
+ {"role": "assistant", "content": "[assistant response]"},
+ ],
+ [
+ {"role": "assistant", "content": "[assistant response]"},
+ {"role": "user", "content": "[user prompt]"},
+ {"role": "assistant", "content": "[assistant response]"},
+ ],
+ [
+ {"role": "assistant", "content": "[assistant response]"},
+ {"role": "assistant", "content": "[user prompt]"},
+ {"role": "user", "content": "[assistant response]"},
+ ],
+ ]:
+ with pytest.raises(TemplateError) as e:
+ input = tokenizer.apply_chat_template(
+ chat,
+ tokenize=False,
+ add_generation_prompt=True,
+ )
+ expected = "Conversation roles must alternate user/assistant/user/assistant/..."
+ assert expected in str(e.value)
+
+
+def test_chat_template_with_system_prompt():
+
+ test_directory = os.path.abspath(os.path.dirname(__file__))
+ cfg_path = os.path.join(test_directory, "../test_data/cfg.yaml")
+ cfg = load_config_yaml(cfg_path)
+ cfg.dataset.text_prompt_start = "<|prompt|>"
+ cfg.dataset.text_answer_separator = "<|answer|>"
+ cfg.dataset.text_system_start = "<|system|>"
+ cfg.dataset.add_eos_token_to_prompt = True
+ cfg.dataset.add_eos_token_to_answer = True
+ cfg.dataset.add_eos_token_to_system = True
+ cfg.dataset.system_column = "system"
+
+ tokenizer = get_tokenizer(cfg)
+ tokenizer.eos_token = ""
+ tokenizer.chat_template = get_chat_template(cfg)
+
+ chat = [
+ {"role": "system", "content": "[system prompt]"},
+ {"role": "user", "content": "[user prompt]"},
+ {"role": "assistant", "content": "[assistant response]"},
+ {"role": "user", "content": "[user prompt2]"},
+ ]
+
+ input = tokenizer.apply_chat_template(
+ chat,
+ tokenize=False,
+ add_generation_prompt=True,
+ )
+ expected = "<|system|>[system prompt]<|prompt|>[user prompt]<|answer|>[assistant response]<|prompt|>[user prompt2]<|answer|>" # noqa
+ assert input == expected
+
+ # works w/o system prompt as well
+ chat = [
+ {"role": "user", "content": "[user prompt]"},
+ {"role": "assistant", "content": "[assistant response]"},
+ {"role": "user", "content": "[user prompt2]"},
+ ]
+
+ input = tokenizer.apply_chat_template(
+ chat,
+ tokenize=False,
+ add_generation_prompt=True,
+ )
+ expected = "<|prompt|>[user prompt]<|answer|>[assistant response]<|prompt|>[user prompt2]<|answer|>" # noqa
+ assert input == expected
+
+ # raise error test
+ for chat in [
+ [
+ {"role": "user", "content": "[user prompt]"},
+ {"role": "system", "content": "[system prompt]"},
+ {"role": "user", "content": "[user prompt2]"},
+ {"role": "assistant", "content": "[assistant response]"},
+ ],
+ [
+ {"role": "system", "content": "[system prompt]"},
+ {"role": "user", "content": "[user prompt]"},
+ {"role": "user", "content": "[user prompt2]"},
+ {"role": "assistant", "content": "[assistant response]"},
+ ],
+ [
+ {"role": "user", "content": "[user prompt]"},
+ {"role": "user", "content": "[user prompt2]"},
+ {"role": "assistant", "content": "[assistant response]"},
+ ],
+ [
+ {"role": "user", "content": "[user prompt]"},
+ {"role": "assistant", "content": "[assistant response]"},
+ {"role": "assistant", "content": "[assistant response2]"},
+ ],
+ ]:
+ with pytest.raises(TemplateError) as e:
+ input = tokenizer.apply_chat_template(
+ chat,
+ tokenize=False,
+ add_generation_prompt=True,
+ )
+ expected = "Conversation roles must alternate system(optional)/user/assistant/user/assistant/..." # noqa
+ assert expected in str(e.value)
+
+
+def test_chat_template_no_eos_token():
+
+ test_directory = os.path.abspath(os.path.dirname(__file__))
+ cfg_path = os.path.join(test_directory, "../test_data/cfg.yaml")
+ cfg = load_config_yaml(cfg_path)
+ cfg.dataset.text_prompt_start = "<|prompt|>"
+ cfg.dataset.text_answer_separator = "<|answer|>"
+ cfg.dataset.text_system_start = "<|system|>"
+ cfg.dataset.add_eos_token_to_system = False
+ cfg.dataset.add_eos_token_to_prompt = False
+ cfg.dataset.add_eos_token_to_answer = False
+ cfg.dataset.system_column = "system"
+
+ tokenizer = get_tokenizer(cfg)
+ tokenizer.chat_template = get_chat_template(cfg)
+
+ chat = [
+ {"role": "system", "content": "[system prompt]"},
+ {"role": "user", "content": "[user prompt]"},
+ {"role": "assistant", "content": "[assistant response]"},
+ {"role": "user", "content": "[user prompt2]"},
+ ]
+
+ input = tokenizer.apply_chat_template(
+ chat,
+ tokenize=False,
+ add_generation_prompt=True,
+ )
+ expected = "<|system|>[system prompt]<|prompt|>[user prompt]<|answer|>[assistant response]<|prompt|>[user prompt2]<|answer|>" # noqa
+ assert input == expected
+
+
+def test_chat_template_no_special_token():
+
+ test_directory = os.path.abspath(os.path.dirname(__file__))
+ cfg_path = os.path.join(test_directory, "../test_data/cfg.yaml")
+ cfg = load_config_yaml(cfg_path)
+ cfg.dataset.system_column = "system"
+ cfg.dataset.text_system_start = ""
+ cfg.dataset.text_prompt_start = ""
+ cfg.dataset.text_answer_separator = ""
+ cfg.dataset.add_eos_token_to_system = False
+ cfg.dataset.add_eos_token_to_prompt = False
+ cfg.dataset.add_eos_token_to_answer = False
+
+ tokenizer = get_tokenizer(cfg)
+ tokenizer.chat_template = get_chat_template(cfg)
+
+ chat = [
+ {"role": "system", "content": "[system prompt]"},
+ {"role": "user", "content": "[user prompt]"},
+ {"role": "assistant", "content": "[assistant response]"},
+ {"role": "user", "content": "[user prompt2]"},
+ ]
+
+ input = tokenizer.apply_chat_template(
+ chat,
+ tokenize=False,
+ add_generation_prompt=True,
+ )
+ expected = "[system prompt][user prompt][assistant response][user prompt2]"
+ assert input == expected
diff --git a/tests/src/utils/test_data_utils.py b/tests/src/utils/test_data_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..735a5c5071445298abd533f7b4034cc35df390be
--- /dev/null
+++ b/tests/src/utils/test_data_utils.py
@@ -0,0 +1,118 @@
+import os
+import pathlib
+import random
+import unittest
+from unittest.mock import MagicMock
+
+import pandas as pd
+import pytest
+
+from llm_studio.app_utils.default_datasets import (
+ prepare_default_dataset_causal_language_modeling,
+)
+from llm_studio.src.datasets.conversation_chain_handler import ConversationChainHandler
+from llm_studio.src.utils.data_utils import load_train_valid_data
+
+
+@pytest.fixture
+def cfg_mock():
+ cfg = MagicMock()
+ cfg.dataset.train_dataframe = "/path/to/train/data"
+ cfg.dataset.validation_dataframe = "/path/to/validation/data"
+
+ cfg.dataset.system_column = "None"
+ cfg.dataset.prompt_column = "prompt"
+ cfg.dataset.answer_column = "answer"
+
+ cfg.dataset.validation_size = 0.2
+ return cfg
+
+
+@pytest.fixture
+def read_dataframe_drop_missing_labels_mock(monkeypatch):
+ data = {
+ "prompt": [f"Prompt{i}" for i in range(100)],
+ "answer": [f"Answer{i}" for i in range(100)],
+ "id": list(range(100)),
+ }
+ df = pd.DataFrame(data)
+ mock = MagicMock(return_value=df)
+ monkeypatch.setattr(
+ "llm_studio.src.utils.data_utils.read_dataframe_drop_missing_labels", mock
+ )
+ return mock
+
+
+# Shuffled ids split into 13 interleaved groups, emulating conversation
+# chains that must stay together on one side of the train/validation split.
+numbers = list(range(100))
+random.shuffle(numbers)
+groups = [numbers[n::13] for n in range(13)]
+
+
+@pytest.fixture
+def conversation_chain_ids_mock(monkeypatch):
+ def mocked_init(self, *args, **kwargs):
+ self.conversation_chain_ids = groups
+
+ with unittest.mock.patch.object(
+ ConversationChainHandler, "__init__", new=mocked_init
+ ):
+ yield
+
+
+def test_get_data_custom_validation_strategy(
+ cfg_mock, read_dataframe_drop_missing_labels_mock
+):
+ cfg_mock.dataset.validation_strategy = "custom"
+ train_df, val_df = load_train_valid_data(cfg_mock)
+ # The mocked reader returns a 100-row dataframe for both paths
+ assert len(train_df) == 100
+ assert len(val_df) == 100
+
+
+def test_get_data_automatic_split(
+ cfg_mock, read_dataframe_drop_missing_labels_mock, conversation_chain_ids_mock
+):
+ cfg_mock.dataset.validation_strategy = "automatic"
+ train_df, val_df = load_train_valid_data(cfg_mock)
+ train_ids = set(train_df["id"].tolist())
+ val_ids = set(val_df["id"].tolist())
+
+ assert len(train_ids.intersection(val_ids)) == 0
+ assert len(train_ids) + len(val_ids) == 100
+
+ shared_groups = [
+ i for i in groups if not train_ids.isdisjoint(i) and not val_ids.isdisjoint(i)
+ ]
+ assert len(shared_groups) == 0
+
+
+def test_oasst_data_automatic_split(tmp_path: pathlib.Path):
+ prepare_default_dataset_causal_language_modeling(tmp_path)
+ assert len(os.listdir(tmp_path)) > 0, tmp_path
+ cfg_mock = MagicMock()
+ for file in os.listdir(tmp_path):
+ if file.endswith(".pq"):
+ cfg_mock.dataset.train_dataframe = os.path.join(tmp_path, file)
+
+ cfg_mock.dataset.system_column = "None"
+ cfg_mock.dataset.prompt_column = ("instruction",)
+ cfg_mock.dataset.answer_column = "output"
+ cfg_mock.dataset.parent_id_column = "parent_id"
+ cfg_mock.dataset.id_column = "id"
+ cfg_mock.dataset.prompt_column_separator = "\n\n"
+
+ cfg_mock.dataset.validation_strategy = "automatic"
+
+ for validation_size in [0.05, 0.1, 0.2, 0.3, 0.4, 0.5]:
+ cfg_mock.dataset.validation_size = validation_size
+
+ train_df, val_df = load_train_valid_data(cfg_mock)
+ assert set(train_df["parent_id"].dropna().values).isdisjoint(
+ set(val_df["id"].dropna().values)
+ )
+ assert set(val_df["parent_id"].dropna().values).isdisjoint(
+ set(train_df["id"].dropna().values)
+ )
+ assert (len(val_df) / (len(train_df) + len(val_df))) == pytest.approx(
+ validation_size, 0.05
+ )
diff --git a/tests/src/utils/test_export_utils.py b/tests/src/utils/test_export_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..91d993e1e22fe8654098b77b3f6d6f9178e01ee5
--- /dev/null
+++ b/tests/src/utils/test_export_utils.py
@@ -0,0 +1,19 @@
+from llm_studio.src.utils.export_utils import get_size_str
+
+
+def test_get_size_atomic_units():
+ assert get_size_str(1, input_unit="B") == "1 B"
+ assert get_size_str(1024, input_unit="B", output_unit="KB") == "1.0 KB"
+ assert get_size_str(1048576, input_unit="B", output_unit="MB") == "1.0 MB"
+ assert get_size_str(1073741824, input_unit="B", output_unit="GB") == "1.0 GB"
+ assert get_size_str(1099511627776, input_unit="B", output_unit="TB") == "1.0 TB"
+
+ assert get_size_str(1024**5) == "1024.0 TB"
+
+
+def test_get_size_str_dynamic():
+ assert get_size_str(1500, input_unit="B", output_unit="dynamic") == "1.46 KB"
+ assert (
+ get_size_str(1500, sig_figs=3, input_unit="B", output_unit="dynamic")
+ == "1.465 KB"
+ )
diff --git a/tests/src/utils/test_load_yaml_file.py b/tests/src/utils/test_load_yaml_file.py
new file mode 100644
index 0000000000000000000000000000000000000000..785d763725f84bb0363b2053597100dbbd3d5b37
--- /dev/null
+++ b/tests/src/utils/test_load_yaml_file.py
@@ -0,0 +1,83 @@
+import os
+import pathlib
+
+from llm_studio.src.utils.config_utils import load_config_yaml, save_config_yaml
+
+
+def test_load_config_yaml():
+ test_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
+ cfg_path = os.path.join(test_directory, "test_data/cfg.yaml")
+ cfg = load_config_yaml(cfg_path)
+
+ assert cfg.experiment_name == "test"
+ assert cfg.llm_backbone == "EleutherAI/pythia-12b-deduped"
+ assert cfg.output_directory == "output/user/test/"
+
+ assert cfg.architecture.backbone_dtype == "float16"
+ assert cfg.architecture.gradient_checkpointing is False
+ assert cfg.architecture.intermediate_dropout == 0.0
+
+ assert cfg.augmentation.token_mask_probability == 0.0
+
+ assert cfg.dataset.add_eos_token_to_answer is True
+ assert cfg.dataset.add_eos_token_to_prompt is True
+ assert cfg.dataset.answer_column == "output"
+ assert cfg.dataset.data_sample == 0.1
+ assert cfg.dataset.data_sample_choice == ["Train", "Validation"]
+ assert cfg.dataset.mask_prompt_labels is False
+ assert cfg.dataset.prompt_column == ("instruction",)
+ assert cfg.dataset.text_answer_separator == "\\n"
+ assert cfg.dataset.text_prompt_start == ""
+ assert cfg.dataset.train_dataframe == "data/user/train/train.csv"
+ assert cfg.dataset.validation_dataframe == "None"
+ assert cfg.dataset.validation_size == 0.01
+ assert cfg.dataset.validation_strategy == "automatic"
+
+ assert cfg.environment.compile_model is False
+ assert cfg.environment.find_unused_parameters is False
+ assert cfg.environment.gpus == ["0"]
+ assert cfg.environment.mixed_precision is True
+ assert cfg.environment.number_of_workers == 8
+ assert cfg.environment.seed == -1
+
+ assert cfg.logging.logger == "None"
+ assert cfg.logging.neptune_project == ""
+
+ assert cfg.prediction.batch_size_inference == 0
+ assert cfg.prediction.do_sample is False
+ assert cfg.prediction.max_length_inference == 256
+ assert cfg.prediction.min_length_inference == 2
+ assert cfg.prediction.num_beams == 2
+ assert cfg.prediction.repetition_penalty == 1.2
+ assert cfg.prediction.stop_tokens == ""
+ assert cfg.prediction.temperature == 0.0
+
+ assert cfg.tokenizer.max_length == 144
+ assert cfg.tokenizer.padding_quantile == 1.0
+
+ assert cfg.training.batch_size == 3
+ assert cfg.training.epochs == 0
+ assert cfg.training.evaluate_before_training is True
+ assert cfg.training.evaluation_epochs == 1.0
+ assert cfg.training.grad_accumulation == 1
+ assert cfg.training.gradient_clip == 0.0
+ assert cfg.training.learning_rate == 0.0001
+ assert cfg.training.lora is True
+ assert cfg.training.lora_alpha == 16
+ assert cfg.training.lora_dropout == 0.05
+ assert cfg.training.lora_r == 4
+ assert cfg.training.lora_target_modules == ""
+ assert cfg.training.optimizer == "AdamW"
+ assert cfg.training.save_checkpoint == "last"
+ assert cfg.training.schedule == "Cosine"
+ assert cfg.training.train_validation_data is False
+ assert cfg.training.warmup_epochs == 0.0
+ assert cfg.training.weight_decay == 0.0
+
+
+def test_save_config_yaml(tmp_path: pathlib.Path):
+ test_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), "../"))
+ cfg_path = os.path.join(test_directory, "test_data/cfg.yaml")
+ cfg = load_config_yaml(cfg_path)
+
+ save_config_yaml(os.path.join(tmp_path, "cfg.yaml"), cfg)
diff --git a/tests/src/utils/test_modeling_utils.py b/tests/src/utils/test_modeling_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..25d6395bc6d478a22c36f16ca18c0a82460e2d79
--- /dev/null
+++ b/tests/src/utils/test_modeling_utils.py
@@ -0,0 +1,113 @@
+import os
+import tempfile
+from unittest.mock import MagicMock, patch
+
+import pytest
+import torch
+
+from llm_studio.src.utils.modeling_utils import (
+ check_disk_space,
+ load_checkpoint,
+ save_checkpoint,
+ unwrap_model,
+)
+
+
+def test_unwrap_model():
+ # Create a dummy model
+ model = torch.nn.Linear(10, 10)
+
+ # Wrap it in DataParallel
+ wrapped_model = torch.nn.DataParallel(model)
+ assert wrapped_model != model
+ assert isinstance(wrapped_model, torch.nn.DataParallel)
+
+ # Test unwrapping
+ unwrapped = unwrap_model(wrapped_model)
+ assert unwrapped == model
+ assert not isinstance(unwrapped, torch.nn.DataParallel)
+
+
+@pytest.mark.parametrize(
+ "dtype",
+ [
+ torch.float32,
+ torch.float16,
+ torch.bfloat16,
+ torch.int8,
+ torch.uint8,
+ torch.int16,
+ ],
+)
+@pytest.mark.parametrize(
+ "free_space,should_raise",
+ [
+ (1e12, False), # Plenty of space
+ (1, True), # Not enough space
+ ],
+)
+def test_check_disk_space(free_space, dtype, should_raise):
+ # Mock model and shutil
+ model = MagicMock()
+ model.parameters.return_value = [torch.ones(1000, 1000, dtype=dtype)]
+
+ with patch("shutil.disk_usage", return_value=(0, 0, free_space)):
+ if should_raise:
+ with pytest.raises(ValueError):
+ check_disk_space(model, "/dummy/path")
+ else:
+ check_disk_space(model, "/dummy/path") # Should not raise
+
+
+class DummyModel(torch.nn.Module):
+ def __init__(self, use_classification_head=True):
+ super(DummyModel, self).__init__()
+ self.backbone = torch.nn.Linear(10, 10)
+ if use_classification_head:
+ self.classification_head = torch.nn.Linear(10, 10)
+
+
+@pytest.mark.parametrize("use_classification_head", [True, False])
+def test_save_checkpoint(use_classification_head):
+ model = DummyModel(use_classification_head)
+ cfg = MagicMock()
+ cfg.environment.use_deepspeed = False
+ cfg.environment._local_rank = 0
+ cfg.training.lora = False
+
+ with tempfile.TemporaryDirectory() as tmpdir:
+ save_checkpoint(model, tmpdir, cfg)
+ assert os.path.exists(os.path.join(tmpdir, "checkpoint.pth"))
+ if use_classification_head:
+ assert os.path.exists(os.path.join(tmpdir, "classification_head.pth"))
+ else:
+ assert not os.path.exists(os.path.join(tmpdir, "classification_head.pth"))
+
+
+def test_load_checkpoint():
+ model = DummyModel()
+ cfg = MagicMock()
+ cfg.architecture.pretrained_weights = "dummy_weights.pth"
+ cfg.environment.use_deepspeed = False
+ cfg.environment._local_rank = 0
+
+ # Mock torch.load
+ dummy_state_dict = {"model": model.state_dict()}
+ with patch("torch.load", return_value=dummy_state_dict):
+ load_checkpoint(cfg, model, strict=True)
+
+
+def test_load_checkpoint_mismatch():
+ model = DummyModel(use_classification_head=True)
+ model_no_classification_head = DummyModel(use_classification_head=False)
+ cfg = MagicMock()
+ cfg.architecture.pretrained_weights = "dummy_weights.pth"
+ cfg.environment.use_deepspeed = False
+ cfg.environment._local_rank = 0
+
+ # Mock torch.load
+ dummy_state_dict = {"model": model_no_classification_head.state_dict()}
+ with patch("torch.load", return_value=dummy_state_dict):
+ with pytest.raises(RuntimeError):
+ load_checkpoint(cfg, model, strict=True)
+ load_checkpoint(cfg, model, strict=False)
diff --git a/tests/src/utils/test_utils.py b/tests/src/utils/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c43dcd08f3aada05dc3adad2d21d1b1af1178e4
--- /dev/null
+++ b/tests/src/utils/test_utils.py
@@ -0,0 +1,104 @@
+import os
+import tempfile
+import zipfile
+from unittest.mock import MagicMock, patch
+
+import pytest
+
+from llm_studio.python_configs.text_dpo_modeling_config import (
+ ConfigDPODataset,
+ ConfigProblemBase,
+)
+from llm_studio.src.utils.utils import (
+ PatchedAttribute,
+ add_file_to_zip,
+ check_metric,
+ create_symlinks_in_parent_folder,
+ kill_child_processes,
+)
+
+
+def test_check_metric_key_not_set():
+ cfg = MagicMock()
+ cfg.prediction.metric = "GPT"
+
+ with patch.dict(os.environ, {"OPENAI_API_KEY": ""}):
+ cfg = check_metric(cfg)
+ assert cfg.prediction.metric == "BLEU"
+
+
+def test_check_metric_key_set():
+ cfg = MagicMock()
+ cfg.prediction.metric = "GPT"
+
+ with patch.dict(os.environ, {"OPENAI_API_KEY": "test_key"}):
+ cfg = check_metric(cfg)
+ assert cfg.prediction.metric == "GPT"
+
+
+@patch("psutil.Process")
+def test_kill_child_processes(mock_process):
+ mock_process.return_value.status.return_value = "running"
+ mock_child_1 = MagicMock()
+ mock_child_2 = MagicMock()
+ mock_process.return_value.children.return_value = [mock_child_1, mock_child_2]
+
+ assert kill_child_processes(1234)
+ mock_child_1.kill.assert_called_once()
+ mock_child_2.kill.assert_called_once()
+
+
+def test_add_file_to_zip():
+ with tempfile.NamedTemporaryFile() as temp_file:
+ temp_file.write(b"Test content")
+ temp_file.flush()
+
+ with tempfile.NamedTemporaryFile(suffix=".zip") as temp_zip:
+ with zipfile.ZipFile(temp_zip.name, "w") as zf:
+ add_file_to_zip(zf, temp_file.name)
+
+ with zipfile.ZipFile(temp_zip.name, "r") as zf:
+ assert os.path.basename(temp_file.name) in zf.namelist()
+
+
+def test_patched_attribute():
+ cfg = ConfigProblemBase(
+ dataset=ConfigDPODataset(
+ prompt_column=("prompt_column",),
+ answer_column="answer_column",
+ rejected_answer_column="rejected_answer_column",
+ parent_id_column="None",
+ )
+ )
+ with PatchedAttribute(cfg.dataset, "answer_column", "chosen_response"):
+ assert cfg.dataset.answer_column == "chosen_response"
+
+ with PatchedAttribute(cfg.dataset, "answer_column", "new_answer_column"):
+ assert cfg.dataset.answer_column == "new_answer_column"
+
+ assert cfg.dataset.answer_column == "answer_column"
+
+ with PatchedAttribute(cfg.dataset, "new_property", "new_value"):
+ assert cfg.dataset.new_property == "new_value" # type: ignore[attr-defined]
+
+ with pytest.raises(AttributeError):
+ cfg.dataset.new_property # type: ignore[attr-defined]
+
+
+def test_create_symlinks_in_parent_folder():
+ with tempfile.TemporaryDirectory() as temp_dir:
+ sub_dir = os.path.join(temp_dir, "sub")
+ os.mkdir(sub_dir)
+
+ # Create some files in the subdirectory
+ for i in range(3):
+ with open(os.path.join(sub_dir, f"file{i}.txt"), "w") as f:
+ f.write(f"Content {i}")
+
+ create_symlinks_in_parent_folder(sub_dir)
+
+ # Check if symlinks were created in the parent directory
+ for i in range(3):
+ symlink_path = os.path.join(temp_dir, f"file{i}.txt")
+ assert os.path.islink(symlink_path)
+ assert os.readlink(symlink_path) == os.path.join(sub_dir, f"file{i}.txt")
diff --git a/tests/ui/README.md b/tests/ui/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..a58eda717ee781d5553427d66cb3cf9703a7b9bb
--- /dev/null
+++ b/tests/ui/README.md
@@ -0,0 +1,68 @@
+# UI Testing for LLM-Studio
+
+## Overview
+
+The UI testing framework for LLM-Studio follows the principles of Behaviour Driven Development (BDD): Playwright drives the browser, while the tests themselves are written as Gherkin scenarios and executed through pytest-bdd. This keeps the tests easily understandable for humans while remaining straightforward for machines to execute, and it avoids the complexities associated with Selenium.
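+
+As a quick sketch of the flow, each Gherkin step in `llm_studio.feature` is bound to a Python step definition; the snippet below mirrors the pattern used in `tests/ui/test.py` (names are taken from that module):
+
+```python
+from pytest_bdd import parsers, scenarios, when
+
+# Collect every scenario defined in the feature file
+scenarios("llm_studio.feature")
+
+
+@when(parsers.parse("I upload dataset {filepath}"))
+def upload_dataset_using_filesystem(llm_studio, filepath: str):
+    # llm_studio is the page-object fixture returned by the login step
+    llm_studio.import_dataset_from_filesystem(filepath)
+```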
+
+## Design
+
+The framework is designed to be flexible and runs seamlessly on both local and remote machines. It is agnostic to where the application is hosted, making it well suited for release testing across instances of H2OAI Integrated Cloud (HAIC).
+
+## Execution
+
+### Local Machine Setup
+
+To set up and run UI tests locally, follow these steps:
+
+```bash
+export LOCAL_LOGIN=True
+export PYTEST_BASE_URL=localhost:10101
+make setup-dev
+make llmstudio
+make setup-ui
+make test-ui-headed
+```
+
+### Remote Testing
+
+You can conduct UI testing for LLM-Studio on a remote machine using the following approaches:
+
+#### 1. **Running the App on a Remote Server**
+
+- Set up the app on a remote Ubuntu instance:
+
+ ```bash
+ make setup-dev
+ make llmstudio
+ ```
+
+- Obtain the app URL.
+- Run the tests on the local machine:
+
+ ```bash
+ export PYTEST_BASE_URL=
+ make setup-ui
+ make test-ui-headed
+ ```
+
+#### 2. **Running the App on HAMC** _(with Okta Login)_
+
+ ```bash
+ export OKTA_USER=
+ export OKTA_PASSWORD=
+ export PYTEST_BASE_URL=
+ make test-ui
+ ```
+
+#### 3. **Running the App on HAIC** _(with Keycloak Login)_
+
+ ```bash
+ export KEYCLOAK_USER=
+ export KEYCLOAK_PASSWORD=
+ export PYTEST_BASE_URL=
+ make test-ui
+ ```
+
+### Test Results
+
+The results of the UI tests are stored in `reports/junit_ui.xml`. These reports provide valuable insights into the success and failure of the tests, aiding in the continuous improvement of the application.
diff --git a/tests/ui/__init__.py b/tests/ui/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/ui/conftest.py b/tests/ui/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..ffdfca59302495fe9903b8ff4204e4c9c6f20ad8
--- /dev/null
+++ b/tests/ui/conftest.py
@@ -0,0 +1,14 @@
+import logging
+from functools import partial, partialmethod
+
+import pytest
+
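+# Register a custom TRACE level (numeric value 5, below DEBUG) so the UI
+# tests can emit very verbose output via logger.trace(...).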
+logging.TRACE = 5 # type: ignore
+logging.addLevelName(logging.TRACE, "TRACE") # type: ignore
+logging.Logger.trace = partialmethod(logging.Logger.log, logging.TRACE) # type: ignore
+logging.trace = partial(logging.log, logging.TRACE) # type: ignore
+
+
+@pytest.fixture(scope="session")
+def logger() -> logging.Logger:
+ return logging.getLogger("ui-tests")
diff --git a/tests/ui/llm_studio.feature b/tests/ui/llm_studio.feature
new file mode 100644
index 0000000000000000000000000000000000000000..7b9ad5ee863046fedd091c43b5756396efb48121
--- /dev/null
+++ b/tests/ui/llm_studio.feature
@@ -0,0 +1,25 @@
+Feature: LLM Studio
+
+ Background: LLM Studio user
+ Given LLM Studio home page is opened
+ When I login to LLM Studio
+ Then I see the home page
+
+ Scenario: Import dataset using filesystem
+ When I upload dataset train_full.pq
+ And I name the dataset train-full.pq
+ Then I should see the dataset train-full.pq
+ When I delete dataset train-full.pq
+ Then I should not see the dataset train-full.pq
+
+ Scenario: Create experiment
+ When I create experiment test-experiment
+ And I update LLM Backbone to h2oai/llama2-0b-unit-test
+ And I set Mixed Precision to false
+ And I tweak data sampling to 0.03
+ And I tweak max length to 32
+ And I select Perplexity metric
+ And I run the experiment
+ Then the experiment test-experiment should finish successfully
+ When I delete experiment test-experiment
+ Then I should not see the experiment test-experiment
diff --git a/tests/ui/llm_studio_page.py b/tests/ui/llm_studio_page.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a2c44574449ffa3661294d036eb9b34f4395f73
--- /dev/null
+++ b/tests/ui/llm_studio_page.py
@@ -0,0 +1,263 @@
+import os
+
+from hac_playwright.pages.base import BasePage
+from playwright.sync_api import expect
+
+CLOUD_FILESYSTEM_PATH = "/home/llmstudio/mount/data/user/oasst"
+LOCAL_FILESYSTEM_PATH = os.path.join(os.getcwd(), "data/user/oasst")
+
+
+class LLMStudioPage(BasePage):
+ # Constants for selectors
+ DATASET_IMPORT_SOURCE_SELECTOR = "dataset/import/source"
+ CONTINUE_BUTTON_SELECTOR = "button[name='Continue']"
+ DATASET_LIST_DELETE_SELECTOR = "dataset/list/delete"
+ DATASET_DELETE_DIALOG_SELECTOR = "dataset/delete/dialog"
+ DATASET_DELETE_SELECTOR = "dataset/delete"
+ EXPERIMENT_RUN_SELECTOR = "experiment/start/run"
+ EXPERIMENT_NAME_SELECTOR = "experiment/start/cfg/experiment_name"
+ EXPERIMENT_METRIC_SELECTOR = "experiment/start/cfg/metric"
+ EXPERIMENT_LIST_DELETE_SELECTOR = "experiment/list/delete"
+ EXPERIMENT_DELETE_DIALOG_SELECTOR = "experiment/delete/dialog"
+ EXPERIMENT_DELETE_SELECTOR = "experiment/delete"
+ EXPERIMENT_STATUS_SELECTOR = "[data-automation-key='status']"
+ EXPERIMENT_INDEX_SELECTOR = "[data-automation-key='name']"
+ FILESYSTEM_SELECTOR = "dataset/import/local_path"
+ FILENAME_SELECTOR = "dataset/import/name"
+ S3_BUCKET_SELECTOR = "dataset/import/s3_bucket"
+ S3_ACCESS_KEY_SELECTOR = "dataset/import/s3_access_key"
+ S3_SECRET_KEY_SELECTOR = "dataset/import/s3_secret_key"
+ S3_FILENAME_SELECTOR = "dataset/import/s3_filename"
+ AZURE_CONN_STRING = "dataset/import/azure_conn_string"
+ AZURE_CONTAINER = "dataset/import/azure_container"
+ AZURE_FILENAME = "dataset/import/azure_filename"
+ KAGGLE_COMMAND = "dataset/import/kaggle_command"
+ KAGGLE_USERNAME = "dataset/import/kaggle_username"
+ KAGGLE_SECRET_KEY = "dataset/import/kaggle_secret_key"
+ DATA_SAMPLING = "experiment/start/cfg/data_sample"
+ MAX_LENGTH = "experiment/start/cfg/max_length"
+ MAX_LENGTH_INFERENCE = "experiment/start/cfg/max_length_inference"
+ MIXED_PRECISION = "experiment/start/cfg/mixed_precision"
+ EXPERIMENT_REFRESH_SELECTOR = "experiment/list/refresh"
+ GPU_WARNING_SELECTOR = "experiment/start/error/proceed"
+
+ def assert_dataset_import(self, dataset_name: str):
+ dataset = self.page.get_by_role("button", name=dataset_name)
+ # Assert that the element is not None and clickable
+ assert dataset is not None
+ dataset.click()
+
+ def get_by_test_id(self, test_id):
+ selector = f'[data-test="{test_id}"]'
+ return self.page.locator(selector)
+
+ def open_home_page(self):
+ self.page.get_by_role("button", name="Home").click()
+
+ def open_app_settings(self):
+ self.page.get_by_role("button", name="Settings").click()
+
+ def dataset_name(self, filename):
+ self.get_by_test_id(self.FILENAME_SELECTOR).fill(filename)
+ self.continue_button().click()
+ self.continue_button().click()
+
+ def import_dataset_from_filesystem(self, filepath: str):
+ self.import_dataset("Local")
+ if "LOCAL_LOGIN" in os.environ:
+ path = f"{LOCAL_FILESYSTEM_PATH}/{filepath}"
+ else:
+ path = f"{CLOUD_FILESYSTEM_PATH}/{filepath}"
+ self.get_by_test_id(self.FILESYSTEM_SELECTOR).fill(path)
+ self.continue_button().click()
+
+ def continue_button(self):
+ return self.page.get_by_role("button", name="Continue")
+
+ def import_dataset(self, source: str):
+ button = self.page.get_by_role("button", name="Import dataset")
+ button.click()
+ # FIX: Selectors.set_test_id_attribute(self, "data-test")
+ dropdown = self.get_by_test_id(self.DATASET_IMPORT_SOURCE_SELECTOR)
+ dropdown.click()
+ self.page.get_by_role("option", name=source).click()
+
+ def import_dataset_from_aws(
+ self, bucket: str, access_key: str, secret_key: str, dataset_name: str
+ ):
+ self.import_dataset("AWS S3")
+ self.get_by_test_id(self.S3_BUCKET_SELECTOR).fill(bucket)
+ self.get_by_test_id(self.S3_ACCESS_KEY_SELECTOR).fill(access_key)
+ self.get_by_test_id(self.S3_SECRET_KEY_SELECTOR).fill(secret_key)
+ self.get_by_test_id(self.S3_FILENAME_SELECTOR).fill(dataset_name)
+ self.continue_button().click()
+
+ def import_dataset_from_azure(
+ self, connection: str, container: str, dataset_name: str
+ ):
+ self.import_dataset("Azure Blob Storage")
+ self.get_by_test_id(self.AZURE_CONN_STRING).fill(connection)
+ self.get_by_test_id(self.AZURE_CONTAINER).fill(container)
+ self.get_by_test_id(self.AZURE_FILENAME).fill(dataset_name)
+ self.continue_button().click()
+
+ def import_dataset_from_kaggle(
+ self, kaggle_command: str, username: str, secret: str
+ ):
+ self.import_dataset("Kaggle")
+ self.get_by_test_id(self.KAGGLE_COMMAND).fill(kaggle_command)
+ self.get_by_test_id(self.KAGGLE_USERNAME).fill(username)
+ self.get_by_test_id(self.KAGGLE_SECRET_KEY).fill(secret)
+ self.continue_button().click()
+
+ def delete_dataset(self, dataset_name: str):
+ # Go to dataset page
+ self.view_datasets()
+ self.get_by_test_id(self.DATASET_LIST_DELETE_SELECTOR).click()
+ # Locate dataset to delete
+ self.page.get_by_role("gridcell", name=dataset_name).click()
+ # Confirm dataset deletion
+ self.get_by_test_id(self.DATASET_DELETE_DIALOG_SELECTOR).click()
+ # Delete dataset
+ self.get_by_test_id(self.DATASET_DELETE_SELECTOR).click()
+
+ def view_datasets(self):
+ self.page.get_by_role("button", name="View datasets").click()
+
+ def assert_dataset_deletion(self, dataset_name: str):
+ self.view_datasets()
+ dataset = self.page.get_by_role("button", name=dataset_name)
+ # Assert that the element not found
+ expect(dataset).not_to_be_visible()
+
+ def create_experiment(self, name: str):
+ self.page.get_by_role("button", name="Create experiment").click()
+ self.experiment_name(name)
+
+ def slider(self, slider_selector, target_value: str, step: float = 0.1):
+ """
+ Moves the slider to the target value.
+
+ WARNING: step = 1.0 works only without headless mode.
+ Possible bug in playwright!
+
+ Args:
+ slider_selector (str): The selector for the slider element.
+ target_value (str): The target value for the slider.
+ step (float, optional): The step size for moving the slider.
+ """
+ is_completed = False
+ i = 0.0
+ # Get the slider element
+ slider = self.get_by_test_id(slider_selector)
+ slider.click()
+ # Get the bounding box of the slider
+ bounding_box = slider.bounding_box()
+ x1 = bounding_box["x"]
+ y = bounding_box["y"] + bounding_box["height"] / 2
+
+ while not is_completed:
+ if i > 100:
+ raise Exception("Could not find the target value")
+ self.page.mouse.move(x1, y)
+ self.page.mouse.down()
+ x2 = bounding_box["x"] + bounding_box["width"] * float(i) / 100
+ self.page.mouse.move(x2, y)
+ self.page.mouse.up()
+ value_now = slider.get_attribute("aria-valuenow")
+ if float(value_now) == float(target_value):
+ is_completed = True
+ else:
+ # Move the slider a little bit (adjust the step as needed)
+ x1 = x2
+ i += step
+
+ def run_experiment(self):
+ self.get_by_test_id(self.EXPERIMENT_RUN_SELECTOR).click()
+ locator = self.get_by_test_id(self.GPU_WARNING_SELECTOR)
+ if locator.is_visible():
+ locator.click()
+
+ def experiment_name(self, name: str):
+ self.get_by_test_id(self.EXPERIMENT_NAME_SELECTOR).fill(name)
+
+ def llm_backbone(self, value: str):
+ self.page.get_by_role("combobox", name="LLM Backbone").fill(value)
+
+ def mixed_precision(self, value: str):
+ # pytest-bdd passes the raw step token, so value is the string
+ # "true" or "false" rather than a bool
+ old_toggle_value = self.get_by_test_id(self.MIXED_PRECISION).get_attribute(
+ "aria-checked"
+ )
+ assert old_toggle_value in ["true", "false"]
+ assert value in ["true", "false"]
+
+ if old_toggle_value != value:
+ self.get_by_test_id(self.MIXED_PRECISION).click()
+
+ def data_sample(self, value):
+ self.slider(self.DATA_SAMPLING, value)
+
+ def max_length(self, value):
+ self.slider(self.MAX_LENGTH, value)
+
+ def max_length_inference(self, value):
+ self.slider(self.MAX_LENGTH_INFERENCE, value)
+
+ def metric(self, value):
+ self.get_by_test_id(self.EXPERIMENT_METRIC_SELECTOR).click()
+ self.page.get_by_role("option", name=f"{value}").click()
+
+ def view_experiment_page(self):
+ self.page.get_by_role("button", name="View experiments").click()
+
+ def view_experiment(self, experiment_name: str):
+ self.view_experiment_page()
+ i = self.find_experiment_index(experiment_name)
+ status = self.page.locator(
+ f"{self.EXPERIMENT_STATUS_SELECTOR} >> nth={i}"
+ ).inner_text()
+ while True:
+ if status in ["queued", "running"]:
+ self.get_by_test_id(self.EXPERIMENT_REFRESH_SELECTOR).click()
+ status = self.page.locator(
+ f"{self.EXPERIMENT_STATUS_SELECTOR} >> nth={i}"
+ ).inner_text()
+ elif status == "finished":
+ break
+ else:
+ raise Exception(f"Unexpected status: {status}")
+
+ def find_experiment_index(self, experiment_name):
+ index = 0
+ while index < 100: # upper bound on the number of experiments to scan
+ # Get the innerText of the element with the specified selector
+ inner_text = self.page.locator(
+ f"{self.EXPERIMENT_INDEX_SELECTOR} >> nth={index}"
+ ).inner_text()
+ # Check if the current name matches the target name
+ if inner_text != experiment_name:
+ index += 1
+ else:
+ break
+ return index
+
+ def delete_experiment(self, experiment_name: str):
+ # Go to experiment page
+ self.view_experiment_page()
+ # Click on Delete experiments button
+ self.get_by_test_id(self.EXPERIMENT_LIST_DELETE_SELECTOR).click()
+ # Locate experiment to delete
+ self.page.get_by_role("gridcell", name=experiment_name).locator(
+ f'div:has-text("{experiment_name}")'
+ ).first.click()
+ # Delete experiment
+ self.get_by_test_id(self.EXPERIMENT_DELETE_DIALOG_SELECTOR).click()
+ # Confirm experiment deletion
+ self.get_by_test_id(self.EXPERIMENT_DELETE_SELECTOR).click()
+
+ def assert_experiment_deletion(self, experiment_name: str):
+ # Go to experiment page
+ self.view_experiment_page()
+ experiment = self.page.get_by_role("button", name=experiment_name)
+ # Assert that the element not found
+ expect(experiment).not_to_be_visible()
diff --git a/tests/ui/test.py b/tests/ui/test.py
new file mode 100644
index 0000000000000000000000000000000000000000..15a74bbbf81a839e0537c34062c9432e7bb6e313
--- /dev/null
+++ b/tests/ui/test.py
@@ -0,0 +1,112 @@
+import logging
+import os
+
+from playwright.sync_api import Page
+from pytest_bdd import given, parsers, scenarios, then, when
+
+from .llm_studio_page import LLMStudioPage
+from .utils import login
+
+scenarios("llm_studio.feature")
+
+
+@given("LLM Studio home page is opened")
+def open_llm_studio(page: Page, base_url):
+ page.set_viewport_size({"width": 1920, "height": 1080})
+ page.goto(base_url)
+
+
+@when("I login to LLM Studio", target_fixture="llm_studio")
+def login_to_llm_studio(logger: logging.Logger, page: Page, base_url: str):
+ okta_user = os.environ.get("OKTA_USER")
+ okta_password = os.environ.get("OKTA_PASSWORD")
+ keycloak_user = os.environ.get("KEYCLOAK_USER")
+ keycloak_password = os.environ.get("KEYCLOAK_PASSWORD")
+ if "LOCAL_LOGIN" not in os.environ:
+ if okta_user and okta_password:
+ login(page, "okta", okta_user, okta_password)
+ elif keycloak_user and keycloak_password:
+ login(page, "keycloak", keycloak_user, keycloak_password)
+
+ return LLMStudioPage(logger, page, base_url)
+
+
+@then(parsers.parse("I should see the dataset {dataset_name}"))
+def view_datasets(llm_studio: LLMStudioPage, dataset_name: str):
+ llm_studio.assert_dataset_import(dataset_name)
+
+
+@when(parsers.parse("I upload dataset {filepath}"))
+def upload_dataset_using_filesystem(llm_studio: LLMStudioPage, filepath: str):
+ llm_studio.import_dataset_from_filesystem(filepath)
+
+
+@when(parsers.parse("I name the dataset {dataset_name}"))
+def dataset_name(llm_studio: LLMStudioPage, dataset_name: str):
+ llm_studio.dataset_name(dataset_name)
+
+
+@then("I see the home page")
+def view_home_page(llm_studio: LLMStudioPage):
+ llm_studio.open_home_page()
+
+
+@when(parsers.parse("I delete dataset {dataset_name}"))
+def delete_dataset(llm_studio: LLMStudioPage, dataset_name: str):
+ llm_studio.delete_dataset(dataset_name)
+
+
+@then(parsers.parse("I should not see the dataset {dataset_name}"))
+def assert_dataset_deletion(llm_studio: LLMStudioPage, dataset_name: str):
+ llm_studio.view_datasets()
+ llm_studio.assert_dataset_deletion(dataset_name)
+
+
+@when(parsers.parse("I create experiment {experiment_name}"))
+def create_experiment(llm_studio: LLMStudioPage, experiment_name: str):
+ llm_studio.create_experiment(experiment_name)
+
+
+@when(parsers.parse("I tweak data sampling to {value}"))
+def tweak_data_sampling(llm_studio: LLMStudioPage, value: str):
+ llm_studio.data_sample(value)
+
+
+@when(parsers.parse("I update LLM Backbone to {llm_backbone}"))
+def update_llm_backbone(llm_studio: LLMStudioPage, llm_backbone: str):
+ llm_studio.llm_backbone(llm_backbone)
+
+
+@when(parsers.parse("I set Mixed Precision to {value}"))
+def update_mixed_precision(llm_studio: LLMStudioPage, value: str):
+ llm_studio.mixed_precision(value)
+
+
+@when(parsers.parse("I tweak max length to {value}"))
+def tweak_max_length(llm_studio: LLMStudioPage, value: str):
+ llm_studio.max_length(value)
+
+
+@when(parsers.parse("I select {value} metric"))
+def select_metric(llm_studio: LLMStudioPage, value: str):
+ llm_studio.metric(value)
+
+
+@when("I run the experiment")
+def run_experiment(llm_studio: LLMStudioPage):
+ llm_studio.run_experiment()
+
+
+@then(parsers.parse("I should see the {experiment_name} should finish successfully"))
+def view_experiment(llm_studio: LLMStudioPage, experiment_name: str):
+ llm_studio.view_experiment(experiment_name)
+
+
+@when(parsers.parse("I delete experiment {experiment_name}"))
+def delete_experiment(llm_studio: LLMStudioPage, experiment_name: str):
+ llm_studio.delete_experiment(experiment_name)
+
+
+@then(parsers.parse("I should not see the experiment {experiment_name}"))
+def assert_experiment_deletion(llm_studio: LLMStudioPage, experiment_name: str):
+ llm_studio.assert_experiment_deletion(experiment_name)
diff --git a/tests/ui/utils.py b/tests/ui/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab6288177bce0a26fdcd90c108e06093636207d1
--- /dev/null
+++ b/tests/ui/utils.py
@@ -0,0 +1,19 @@
+from hac_playwright.main import keycloak_login, okta_login, okta_otp_local
+from playwright.sync_api import Page
+
+
+def login(
+ page: Page,
+ login_type: str,
+ username: str,
+ password: str,
+ secret: str = "",
+):
+ if login_type == "keycloak":
+ keycloak_login(page, username, password)
+ elif login_type == "okta":
+ okta_login(page, username, password)
+ elif login_type == "okta-otp-local":
+ okta_otp_local(page, username, password, secret)
+ else:
+ raise ValueError(f"Unknown login type '{login_type}'")