diff --git a/LLM-Detector-V7-11w/README.md b/LLM-Detector-V7-11w/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ad33e8258d13444ef94334df6cfbf2f985d89ed4
--- /dev/null
+++ b/LLM-Detector-V7-11w/README.md
@@ -0,0 +1,59 @@
+---
+license: other
+library_name: peft
+tags:
+- llama-factory
+- lora
+- generated_from_trainer
+base_model: ./Mistral-7B-Instruct-v0.1
+model-index:
+- name: mistral-7b
+ results: []
+---
+
+
+
+# mistral-7b
+
+This model is a LoRA adapter fine-tuned from [Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) (referenced locally as `./Mistral-7B-Instruct-v0.1`) on the ta, tb, tc, td, te, tf, tg, and th datasets.
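+
+As a quick reference, a minimal sketch of loading this LoRA adapter on top of the base model with PEFT is shown below. The Hub id `mistralai/Mistral-7B-Instruct-v0.1`, the adapter path, and the example prompt are assumptions, since this repository only records a local base-model path.
+
+```python
+# Minimal usage sketch (assumed base-model id and adapter directory).
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from peft import PeftModel
+
+base_id = "mistralai/Mistral-7B-Instruct-v0.1"   # assumed Hub id of the local base model
+adapter_dir = "./LLM-Detector-V7-11w"            # directory containing adapter_config.json
+
+tokenizer = AutoTokenizer.from_pretrained(base_id)
+model = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
+model = PeftModel.from_pretrained(model, adapter_dir)  # attach the LoRA weights
+
+# Hypothetical detection prompt, formatted with the Mistral instruction template.
+prompt = "[INST] Is the following text human-written or AI-generated? <text here> [/INST]"
+inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+outputs = model.generate(**inputs, max_new_tokens=32)
+print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+```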
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training (an illustrative `TrainingArguments` sketch follows the list):
+- learning_rate: 5e-05
+- train_batch_size: 4
+- eval_batch_size: 8
+- seed: 42
+- gradient_accumulation_steps: 4
+- total_train_batch_size: 16
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: cosine
+- num_epochs: 3.0
+- mixed_precision_training: Native AMP
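+
+For reference, these settings give an effective batch size of 4 × 4 = 16 sequences per optimizer step, and the Adam betas/epsilon above are the Transformers defaults. An equivalent configuration could be expressed as below; the argument names come from the Transformers API and the output directory is assumed, not taken from the original training script.
+
+```python
+# Sketch only: the listed hyperparameters expressed as Hugging Face TrainingArguments.
+from transformers import TrainingArguments
+
+args = TrainingArguments(
+    output_dir="LLM-Detector-V7-11w",   # assumed output directory
+    learning_rate=5e-5,
+    per_device_train_batch_size=4,
+    per_device_eval_batch_size=8,
+    gradient_accumulation_steps=4,      # 4 x 4 = 16 effective train batch size
+    num_train_epochs=3.0,
+    lr_scheduler_type="cosine",
+    seed=42,
+    fp16=True,                          # "Native AMP" mixed precision (fp16 assumed)
+)
+```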
+
+### Training results
+
+
+
+### Framework versions
+
+- PEFT 0.7.1
+- Transformers 4.36.2
+- Pytorch 2.1.1+cu121
+- Datasets 2.15.0
+- Tokenizers 0.15.0
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/adapter_config.json b/LLM-Detector-V7-11w/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..8a770ff96db84d75626efa2d0a8b065b51c74716
--- /dev/null
+++ b/LLM-Detector-V7-11w/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "./Mistral-7B-Instruct-v0.1",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/adapter_model.safetensors b/LLM-Detector-V7-11w/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..371ab2506e266b46f1cb30f30c60f46aad5ee021
--- /dev/null
+++ b/LLM-Detector-V7-11w/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ef38e97e8cb42bf27d3c049d2ecf7d6080586fa7d9ea1080a54cf3e9e124d01
+size 13648432
diff --git a/LLM-Detector-V7-11w/all_results.json b/LLM-Detector-V7-11w/all_results.json
new file mode 100644
index 0000000000000000000000000000000000000000..eba90efd5a369c94068ceafc0cb7d8d1f6f0219f
--- /dev/null
+++ b/LLM-Detector-V7-11w/all_results.json
@@ -0,0 +1,7 @@
+{
+ "epoch": 3.0,
+ "train_loss": 0.016247458042738187,
+ "train_runtime": 104237.6159,
+ "train_samples_per_second": 3.423,
+ "train_steps_per_second": 0.214
+}
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/checkpoint-12000/README.md b/LLM-Detector-V7-11w/checkpoint-12000/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1761815329bb2f98216bb131acf44b508e0087b4
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-12000/README.md
@@ -0,0 +1,204 @@
+---
+library_name: peft
+base_model: ./Mistral-7B-Instruct-v0.1
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
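+
+Pending an official snippet, a minimal sketch of attaching this intermediate (step 12000) LoRA adapter to the base model with PEFT is given below; the Hub id of the base model is an assumption, since only a local path is recorded in this checkpoint.
+
+```python
+# Sketch only: load the step-12000 adapter on top of the assumed base model.
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from peft import PeftModel
+
+base_id = "mistralai/Mistral-7B-Instruct-v0.1"        # assumed Hub id
+ckpt_dir = "./LLM-Detector-V7-11w/checkpoint-12000"   # this checkpoint directory
+
+tokenizer = AutoTokenizer.from_pretrained(ckpt_dir)   # tokenizer files are saved alongside the adapter
+model = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
+model = PeftModel.from_pretrained(model, ckpt_dir)
+```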
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+
+### Framework versions
+
+- PEFT 0.7.1
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/checkpoint-12000/adapter_config.json b/LLM-Detector-V7-11w/checkpoint-12000/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..8a770ff96db84d75626efa2d0a8b065b51c74716
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-12000/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "./Mistral-7B-Instruct-v0.1",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/checkpoint-12000/adapter_model.safetensors b/LLM-Detector-V7-11w/checkpoint-12000/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..70d9e035f1fddc19b2d6e4b23b3c822523fbaf80
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-12000/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ec1db537a3e6619f4f8b0f3ec9fa8c801b1bbdcfcb0fd4fbdd42cc3f37ae8cb
+size 13648432
diff --git a/LLM-Detector-V7-11w/checkpoint-12000/optimizer.pt b/LLM-Detector-V7-11w/checkpoint-12000/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..79704bd348e75a98455003c1e93334d3a4769714
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-12000/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3102c6d70788dd7b7b1a4602722ca63d798825e3706e7c4e8e27e8ccb59b34f5
+size 27370618
diff --git a/LLM-Detector-V7-11w/checkpoint-12000/rng_state.pth b/LLM-Detector-V7-11w/checkpoint-12000/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0a11d850e4f9ddde03041fb0f3247ef616fbda77
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-12000/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d138cfe3a4adf21f048848ee35837c9a757a0a3616ff7adbb45b69aac247435
+size 14244
diff --git a/LLM-Detector-V7-11w/checkpoint-12000/scheduler.pt b/LLM-Detector-V7-11w/checkpoint-12000/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..9e93bbd515a59c61cc639c686e1539e4097a5ae5
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-12000/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d7ab9123dfbc4d7eb2776a2ae4bbfc0564c92aaba8d9ced3be98abb280f5b5b
+size 1064
diff --git a/LLM-Detector-V7-11w/checkpoint-12000/special_tokens_map.json b/LLM-Detector-V7-11w/checkpoint-12000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-12000/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-12000/tokenizer.model b/LLM-Detector-V7-11w/checkpoint-12000/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-12000/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/LLM-Detector-V7-11w/checkpoint-12000/tokenizer_config.json b/LLM-Detector-V7-11w/checkpoint-12000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e55462da3cd6a6f37c2b8fc230da6b14731e5ab1
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-12000/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "<s>",
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-12000/trainer_state.json b/LLM-Detector-V7-11w/checkpoint-12000/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..d775e56a969b4b4df13223663deffa2d9dc3a72f
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-12000/trainer_state.json
@@ -0,0 +1,741 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.6144764723689078,
+ "eval_steps": 500,
+ "global_step": 12000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.9997567688496474e-05,
+ "loss": 1.6283,
+ "step": 100
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4.9990172715142793e-05,
+ "loss": 0.068,
+ "step": 200
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 4.997781629993153e-05,
+ "loss": 0.0404,
+ "step": 300
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 4.9960500896052476e-05,
+ "loss": 0.0626,
+ "step": 400
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.993822994123172e-05,
+ "loss": 0.0428,
+ "step": 500
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 4.9911007857049264e-05,
+ "loss": 0.0265,
+ "step": 600
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 4.987884004806111e-05,
+ "loss": 0.0263,
+ "step": 700
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 4.984173290072626e-05,
+ "loss": 0.03,
+ "step": 800
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 4.979969378213884e-05,
+ "loss": 0.0253,
+ "step": 900
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 4.975273103856537e-05,
+ "loss": 0.038,
+ "step": 1000
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 4.970085399378785e-05,
+ "loss": 0.0322,
+ "step": 1100
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 4.964407294725254e-05,
+ "loss": 0.0296,
+ "step": 1200
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 4.958239917202523e-05,
+ "loss": 0.0295,
+ "step": 1300
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 4.9515844912553106e-05,
+ "loss": 0.0171,
+ "step": 1400
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 4.944442338223378e-05,
+ "loss": 0.0205,
+ "step": 1500
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 4.9368148760792e-05,
+ "loss": 0.029,
+ "step": 1600
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 4.9287036191464414e-05,
+ "loss": 0.0172,
+ "step": 1700
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 4.9201101777993116e-05,
+ "loss": 0.0235,
+ "step": 1800
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 4.911036258142853e-05,
+ "loss": 0.0168,
+ "step": 1900
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 4.9014836616742065e-05,
+ "loss": 0.0227,
+ "step": 2000
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 4.891454284924965e-05,
+ "loss": 0.0227,
+ "step": 2100
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 4.8810575041794e-05,
+ "loss": 0.0173,
+ "step": 2200
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 4.8700853511049656e-05,
+ "loss": 0.0227,
+ "step": 2300
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 4.858642651436035e-05,
+ "loss": 0.0144,
+ "step": 2400
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 4.846731676957191e-05,
+ "loss": 0.03,
+ "step": 2500
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 4.834354792422293e-05,
+ "loss": 0.0189,
+ "step": 2600
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 4.821514455084985e-05,
+ "loss": 0.0111,
+ "step": 2700
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 4.8082132142108465e-05,
+ "loss": 0.0152,
+ "step": 2800
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 4.794453710571272e-05,
+ "loss": 0.0182,
+ "step": 2900
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 4.780238675919182e-05,
+ "loss": 0.0111,
+ "step": 3000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 4.765570932446672e-05,
+ "loss": 0.0138,
+ "step": 3100
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 4.75045339222471e-05,
+ "loss": 0.0256,
+ "step": 3200
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 4.734889056624983e-05,
+ "loss": 0.0199,
+ "step": 3300
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 4.718881015724017e-05,
+ "loss": 0.0159,
+ "step": 3400
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 4.702432447689692e-05,
+ "loss": 0.0121,
+ "step": 3500
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 4.6855466181502544e-05,
+ "loss": 0.0181,
+ "step": 3600
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 4.66822687954598e-05,
+ "loss": 0.0148,
+ "step": 3700
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 4.65047667046359e-05,
+ "loss": 0.0152,
+ "step": 3800
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 4.632299514953571e-05,
+ "loss": 0.0103,
+ "step": 3900
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 4.613699021830524e-05,
+ "loss": 0.0217,
+ "step": 4000
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 4.59467888395669e-05,
+ "loss": 0.0142,
+ "step": 4100
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 4.575242877508777e-05,
+ "loss": 0.0151,
+ "step": 4200
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 4.5553948612282607e-05,
+ "loss": 0.0164,
+ "step": 4300
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 4.5353433433160075e-05,
+ "loss": 0.0095,
+ "step": 4400
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 4.514687230313633e-05,
+ "loss": 0.015,
+ "step": 4500
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 4.49363112993749e-05,
+ "loss": 0.015,
+ "step": 4600
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 4.472179222575561e-05,
+ "loss": 0.0181,
+ "step": 4700
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 4.4503357671976574e-05,
+ "loss": 0.0175,
+ "step": 4800
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 4.428105100509852e-05,
+ "loss": 0.0069,
+ "step": 4900
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 4.4054916360934957e-05,
+ "loss": 0.0108,
+ "step": 5000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 4.3824998635289594e-05,
+ "loss": 0.0204,
+ "step": 5100
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 4.3591343475042946e-05,
+ "loss": 0.0192,
+ "step": 5200
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.3353997269089774e-05,
+ "loss": 0.0086,
+ "step": 5300
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.311300713912926e-05,
+ "loss": 0.0179,
+ "step": 5400
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 4.286842093030963e-05,
+ "loss": 0.0142,
+ "step": 5500
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 4.262028720172921e-05,
+ "loss": 0.0187,
+ "step": 5600
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.2368655216795735e-05,
+ "loss": 0.0176,
+ "step": 5700
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 4.211357493344573e-05,
+ "loss": 0.0145,
+ "step": 5800
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 4.185509699422615e-05,
+ "loss": 0.0087,
+ "step": 5900
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.1593272716239985e-05,
+ "loss": 0.0136,
+ "step": 6000
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.132815408095797e-05,
+ "loss": 0.0137,
+ "step": 6100
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 4.105979372389847e-05,
+ "loss": 0.011,
+ "step": 6200
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 4.0788244924177365e-05,
+ "loss": 0.024,
+ "step": 6300
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 4.0513561593930325e-05,
+ "loss": 0.0067,
+ "step": 6400
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 4.023579826760924e-05,
+ "loss": 0.0102,
+ "step": 6500
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 3.995501009115527e-05,
+ "loss": 0.013,
+ "step": 6600
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 3.9674104896418544e-05,
+ "loss": 0.0133,
+ "step": 6700
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 3.9387463695523555e-05,
+ "loss": 0.0087,
+ "step": 6800
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.9097966069208193e-05,
+ "loss": 0.01,
+ "step": 6900
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 3.880566949309125e-05,
+ "loss": 0.0113,
+ "step": 7000
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.8513595749088484e-05,
+ "loss": 0.0101,
+ "step": 7100
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 3.821590244319273e-05,
+ "loss": 0.0092,
+ "step": 7200
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 3.791558530862982e-05,
+ "loss": 0.0176,
+ "step": 7300
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 3.76127039690774e-05,
+ "loss": 0.0114,
+ "step": 7400
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 3.7307318557299355e-05,
+ "loss": 0.0083,
+ "step": 7500
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 3.699948970320738e-05,
+ "loss": 0.0067,
+ "step": 7600
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 3.668927852182374e-05,
+ "loss": 0.0021,
+ "step": 7700
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 3.637674660114774e-05,
+ "loss": 0.0087,
+ "step": 7800
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 3.606195598992832e-05,
+ "loss": 0.005,
+ "step": 7900
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 3.574496918534519e-05,
+ "loss": 0.014,
+ "step": 8000
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 3.542584912060087e-05,
+ "loss": 0.0073,
+ "step": 8100
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 3.510465915242623e-05,
+ "loss": 0.0073,
+ "step": 8200
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 3.478146304850188e-05,
+ "loss": 0.0106,
+ "step": 8300
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 3.4456324974798025e-05,
+ "loss": 0.0106,
+ "step": 8400
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 3.412930948283528e-05,
+ "loss": 0.0036,
+ "step": 8500
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 3.380048149686877e-05,
+ "loss": 0.0051,
+ "step": 8600
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 3.3469906300998476e-05,
+ "loss": 0.0047,
+ "step": 8700
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 3.313764952620792e-05,
+ "loss": 0.0035,
+ "step": 8800
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 3.280377713733412e-05,
+ "loss": 0.0094,
+ "step": 8900
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 3.246835541997116e-05,
+ "loss": 0.0077,
+ "step": 9000
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 3.213145096731021e-05,
+ "loss": 0.0076,
+ "step": 9100
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 3.1793130666918324e-05,
+ "loss": 0.0074,
+ "step": 9200
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 3.145346168745891e-05,
+ "loss": 0.0072,
+ "step": 9300
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 3.111251146535634e-05,
+ "loss": 0.0043,
+ "step": 9400
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 3.077377511336779e-05,
+ "loss": 0.0071,
+ "step": 9500
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 3.043047683848379e-05,
+ "loss": 0.0102,
+ "step": 9600
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 3.0086100419987084e-05,
+ "loss": 0.005,
+ "step": 9700
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 2.974071422889685e-05,
+ "loss": 0.0155,
+ "step": 9800
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 2.9394386836708154e-05,
+ "loss": 0.0086,
+ "step": 9900
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 2.9047187001778132e-05,
+ "loss": 0.0048,
+ "step": 10000
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 2.8699183655674938e-05,
+ "loss": 0.0085,
+ "step": 10100
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 2.835044588949241e-05,
+ "loss": 0.0091,
+ "step": 10200
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 2.8001042940132995e-05,
+ "loss": 0.0089,
+ "step": 10300
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 2.7651044176561748e-05,
+ "loss": 0.0093,
+ "step": 10400
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 2.7300519086034166e-05,
+ "loss": 0.0073,
+ "step": 10500
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 2.6949537260300395e-05,
+ "loss": 0.0029,
+ "step": 10600
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 2.6598168381788864e-05,
+ "loss": 0.0027,
+ "step": 10700
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 2.6246482209771783e-05,
+ "loss": 0.011,
+ "step": 10800
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 2.5894548566515485e-05,
+ "loss": 0.0108,
+ "step": 10900
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 2.554243732341816e-05,
+ "loss": 0.0033,
+ "step": 11000
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 2.519021838713791e-05,
+ "loss": 0.0048,
+ "step": 11100
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 2.483796168571378e-05,
+ "loss": 0.0015,
+ "step": 11200
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.4485737154682554e-05,
+ "loss": 0.0053,
+ "step": 11300
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.4133614723194047e-05,
+ "loss": 0.0043,
+ "step": 11400
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 2.378166430012766e-05,
+ "loss": 0.0086,
+ "step": 11500
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 2.3429955760212966e-05,
+ "loss": 0.0077,
+ "step": 11600
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 2.307855893015712e-05,
+ "loss": 0.0066,
+ "step": 11700
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 2.272754357478172e-05,
+ "loss": 0.0055,
+ "step": 11800
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 2.237697938317202e-05,
+ "loss": 0.0067,
+ "step": 11900
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 2.202693595484113e-05,
+ "loss": 0.0082,
+ "step": 12000
+ }
+ ],
+ "logging_steps": 100,
+ "max_steps": 22296,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 3000,
+ "total_flos": 3.353181220065116e+18,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-12000/training_args.bin b/LLM-Detector-V7-11w/checkpoint-12000/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..ac97b1c26d12cbfaab826f907d39398233627e61
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-12000/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fda912c16bf2c6e8ac3158b354c96cabf8fd0eff786de104f90c23c31741ef25
+size 4856
diff --git a/LLM-Detector-V7-11w/checkpoint-15000/README.md b/LLM-Detector-V7-11w/checkpoint-15000/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1761815329bb2f98216bb131acf44b508e0087b4
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-15000/README.md
@@ -0,0 +1,204 @@
+---
+library_name: peft
+base_model: ./Mistral-7B-Instruct-v0.1
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+
+### Framework versions
+
+- PEFT 0.7.1
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/checkpoint-15000/adapter_config.json b/LLM-Detector-V7-11w/checkpoint-15000/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..8a770ff96db84d75626efa2d0a8b065b51c74716
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-15000/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "./Mistral-7B-Instruct-v0.1",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/checkpoint-15000/adapter_model.safetensors b/LLM-Detector-V7-11w/checkpoint-15000/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..94bc27d9f9c655d83dc3b31fd58581d971c89f4d
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-15000/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac3d2109fc70a6c1b59c702cb431bca4f25ceaff721b1e0d8075f9793a35ff88
+size 13648432
diff --git a/LLM-Detector-V7-11w/checkpoint-15000/optimizer.pt b/LLM-Detector-V7-11w/checkpoint-15000/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..d2ae0cc34c95692f78d599d7bf42d408a3902dd8
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-15000/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffcc1773551890f9dac6bd1dc923b435d8c12078e0caaf83ab93f5a0f0e05a0f
+size 27370618
diff --git a/LLM-Detector-V7-11w/checkpoint-15000/rng_state.pth b/LLM-Detector-V7-11w/checkpoint-15000/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..41dfa7d7903dea42d227bad638c2c750928d590c
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-15000/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c062f7f375beded48b5337f5a3f3a5cb38807fa3e85dbf3e294c0ab6b627bfc2
+size 14244
diff --git a/LLM-Detector-V7-11w/checkpoint-15000/scheduler.pt b/LLM-Detector-V7-11w/checkpoint-15000/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..79b4ae90678cf0ccc7280710d1b6651278665c4b
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-15000/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0933c07370bc6a7583e4fef9f3016715d1c88ece3b281fe4985af206b423756e
+size 1064
diff --git a/LLM-Detector-V7-11w/checkpoint-15000/special_tokens_map.json b/LLM-Detector-V7-11w/checkpoint-15000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-15000/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "</s>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-15000/tokenizer.model b/LLM-Detector-V7-11w/checkpoint-15000/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-15000/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/LLM-Detector-V7-11w/checkpoint-15000/tokenizer_config.json b/LLM-Detector-V7-11w/checkpoint-15000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e55462da3cd6a6f37c2b8fc230da6b14731e5ab1
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-15000/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "<s>",
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "</s>",
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "</s>",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-15000/trainer_state.json b/LLM-Detector-V7-11w/checkpoint-15000/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..94fcf00873a5906c6bb43a50cd9f982506b07b97
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-15000/trainer_state.json
@@ -0,0 +1,921 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.018095590461135,
+ "eval_steps": 500,
+ "global_step": 15000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.9997567688496474e-05,
+ "loss": 1.6283,
+ "step": 100
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4.9990172715142793e-05,
+ "loss": 0.068,
+ "step": 200
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 4.997781629993153e-05,
+ "loss": 0.0404,
+ "step": 300
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 4.9960500896052476e-05,
+ "loss": 0.0626,
+ "step": 400
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.993822994123172e-05,
+ "loss": 0.0428,
+ "step": 500
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 4.9911007857049264e-05,
+ "loss": 0.0265,
+ "step": 600
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 4.987884004806111e-05,
+ "loss": 0.0263,
+ "step": 700
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 4.984173290072626e-05,
+ "loss": 0.03,
+ "step": 800
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 4.979969378213884e-05,
+ "loss": 0.0253,
+ "step": 900
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 4.975273103856537e-05,
+ "loss": 0.038,
+ "step": 1000
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 4.970085399378785e-05,
+ "loss": 0.0322,
+ "step": 1100
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 4.964407294725254e-05,
+ "loss": 0.0296,
+ "step": 1200
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 4.958239917202523e-05,
+ "loss": 0.0295,
+ "step": 1300
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 4.9515844912553106e-05,
+ "loss": 0.0171,
+ "step": 1400
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 4.944442338223378e-05,
+ "loss": 0.0205,
+ "step": 1500
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 4.9368148760792e-05,
+ "loss": 0.029,
+ "step": 1600
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 4.9287036191464414e-05,
+ "loss": 0.0172,
+ "step": 1700
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 4.9201101777993116e-05,
+ "loss": 0.0235,
+ "step": 1800
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 4.911036258142853e-05,
+ "loss": 0.0168,
+ "step": 1900
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 4.9014836616742065e-05,
+ "loss": 0.0227,
+ "step": 2000
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 4.891454284924965e-05,
+ "loss": 0.0227,
+ "step": 2100
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 4.8810575041794e-05,
+ "loss": 0.0173,
+ "step": 2200
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 4.8700853511049656e-05,
+ "loss": 0.0227,
+ "step": 2300
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 4.858642651436035e-05,
+ "loss": 0.0144,
+ "step": 2400
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 4.846731676957191e-05,
+ "loss": 0.03,
+ "step": 2500
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 4.834354792422293e-05,
+ "loss": 0.0189,
+ "step": 2600
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 4.821514455084985e-05,
+ "loss": 0.0111,
+ "step": 2700
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 4.8082132142108465e-05,
+ "loss": 0.0152,
+ "step": 2800
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 4.794453710571272e-05,
+ "loss": 0.0182,
+ "step": 2900
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 4.780238675919182e-05,
+ "loss": 0.0111,
+ "step": 3000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 4.765570932446672e-05,
+ "loss": 0.0138,
+ "step": 3100
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 4.75045339222471e-05,
+ "loss": 0.0256,
+ "step": 3200
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 4.734889056624983e-05,
+ "loss": 0.0199,
+ "step": 3300
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 4.718881015724017e-05,
+ "loss": 0.0159,
+ "step": 3400
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 4.702432447689692e-05,
+ "loss": 0.0121,
+ "step": 3500
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 4.6855466181502544e-05,
+ "loss": 0.0181,
+ "step": 3600
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 4.66822687954598e-05,
+ "loss": 0.0148,
+ "step": 3700
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 4.65047667046359e-05,
+ "loss": 0.0152,
+ "step": 3800
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 4.632299514953571e-05,
+ "loss": 0.0103,
+ "step": 3900
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 4.613699021830524e-05,
+ "loss": 0.0217,
+ "step": 4000
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 4.59467888395669e-05,
+ "loss": 0.0142,
+ "step": 4100
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 4.575242877508777e-05,
+ "loss": 0.0151,
+ "step": 4200
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 4.5553948612282607e-05,
+ "loss": 0.0164,
+ "step": 4300
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 4.5353433433160075e-05,
+ "loss": 0.0095,
+ "step": 4400
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 4.514687230313633e-05,
+ "loss": 0.015,
+ "step": 4500
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 4.49363112993749e-05,
+ "loss": 0.015,
+ "step": 4600
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 4.472179222575561e-05,
+ "loss": 0.0181,
+ "step": 4700
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 4.4503357671976574e-05,
+ "loss": 0.0175,
+ "step": 4800
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 4.428105100509852e-05,
+ "loss": 0.0069,
+ "step": 4900
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 4.4054916360934957e-05,
+ "loss": 0.0108,
+ "step": 5000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 4.3824998635289594e-05,
+ "loss": 0.0204,
+ "step": 5100
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 4.3591343475042946e-05,
+ "loss": 0.0192,
+ "step": 5200
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.3353997269089774e-05,
+ "loss": 0.0086,
+ "step": 5300
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.311300713912926e-05,
+ "loss": 0.0179,
+ "step": 5400
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 4.286842093030963e-05,
+ "loss": 0.0142,
+ "step": 5500
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 4.262028720172921e-05,
+ "loss": 0.0187,
+ "step": 5600
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.2368655216795735e-05,
+ "loss": 0.0176,
+ "step": 5700
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 4.211357493344573e-05,
+ "loss": 0.0145,
+ "step": 5800
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 4.185509699422615e-05,
+ "loss": 0.0087,
+ "step": 5900
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.1593272716239985e-05,
+ "loss": 0.0136,
+ "step": 6000
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.132815408095797e-05,
+ "loss": 0.0137,
+ "step": 6100
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 4.105979372389847e-05,
+ "loss": 0.011,
+ "step": 6200
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 4.0788244924177365e-05,
+ "loss": 0.024,
+ "step": 6300
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 4.0513561593930325e-05,
+ "loss": 0.0067,
+ "step": 6400
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 4.023579826760924e-05,
+ "loss": 0.0102,
+ "step": 6500
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 3.995501009115527e-05,
+ "loss": 0.013,
+ "step": 6600
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 3.9674104896418544e-05,
+ "loss": 0.0133,
+ "step": 6700
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 3.9387463695523555e-05,
+ "loss": 0.0087,
+ "step": 6800
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.9097966069208193e-05,
+ "loss": 0.01,
+ "step": 6900
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 3.880566949309125e-05,
+ "loss": 0.0113,
+ "step": 7000
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.8513595749088484e-05,
+ "loss": 0.0101,
+ "step": 7100
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 3.821590244319273e-05,
+ "loss": 0.0092,
+ "step": 7200
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 3.791558530862982e-05,
+ "loss": 0.0176,
+ "step": 7300
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 3.76127039690774e-05,
+ "loss": 0.0114,
+ "step": 7400
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 3.7307318557299355e-05,
+ "loss": 0.0083,
+ "step": 7500
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 3.699948970320738e-05,
+ "loss": 0.0067,
+ "step": 7600
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 3.668927852182374e-05,
+ "loss": 0.0021,
+ "step": 7700
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 3.637674660114774e-05,
+ "loss": 0.0087,
+ "step": 7800
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 3.606195598992832e-05,
+ "loss": 0.005,
+ "step": 7900
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 3.574496918534519e-05,
+ "loss": 0.014,
+ "step": 8000
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 3.542584912060087e-05,
+ "loss": 0.0073,
+ "step": 8100
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 3.510465915242623e-05,
+ "loss": 0.0073,
+ "step": 8200
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 3.478146304850188e-05,
+ "loss": 0.0106,
+ "step": 8300
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 3.4456324974798025e-05,
+ "loss": 0.0106,
+ "step": 8400
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 3.412930948283528e-05,
+ "loss": 0.0036,
+ "step": 8500
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 3.380048149686877e-05,
+ "loss": 0.0051,
+ "step": 8600
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 3.3469906300998476e-05,
+ "loss": 0.0047,
+ "step": 8700
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 3.313764952620792e-05,
+ "loss": 0.0035,
+ "step": 8800
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 3.280377713733412e-05,
+ "loss": 0.0094,
+ "step": 8900
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 3.246835541997116e-05,
+ "loss": 0.0077,
+ "step": 9000
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 3.213145096731021e-05,
+ "loss": 0.0076,
+ "step": 9100
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 3.1793130666918324e-05,
+ "loss": 0.0074,
+ "step": 9200
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 3.145346168745891e-05,
+ "loss": 0.0072,
+ "step": 9300
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 3.111251146535634e-05,
+ "loss": 0.0043,
+ "step": 9400
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 3.077377511336779e-05,
+ "loss": 0.0071,
+ "step": 9500
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 3.043047683848379e-05,
+ "loss": 0.0102,
+ "step": 9600
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 3.0086100419987084e-05,
+ "loss": 0.005,
+ "step": 9700
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 2.974071422889685e-05,
+ "loss": 0.0155,
+ "step": 9800
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 2.9394386836708154e-05,
+ "loss": 0.0086,
+ "step": 9900
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 2.9047187001778132e-05,
+ "loss": 0.0048,
+ "step": 10000
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 2.8699183655674938e-05,
+ "loss": 0.0085,
+ "step": 10100
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 2.835044588949241e-05,
+ "loss": 0.0091,
+ "step": 10200
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 2.8001042940132995e-05,
+ "loss": 0.0089,
+ "step": 10300
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 2.7651044176561748e-05,
+ "loss": 0.0093,
+ "step": 10400
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 2.7300519086034166e-05,
+ "loss": 0.0073,
+ "step": 10500
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 2.6949537260300395e-05,
+ "loss": 0.0029,
+ "step": 10600
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 2.6598168381788864e-05,
+ "loss": 0.0027,
+ "step": 10700
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 2.6246482209771783e-05,
+ "loss": 0.011,
+ "step": 10800
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 2.5894548566515485e-05,
+ "loss": 0.0108,
+ "step": 10900
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 2.554243732341816e-05,
+ "loss": 0.0033,
+ "step": 11000
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 2.519021838713791e-05,
+ "loss": 0.0048,
+ "step": 11100
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 2.483796168571378e-05,
+ "loss": 0.0015,
+ "step": 11200
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.4485737154682554e-05,
+ "loss": 0.0053,
+ "step": 11300
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.4133614723194047e-05,
+ "loss": 0.0043,
+ "step": 11400
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 2.378166430012766e-05,
+ "loss": 0.0086,
+ "step": 11500
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 2.3429955760212966e-05,
+ "loss": 0.0077,
+ "step": 11600
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 2.307855893015712e-05,
+ "loss": 0.0066,
+ "step": 11700
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 2.272754357478172e-05,
+ "loss": 0.0055,
+ "step": 11800
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 2.237697938317202e-05,
+ "loss": 0.0067,
+ "step": 11900
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 2.202693595484113e-05,
+ "loss": 0.0082,
+ "step": 12000
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 2.1680974167907623e-05,
+ "loss": 0.0101,
+ "step": 12100
+ },
+ {
+ "epoch": 1.64,
+ "learning_rate": 2.1332173697925385e-05,
+ "loss": 0.0019,
+ "step": 12200
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 2.09841014224678e-05,
+ "loss": 0.0106,
+ "step": 12300
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 2.063682644631365e-05,
+ "loss": 0.0051,
+ "step": 12400
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 2.0290417715949304e-05,
+ "loss": 0.0076,
+ "step": 12500
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 1.994494400588042e-05,
+ "loss": 0.0037,
+ "step": 12600
+ },
+ {
+ "epoch": 1.71,
+ "learning_rate": 1.9600473904977725e-05,
+ "loss": 0.0073,
+ "step": 12700
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 1.9257075802859685e-05,
+ "loss": 0.0052,
+ "step": 12800
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 1.89148178763147e-05,
+ "loss": 0.0038,
+ "step": 12900
+ },
+ {
+ "epoch": 1.75,
+ "learning_rate": 1.857376807576563e-05,
+ "loss": 0.0044,
+ "step": 13000
+ },
+ {
+ "epoch": 1.76,
+ "learning_rate": 1.8233994111779146e-05,
+ "loss": 0.0031,
+ "step": 13100
+ },
+ {
+ "epoch": 1.78,
+ "learning_rate": 1.7895563441622803e-05,
+ "loss": 0.0067,
+ "step": 13200
+ },
+ {
+ "epoch": 1.79,
+ "learning_rate": 1.7558543255872352e-05,
+ "loss": 0.0096,
+ "step": 13300
+ },
+ {
+ "epoch": 1.8,
+ "learning_rate": 1.7223000465072e-05,
+ "loss": 0.0063,
+ "step": 13400
+ },
+ {
+ "epoch": 1.82,
+ "learning_rate": 1.68890016864503e-05,
+ "loss": 0.0064,
+ "step": 13500
+ },
+ {
+ "epoch": 1.83,
+ "learning_rate": 1.6556613230694243e-05,
+ "loss": 0.0047,
+ "step": 13600
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 1.6225901088784207e-05,
+ "loss": 0.0023,
+ "step": 13700
+ },
+ {
+ "epoch": 1.86,
+ "learning_rate": 1.58969309188924e-05,
+ "loss": 0.0064,
+ "step": 13800
+ },
+ {
+ "epoch": 1.87,
+ "learning_rate": 1.556976803334737e-05,
+ "loss": 0.004,
+ "step": 13900
+ },
+ {
+ "epoch": 1.88,
+ "learning_rate": 1.5244477385667167e-05,
+ "loss": 0.0016,
+ "step": 14000
+ },
+ {
+ "epoch": 1.9,
+ "learning_rate": 1.4921123557663763e-05,
+ "loss": 0.008,
+ "step": 14100
+ },
+ {
+ "epoch": 1.91,
+ "learning_rate": 1.4599770746621238e-05,
+ "loss": 0.0047,
+ "step": 14200
+ },
+ {
+ "epoch": 1.92,
+ "learning_rate": 1.4280482752550327e-05,
+ "loss": 0.0056,
+ "step": 14300
+ },
+ {
+ "epoch": 1.94,
+ "learning_rate": 1.396332296552188e-05,
+ "loss": 0.0065,
+ "step": 14400
+ },
+ {
+ "epoch": 1.95,
+ "learning_rate": 1.3648354353081588e-05,
+ "loss": 0.004,
+ "step": 14500
+ },
+ {
+ "epoch": 1.96,
+ "learning_rate": 1.333563944774876e-05,
+ "loss": 0.0065,
+ "step": 14600
+ },
+ {
+ "epoch": 1.98,
+ "learning_rate": 1.3028332659827522e-05,
+ "loss": 0.0026,
+ "step": 14700
+ },
+ {
+ "epoch": 1.99,
+ "learning_rate": 1.2720286886495473e-05,
+ "loss": 0.008,
+ "step": 14800
+ },
+ {
+ "epoch": 2.0,
+ "learning_rate": 1.2414679074810755e-05,
+ "loss": 0.0053,
+ "step": 14900
+ },
+ {
+ "epoch": 2.02,
+ "learning_rate": 1.211156989883942e-05,
+ "loss": 0.0024,
+ "step": 15000
+ }
+ ],
+ "logging_steps": 100,
+ "max_steps": 22296,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 3000,
+ "total_flos": 4.194822128965386e+18,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-15000/training_args.bin b/LLM-Detector-V7-11w/checkpoint-15000/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..ac97b1c26d12cbfaab826f907d39398233627e61
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-15000/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fda912c16bf2c6e8ac3158b354c96cabf8fd0eff786de104f90c23c31741ef25
+size 4856
diff --git a/LLM-Detector-V7-11w/checkpoint-18000/README.md b/LLM-Detector-V7-11w/checkpoint-18000/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1761815329bb2f98216bb131acf44b508e0087b4
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-18000/README.md
@@ -0,0 +1,204 @@
+---
+library_name: peft
+base_model: ./Mistral-7B-Instruct-v0.1
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+
+### Framework versions
+
+- PEFT 0.7.1
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/checkpoint-18000/adapter_config.json b/LLM-Detector-V7-11w/checkpoint-18000/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..8a770ff96db84d75626efa2d0a8b065b51c74716
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-18000/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "./Mistral-7B-Instruct-v0.1",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/checkpoint-18000/adapter_model.safetensors b/LLM-Detector-V7-11w/checkpoint-18000/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..52e9ce84ae935c4f88d476c81f05d379be252608
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-18000/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66bcc6800881908984d073432cdeb920070fd2435379ba34dd7a28847624268d
+size 13648432
diff --git a/LLM-Detector-V7-11w/checkpoint-18000/optimizer.pt b/LLM-Detector-V7-11w/checkpoint-18000/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..69ad6b0303a8ca3f82d96b396c892da230713e49
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-18000/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b3b72fecc266a932e807481578b9027a8e27baaca8eaa2102a6fee9477605bc
+size 27370618
diff --git a/LLM-Detector-V7-11w/checkpoint-18000/rng_state.pth b/LLM-Detector-V7-11w/checkpoint-18000/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..41dfa7d7903dea42d227bad638c2c750928d590c
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-18000/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c062f7f375beded48b5337f5a3f3a5cb38807fa3e85dbf3e294c0ab6b627bfc2
+size 14244
diff --git a/LLM-Detector-V7-11w/checkpoint-18000/scheduler.pt b/LLM-Detector-V7-11w/checkpoint-18000/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b2a5eb97b9f090aaa6a2bd2c4f07a92d9830824e
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-18000/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0be9a1e39bd11602ee7b398aebdd574e95624fd6bbb933e2dcc4fdf93be09dc1
+size 1064
diff --git a/LLM-Detector-V7-11w/checkpoint-18000/special_tokens_map.json b/LLM-Detector-V7-11w/checkpoint-18000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-18000/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-18000/tokenizer.model b/LLM-Detector-V7-11w/checkpoint-18000/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-18000/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/LLM-Detector-V7-11w/checkpoint-18000/tokenizer_config.json b/LLM-Detector-V7-11w/checkpoint-18000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e55462da3cd6a6f37c2b8fc230da6b14731e5ab1
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-18000/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
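
The `chat_template` in this tokenizer config prepends the BOS token once, wraps each user turn in `[INST] ... [/INST]`, appends the EOS token after each assistant turn, and enforces strict user/assistant alternation. A minimal sketch of rendering a conversation with it, assuming the checkpoint directory is loadable with `AutoTokenizer`; the path and the example messages are placeholders, since the instruction format used in training is not documented here.

```python
# Sketch: render a conversation with the chat_template from tokenizer_config.json.
# The checkpoint path and the example messages are placeholders.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("LLM-Detector-V7-11w/checkpoint-18000")

messages = [
    {"role": "user", "content": "Is the following text human-written or AI-generated?"},
    {"role": "assistant", "content": "AI-generated"},
]

# Roles must alternate user/assistant; the template raises an exception otherwise.
text = tokenizer.apply_chat_template(messages, tokenize=False)
print(text)  # -> "<s>[INST] Is the following ... [/INST]AI-generated</s> "
```
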
diff --git a/LLM-Detector-V7-11w/checkpoint-18000/trainer_state.json b/LLM-Detector-V7-11w/checkpoint-18000/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..6afd8581187278dbbfbf0d69a53815ed2bbf864d
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-18000/trainer_state.json
@@ -0,0 +1,1101 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.421714708553362,
+ "eval_steps": 500,
+ "global_step": 18000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.9997567688496474e-05,
+ "loss": 1.6283,
+ "step": 100
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4.9990172715142793e-05,
+ "loss": 0.068,
+ "step": 200
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 4.997781629993153e-05,
+ "loss": 0.0404,
+ "step": 300
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 4.9960500896052476e-05,
+ "loss": 0.0626,
+ "step": 400
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.993822994123172e-05,
+ "loss": 0.0428,
+ "step": 500
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 4.9911007857049264e-05,
+ "loss": 0.0265,
+ "step": 600
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 4.987884004806111e-05,
+ "loss": 0.0263,
+ "step": 700
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 4.984173290072626e-05,
+ "loss": 0.03,
+ "step": 800
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 4.979969378213884e-05,
+ "loss": 0.0253,
+ "step": 900
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 4.975273103856537e-05,
+ "loss": 0.038,
+ "step": 1000
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 4.970085399378785e-05,
+ "loss": 0.0322,
+ "step": 1100
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 4.964407294725254e-05,
+ "loss": 0.0296,
+ "step": 1200
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 4.958239917202523e-05,
+ "loss": 0.0295,
+ "step": 1300
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 4.9515844912553106e-05,
+ "loss": 0.0171,
+ "step": 1400
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 4.944442338223378e-05,
+ "loss": 0.0205,
+ "step": 1500
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 4.9368148760792e-05,
+ "loss": 0.029,
+ "step": 1600
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 4.9287036191464414e-05,
+ "loss": 0.0172,
+ "step": 1700
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 4.9201101777993116e-05,
+ "loss": 0.0235,
+ "step": 1800
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 4.911036258142853e-05,
+ "loss": 0.0168,
+ "step": 1900
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 4.9014836616742065e-05,
+ "loss": 0.0227,
+ "step": 2000
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 4.891454284924965e-05,
+ "loss": 0.0227,
+ "step": 2100
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 4.8810575041794e-05,
+ "loss": 0.0173,
+ "step": 2200
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 4.8700853511049656e-05,
+ "loss": 0.0227,
+ "step": 2300
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 4.858642651436035e-05,
+ "loss": 0.0144,
+ "step": 2400
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 4.846731676957191e-05,
+ "loss": 0.03,
+ "step": 2500
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 4.834354792422293e-05,
+ "loss": 0.0189,
+ "step": 2600
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 4.821514455084985e-05,
+ "loss": 0.0111,
+ "step": 2700
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 4.8082132142108465e-05,
+ "loss": 0.0152,
+ "step": 2800
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 4.794453710571272e-05,
+ "loss": 0.0182,
+ "step": 2900
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 4.780238675919182e-05,
+ "loss": 0.0111,
+ "step": 3000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 4.765570932446672e-05,
+ "loss": 0.0138,
+ "step": 3100
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 4.75045339222471e-05,
+ "loss": 0.0256,
+ "step": 3200
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 4.734889056624983e-05,
+ "loss": 0.0199,
+ "step": 3300
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 4.718881015724017e-05,
+ "loss": 0.0159,
+ "step": 3400
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 4.702432447689692e-05,
+ "loss": 0.0121,
+ "step": 3500
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 4.6855466181502544e-05,
+ "loss": 0.0181,
+ "step": 3600
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 4.66822687954598e-05,
+ "loss": 0.0148,
+ "step": 3700
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 4.65047667046359e-05,
+ "loss": 0.0152,
+ "step": 3800
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 4.632299514953571e-05,
+ "loss": 0.0103,
+ "step": 3900
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 4.613699021830524e-05,
+ "loss": 0.0217,
+ "step": 4000
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 4.59467888395669e-05,
+ "loss": 0.0142,
+ "step": 4100
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 4.575242877508777e-05,
+ "loss": 0.0151,
+ "step": 4200
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 4.5553948612282607e-05,
+ "loss": 0.0164,
+ "step": 4300
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 4.5353433433160075e-05,
+ "loss": 0.0095,
+ "step": 4400
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 4.514687230313633e-05,
+ "loss": 0.015,
+ "step": 4500
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 4.49363112993749e-05,
+ "loss": 0.015,
+ "step": 4600
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 4.472179222575561e-05,
+ "loss": 0.0181,
+ "step": 4700
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 4.4503357671976574e-05,
+ "loss": 0.0175,
+ "step": 4800
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 4.428105100509852e-05,
+ "loss": 0.0069,
+ "step": 4900
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 4.4054916360934957e-05,
+ "loss": 0.0108,
+ "step": 5000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 4.3824998635289594e-05,
+ "loss": 0.0204,
+ "step": 5100
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 4.3591343475042946e-05,
+ "loss": 0.0192,
+ "step": 5200
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.3353997269089774e-05,
+ "loss": 0.0086,
+ "step": 5300
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.311300713912926e-05,
+ "loss": 0.0179,
+ "step": 5400
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 4.286842093030963e-05,
+ "loss": 0.0142,
+ "step": 5500
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 4.262028720172921e-05,
+ "loss": 0.0187,
+ "step": 5600
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.2368655216795735e-05,
+ "loss": 0.0176,
+ "step": 5700
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 4.211357493344573e-05,
+ "loss": 0.0145,
+ "step": 5800
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 4.185509699422615e-05,
+ "loss": 0.0087,
+ "step": 5900
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.1593272716239985e-05,
+ "loss": 0.0136,
+ "step": 6000
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.132815408095797e-05,
+ "loss": 0.0137,
+ "step": 6100
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 4.105979372389847e-05,
+ "loss": 0.011,
+ "step": 6200
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 4.0788244924177365e-05,
+ "loss": 0.024,
+ "step": 6300
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 4.0513561593930325e-05,
+ "loss": 0.0067,
+ "step": 6400
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 4.023579826760924e-05,
+ "loss": 0.0102,
+ "step": 6500
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 3.995501009115527e-05,
+ "loss": 0.013,
+ "step": 6600
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 3.9674104896418544e-05,
+ "loss": 0.0133,
+ "step": 6700
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 3.9387463695523555e-05,
+ "loss": 0.0087,
+ "step": 6800
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.9097966069208193e-05,
+ "loss": 0.01,
+ "step": 6900
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 3.880566949309125e-05,
+ "loss": 0.0113,
+ "step": 7000
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.8513595749088484e-05,
+ "loss": 0.0101,
+ "step": 7100
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 3.821590244319273e-05,
+ "loss": 0.0092,
+ "step": 7200
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 3.791558530862982e-05,
+ "loss": 0.0176,
+ "step": 7300
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 3.76127039690774e-05,
+ "loss": 0.0114,
+ "step": 7400
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 3.7307318557299355e-05,
+ "loss": 0.0083,
+ "step": 7500
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 3.699948970320738e-05,
+ "loss": 0.0067,
+ "step": 7600
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 3.668927852182374e-05,
+ "loss": 0.0021,
+ "step": 7700
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 3.637674660114774e-05,
+ "loss": 0.0087,
+ "step": 7800
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 3.606195598992832e-05,
+ "loss": 0.005,
+ "step": 7900
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 3.574496918534519e-05,
+ "loss": 0.014,
+ "step": 8000
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 3.542584912060087e-05,
+ "loss": 0.0073,
+ "step": 8100
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 3.510465915242623e-05,
+ "loss": 0.0073,
+ "step": 8200
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 3.478146304850188e-05,
+ "loss": 0.0106,
+ "step": 8300
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 3.4456324974798025e-05,
+ "loss": 0.0106,
+ "step": 8400
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 3.412930948283528e-05,
+ "loss": 0.0036,
+ "step": 8500
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 3.380048149686877e-05,
+ "loss": 0.0051,
+ "step": 8600
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 3.3469906300998476e-05,
+ "loss": 0.0047,
+ "step": 8700
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 3.313764952620792e-05,
+ "loss": 0.0035,
+ "step": 8800
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 3.280377713733412e-05,
+ "loss": 0.0094,
+ "step": 8900
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 3.246835541997116e-05,
+ "loss": 0.0077,
+ "step": 9000
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 3.213145096731021e-05,
+ "loss": 0.0076,
+ "step": 9100
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 3.1793130666918324e-05,
+ "loss": 0.0074,
+ "step": 9200
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 3.145346168745891e-05,
+ "loss": 0.0072,
+ "step": 9300
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 3.111251146535634e-05,
+ "loss": 0.0043,
+ "step": 9400
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 3.077377511336779e-05,
+ "loss": 0.0071,
+ "step": 9500
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 3.043047683848379e-05,
+ "loss": 0.0102,
+ "step": 9600
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 3.0086100419987084e-05,
+ "loss": 0.005,
+ "step": 9700
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 2.974071422889685e-05,
+ "loss": 0.0155,
+ "step": 9800
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 2.9394386836708154e-05,
+ "loss": 0.0086,
+ "step": 9900
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 2.9047187001778132e-05,
+ "loss": 0.0048,
+ "step": 10000
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 2.8699183655674938e-05,
+ "loss": 0.0085,
+ "step": 10100
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 2.835044588949241e-05,
+ "loss": 0.0091,
+ "step": 10200
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 2.8001042940132995e-05,
+ "loss": 0.0089,
+ "step": 10300
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 2.7651044176561748e-05,
+ "loss": 0.0093,
+ "step": 10400
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 2.7300519086034166e-05,
+ "loss": 0.0073,
+ "step": 10500
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 2.6949537260300395e-05,
+ "loss": 0.0029,
+ "step": 10600
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 2.6598168381788864e-05,
+ "loss": 0.0027,
+ "step": 10700
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 2.6246482209771783e-05,
+ "loss": 0.011,
+ "step": 10800
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 2.5894548566515485e-05,
+ "loss": 0.0108,
+ "step": 10900
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 2.554243732341816e-05,
+ "loss": 0.0033,
+ "step": 11000
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 2.519021838713791e-05,
+ "loss": 0.0048,
+ "step": 11100
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 2.483796168571378e-05,
+ "loss": 0.0015,
+ "step": 11200
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.4485737154682554e-05,
+ "loss": 0.0053,
+ "step": 11300
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.4133614723194047e-05,
+ "loss": 0.0043,
+ "step": 11400
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 2.378166430012766e-05,
+ "loss": 0.0086,
+ "step": 11500
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 2.3429955760212966e-05,
+ "loss": 0.0077,
+ "step": 11600
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 2.307855893015712e-05,
+ "loss": 0.0066,
+ "step": 11700
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 2.272754357478172e-05,
+ "loss": 0.0055,
+ "step": 11800
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 2.237697938317202e-05,
+ "loss": 0.0067,
+ "step": 11900
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 2.202693595484113e-05,
+ "loss": 0.0082,
+ "step": 12000
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 2.1680974167907623e-05,
+ "loss": 0.0101,
+ "step": 12100
+ },
+ {
+ "epoch": 1.64,
+ "learning_rate": 2.1332173697925385e-05,
+ "loss": 0.0019,
+ "step": 12200
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 2.09841014224678e-05,
+ "loss": 0.0106,
+ "step": 12300
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 2.063682644631365e-05,
+ "loss": 0.0051,
+ "step": 12400
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 2.0290417715949304e-05,
+ "loss": 0.0076,
+ "step": 12500
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 1.994494400588042e-05,
+ "loss": 0.0037,
+ "step": 12600
+ },
+ {
+ "epoch": 1.71,
+ "learning_rate": 1.9600473904977725e-05,
+ "loss": 0.0073,
+ "step": 12700
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 1.9257075802859685e-05,
+ "loss": 0.0052,
+ "step": 12800
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 1.89148178763147e-05,
+ "loss": 0.0038,
+ "step": 12900
+ },
+ {
+ "epoch": 1.75,
+ "learning_rate": 1.857376807576563e-05,
+ "loss": 0.0044,
+ "step": 13000
+ },
+ {
+ "epoch": 1.76,
+ "learning_rate": 1.8233994111779146e-05,
+ "loss": 0.0031,
+ "step": 13100
+ },
+ {
+ "epoch": 1.78,
+ "learning_rate": 1.7895563441622803e-05,
+ "loss": 0.0067,
+ "step": 13200
+ },
+ {
+ "epoch": 1.79,
+ "learning_rate": 1.7558543255872352e-05,
+ "loss": 0.0096,
+ "step": 13300
+ },
+ {
+ "epoch": 1.8,
+ "learning_rate": 1.7223000465072e-05,
+ "loss": 0.0063,
+ "step": 13400
+ },
+ {
+ "epoch": 1.82,
+ "learning_rate": 1.68890016864503e-05,
+ "loss": 0.0064,
+ "step": 13500
+ },
+ {
+ "epoch": 1.83,
+ "learning_rate": 1.6556613230694243e-05,
+ "loss": 0.0047,
+ "step": 13600
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 1.6225901088784207e-05,
+ "loss": 0.0023,
+ "step": 13700
+ },
+ {
+ "epoch": 1.86,
+ "learning_rate": 1.58969309188924e-05,
+ "loss": 0.0064,
+ "step": 13800
+ },
+ {
+ "epoch": 1.87,
+ "learning_rate": 1.556976803334737e-05,
+ "loss": 0.004,
+ "step": 13900
+ },
+ {
+ "epoch": 1.88,
+ "learning_rate": 1.5244477385667167e-05,
+ "loss": 0.0016,
+ "step": 14000
+ },
+ {
+ "epoch": 1.9,
+ "learning_rate": 1.4921123557663763e-05,
+ "loss": 0.008,
+ "step": 14100
+ },
+ {
+ "epoch": 1.91,
+ "learning_rate": 1.4599770746621238e-05,
+ "loss": 0.0047,
+ "step": 14200
+ },
+ {
+ "epoch": 1.92,
+ "learning_rate": 1.4280482752550327e-05,
+ "loss": 0.0056,
+ "step": 14300
+ },
+ {
+ "epoch": 1.94,
+ "learning_rate": 1.396332296552188e-05,
+ "loss": 0.0065,
+ "step": 14400
+ },
+ {
+ "epoch": 1.95,
+ "learning_rate": 1.3648354353081588e-05,
+ "loss": 0.004,
+ "step": 14500
+ },
+ {
+ "epoch": 1.96,
+ "learning_rate": 1.333563944774876e-05,
+ "loss": 0.0065,
+ "step": 14600
+ },
+ {
+ "epoch": 1.98,
+ "learning_rate": 1.3028332659827522e-05,
+ "loss": 0.0026,
+ "step": 14700
+ },
+ {
+ "epoch": 1.99,
+ "learning_rate": 1.2720286886495473e-05,
+ "loss": 0.008,
+ "step": 14800
+ },
+ {
+ "epoch": 2.0,
+ "learning_rate": 1.2414679074810755e-05,
+ "loss": 0.0053,
+ "step": 14900
+ },
+ {
+ "epoch": 2.02,
+ "learning_rate": 1.211156989883942e-05,
+ "loss": 0.0024,
+ "step": 15000
+ },
+ {
+ "epoch": 2.03,
+ "learning_rate": 1.1811019536579015e-05,
+ "loss": 0.0014,
+ "step": 15100
+ },
+ {
+ "epoch": 2.05,
+ "learning_rate": 1.1513087658011207e-05,
+ "loss": 0.0005,
+ "step": 15200
+ },
+ {
+ "epoch": 2.06,
+ "learning_rate": 1.1217833413255133e-05,
+ "loss": 0.0002,
+ "step": 15300
+ },
+ {
+ "epoch": 2.07,
+ "learning_rate": 1.0925315420823949e-05,
+ "loss": 0.0023,
+ "step": 15400
+ },
+ {
+ "epoch": 2.09,
+ "learning_rate": 1.063559175598702e-05,
+ "loss": 0.0003,
+ "step": 15500
+ },
+ {
+ "epoch": 2.1,
+ "learning_rate": 1.0348719939239885e-05,
+ "loss": 0.0009,
+ "step": 15600
+ },
+ {
+ "epoch": 2.11,
+ "learning_rate": 1.0064756924884342e-05,
+ "loss": 0.0059,
+ "step": 15700
+ },
+ {
+ "epoch": 2.13,
+ "learning_rate": 9.783759089721054e-06,
+ "loss": 0.0005,
+ "step": 15800
+ },
+ {
+ "epoch": 2.14,
+ "learning_rate": 9.505782221856696e-06,
+ "loss": 0.0009,
+ "step": 15900
+ },
+ {
+ "epoch": 2.15,
+ "learning_rate": 9.230881509628037e-06,
+ "loss": 0.0013,
+ "step": 16000
+ },
+ {
+ "epoch": 2.17,
+ "learning_rate": 8.959111530645085e-06,
+ "loss": 0.0017,
+ "step": 16100
+ },
+ {
+ "epoch": 2.18,
+ "learning_rate": 8.690526240955454e-06,
+ "loss": 0.0025,
+ "step": 16200
+ },
+ {
+ "epoch": 2.19,
+ "learning_rate": 8.425178964332225e-06,
+ "loss": 0.005,
+ "step": 16300
+ },
+ {
+ "epoch": 2.21,
+ "learning_rate": 8.163122381687191e-06,
+ "loss": 0.0039,
+ "step": 16400
+ },
+ {
+ "epoch": 2.22,
+ "learning_rate": 7.90440852061189e-06,
+ "loss": 0.0002,
+ "step": 16500
+ },
+ {
+ "epoch": 2.23,
+ "learning_rate": 7.64908874504823e-06,
+ "loss": 0.0014,
+ "step": 16600
+ },
+ {
+ "epoch": 2.25,
+ "learning_rate": 7.3972137450908895e-06,
+ "loss": 0.0008,
+ "step": 16700
+ },
+ {
+ "epoch": 2.26,
+ "learning_rate": 7.148833526923606e-06,
+ "loss": 0.0031,
+ "step": 16800
+ },
+ {
+ "epoch": 2.27,
+ "learning_rate": 6.9039974028910575e-06,
+ "loss": 0.0022,
+ "step": 16900
+ },
+ {
+ "epoch": 2.29,
+ "learning_rate": 6.6627539817086775e-06,
+ "loss": 0.0006,
+ "step": 17000
+ },
+ {
+ "epoch": 2.3,
+ "learning_rate": 6.425151158812067e-06,
+ "loss": 0.0001,
+ "step": 17100
+ },
+ {
+ "epoch": 2.31,
+ "learning_rate": 6.191236106848014e-06,
+ "loss": 0.0001,
+ "step": 17200
+ },
+ {
+ "epoch": 2.33,
+ "learning_rate": 5.961055266309076e-06,
+ "loss": 0.0055,
+ "step": 17300
+ },
+ {
+ "epoch": 2.34,
+ "learning_rate": 5.73465433631348e-06,
+ "loss": 0.0006,
+ "step": 17400
+ },
+ {
+ "epoch": 2.35,
+ "learning_rate": 5.512078265532167e-06,
+ "loss": 0.0001,
+ "step": 17500
+ },
+ {
+ "epoch": 2.37,
+ "learning_rate": 5.293371243264947e-06,
+ "loss": 0.0,
+ "step": 17600
+ },
+ {
+ "epoch": 2.38,
+ "learning_rate": 5.078576690667289e-06,
+ "loss": 0.001,
+ "step": 17700
+ },
+ {
+ "epoch": 2.39,
+ "learning_rate": 4.8677372521297e-06,
+ "loss": 0.0003,
+ "step": 17800
+ },
+ {
+ "epoch": 2.41,
+ "learning_rate": 4.660894786811287e-06,
+ "loss": 0.0027,
+ "step": 17900
+ },
+ {
+ "epoch": 2.42,
+ "learning_rate": 4.4580903603291895e-06,
+ "loss": 0.0027,
+ "step": 18000
+ }
+ ],
+ "logging_steps": 100,
+ "max_steps": 22296,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 3000,
+ "total_flos": 5.035419489463566e+18,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+}
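
The `learning_rate` values logged above trace a cosine decay with no warmup, starting near 5e-05 and falling toward zero as `global_step` approaches `max_steps` (22296). The sketch below reproduces the logged values to within a couple of percent; the small residual drift late in training suggests the trainer's scheduler used a slightly different internal step count, so treat this as an approximation rather than the exact scheduler implementation.

```python
# Approximate reconstruction of the logged cosine learning-rate schedule.
# Assumes a peak lr of 5e-05 and 22296 total steps (from trainer_state.json);
# expect roughly 1-2% drift versus the logged values near the end of training.
import math

LR_MAX = 5e-05
TOTAL_STEPS = 22296

def cosine_lr(step: int) -> float:
    progress = step / TOTAL_STEPS
    return 0.5 * LR_MAX * (1.0 + math.cos(math.pi * progress))

for step, logged in [(100, 4.9998e-05), (9000, 3.2468e-05), (18000, 4.458e-06)]:
    print(f"step {step}: computed {cosine_lr(step):.4e}, logged {logged:.4e}")
```
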
diff --git a/LLM-Detector-V7-11w/checkpoint-18000/training_args.bin b/LLM-Detector-V7-11w/checkpoint-18000/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..ac97b1c26d12cbfaab826f907d39398233627e61
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-18000/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fda912c16bf2c6e8ac3158b354c96cabf8fd0eff786de104f90c23c31741ef25
+size 4856
diff --git a/LLM-Detector-V7-11w/checkpoint-21000/README.md b/LLM-Detector-V7-11w/checkpoint-21000/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1761815329bb2f98216bb131acf44b508e0087b4
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-21000/README.md
@@ -0,0 +1,204 @@
+---
+library_name: peft
+base_model: ./Mistral-7B-Instruct-v0.1
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
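+
+Pending official usage instructions, the following is only a minimal sketch of loading this LoRA checkpoint with `transformers` and `peft`. It assumes the base-model path recorded in `adapter_config.json` (`./Mistral-7B-Instruct-v0.1`); the adapter path and the prompt are placeholders, since the exact instruction format used during fine-tuning is not documented in this card.
+
+```python
+# Sketch: attach this LoRA adapter to the Mistral-7B-Instruct-v0.1 base and
+# run one chat-formatted prompt. Paths and the prompt are placeholders.
+import torch
+from peft import PeftModel
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+BASE = "./Mistral-7B-Instruct-v0.1"   # base-model path from adapter_config.json
+ADAPTER = "./LLM-Detector-V7-11w"     # directory holding adapter_model.safetensors
+
+tokenizer = AutoTokenizer.from_pretrained(BASE)
+base_model = AutoModelForCausalLM.from_pretrained(
+    BASE, torch_dtype=torch.float16, device_map="auto"  # device_map needs accelerate
+)
+model = PeftModel.from_pretrained(base_model, ADAPTER)
+model.eval()
+
+messages = [{"role": "user", "content": "Is the following text human-written or AI-generated?\n\n<text to classify>"}]
+input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to(base_model.device)
+
+with torch.no_grad():
+    output = model.generate(input_ids, max_new_tokens=16)
+print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
+```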
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+
+### Framework versions
+
+- PEFT 0.7.1
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/checkpoint-21000/adapter_config.json b/LLM-Detector-V7-11w/checkpoint-21000/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..8a770ff96db84d75626efa2d0a8b065b51c74716
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-21000/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "./Mistral-7B-Instruct-v0.1",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/checkpoint-21000/adapter_model.safetensors b/LLM-Detector-V7-11w/checkpoint-21000/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..ab2facd138b4ac4756da49b1358848195c359b89
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-21000/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e8126d333246fb6c27064bda467eec5f40dfe2fc4e43f939918b35a5c756db0
+size 13648432
diff --git a/LLM-Detector-V7-11w/checkpoint-21000/optimizer.pt b/LLM-Detector-V7-11w/checkpoint-21000/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..bcceba26287feda9e65761b15fc2477035454031
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-21000/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5ff00fee246bf537fe0d2fc688d970100898b31ec69cf0d387101f348785a8d
+size 27370618
diff --git a/LLM-Detector-V7-11w/checkpoint-21000/rng_state.pth b/LLM-Detector-V7-11w/checkpoint-21000/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..41dfa7d7903dea42d227bad638c2c750928d590c
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-21000/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c062f7f375beded48b5337f5a3f3a5cb38807fa3e85dbf3e294c0ab6b627bfc2
+size 14244
diff --git a/LLM-Detector-V7-11w/checkpoint-21000/scheduler.pt b/LLM-Detector-V7-11w/checkpoint-21000/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c7d824ce219a28bc2ecd9f78d36f7b0f41882599
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-21000/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:713761332054d3e14a2c95eaeef7e343c5253856445ea0f0649f8adc824d6c21
+size 1064
diff --git a/LLM-Detector-V7-11w/checkpoint-21000/special_tokens_map.json b/LLM-Detector-V7-11w/checkpoint-21000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-21000/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-21000/tokenizer.model b/LLM-Detector-V7-11w/checkpoint-21000/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-21000/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/LLM-Detector-V7-11w/checkpoint-21000/tokenizer_config.json b/LLM-Detector-V7-11w/checkpoint-21000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e55462da3cd6a6f37c2b8fc230da6b14731e5ab1
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-21000/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-21000/trainer_state.json b/LLM-Detector-V7-11w/checkpoint-21000/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..33496232900f7ba8bccf89f8ff9a835dd0d6ade7
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-21000/trainer_state.json
@@ -0,0 +1,1281 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.825333826645589,
+ "eval_steps": 500,
+ "global_step": 21000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.9997567688496474e-05,
+ "loss": 1.6283,
+ "step": 100
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4.9990172715142793e-05,
+ "loss": 0.068,
+ "step": 200
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 4.997781629993153e-05,
+ "loss": 0.0404,
+ "step": 300
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 4.9960500896052476e-05,
+ "loss": 0.0626,
+ "step": 400
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.993822994123172e-05,
+ "loss": 0.0428,
+ "step": 500
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 4.9911007857049264e-05,
+ "loss": 0.0265,
+ "step": 600
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 4.987884004806111e-05,
+ "loss": 0.0263,
+ "step": 700
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 4.984173290072626e-05,
+ "loss": 0.03,
+ "step": 800
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 4.979969378213884e-05,
+ "loss": 0.0253,
+ "step": 900
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 4.975273103856537e-05,
+ "loss": 0.038,
+ "step": 1000
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 4.970085399378785e-05,
+ "loss": 0.0322,
+ "step": 1100
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 4.964407294725254e-05,
+ "loss": 0.0296,
+ "step": 1200
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 4.958239917202523e-05,
+ "loss": 0.0295,
+ "step": 1300
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 4.9515844912553106e-05,
+ "loss": 0.0171,
+ "step": 1400
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 4.944442338223378e-05,
+ "loss": 0.0205,
+ "step": 1500
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 4.9368148760792e-05,
+ "loss": 0.029,
+ "step": 1600
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 4.9287036191464414e-05,
+ "loss": 0.0172,
+ "step": 1700
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 4.9201101777993116e-05,
+ "loss": 0.0235,
+ "step": 1800
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 4.911036258142853e-05,
+ "loss": 0.0168,
+ "step": 1900
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 4.9014836616742065e-05,
+ "loss": 0.0227,
+ "step": 2000
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 4.891454284924965e-05,
+ "loss": 0.0227,
+ "step": 2100
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 4.8810575041794e-05,
+ "loss": 0.0173,
+ "step": 2200
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 4.8700853511049656e-05,
+ "loss": 0.0227,
+ "step": 2300
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 4.858642651436035e-05,
+ "loss": 0.0144,
+ "step": 2400
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 4.846731676957191e-05,
+ "loss": 0.03,
+ "step": 2500
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 4.834354792422293e-05,
+ "loss": 0.0189,
+ "step": 2600
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 4.821514455084985e-05,
+ "loss": 0.0111,
+ "step": 2700
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 4.8082132142108465e-05,
+ "loss": 0.0152,
+ "step": 2800
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 4.794453710571272e-05,
+ "loss": 0.0182,
+ "step": 2900
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 4.780238675919182e-05,
+ "loss": 0.0111,
+ "step": 3000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 4.765570932446672e-05,
+ "loss": 0.0138,
+ "step": 3100
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 4.75045339222471e-05,
+ "loss": 0.0256,
+ "step": 3200
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 4.734889056624983e-05,
+ "loss": 0.0199,
+ "step": 3300
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 4.718881015724017e-05,
+ "loss": 0.0159,
+ "step": 3400
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 4.702432447689692e-05,
+ "loss": 0.0121,
+ "step": 3500
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 4.6855466181502544e-05,
+ "loss": 0.0181,
+ "step": 3600
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 4.66822687954598e-05,
+ "loss": 0.0148,
+ "step": 3700
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 4.65047667046359e-05,
+ "loss": 0.0152,
+ "step": 3800
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 4.632299514953571e-05,
+ "loss": 0.0103,
+ "step": 3900
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 4.613699021830524e-05,
+ "loss": 0.0217,
+ "step": 4000
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 4.59467888395669e-05,
+ "loss": 0.0142,
+ "step": 4100
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 4.575242877508777e-05,
+ "loss": 0.0151,
+ "step": 4200
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 4.5553948612282607e-05,
+ "loss": 0.0164,
+ "step": 4300
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 4.5353433433160075e-05,
+ "loss": 0.0095,
+ "step": 4400
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 4.514687230313633e-05,
+ "loss": 0.015,
+ "step": 4500
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 4.49363112993749e-05,
+ "loss": 0.015,
+ "step": 4600
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 4.472179222575561e-05,
+ "loss": 0.0181,
+ "step": 4700
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 4.4503357671976574e-05,
+ "loss": 0.0175,
+ "step": 4800
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 4.428105100509852e-05,
+ "loss": 0.0069,
+ "step": 4900
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 4.4054916360934957e-05,
+ "loss": 0.0108,
+ "step": 5000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 4.3824998635289594e-05,
+ "loss": 0.0204,
+ "step": 5100
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 4.3591343475042946e-05,
+ "loss": 0.0192,
+ "step": 5200
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.3353997269089774e-05,
+ "loss": 0.0086,
+ "step": 5300
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.311300713912926e-05,
+ "loss": 0.0179,
+ "step": 5400
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 4.286842093030963e-05,
+ "loss": 0.0142,
+ "step": 5500
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 4.262028720172921e-05,
+ "loss": 0.0187,
+ "step": 5600
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.2368655216795735e-05,
+ "loss": 0.0176,
+ "step": 5700
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 4.211357493344573e-05,
+ "loss": 0.0145,
+ "step": 5800
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 4.185509699422615e-05,
+ "loss": 0.0087,
+ "step": 5900
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.1593272716239985e-05,
+ "loss": 0.0136,
+ "step": 6000
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.132815408095797e-05,
+ "loss": 0.0137,
+ "step": 6100
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 4.105979372389847e-05,
+ "loss": 0.011,
+ "step": 6200
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 4.0788244924177365e-05,
+ "loss": 0.024,
+ "step": 6300
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 4.0513561593930325e-05,
+ "loss": 0.0067,
+ "step": 6400
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 4.023579826760924e-05,
+ "loss": 0.0102,
+ "step": 6500
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 3.995501009115527e-05,
+ "loss": 0.013,
+ "step": 6600
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 3.9674104896418544e-05,
+ "loss": 0.0133,
+ "step": 6700
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 3.9387463695523555e-05,
+ "loss": 0.0087,
+ "step": 6800
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.9097966069208193e-05,
+ "loss": 0.01,
+ "step": 6900
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 3.880566949309125e-05,
+ "loss": 0.0113,
+ "step": 7000
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.8513595749088484e-05,
+ "loss": 0.0101,
+ "step": 7100
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 3.821590244319273e-05,
+ "loss": 0.0092,
+ "step": 7200
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 3.791558530862982e-05,
+ "loss": 0.0176,
+ "step": 7300
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 3.76127039690774e-05,
+ "loss": 0.0114,
+ "step": 7400
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 3.7307318557299355e-05,
+ "loss": 0.0083,
+ "step": 7500
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 3.699948970320738e-05,
+ "loss": 0.0067,
+ "step": 7600
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 3.668927852182374e-05,
+ "loss": 0.0021,
+ "step": 7700
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 3.637674660114774e-05,
+ "loss": 0.0087,
+ "step": 7800
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 3.606195598992832e-05,
+ "loss": 0.005,
+ "step": 7900
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 3.574496918534519e-05,
+ "loss": 0.014,
+ "step": 8000
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 3.542584912060087e-05,
+ "loss": 0.0073,
+ "step": 8100
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 3.510465915242623e-05,
+ "loss": 0.0073,
+ "step": 8200
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 3.478146304850188e-05,
+ "loss": 0.0106,
+ "step": 8300
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 3.4456324974798025e-05,
+ "loss": 0.0106,
+ "step": 8400
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 3.412930948283528e-05,
+ "loss": 0.0036,
+ "step": 8500
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 3.380048149686877e-05,
+ "loss": 0.0051,
+ "step": 8600
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 3.3469906300998476e-05,
+ "loss": 0.0047,
+ "step": 8700
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 3.313764952620792e-05,
+ "loss": 0.0035,
+ "step": 8800
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 3.280377713733412e-05,
+ "loss": 0.0094,
+ "step": 8900
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 3.246835541997116e-05,
+ "loss": 0.0077,
+ "step": 9000
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 3.213145096731021e-05,
+ "loss": 0.0076,
+ "step": 9100
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 3.1793130666918324e-05,
+ "loss": 0.0074,
+ "step": 9200
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 3.145346168745891e-05,
+ "loss": 0.0072,
+ "step": 9300
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 3.111251146535634e-05,
+ "loss": 0.0043,
+ "step": 9400
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 3.077377511336779e-05,
+ "loss": 0.0071,
+ "step": 9500
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 3.043047683848379e-05,
+ "loss": 0.0102,
+ "step": 9600
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 3.0086100419987084e-05,
+ "loss": 0.005,
+ "step": 9700
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 2.974071422889685e-05,
+ "loss": 0.0155,
+ "step": 9800
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 2.9394386836708154e-05,
+ "loss": 0.0086,
+ "step": 9900
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 2.9047187001778132e-05,
+ "loss": 0.0048,
+ "step": 10000
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 2.8699183655674938e-05,
+ "loss": 0.0085,
+ "step": 10100
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 2.835044588949241e-05,
+ "loss": 0.0091,
+ "step": 10200
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 2.8001042940132995e-05,
+ "loss": 0.0089,
+ "step": 10300
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 2.7651044176561748e-05,
+ "loss": 0.0093,
+ "step": 10400
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 2.7300519086034166e-05,
+ "loss": 0.0073,
+ "step": 10500
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 2.6949537260300395e-05,
+ "loss": 0.0029,
+ "step": 10600
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 2.6598168381788864e-05,
+ "loss": 0.0027,
+ "step": 10700
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 2.6246482209771783e-05,
+ "loss": 0.011,
+ "step": 10800
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 2.5894548566515485e-05,
+ "loss": 0.0108,
+ "step": 10900
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 2.554243732341816e-05,
+ "loss": 0.0033,
+ "step": 11000
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 2.519021838713791e-05,
+ "loss": 0.0048,
+ "step": 11100
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 2.483796168571378e-05,
+ "loss": 0.0015,
+ "step": 11200
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.4485737154682554e-05,
+ "loss": 0.0053,
+ "step": 11300
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.4133614723194047e-05,
+ "loss": 0.0043,
+ "step": 11400
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 2.378166430012766e-05,
+ "loss": 0.0086,
+ "step": 11500
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 2.3429955760212966e-05,
+ "loss": 0.0077,
+ "step": 11600
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 2.307855893015712e-05,
+ "loss": 0.0066,
+ "step": 11700
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 2.272754357478172e-05,
+ "loss": 0.0055,
+ "step": 11800
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 2.237697938317202e-05,
+ "loss": 0.0067,
+ "step": 11900
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 2.202693595484113e-05,
+ "loss": 0.0082,
+ "step": 12000
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 2.1680974167907623e-05,
+ "loss": 0.0101,
+ "step": 12100
+ },
+ {
+ "epoch": 1.64,
+ "learning_rate": 2.1332173697925385e-05,
+ "loss": 0.0019,
+ "step": 12200
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 2.09841014224678e-05,
+ "loss": 0.0106,
+ "step": 12300
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 2.063682644631365e-05,
+ "loss": 0.0051,
+ "step": 12400
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 2.0290417715949304e-05,
+ "loss": 0.0076,
+ "step": 12500
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 1.994494400588042e-05,
+ "loss": 0.0037,
+ "step": 12600
+ },
+ {
+ "epoch": 1.71,
+ "learning_rate": 1.9600473904977725e-05,
+ "loss": 0.0073,
+ "step": 12700
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 1.9257075802859685e-05,
+ "loss": 0.0052,
+ "step": 12800
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 1.89148178763147e-05,
+ "loss": 0.0038,
+ "step": 12900
+ },
+ {
+ "epoch": 1.75,
+ "learning_rate": 1.857376807576563e-05,
+ "loss": 0.0044,
+ "step": 13000
+ },
+ {
+ "epoch": 1.76,
+ "learning_rate": 1.8233994111779146e-05,
+ "loss": 0.0031,
+ "step": 13100
+ },
+ {
+ "epoch": 1.78,
+ "learning_rate": 1.7895563441622803e-05,
+ "loss": 0.0067,
+ "step": 13200
+ },
+ {
+ "epoch": 1.79,
+ "learning_rate": 1.7558543255872352e-05,
+ "loss": 0.0096,
+ "step": 13300
+ },
+ {
+ "epoch": 1.8,
+ "learning_rate": 1.7223000465072e-05,
+ "loss": 0.0063,
+ "step": 13400
+ },
+ {
+ "epoch": 1.82,
+ "learning_rate": 1.68890016864503e-05,
+ "loss": 0.0064,
+ "step": 13500
+ },
+ {
+ "epoch": 1.83,
+ "learning_rate": 1.6556613230694243e-05,
+ "loss": 0.0047,
+ "step": 13600
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 1.6225901088784207e-05,
+ "loss": 0.0023,
+ "step": 13700
+ },
+ {
+ "epoch": 1.86,
+ "learning_rate": 1.58969309188924e-05,
+ "loss": 0.0064,
+ "step": 13800
+ },
+ {
+ "epoch": 1.87,
+ "learning_rate": 1.556976803334737e-05,
+ "loss": 0.004,
+ "step": 13900
+ },
+ {
+ "epoch": 1.88,
+ "learning_rate": 1.5244477385667167e-05,
+ "loss": 0.0016,
+ "step": 14000
+ },
+ {
+ "epoch": 1.9,
+ "learning_rate": 1.4921123557663763e-05,
+ "loss": 0.008,
+ "step": 14100
+ },
+ {
+ "epoch": 1.91,
+ "learning_rate": 1.4599770746621238e-05,
+ "loss": 0.0047,
+ "step": 14200
+ },
+ {
+ "epoch": 1.92,
+ "learning_rate": 1.4280482752550327e-05,
+ "loss": 0.0056,
+ "step": 14300
+ },
+ {
+ "epoch": 1.94,
+ "learning_rate": 1.396332296552188e-05,
+ "loss": 0.0065,
+ "step": 14400
+ },
+ {
+ "epoch": 1.95,
+ "learning_rate": 1.3648354353081588e-05,
+ "loss": 0.004,
+ "step": 14500
+ },
+ {
+ "epoch": 1.96,
+ "learning_rate": 1.333563944774876e-05,
+ "loss": 0.0065,
+ "step": 14600
+ },
+ {
+ "epoch": 1.98,
+ "learning_rate": 1.3028332659827522e-05,
+ "loss": 0.0026,
+ "step": 14700
+ },
+ {
+ "epoch": 1.99,
+ "learning_rate": 1.2720286886495473e-05,
+ "loss": 0.008,
+ "step": 14800
+ },
+ {
+ "epoch": 2.0,
+ "learning_rate": 1.2414679074810755e-05,
+ "loss": 0.0053,
+ "step": 14900
+ },
+ {
+ "epoch": 2.02,
+ "learning_rate": 1.211156989883942e-05,
+ "loss": 0.0024,
+ "step": 15000
+ },
+ {
+ "epoch": 2.03,
+ "learning_rate": 1.1811019536579015e-05,
+ "loss": 0.0014,
+ "step": 15100
+ },
+ {
+ "epoch": 2.05,
+ "learning_rate": 1.1513087658011207e-05,
+ "loss": 0.0005,
+ "step": 15200
+ },
+ {
+ "epoch": 2.06,
+ "learning_rate": 1.1217833413255133e-05,
+ "loss": 0.0002,
+ "step": 15300
+ },
+ {
+ "epoch": 2.07,
+ "learning_rate": 1.0925315420823949e-05,
+ "loss": 0.0023,
+ "step": 15400
+ },
+ {
+ "epoch": 2.09,
+ "learning_rate": 1.063559175598702e-05,
+ "loss": 0.0003,
+ "step": 15500
+ },
+ {
+ "epoch": 2.1,
+ "learning_rate": 1.0348719939239885e-05,
+ "loss": 0.0009,
+ "step": 15600
+ },
+ {
+ "epoch": 2.11,
+ "learning_rate": 1.0064756924884342e-05,
+ "loss": 0.0059,
+ "step": 15700
+ },
+ {
+ "epoch": 2.13,
+ "learning_rate": 9.783759089721054e-06,
+ "loss": 0.0005,
+ "step": 15800
+ },
+ {
+ "epoch": 2.14,
+ "learning_rate": 9.505782221856696e-06,
+ "loss": 0.0009,
+ "step": 15900
+ },
+ {
+ "epoch": 2.15,
+ "learning_rate": 9.230881509628037e-06,
+ "loss": 0.0013,
+ "step": 16000
+ },
+ {
+ "epoch": 2.17,
+ "learning_rate": 8.959111530645085e-06,
+ "loss": 0.0017,
+ "step": 16100
+ },
+ {
+ "epoch": 2.18,
+ "learning_rate": 8.690526240955454e-06,
+ "loss": 0.0025,
+ "step": 16200
+ },
+ {
+ "epoch": 2.19,
+ "learning_rate": 8.425178964332225e-06,
+ "loss": 0.005,
+ "step": 16300
+ },
+ {
+ "epoch": 2.21,
+ "learning_rate": 8.163122381687191e-06,
+ "loss": 0.0039,
+ "step": 16400
+ },
+ {
+ "epoch": 2.22,
+ "learning_rate": 7.90440852061189e-06,
+ "loss": 0.0002,
+ "step": 16500
+ },
+ {
+ "epoch": 2.23,
+ "learning_rate": 7.64908874504823e-06,
+ "loss": 0.0014,
+ "step": 16600
+ },
+ {
+ "epoch": 2.25,
+ "learning_rate": 7.3972137450908895e-06,
+ "loss": 0.0008,
+ "step": 16700
+ },
+ {
+ "epoch": 2.26,
+ "learning_rate": 7.148833526923606e-06,
+ "loss": 0.0031,
+ "step": 16800
+ },
+ {
+ "epoch": 2.27,
+ "learning_rate": 6.9039974028910575e-06,
+ "loss": 0.0022,
+ "step": 16900
+ },
+ {
+ "epoch": 2.29,
+ "learning_rate": 6.6627539817086775e-06,
+ "loss": 0.0006,
+ "step": 17000
+ },
+ {
+ "epoch": 2.3,
+ "learning_rate": 6.425151158812067e-06,
+ "loss": 0.0001,
+ "step": 17100
+ },
+ {
+ "epoch": 2.31,
+ "learning_rate": 6.191236106848014e-06,
+ "loss": 0.0001,
+ "step": 17200
+ },
+ {
+ "epoch": 2.33,
+ "learning_rate": 5.961055266309076e-06,
+ "loss": 0.0055,
+ "step": 17300
+ },
+ {
+ "epoch": 2.34,
+ "learning_rate": 5.73465433631348e-06,
+ "loss": 0.0006,
+ "step": 17400
+ },
+ {
+ "epoch": 2.35,
+ "learning_rate": 5.512078265532167e-06,
+ "loss": 0.0001,
+ "step": 17500
+ },
+ {
+ "epoch": 2.37,
+ "learning_rate": 5.293371243264947e-06,
+ "loss": 0.0,
+ "step": 17600
+ },
+ {
+ "epoch": 2.38,
+ "learning_rate": 5.078576690667289e-06,
+ "loss": 0.001,
+ "step": 17700
+ },
+ {
+ "epoch": 2.39,
+ "learning_rate": 4.8677372521297e-06,
+ "loss": 0.0003,
+ "step": 17800
+ },
+ {
+ "epoch": 2.41,
+ "learning_rate": 4.660894786811287e-06,
+ "loss": 0.0027,
+ "step": 17900
+ },
+ {
+ "epoch": 2.42,
+ "learning_rate": 4.4580903603291895e-06,
+ "loss": 0.0027,
+ "step": 18000
+ },
+ {
+ "epoch": 2.44,
+ "learning_rate": 4.259364236605634e-06,
+ "loss": 0.0004,
+ "step": 18100
+ },
+ {
+ "epoch": 2.45,
+ "learning_rate": 4.064755869874084e-06,
+ "loss": 0.0008,
+ "step": 18200
+ },
+ {
+ "epoch": 2.46,
+ "learning_rate": 3.874303896846149e-06,
+ "loss": 0.0011,
+ "step": 18300
+ },
+ {
+ "epoch": 2.48,
+ "learning_rate": 3.6880461290408507e-06,
+ "loss": 0.0004,
+ "step": 18400
+ },
+ {
+ "epoch": 2.49,
+ "learning_rate": 3.5060195452776557e-06,
+ "loss": 0.0015,
+ "step": 18500
+ },
+ {
+ "epoch": 2.5,
+ "learning_rate": 3.3282602843348943e-06,
+ "loss": 0.0003,
+ "step": 18600
+ },
+ {
+ "epoch": 2.52,
+ "learning_rate": 3.1548036377748863e-06,
+ "loss": 0.0001,
+ "step": 18700
+ },
+ {
+ "epoch": 2.53,
+ "learning_rate": 2.9856840429373144e-06,
+ "loss": 0.0,
+ "step": 18800
+ },
+ {
+ "epoch": 2.54,
+ "learning_rate": 2.820935076102205e-06,
+ "loss": 0.0021,
+ "step": 18900
+ },
+ {
+ "epoch": 2.56,
+ "learning_rate": 2.6621710003546298e-06,
+ "loss": 0.0019,
+ "step": 19000
+ },
+ {
+ "epoch": 2.57,
+ "learning_rate": 2.50621603456484e-06,
+ "loss": 0.0,
+ "step": 19100
+ },
+ {
+ "epoch": 2.58,
+ "learning_rate": 2.356219574465643e-06,
+ "loss": 0.0013,
+ "step": 19200
+ },
+ {
+ "epoch": 2.6,
+ "learning_rate": 2.2091812188310717e-06,
+ "loss": 0.0014,
+ "step": 19300
+ },
+ {
+ "epoch": 2.61,
+ "learning_rate": 2.0666676547438866e-06,
+ "loss": 0.0013,
+ "step": 19400
+ },
+ {
+ "epoch": 2.62,
+ "learning_rate": 1.9287071762367076e-06,
+ "loss": 0.0007,
+ "step": 19500
+ },
+ {
+ "epoch": 2.64,
+ "learning_rate": 1.7953271733920502e-06,
+ "loss": 0.0003,
+ "step": 19600
+ },
+ {
+ "epoch": 2.65,
+ "learning_rate": 1.6665541269044155e-06,
+ "loss": 0.0016,
+ "step": 19700
+ },
+ {
+ "epoch": 2.66,
+ "learning_rate": 1.5424136028229118e-06,
+ "loss": 0.0007,
+ "step": 19800
+ },
+ {
+ "epoch": 2.68,
+ "learning_rate": 1.4229302474755013e-06,
+ "loss": 0.0027,
+ "step": 19900
+ },
+ {
+ "epoch": 2.69,
+ "learning_rate": 1.3081277825757992e-06,
+ "loss": 0.0011,
+ "step": 20000
+ },
+ {
+ "epoch": 2.7,
+ "learning_rate": 1.1980290005134843e-06,
+ "loss": 0.0017,
+ "step": 20100
+ },
+ {
+ "epoch": 2.72,
+ "learning_rate": 1.0926557598292002e-06,
+ "loss": 0.0001,
+ "step": 20200
+ },
+ {
+ "epoch": 2.73,
+ "learning_rate": 9.92028980874829e-07,
+ "loss": 0.0025,
+ "step": 20300
+ },
+ {
+ "epoch": 2.74,
+ "learning_rate": 8.961686416600834e-07,
+ "loss": 0.0006,
+ "step": 20400
+ },
+ {
+ "epoch": 2.76,
+ "learning_rate": 8.05093773886137e-07,
+ "loss": 0.0023,
+ "step": 20500
+ },
+ {
+ "epoch": 2.77,
+ "learning_rate": 7.18822459167165e-07,
+ "loss": 0.0009,
+ "step": 20600
+ },
+ {
+ "epoch": 2.78,
+ "learning_rate": 6.373718254404887e-07,
+ "loss": 0.0005,
+ "step": 20700
+ },
+ {
+ "epoch": 2.8,
+ "learning_rate": 5.607580435660759e-07,
+ "loss": 0.0008,
+ "step": 20800
+ },
+ {
+ "epoch": 2.81,
+ "learning_rate": 4.889963241160366e-07,
+ "loss": 0.0005,
+ "step": 20900
+ },
+ {
+ "epoch": 2.83,
+ "learning_rate": 4.2210091435480324e-07,
+ "loss": 0.0009,
+ "step": 21000
+ }
+ ],
+ "logging_steps": 100,
+ "max_steps": 22296,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 3000,
+ "total_flos": 5.871674650524254e+18,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-21000/training_args.bin b/LLM-Detector-V7-11w/checkpoint-21000/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..ac97b1c26d12cbfaab826f907d39398233627e61
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-21000/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fda912c16bf2c6e8ac3158b354c96cabf8fd0eff786de104f90c23c31741ef25
+size 4856
diff --git a/LLM-Detector-V7-11w/checkpoint-3000/README.md b/LLM-Detector-V7-11w/checkpoint-3000/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1761815329bb2f98216bb131acf44b508e0087b4
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-3000/README.md
@@ -0,0 +1,204 @@
+---
+library_name: peft
+base_model: ./Mistral-7B-Instruct-v0.1
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
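+
+No checkpoint-specific instructions are provided, so the snippet below is only a sketch: it merges this checkpoint's LoRA weights into the Mistral-7B-Instruct-v0.1 base (the path recorded in `adapter_config.json`) and saves a standalone model that can be served without `peft`. The output directory name is a placeholder.
+
+```python
+# Sketch: fold the LoRA weights from checkpoint-3000 into the base model and
+# save a standalone copy. Paths are placeholders; the merge needs enough
+# memory to hold the full 7B model.
+from peft import PeftModel
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+BASE = "./Mistral-7B-Instruct-v0.1"                 # from adapter_config.json
+ADAPTER = "./LLM-Detector-V7-11w/checkpoint-3000"   # this checkpoint directory
+
+base_model = AutoModelForCausalLM.from_pretrained(BASE)
+merged = PeftModel.from_pretrained(base_model, ADAPTER).merge_and_unload()
+
+merged.save_pretrained("./LLM-Detector-V7-11w-merged")
+AutoTokenizer.from_pretrained(BASE).save_pretrained("./LLM-Detector-V7-11w-merged")
+```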
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+
+### Framework versions
+
+- PEFT 0.7.1
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/checkpoint-3000/adapter_config.json b/LLM-Detector-V7-11w/checkpoint-3000/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..8a770ff96db84d75626efa2d0a8b065b51c74716
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-3000/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "./Mistral-7B-Instruct-v0.1",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/checkpoint-3000/adapter_model.safetensors b/LLM-Detector-V7-11w/checkpoint-3000/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..11b12760962deef1e8a44e7a7d2d000ba69e66d2
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-3000/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:396682504d8dadf09b1303b4e81a170d7af25aad2b17dc774e0ec96033f02116
+size 13648432
diff --git a/LLM-Detector-V7-11w/checkpoint-3000/optimizer.pt b/LLM-Detector-V7-11w/checkpoint-3000/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c9ab1870618b72bf3d5eb3b2421846c547e656f0
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-3000/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41189b97be1e0546c7ebbb90746b49bbfd07e25bb28053d6585eae730d57730b
+size 27370618
diff --git a/LLM-Detector-V7-11w/checkpoint-3000/rng_state.pth b/LLM-Detector-V7-11w/checkpoint-3000/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..33cefe6919222ddfa3c3946df69b8e5c5a17a0fc
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-3000/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ff264f99d31b522cc7e2a4eac9d38606d0c58a34c0adc74d71e0ca8b371dc36
+size 14244
diff --git a/LLM-Detector-V7-11w/checkpoint-3000/scheduler.pt b/LLM-Detector-V7-11w/checkpoint-3000/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..83894b3257f71598077b5b4b6439fc8cbfb26607
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-3000/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e9bf597a5a404943218e93ed02ccecc3d46f962878f4a4b9b42bb524682f052
+size 1064
diff --git a/LLM-Detector-V7-11w/checkpoint-3000/special_tokens_map.json b/LLM-Detector-V7-11w/checkpoint-3000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-3000/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-3000/tokenizer.model b/LLM-Detector-V7-11w/checkpoint-3000/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-3000/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/LLM-Detector-V7-11w/checkpoint-3000/tokenizer_config.json b/LLM-Detector-V7-11w/checkpoint-3000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e55462da3cd6a6f37c2b8fc230da6b14731e5ab1
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-3000/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-3000/trainer_state.json b/LLM-Detector-V7-11w/checkpoint-3000/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..2cb0e1e72fa75245bb60a8052d47ea951d2f4ebc
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-3000/trainer_state.json
@@ -0,0 +1,201 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.40361911809222695,
+ "eval_steps": 500,
+ "global_step": 3000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.9997567688496474e-05,
+ "loss": 1.6283,
+ "step": 100
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4.9990172715142793e-05,
+ "loss": 0.068,
+ "step": 200
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 4.997781629993153e-05,
+ "loss": 0.0404,
+ "step": 300
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 4.9960500896052476e-05,
+ "loss": 0.0626,
+ "step": 400
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.993822994123172e-05,
+ "loss": 0.0428,
+ "step": 500
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 4.9911007857049264e-05,
+ "loss": 0.0265,
+ "step": 600
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 4.987884004806111e-05,
+ "loss": 0.0263,
+ "step": 700
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 4.984173290072626e-05,
+ "loss": 0.03,
+ "step": 800
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 4.979969378213884e-05,
+ "loss": 0.0253,
+ "step": 900
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 4.975273103856537e-05,
+ "loss": 0.038,
+ "step": 1000
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 4.970085399378785e-05,
+ "loss": 0.0322,
+ "step": 1100
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 4.964407294725254e-05,
+ "loss": 0.0296,
+ "step": 1200
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 4.958239917202523e-05,
+ "loss": 0.0295,
+ "step": 1300
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 4.9515844912553106e-05,
+ "loss": 0.0171,
+ "step": 1400
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 4.944442338223378e-05,
+ "loss": 0.0205,
+ "step": 1500
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 4.9368148760792e-05,
+ "loss": 0.029,
+ "step": 1600
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 4.9287036191464414e-05,
+ "loss": 0.0172,
+ "step": 1700
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 4.9201101777993116e-05,
+ "loss": 0.0235,
+ "step": 1800
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 4.911036258142853e-05,
+ "loss": 0.0168,
+ "step": 1900
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 4.9014836616742065e-05,
+ "loss": 0.0227,
+ "step": 2000
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 4.891454284924965e-05,
+ "loss": 0.0227,
+ "step": 2100
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 4.8810575041794e-05,
+ "loss": 0.0173,
+ "step": 2200
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 4.8700853511049656e-05,
+ "loss": 0.0227,
+ "step": 2300
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 4.858642651436035e-05,
+ "loss": 0.0144,
+ "step": 2400
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 4.846731676957191e-05,
+ "loss": 0.03,
+ "step": 2500
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 4.834354792422293e-05,
+ "loss": 0.0189,
+ "step": 2600
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 4.821514455084985e-05,
+ "loss": 0.0111,
+ "step": 2700
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 4.8082132142108465e-05,
+ "loss": 0.0152,
+ "step": 2800
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 4.794453710571272e-05,
+ "loss": 0.0182,
+ "step": 2900
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 4.780238675919182e-05,
+ "loss": 0.0111,
+ "step": 3000
+ }
+ ],
+ "logging_steps": 100,
+ "max_steps": 22296,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 3000,
+ "total_flos": 8.395428848876913e+17,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-3000/training_args.bin b/LLM-Detector-V7-11w/checkpoint-3000/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..ac97b1c26d12cbfaab826f907d39398233627e61
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-3000/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fda912c16bf2c6e8ac3158b354c96cabf8fd0eff786de104f90c23c31741ef25
+size 4856
diff --git a/LLM-Detector-V7-11w/checkpoint-6000/README.md b/LLM-Detector-V7-11w/checkpoint-6000/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1761815329bb2f98216bb131acf44b508e0087b4
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-6000/README.md
@@ -0,0 +1,204 @@
+---
+library_name: peft
+base_model: ./Mistral-7B-Instruct-v0.1
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
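+
+As a stopgap, the sketch below (not the authors' documented workflow) shows one way to query this checkpoint through the `[INST] ... [/INST]` chat template defined in `tokenizer_config.json`. The example prompt is purely illustrative; the exact instruction format used during training is not documented in this repository.
+
+```python
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from peft import PeftModel
+
+BASE_MODEL = "./Mistral-7B-Instruct-v0.1"            # base model path from adapter_config.json
+ADAPTER_DIR = "LLM-Detector-V7-11w/checkpoint-6000"  # this checkpoint's directory; adjust to your local path
+
+tokenizer = AutoTokenizer.from_pretrained(ADAPTER_DIR)
+base = AutoModelForCausalLM.from_pretrained(BASE_MODEL, torch_dtype=torch.float16, device_map="auto")
+model = PeftModel.from_pretrained(base, ADAPTER_DIR)
+
+# Illustrative prompt only; replace with the actual text you want to classify.
+messages = [{"role": "user", "content": "Was the following text written by a human or generated by an AI? ..."}]
+input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to(base.device)
+
+with torch.no_grad():
+    output = model.generate(input_ids=input_ids, max_new_tokens=32)
+print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
+```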
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+
+### Framework versions
+
+- PEFT 0.7.1
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/checkpoint-6000/adapter_config.json b/LLM-Detector-V7-11w/checkpoint-6000/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..8a770ff96db84d75626efa2d0a8b065b51c74716
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-6000/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "./Mistral-7B-Instruct-v0.1",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/checkpoint-6000/adapter_model.safetensors b/LLM-Detector-V7-11w/checkpoint-6000/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..744012e72026c853f15a42d129ad1badded385b1
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-6000/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ed510a66782b6d1d229faeccc14ff5787b8fb47b0d0a7130c7fc710c50d5d984
+size 13648432
diff --git a/LLM-Detector-V7-11w/checkpoint-6000/optimizer.pt b/LLM-Detector-V7-11w/checkpoint-6000/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..c9da04ea4ac7fde5a5d9b7891a8cc1cbe11618c7
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-6000/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c8532e36559fb2c63547f2b60bf2175b540b4273b92ca86f4e16cecd72c5ae6
+size 27370618
diff --git a/LLM-Detector-V7-11w/checkpoint-6000/rng_state.pth b/LLM-Detector-V7-11w/checkpoint-6000/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..33cefe6919222ddfa3c3946df69b8e5c5a17a0fc
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-6000/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ff264f99d31b522cc7e2a4eac9d38606d0c58a34c0adc74d71e0ca8b371dc36
+size 14244
diff --git a/LLM-Detector-V7-11w/checkpoint-6000/scheduler.pt b/LLM-Detector-V7-11w/checkpoint-6000/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..b1dcf9c285eeb37b759dff70f576e87e774fb2b8
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-6000/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0bea79e28c80b79ba1b8ed7b9e7ce4aa1d745ae6c35ed3827a75cf8bfb268a1
+size 1064
diff --git a/LLM-Detector-V7-11w/checkpoint-6000/special_tokens_map.json b/LLM-Detector-V7-11w/checkpoint-6000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-6000/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-6000/tokenizer.model b/LLM-Detector-V7-11w/checkpoint-6000/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-6000/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/LLM-Detector-V7-11w/checkpoint-6000/tokenizer_config.json b/LLM-Detector-V7-11w/checkpoint-6000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e55462da3cd6a6f37c2b8fc230da6b14731e5ab1
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-6000/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-6000/trainer_state.json b/LLM-Detector-V7-11w/checkpoint-6000/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..5eb6db2f04556f344bede62e4ca26866e7c732ef
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-6000/trainer_state.json
@@ -0,0 +1,381 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 0.8072382361844539,
+ "eval_steps": 500,
+ "global_step": 6000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.9997567688496474e-05,
+ "loss": 1.6283,
+ "step": 100
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4.9990172715142793e-05,
+ "loss": 0.068,
+ "step": 200
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 4.997781629993153e-05,
+ "loss": 0.0404,
+ "step": 300
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 4.9960500896052476e-05,
+ "loss": 0.0626,
+ "step": 400
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.993822994123172e-05,
+ "loss": 0.0428,
+ "step": 500
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 4.9911007857049264e-05,
+ "loss": 0.0265,
+ "step": 600
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 4.987884004806111e-05,
+ "loss": 0.0263,
+ "step": 700
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 4.984173290072626e-05,
+ "loss": 0.03,
+ "step": 800
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 4.979969378213884e-05,
+ "loss": 0.0253,
+ "step": 900
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 4.975273103856537e-05,
+ "loss": 0.038,
+ "step": 1000
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 4.970085399378785e-05,
+ "loss": 0.0322,
+ "step": 1100
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 4.964407294725254e-05,
+ "loss": 0.0296,
+ "step": 1200
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 4.958239917202523e-05,
+ "loss": 0.0295,
+ "step": 1300
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 4.9515844912553106e-05,
+ "loss": 0.0171,
+ "step": 1400
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 4.944442338223378e-05,
+ "loss": 0.0205,
+ "step": 1500
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 4.9368148760792e-05,
+ "loss": 0.029,
+ "step": 1600
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 4.9287036191464414e-05,
+ "loss": 0.0172,
+ "step": 1700
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 4.9201101777993116e-05,
+ "loss": 0.0235,
+ "step": 1800
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 4.911036258142853e-05,
+ "loss": 0.0168,
+ "step": 1900
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 4.9014836616742065e-05,
+ "loss": 0.0227,
+ "step": 2000
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 4.891454284924965e-05,
+ "loss": 0.0227,
+ "step": 2100
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 4.8810575041794e-05,
+ "loss": 0.0173,
+ "step": 2200
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 4.8700853511049656e-05,
+ "loss": 0.0227,
+ "step": 2300
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 4.858642651436035e-05,
+ "loss": 0.0144,
+ "step": 2400
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 4.846731676957191e-05,
+ "loss": 0.03,
+ "step": 2500
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 4.834354792422293e-05,
+ "loss": 0.0189,
+ "step": 2600
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 4.821514455084985e-05,
+ "loss": 0.0111,
+ "step": 2700
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 4.8082132142108465e-05,
+ "loss": 0.0152,
+ "step": 2800
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 4.794453710571272e-05,
+ "loss": 0.0182,
+ "step": 2900
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 4.780238675919182e-05,
+ "loss": 0.0111,
+ "step": 3000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 4.765570932446672e-05,
+ "loss": 0.0138,
+ "step": 3100
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 4.75045339222471e-05,
+ "loss": 0.0256,
+ "step": 3200
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 4.734889056624983e-05,
+ "loss": 0.0199,
+ "step": 3300
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 4.718881015724017e-05,
+ "loss": 0.0159,
+ "step": 3400
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 4.702432447689692e-05,
+ "loss": 0.0121,
+ "step": 3500
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 4.6855466181502544e-05,
+ "loss": 0.0181,
+ "step": 3600
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 4.66822687954598e-05,
+ "loss": 0.0148,
+ "step": 3700
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 4.65047667046359e-05,
+ "loss": 0.0152,
+ "step": 3800
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 4.632299514953571e-05,
+ "loss": 0.0103,
+ "step": 3900
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 4.613699021830524e-05,
+ "loss": 0.0217,
+ "step": 4000
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 4.59467888395669e-05,
+ "loss": 0.0142,
+ "step": 4100
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 4.575242877508777e-05,
+ "loss": 0.0151,
+ "step": 4200
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 4.5553948612282607e-05,
+ "loss": 0.0164,
+ "step": 4300
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 4.5353433433160075e-05,
+ "loss": 0.0095,
+ "step": 4400
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 4.514687230313633e-05,
+ "loss": 0.015,
+ "step": 4500
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 4.49363112993749e-05,
+ "loss": 0.015,
+ "step": 4600
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 4.472179222575561e-05,
+ "loss": 0.0181,
+ "step": 4700
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 4.4503357671976574e-05,
+ "loss": 0.0175,
+ "step": 4800
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 4.428105100509852e-05,
+ "loss": 0.0069,
+ "step": 4900
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 4.4054916360934957e-05,
+ "loss": 0.0108,
+ "step": 5000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 4.3824998635289594e-05,
+ "loss": 0.0204,
+ "step": 5100
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 4.3591343475042946e-05,
+ "loss": 0.0192,
+ "step": 5200
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.3353997269089774e-05,
+ "loss": 0.0086,
+ "step": 5300
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.311300713912926e-05,
+ "loss": 0.0179,
+ "step": 5400
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 4.286842093030963e-05,
+ "loss": 0.0142,
+ "step": 5500
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 4.262028720172921e-05,
+ "loss": 0.0187,
+ "step": 5600
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.2368655216795735e-05,
+ "loss": 0.0176,
+ "step": 5700
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 4.211357493344573e-05,
+ "loss": 0.0145,
+ "step": 5800
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 4.185509699422615e-05,
+ "loss": 0.0087,
+ "step": 5900
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.1593272716239985e-05,
+ "loss": 0.0136,
+ "step": 6000
+ }
+ ],
+ "logging_steps": 100,
+ "max_steps": 22296,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 3000,
+ "total_flos": 1.6775846445215908e+18,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-6000/training_args.bin b/LLM-Detector-V7-11w/checkpoint-6000/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..ac97b1c26d12cbfaab826f907d39398233627e61
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-6000/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fda912c16bf2c6e8ac3158b354c96cabf8fd0eff786de104f90c23c31741ef25
+size 4856
diff --git a/LLM-Detector-V7-11w/checkpoint-9000/README.md b/LLM-Detector-V7-11w/checkpoint-9000/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..1761815329bb2f98216bb131acf44b508e0087b4
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-9000/README.md
@@ -0,0 +1,204 @@
+---
+library_name: peft
+base_model: ./Mistral-7B-Instruct-v0.1
+---
+
+# Model Card for Model ID
+
+
+
+
+
+## Model Details
+
+### Model Description
+
+
+
+
+
+- **Developed by:** [More Information Needed]
+- **Funded by [optional]:** [More Information Needed]
+- **Shared by [optional]:** [More Information Needed]
+- **Model type:** [More Information Needed]
+- **Language(s) (NLP):** [More Information Needed]
+- **License:** [More Information Needed]
+- **Finetuned from model [optional]:** [More Information Needed]
+
+### Model Sources [optional]
+
+
+
+- **Repository:** [More Information Needed]
+- **Paper [optional]:** [More Information Needed]
+- **Demo [optional]:** [More Information Needed]
+
+## Uses
+
+
+
+### Direct Use
+
+
+
+[More Information Needed]
+
+### Downstream Use [optional]
+
+
+
+[More Information Needed]
+
+### Out-of-Scope Use
+
+
+
+[More Information Needed]
+
+## Bias, Risks, and Limitations
+
+
+
+[More Information Needed]
+
+### Recommendations
+
+
+
+Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+## How to Get Started with the Model
+
+Use the code below to get started with the model.
+
+[More Information Needed]
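+
+As a stopgap, the sketch below (not the authors' documented workflow) shows one possible export path: merging this checkpoint's LoRA weights into the base model so it can be saved and served as a plain Transformers model. The output directory name is illustrative.
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from peft import PeftModel
+
+BASE_MODEL = "./Mistral-7B-Instruct-v0.1"            # base model path from adapter_config.json
+ADAPTER_DIR = "LLM-Detector-V7-11w/checkpoint-9000"  # this checkpoint's directory; adjust to your local path
+OUTPUT_DIR = "mistral-7b-detector-merged"            # illustrative output location
+
+base = AutoModelForCausalLM.from_pretrained(BASE_MODEL, torch_dtype="auto")
+model = PeftModel.from_pretrained(base, ADAPTER_DIR)
+
+# Fold the low-rank q_proj/v_proj updates into the base weights, then save a standalone model.
+merged = model.merge_and_unload()
+merged.save_pretrained(OUTPUT_DIR)
+AutoTokenizer.from_pretrained(ADAPTER_DIR).save_pretrained(OUTPUT_DIR)
+```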
+
+## Training Details
+
+### Training Data
+
+
+
+[More Information Needed]
+
+### Training Procedure
+
+
+
+#### Preprocessing [optional]
+
+[More Information Needed]
+
+
+#### Training Hyperparameters
+
+- **Training regime:** [More Information Needed]
+
+#### Speeds, Sizes, Times [optional]
+
+
+
+[More Information Needed]
+
+## Evaluation
+
+
+
+### Testing Data, Factors & Metrics
+
+#### Testing Data
+
+
+
+[More Information Needed]
+
+#### Factors
+
+
+
+[More Information Needed]
+
+#### Metrics
+
+
+
+[More Information Needed]
+
+### Results
+
+[More Information Needed]
+
+#### Summary
+
+
+
+## Model Examination [optional]
+
+
+
+[More Information Needed]
+
+## Environmental Impact
+
+
+
+Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+- **Hardware Type:** [More Information Needed]
+- **Hours used:** [More Information Needed]
+- **Cloud Provider:** [More Information Needed]
+- **Compute Region:** [More Information Needed]
+- **Carbon Emitted:** [More Information Needed]
+
+## Technical Specifications [optional]
+
+### Model Architecture and Objective
+
+[More Information Needed]
+
+### Compute Infrastructure
+
+[More Information Needed]
+
+#### Hardware
+
+[More Information Needed]
+
+#### Software
+
+[More Information Needed]
+
+## Citation [optional]
+
+
+
+**BibTeX:**
+
+[More Information Needed]
+
+**APA:**
+
+[More Information Needed]
+
+## Glossary [optional]
+
+
+
+[More Information Needed]
+
+## More Information [optional]
+
+[More Information Needed]
+
+## Model Card Authors [optional]
+
+[More Information Needed]
+
+## Model Card Contact
+
+[More Information Needed]
+
+
+### Framework versions
+
+- PEFT 0.7.1
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/checkpoint-9000/adapter_config.json b/LLM-Detector-V7-11w/checkpoint-9000/adapter_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..8a770ff96db84d75626efa2d0a8b065b51c74716
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-9000/adapter_config.json
@@ -0,0 +1,26 @@
+{
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "./Mistral-7B-Instruct-v0.1",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.0,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+}
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/checkpoint-9000/adapter_model.safetensors b/LLM-Detector-V7-11w/checkpoint-9000/adapter_model.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..047407ac3fb8c1add8b08c69543c8a53d92c3698
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-9000/adapter_model.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e85bd74e2dc60b2e34d1736e888e20c9575dd993ff269e166eba5fc9e27228b5
+size 13648432
diff --git a/LLM-Detector-V7-11w/checkpoint-9000/optimizer.pt b/LLM-Detector-V7-11w/checkpoint-9000/optimizer.pt
new file mode 100644
index 0000000000000000000000000000000000000000..43973f8fba5b757f5b3b4872d78bf771110b5d30
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-9000/optimizer.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba1fabf02863c93ade8f1d30b5cf2ce0847e035301e0804272bd965f381b5e6b
+size 27370618
diff --git a/LLM-Detector-V7-11w/checkpoint-9000/rng_state.pth b/LLM-Detector-V7-11w/checkpoint-9000/rng_state.pth
new file mode 100644
index 0000000000000000000000000000000000000000..0a11d850e4f9ddde03041fb0f3247ef616fbda77
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-9000/rng_state.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d138cfe3a4adf21f048848ee35837c9a757a0a3616ff7adbb45b69aac247435
+size 14244
diff --git a/LLM-Detector-V7-11w/checkpoint-9000/scheduler.pt b/LLM-Detector-V7-11w/checkpoint-9000/scheduler.pt
new file mode 100644
index 0000000000000000000000000000000000000000..cb2488fd6af99e9968fb156edd018f1b7bbec6c1
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-9000/scheduler.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65aca61bc22048703d8a3472283a80b4f1257d82b382cf5ca194486ad6808f3c
+size 1064
diff --git a/LLM-Detector-V7-11w/checkpoint-9000/special_tokens_map.json b/LLM-Detector-V7-11w/checkpoint-9000/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-9000/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-9000/tokenizer.model b/LLM-Detector-V7-11w/checkpoint-9000/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-9000/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/LLM-Detector-V7-11w/checkpoint-9000/tokenizer_config.json b/LLM-Detector-V7-11w/checkpoint-9000/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e55462da3cd6a6f37c2b8fc230da6b14731e5ab1
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-9000/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-9000/trainer_state.json b/LLM-Detector-V7-11w/checkpoint-9000/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..b19bb986dd4d6cdedd7a6ae23b086469d476d42a
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-9000/trainer_state.json
@@ -0,0 +1,561 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 1.210857354276681,
+ "eval_steps": 500,
+ "global_step": 9000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.9997567688496474e-05,
+ "loss": 1.6283,
+ "step": 100
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4.9990172715142793e-05,
+ "loss": 0.068,
+ "step": 200
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 4.997781629993153e-05,
+ "loss": 0.0404,
+ "step": 300
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 4.9960500896052476e-05,
+ "loss": 0.0626,
+ "step": 400
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.993822994123172e-05,
+ "loss": 0.0428,
+ "step": 500
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 4.9911007857049264e-05,
+ "loss": 0.0265,
+ "step": 600
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 4.987884004806111e-05,
+ "loss": 0.0263,
+ "step": 700
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 4.984173290072626e-05,
+ "loss": 0.03,
+ "step": 800
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 4.979969378213884e-05,
+ "loss": 0.0253,
+ "step": 900
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 4.975273103856537e-05,
+ "loss": 0.038,
+ "step": 1000
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 4.970085399378785e-05,
+ "loss": 0.0322,
+ "step": 1100
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 4.964407294725254e-05,
+ "loss": 0.0296,
+ "step": 1200
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 4.958239917202523e-05,
+ "loss": 0.0295,
+ "step": 1300
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 4.9515844912553106e-05,
+ "loss": 0.0171,
+ "step": 1400
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 4.944442338223378e-05,
+ "loss": 0.0205,
+ "step": 1500
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 4.9368148760792e-05,
+ "loss": 0.029,
+ "step": 1600
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 4.9287036191464414e-05,
+ "loss": 0.0172,
+ "step": 1700
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 4.9201101777993116e-05,
+ "loss": 0.0235,
+ "step": 1800
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 4.911036258142853e-05,
+ "loss": 0.0168,
+ "step": 1900
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 4.9014836616742065e-05,
+ "loss": 0.0227,
+ "step": 2000
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 4.891454284924965e-05,
+ "loss": 0.0227,
+ "step": 2100
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 4.8810575041794e-05,
+ "loss": 0.0173,
+ "step": 2200
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 4.8700853511049656e-05,
+ "loss": 0.0227,
+ "step": 2300
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 4.858642651436035e-05,
+ "loss": 0.0144,
+ "step": 2400
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 4.846731676957191e-05,
+ "loss": 0.03,
+ "step": 2500
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 4.834354792422293e-05,
+ "loss": 0.0189,
+ "step": 2600
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 4.821514455084985e-05,
+ "loss": 0.0111,
+ "step": 2700
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 4.8082132142108465e-05,
+ "loss": 0.0152,
+ "step": 2800
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 4.794453710571272e-05,
+ "loss": 0.0182,
+ "step": 2900
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 4.780238675919182e-05,
+ "loss": 0.0111,
+ "step": 3000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 4.765570932446672e-05,
+ "loss": 0.0138,
+ "step": 3100
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 4.75045339222471e-05,
+ "loss": 0.0256,
+ "step": 3200
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 4.734889056624983e-05,
+ "loss": 0.0199,
+ "step": 3300
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 4.718881015724017e-05,
+ "loss": 0.0159,
+ "step": 3400
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 4.702432447689692e-05,
+ "loss": 0.0121,
+ "step": 3500
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 4.6855466181502544e-05,
+ "loss": 0.0181,
+ "step": 3600
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 4.66822687954598e-05,
+ "loss": 0.0148,
+ "step": 3700
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 4.65047667046359e-05,
+ "loss": 0.0152,
+ "step": 3800
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 4.632299514953571e-05,
+ "loss": 0.0103,
+ "step": 3900
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 4.613699021830524e-05,
+ "loss": 0.0217,
+ "step": 4000
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 4.59467888395669e-05,
+ "loss": 0.0142,
+ "step": 4100
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 4.575242877508777e-05,
+ "loss": 0.0151,
+ "step": 4200
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 4.5553948612282607e-05,
+ "loss": 0.0164,
+ "step": 4300
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 4.5353433433160075e-05,
+ "loss": 0.0095,
+ "step": 4400
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 4.514687230313633e-05,
+ "loss": 0.015,
+ "step": 4500
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 4.49363112993749e-05,
+ "loss": 0.015,
+ "step": 4600
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 4.472179222575561e-05,
+ "loss": 0.0181,
+ "step": 4700
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 4.4503357671976574e-05,
+ "loss": 0.0175,
+ "step": 4800
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 4.428105100509852e-05,
+ "loss": 0.0069,
+ "step": 4900
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 4.4054916360934957e-05,
+ "loss": 0.0108,
+ "step": 5000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 4.3824998635289594e-05,
+ "loss": 0.0204,
+ "step": 5100
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 4.3591343475042946e-05,
+ "loss": 0.0192,
+ "step": 5200
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.3353997269089774e-05,
+ "loss": 0.0086,
+ "step": 5300
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.311300713912926e-05,
+ "loss": 0.0179,
+ "step": 5400
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 4.286842093030963e-05,
+ "loss": 0.0142,
+ "step": 5500
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 4.262028720172921e-05,
+ "loss": 0.0187,
+ "step": 5600
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.2368655216795735e-05,
+ "loss": 0.0176,
+ "step": 5700
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 4.211357493344573e-05,
+ "loss": 0.0145,
+ "step": 5800
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 4.185509699422615e-05,
+ "loss": 0.0087,
+ "step": 5900
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.1593272716239985e-05,
+ "loss": 0.0136,
+ "step": 6000
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.132815408095797e-05,
+ "loss": 0.0137,
+ "step": 6100
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 4.105979372389847e-05,
+ "loss": 0.011,
+ "step": 6200
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 4.0788244924177365e-05,
+ "loss": 0.024,
+ "step": 6300
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 4.0513561593930325e-05,
+ "loss": 0.0067,
+ "step": 6400
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 4.023579826760924e-05,
+ "loss": 0.0102,
+ "step": 6500
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 3.995501009115527e-05,
+ "loss": 0.013,
+ "step": 6600
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 3.9674104896418544e-05,
+ "loss": 0.0133,
+ "step": 6700
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 3.9387463695523555e-05,
+ "loss": 0.0087,
+ "step": 6800
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.9097966069208193e-05,
+ "loss": 0.01,
+ "step": 6900
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 3.880566949309125e-05,
+ "loss": 0.0113,
+ "step": 7000
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.8513595749088484e-05,
+ "loss": 0.0101,
+ "step": 7100
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 3.821590244319273e-05,
+ "loss": 0.0092,
+ "step": 7200
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 3.791558530862982e-05,
+ "loss": 0.0176,
+ "step": 7300
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 3.76127039690774e-05,
+ "loss": 0.0114,
+ "step": 7400
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 3.7307318557299355e-05,
+ "loss": 0.0083,
+ "step": 7500
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 3.699948970320738e-05,
+ "loss": 0.0067,
+ "step": 7600
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 3.668927852182374e-05,
+ "loss": 0.0021,
+ "step": 7700
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 3.637674660114774e-05,
+ "loss": 0.0087,
+ "step": 7800
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 3.606195598992832e-05,
+ "loss": 0.005,
+ "step": 7900
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 3.574496918534519e-05,
+ "loss": 0.014,
+ "step": 8000
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 3.542584912060087e-05,
+ "loss": 0.0073,
+ "step": 8100
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 3.510465915242623e-05,
+ "loss": 0.0073,
+ "step": 8200
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 3.478146304850188e-05,
+ "loss": 0.0106,
+ "step": 8300
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 3.4456324974798025e-05,
+ "loss": 0.0106,
+ "step": 8400
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 3.412930948283528e-05,
+ "loss": 0.0036,
+ "step": 8500
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 3.380048149686877e-05,
+ "loss": 0.0051,
+ "step": 8600
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 3.3469906300998476e-05,
+ "loss": 0.0047,
+ "step": 8700
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 3.313764952620792e-05,
+ "loss": 0.0035,
+ "step": 8800
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 3.280377713733412e-05,
+ "loss": 0.0094,
+ "step": 8900
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 3.246835541997116e-05,
+ "loss": 0.0077,
+ "step": 9000
+ }
+ ],
+ "logging_steps": 100,
+ "max_steps": 22296,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 3000,
+ "total_flos": 2.5168113233162404e+18,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/LLM-Detector-V7-11w/checkpoint-9000/training_args.bin b/LLM-Detector-V7-11w/checkpoint-9000/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..ac97b1c26d12cbfaab826f907d39398233627e61
--- /dev/null
+++ b/LLM-Detector-V7-11w/checkpoint-9000/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fda912c16bf2c6e8ac3158b354c96cabf8fd0eff786de104f90c23c31741ef25
+size 4856
diff --git a/LLM-Detector-V7-11w/special_tokens_map.json b/LLM-Detector-V7-11w/special_tokens_map.json
new file mode 100644
index 0000000000000000000000000000000000000000..72ecfeeb7e14d244c936169d2ed139eeae235ef1
--- /dev/null
+++ b/LLM-Detector-V7-11w/special_tokens_map.json
@@ -0,0 +1,24 @@
+{
+ "bos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "",
+ "unk_token": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+}
diff --git a/LLM-Detector-V7-11w/tokenizer.model b/LLM-Detector-V7-11w/tokenizer.model
new file mode 100644
index 0000000000000000000000000000000000000000..8b443ef19c2a19acc3ac64fb9c3db4a72921dff6
--- /dev/null
+++ b/LLM-Detector-V7-11w/tokenizer.model
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+size 493443
diff --git a/LLM-Detector-V7-11w/tokenizer_config.json b/LLM-Detector-V7-11w/tokenizer_config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e55462da3cd6a6f37c2b8fc230da6b14731e5ab1
--- /dev/null
+++ b/LLM-Detector-V7-11w/tokenizer_config.json
@@ -0,0 +1,45 @@
+{
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [],
+ "bos_token": "",
+ "chat_template": "{{ bos_token }}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if message['role'] == 'user' %}{{ '[INST] ' + message['content'] + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ message['content'] + eos_token + ' ' }}{% else %}{{ raise_exception('Only user and assistant roles are supported!') }}{% endif %}{% endfor %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "",
+ "legacy": true,
+ "model_max_length": 1000000000000000019884624838656,
+ "pad_token": "",
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "split_special_tokens": false,
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "",
+ "use_default_system_prompt": false
+}
diff --git a/LLM-Detector-V7-11w/train_results.json b/LLM-Detector-V7-11w/train_results.json
new file mode 100644
index 0000000000000000000000000000000000000000..eba90efd5a369c94068ceafc0cb7d8d1f6f0219f
--- /dev/null
+++ b/LLM-Detector-V7-11w/train_results.json
@@ -0,0 +1,7 @@
+{
+ "epoch": 3.0,
+ "train_loss": 0.016247458042738187,
+ "train_runtime": 104237.6159,
+ "train_samples_per_second": 3.423,
+ "train_steps_per_second": 0.214
+}
\ No newline at end of file
diff --git a/LLM-Detector-V7-11w/trainer_log.jsonl b/LLM-Detector-V7-11w/trainer_log.jsonl
new file mode 100644
index 0000000000000000000000000000000000000000..432bfea229ffed230e116bc4029ab825d36093b5
--- /dev/null
+++ b/LLM-Detector-V7-11w/trainer_log.jsonl
@@ -0,0 +1,223 @@
+{"current_steps": 100, "total_steps": 22296, "loss": 1.6283, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9997567688496474e-05, "epoch": 0.01, "percentage": 0.45, "elapsed_time": "0:07:41", "remaining_time": "1 day, 4:29:00"}
+{"current_steps": 200, "total_steps": 22296, "loss": 0.068, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9990172715142793e-05, "epoch": 0.03, "percentage": 0.9, "elapsed_time": "0:15:23", "remaining_time": "1 day, 4:20:34"}
+{"current_steps": 300, "total_steps": 22296, "loss": 0.0404, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.997781629993153e-05, "epoch": 0.04, "percentage": 1.35, "elapsed_time": "0:23:11", "remaining_time": "1 day, 4:20:59"}
+{"current_steps": 400, "total_steps": 22296, "loss": 0.0626, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9960500896052476e-05, "epoch": 0.05, "percentage": 1.79, "elapsed_time": "0:31:09", "remaining_time": "1 day, 4:25:24"}
+{"current_steps": 500, "total_steps": 22296, "loss": 0.0428, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.993822994123172e-05, "epoch": 0.07, "percentage": 2.24, "elapsed_time": "0:38:40", "remaining_time": "1 day, 4:06:05"}
+{"current_steps": 600, "total_steps": 22296, "loss": 0.0265, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9911007857049264e-05, "epoch": 0.08, "percentage": 2.69, "elapsed_time": "0:46:28", "remaining_time": "1 day, 4:00:33"}
+{"current_steps": 700, "total_steps": 22296, "loss": 0.0263, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.987884004806111e-05, "epoch": 0.09, "percentage": 3.14, "elapsed_time": "0:54:18", "remaining_time": "1 day, 3:55:22"}
+{"current_steps": 800, "total_steps": 22296, "loss": 0.03, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.984173290072626e-05, "epoch": 0.11, "percentage": 3.59, "elapsed_time": "1:02:00", "remaining_time": "1 day, 3:46:00"}
+{"current_steps": 900, "total_steps": 22296, "loss": 0.0253, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.979969378213884e-05, "epoch": 0.12, "percentage": 4.04, "elapsed_time": "1:09:46", "remaining_time": "1 day, 3:38:57"}
+{"current_steps": 1000, "total_steps": 22296, "loss": 0.038, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.975273103856537e-05, "epoch": 0.13, "percentage": 4.49, "elapsed_time": "1:17:46", "remaining_time": "1 day, 3:36:27"}
+{"current_steps": 1100, "total_steps": 22296, "loss": 0.0322, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.970085399378785e-05, "epoch": 0.15, "percentage": 4.93, "elapsed_time": "1:25:36", "remaining_time": "1 day, 3:29:27"}
+{"current_steps": 1200, "total_steps": 22296, "loss": 0.0296, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.964407294725254e-05, "epoch": 0.16, "percentage": 5.38, "elapsed_time": "1:33:16", "remaining_time": "1 day, 3:19:48"}
+{"current_steps": 1300, "total_steps": 22296, "loss": 0.0295, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.958239917202523e-05, "epoch": 0.17, "percentage": 5.83, "elapsed_time": "1:41:05", "remaining_time": "1 day, 3:12:41"}
+{"current_steps": 1400, "total_steps": 22296, "loss": 0.0171, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9515844912553106e-05, "epoch": 0.19, "percentage": 6.28, "elapsed_time": "1:48:48", "remaining_time": "1 day, 3:04:00"}
+{"current_steps": 1500, "total_steps": 22296, "loss": 0.0205, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.944442338223378e-05, "epoch": 0.2, "percentage": 6.73, "elapsed_time": "1:56:32", "remaining_time": "1 day, 2:55:49"}
+{"current_steps": 1600, "total_steps": 22296, "loss": 0.029, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9368148760792e-05, "epoch": 0.22, "percentage": 7.18, "elapsed_time": "2:04:25", "remaining_time": "1 day, 2:49:21"}
+{"current_steps": 1700, "total_steps": 22296, "loss": 0.0172, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9287036191464414e-05, "epoch": 0.23, "percentage": 7.62, "elapsed_time": "2:12:12", "remaining_time": "1 day, 2:41:46"}
+{"current_steps": 1800, "total_steps": 22296, "loss": 0.0235, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9201101777993116e-05, "epoch": 0.24, "percentage": 8.07, "elapsed_time": "2:20:10", "remaining_time": "1 day, 2:36:06"}
+{"current_steps": 1900, "total_steps": 22296, "loss": 0.0168, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.911036258142853e-05, "epoch": 0.26, "percentage": 8.52, "elapsed_time": "2:28:09", "remaining_time": "1 day, 2:30:23"}
+{"current_steps": 2000, "total_steps": 22296, "loss": 0.0227, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.9014836616742065e-05, "epoch": 0.27, "percentage": 8.97, "elapsed_time": "2:35:56", "remaining_time": "1 day, 2:22:32"}
+{"current_steps": 2100, "total_steps": 22296, "loss": 0.0227, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.891454284924965e-05, "epoch": 0.28, "percentage": 9.42, "elapsed_time": "2:43:32", "remaining_time": "1 day, 2:12:50"}
+{"current_steps": 2200, "total_steps": 22296, "loss": 0.0173, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8810575041794e-05, "epoch": 0.3, "percentage": 9.87, "elapsed_time": "2:51:20", "remaining_time": "1 day, 2:05:08"}
+{"current_steps": 2300, "total_steps": 22296, "loss": 0.0227, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8700853511049656e-05, "epoch": 0.31, "percentage": 10.32, "elapsed_time": "2:59:29", "remaining_time": "1 day, 2:00:33"}
+{"current_steps": 2400, "total_steps": 22296, "loss": 0.0144, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.858642651436035e-05, "epoch": 0.32, "percentage": 10.76, "elapsed_time": "3:07:29", "remaining_time": "1 day, 1:54:17"}
+{"current_steps": 2500, "total_steps": 22296, "loss": 0.03, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.846731676957191e-05, "epoch": 0.34, "percentage": 11.21, "elapsed_time": "3:15:27", "remaining_time": "1 day, 1:47:39"}
+{"current_steps": 2600, "total_steps": 22296, "loss": 0.0189, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.834354792422293e-05, "epoch": 0.35, "percentage": 11.66, "elapsed_time": "3:23:08", "remaining_time": "1 day, 1:38:54"}
+{"current_steps": 2700, "total_steps": 22296, "loss": 0.0111, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.821514455084985e-05, "epoch": 0.36, "percentage": 12.11, "elapsed_time": "3:30:59", "remaining_time": "1 day, 1:31:22"}
+{"current_steps": 2800, "total_steps": 22296, "loss": 0.0152, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8082132142108465e-05, "epoch": 0.38, "percentage": 12.56, "elapsed_time": "3:38:52", "remaining_time": "1 day, 1:23:57"}
+{"current_steps": 2900, "total_steps": 22296, "loss": 0.0182, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.794453710571272e-05, "epoch": 0.39, "percentage": 13.01, "elapsed_time": "3:46:27", "remaining_time": "1 day, 1:14:40"}
+{"current_steps": 3000, "total_steps": 22296, "loss": 0.0111, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.780238675919182e-05, "epoch": 0.4, "percentage": 13.46, "elapsed_time": "3:54:27", "remaining_time": "1 day, 1:08:04"}
+{"current_steps": 3100, "total_steps": 22296, "loss": 0.0138, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.765570932446672e-05, "epoch": 0.42, "percentage": 13.9, "elapsed_time": "4:02:23", "remaining_time": "1 day, 1:00:55"}
+{"current_steps": 3200, "total_steps": 22296, "loss": 0.0256, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.75045339222471e-05, "epoch": 0.43, "percentage": 14.35, "elapsed_time": "4:10:06", "remaining_time": "1 day, 0:52:32"}
+{"current_steps": 3300, "total_steps": 22296, "loss": 0.0199, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.734889056624983e-05, "epoch": 0.44, "percentage": 14.8, "elapsed_time": "4:17:57", "remaining_time": "1 day, 0:44:55"}
+{"current_steps": 3400, "total_steps": 22296, "loss": 0.0159, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.718881015724017e-05, "epoch": 0.46, "percentage": 15.25, "elapsed_time": "4:25:29", "remaining_time": "1 day, 0:35:28"}
+{"current_steps": 3500, "total_steps": 22296, "loss": 0.0121, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.702432447689692e-05, "epoch": 0.47, "percentage": 15.7, "elapsed_time": "4:33:24", "remaining_time": "1 day, 0:28:14"}
+{"current_steps": 3600, "total_steps": 22296, "loss": 0.0181, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.6855466181502544e-05, "epoch": 0.48, "percentage": 16.15, "elapsed_time": "4:41:13", "remaining_time": "1 day, 0:20:27"}
+{"current_steps": 3700, "total_steps": 22296, "loss": 0.0148, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.66822687954598e-05, "epoch": 0.5, "percentage": 16.59, "elapsed_time": "4:49:08", "remaining_time": "1 day, 0:13:12"}
+{"current_steps": 3800, "total_steps": 22296, "loss": 0.0152, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.65047667046359e-05, "epoch": 0.51, "percentage": 17.04, "elapsed_time": "4:56:46", "remaining_time": "1 day, 0:04:30"}
+{"current_steps": 3900, "total_steps": 22296, "loss": 0.0103, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.632299514953571e-05, "epoch": 0.52, "percentage": 17.49, "elapsed_time": "5:04:34", "remaining_time": "23:56:38"}
+{"current_steps": 4000, "total_steps": 22296, "loss": 0.0217, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.613699021830524e-05, "epoch": 0.54, "percentage": 17.94, "elapsed_time": "5:12:35", "remaining_time": "23:49:47"}
+{"current_steps": 4100, "total_steps": 22296, "loss": 0.0142, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.59467888395669e-05, "epoch": 0.55, "percentage": 18.39, "elapsed_time": "5:20:11", "remaining_time": "23:41:01"}
+{"current_steps": 4200, "total_steps": 22296, "loss": 0.0151, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.575242877508777e-05, "epoch": 0.57, "percentage": 18.84, "elapsed_time": "5:27:47", "remaining_time": "23:32:20"}
+{"current_steps": 4300, "total_steps": 22296, "loss": 0.0164, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.5553948612282607e-05, "epoch": 0.58, "percentage": 19.29, "elapsed_time": "5:35:38", "remaining_time": "23:24:42"}
+{"current_steps": 4400, "total_steps": 22296, "loss": 0.0095, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.5353433433160075e-05, "epoch": 0.59, "percentage": 19.73, "elapsed_time": "5:43:23", "remaining_time": "23:16:40"}
+{"current_steps": 4500, "total_steps": 22296, "loss": 0.015, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.514687230313633e-05, "epoch": 0.61, "percentage": 20.18, "elapsed_time": "5:51:13", "remaining_time": "23:08:56"}
+{"current_steps": 4600, "total_steps": 22296, "loss": 0.015, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.49363112993749e-05, "epoch": 0.62, "percentage": 20.63, "elapsed_time": "5:58:56", "remaining_time": "23:00:51"}
+{"current_steps": 4700, "total_steps": 22296, "loss": 0.0181, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.472179222575561e-05, "epoch": 0.63, "percentage": 21.08, "elapsed_time": "6:06:46", "remaining_time": "22:53:07"}
+{"current_steps": 4800, "total_steps": 22296, "loss": 0.0175, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.4503357671976574e-05, "epoch": 0.65, "percentage": 21.53, "elapsed_time": "6:14:32", "remaining_time": "22:45:12"}
+{"current_steps": 4900, "total_steps": 22296, "loss": 0.0069, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.428105100509852e-05, "epoch": 0.66, "percentage": 21.98, "elapsed_time": "6:22:21", "remaining_time": "22:37:28"}
+{"current_steps": 5000, "total_steps": 22296, "loss": 0.0108, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.4054916360934957e-05, "epoch": 0.67, "percentage": 22.43, "elapsed_time": "6:30:11", "remaining_time": "22:29:45"}
+{"current_steps": 5100, "total_steps": 22296, "loss": 0.0204, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.3824998635289594e-05, "epoch": 0.69, "percentage": 22.87, "elapsed_time": "6:37:52", "remaining_time": "22:21:33"}
+{"current_steps": 5200, "total_steps": 22296, "loss": 0.0192, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.3591343475042946e-05, "epoch": 0.7, "percentage": 23.32, "elapsed_time": "6:45:45", "remaining_time": "22:14:00"}
+{"current_steps": 5300, "total_steps": 22296, "loss": 0.0086, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.3353997269089774e-05, "epoch": 0.71, "percentage": 23.77, "elapsed_time": "6:53:35", "remaining_time": "22:06:16"}
+{"current_steps": 5400, "total_steps": 22296, "loss": 0.0179, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.311300713912926e-05, "epoch": 0.73, "percentage": 24.22, "elapsed_time": "7:01:35", "remaining_time": "21:59:07"}
+{"current_steps": 5500, "total_steps": 22296, "loss": 0.0142, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.286842093030963e-05, "epoch": 0.74, "percentage": 24.67, "elapsed_time": "7:09:30", "remaining_time": "21:51:37"}
+{"current_steps": 5600, "total_steps": 22296, "loss": 0.0187, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.262028720172921e-05, "epoch": 0.75, "percentage": 25.12, "elapsed_time": "7:17:03", "remaining_time": "21:43:02"}
+{"current_steps": 5700, "total_steps": 22296, "loss": 0.0176, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.2368655216795735e-05, "epoch": 0.77, "percentage": 25.57, "elapsed_time": "7:24:54", "remaining_time": "21:35:24"}
+{"current_steps": 5800, "total_steps": 22296, "loss": 0.0145, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.211357493344573e-05, "epoch": 0.78, "percentage": 26.01, "elapsed_time": "7:32:40", "remaining_time": "21:27:27"}
+{"current_steps": 5900, "total_steps": 22296, "loss": 0.0087, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.185509699422615e-05, "epoch": 0.79, "percentage": 26.46, "elapsed_time": "7:40:36", "remaining_time": "21:20:01"}
+{"current_steps": 6000, "total_steps": 22296, "loss": 0.0136, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.1593272716239985e-05, "epoch": 0.81, "percentage": 26.91, "elapsed_time": "7:48:16", "remaining_time": "21:11:50"}
+{"current_steps": 6100, "total_steps": 22296, "loss": 0.0137, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.132815408095797e-05, "epoch": 0.82, "percentage": 27.36, "elapsed_time": "7:56:12", "remaining_time": "21:04:23"}
+{"current_steps": 6200, "total_steps": 22296, "loss": 0.011, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.105979372389847e-05, "epoch": 0.83, "percentage": 27.81, "elapsed_time": "8:03:58", "remaining_time": "20:56:27"}
+{"current_steps": 6300, "total_steps": 22296, "loss": 0.024, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.0788244924177365e-05, "epoch": 0.85, "percentage": 28.26, "elapsed_time": "8:11:53", "remaining_time": "20:48:55"}
+{"current_steps": 6400, "total_steps": 22296, "loss": 0.0067, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.0513561593930325e-05, "epoch": 0.86, "percentage": 28.7, "elapsed_time": "8:19:27", "remaining_time": "20:40:30"}
+{"current_steps": 6500, "total_steps": 22296, "loss": 0.0102, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.023579826760924e-05, "epoch": 0.87, "percentage": 29.15, "elapsed_time": "8:27:24", "remaining_time": "20:33:04"}
+{"current_steps": 6600, "total_steps": 22296, "loss": 0.013, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.995501009115527e-05, "epoch": 0.89, "percentage": 29.6, "elapsed_time": "8:35:12", "remaining_time": "20:25:14"}
+{"current_steps": 6700, "total_steps": 22296, "loss": 0.0133, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.9674104896418544e-05, "epoch": 0.9, "percentage": 30.05, "elapsed_time": "8:43:12", "remaining_time": "20:17:54"}
+{"current_steps": 6800, "total_steps": 22296, "loss": 0.0087, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.9387463695523555e-05, "epoch": 0.91, "percentage": 30.5, "elapsed_time": "8:50:59", "remaining_time": "20:10:01"}
+{"current_steps": 6900, "total_steps": 22296, "loss": 0.01, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.9097966069208193e-05, "epoch": 0.93, "percentage": 30.95, "elapsed_time": "8:58:47", "remaining_time": "20:02:13"}
+{"current_steps": 7000, "total_steps": 22296, "loss": 0.0113, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.880566949309125e-05, "epoch": 0.94, "percentage": 31.4, "elapsed_time": "9:06:27", "remaining_time": "19:54:04"}
+{"current_steps": 7100, "total_steps": 22296, "loss": 0.0101, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.8513595749088484e-05, "epoch": 0.96, "percentage": 31.84, "elapsed_time": "9:14:13", "remaining_time": "19:46:11"}
+{"current_steps": 7200, "total_steps": 22296, "loss": 0.0092, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.821590244319273e-05, "epoch": 0.97, "percentage": 32.29, "elapsed_time": "9:22:10", "remaining_time": "19:38:42"}
+{"current_steps": 7300, "total_steps": 22296, "loss": 0.0176, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.791558530862982e-05, "epoch": 0.98, "percentage": 32.74, "elapsed_time": "9:29:59", "remaining_time": "19:30:54"}
+{"current_steps": 7400, "total_steps": 22296, "loss": 0.0114, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.76127039690774e-05, "epoch": 1.0, "percentage": 33.19, "elapsed_time": "9:37:46", "remaining_time": "19:23:02"}
+{"current_steps": 7500, "total_steps": 22296, "loss": 0.0083, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.7307318557299355e-05, "epoch": 1.01, "percentage": 33.64, "elapsed_time": "9:45:25", "remaining_time": "19:14:55"}
+{"current_steps": 7600, "total_steps": 22296, "loss": 0.0067, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.699948970320738e-05, "epoch": 1.02, "percentage": 34.09, "elapsed_time": "9:53:01", "remaining_time": "19:06:43"}
+{"current_steps": 7700, "total_steps": 22296, "loss": 0.0021, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.668927852182374e-05, "epoch": 1.04, "percentage": 34.54, "elapsed_time": "10:00:39", "remaining_time": "18:58:36"}
+{"current_steps": 7800, "total_steps": 22296, "loss": 0.0087, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.637674660114774e-05, "epoch": 1.05, "percentage": 34.98, "elapsed_time": "10:08:39", "remaining_time": "18:51:10"}
+{"current_steps": 7900, "total_steps": 22296, "loss": 0.005, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.606195598992832e-05, "epoch": 1.06, "percentage": 35.43, "elapsed_time": "10:16:21", "remaining_time": "18:43:10"}
+{"current_steps": 8000, "total_steps": 22296, "loss": 0.014, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.574496918534519e-05, "epoch": 1.08, "percentage": 35.88, "elapsed_time": "10:24:04", "remaining_time": "18:35:12"}
+{"current_steps": 8100, "total_steps": 22296, "loss": 0.0073, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.542584912060087e-05, "epoch": 1.09, "percentage": 36.33, "elapsed_time": "10:31:58", "remaining_time": "18:27:36"}
+{"current_steps": 8200, "total_steps": 22296, "loss": 0.0073, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.510465915242623e-05, "epoch": 1.1, "percentage": 36.78, "elapsed_time": "10:39:46", "remaining_time": "18:19:48"}
+{"current_steps": 8300, "total_steps": 22296, "loss": 0.0106, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.478146304850188e-05, "epoch": 1.12, "percentage": 37.23, "elapsed_time": "10:47:27", "remaining_time": "18:11:47"}
+{"current_steps": 8400, "total_steps": 22296, "loss": 0.0106, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.4456324974798025e-05, "epoch": 1.13, "percentage": 37.67, "elapsed_time": "10:54:59", "remaining_time": "18:03:33"}
+{"current_steps": 8500, "total_steps": 22296, "loss": 0.0036, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.412930948283528e-05, "epoch": 1.14, "percentage": 38.12, "elapsed_time": "11:02:53", "remaining_time": "17:55:54"}
+{"current_steps": 8600, "total_steps": 22296, "loss": 0.0051, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.380048149686877e-05, "epoch": 1.16, "percentage": 38.57, "elapsed_time": "11:10:46", "remaining_time": "17:48:15"}
+{"current_steps": 8700, "total_steps": 22296, "loss": 0.0047, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.3469906300998476e-05, "epoch": 1.17, "percentage": 39.02, "elapsed_time": "11:18:26", "remaining_time": "17:40:13"}
+{"current_steps": 8800, "total_steps": 22296, "loss": 0.0035, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.313764952620792e-05, "epoch": 1.18, "percentage": 39.47, "elapsed_time": "11:25:59", "remaining_time": "17:32:03"}
+{"current_steps": 8900, "total_steps": 22296, "loss": 0.0094, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.280377713733412e-05, "epoch": 1.2, "percentage": 39.92, "elapsed_time": "11:33:59", "remaining_time": "17:24:34"}
+{"current_steps": 9000, "total_steps": 22296, "loss": 0.0077, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.246835541997116e-05, "epoch": 1.21, "percentage": 40.37, "elapsed_time": "11:41:51", "remaining_time": "17:16:53"}
+{"current_steps": 9100, "total_steps": 22296, "loss": 0.0076, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.213145096731021e-05, "epoch": 1.22, "percentage": 40.81, "elapsed_time": "11:49:31", "remaining_time": "17:08:53"}
+{"current_steps": 9200, "total_steps": 22296, "loss": 0.0074, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.1793130666918324e-05, "epoch": 1.24, "percentage": 41.26, "elapsed_time": "11:57:20", "remaining_time": "17:01:06"}
+{"current_steps": 9300, "total_steps": 22296, "loss": 0.0072, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.145346168745891e-05, "epoch": 1.25, "percentage": 41.71, "elapsed_time": "12:04:51", "remaining_time": "16:52:56"}
+{"current_steps": 9400, "total_steps": 22296, "loss": 0.0043, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.111251146535634e-05, "epoch": 1.26, "percentage": 42.16, "elapsed_time": "12:12:26", "remaining_time": "16:44:51"}
+{"current_steps": 9500, "total_steps": 22296, "loss": 0.0071, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.077377511336779e-05, "epoch": 1.28, "percentage": 42.61, "elapsed_time": "12:20:08", "remaining_time": "16:36:56"}
+{"current_steps": 9600, "total_steps": 22296, "loss": 0.0102, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.043047683848379e-05, "epoch": 1.29, "percentage": 43.06, "elapsed_time": "12:27:54", "remaining_time": "16:29:05"}
+{"current_steps": 9700, "total_steps": 22296, "loss": 0.005, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.0086100419987084e-05, "epoch": 1.31, "percentage": 43.51, "elapsed_time": "12:35:43", "remaining_time": "16:21:20"}
+{"current_steps": 9800, "total_steps": 22296, "loss": 0.0155, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.974071422889685e-05, "epoch": 1.32, "percentage": 43.95, "elapsed_time": "12:43:41", "remaining_time": "16:13:46"}
+{"current_steps": 9900, "total_steps": 22296, "loss": 0.0086, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.9394386836708154e-05, "epoch": 1.33, "percentage": 44.4, "elapsed_time": "12:51:21", "remaining_time": "16:05:50"}
+{"current_steps": 10000, "total_steps": 22296, "loss": 0.0048, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.9047187001778132e-05, "epoch": 1.35, "percentage": 44.85, "elapsed_time": "12:59:02", "remaining_time": "15:57:54"}
+{"current_steps": 10100, "total_steps": 22296, "loss": 0.0085, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8699183655674938e-05, "epoch": 1.36, "percentage": 45.3, "elapsed_time": "13:06:42", "remaining_time": "15:49:58"}
+{"current_steps": 10200, "total_steps": 22296, "loss": 0.0091, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.835044588949241e-05, "epoch": 1.37, "percentage": 45.75, "elapsed_time": "13:14:31", "remaining_time": "15:42:12"}
+{"current_steps": 10300, "total_steps": 22296, "loss": 0.0089, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.8001042940132995e-05, "epoch": 1.39, "percentage": 46.2, "elapsed_time": "13:22:21", "remaining_time": "15:34:29"}
+{"current_steps": 10400, "total_steps": 22296, "loss": 0.0093, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7651044176561748e-05, "epoch": 1.4, "percentage": 46.65, "elapsed_time": "13:30:15", "remaining_time": "15:26:48"}
+{"current_steps": 10500, "total_steps": 22296, "loss": 0.0073, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.7300519086034166e-05, "epoch": 1.41, "percentage": 47.09, "elapsed_time": "13:38:00", "remaining_time": "15:18:58"}
+{"current_steps": 10600, "total_steps": 22296, "loss": 0.0029, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6949537260300395e-05, "epoch": 1.43, "percentage": 47.54, "elapsed_time": "13:45:52", "remaining_time": "15:11:15"}
+{"current_steps": 10700, "total_steps": 22296, "loss": 0.0027, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6598168381788864e-05, "epoch": 1.44, "percentage": 47.99, "elapsed_time": "13:53:36", "remaining_time": "15:03:24"}
+{"current_steps": 10800, "total_steps": 22296, "loss": 0.011, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6246482209771783e-05, "epoch": 1.45, "percentage": 48.44, "elapsed_time": "14:01:30", "remaining_time": "14:55:43"}
+{"current_steps": 10900, "total_steps": 22296, "loss": 0.0108, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.5894548566515485e-05, "epoch": 1.47, "percentage": 48.89, "elapsed_time": "14:09:13", "remaining_time": "14:47:52"}
+{"current_steps": 11000, "total_steps": 22296, "loss": 0.0033, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.554243732341816e-05, "epoch": 1.48, "percentage": 49.34, "elapsed_time": "14:16:51", "remaining_time": "14:39:54"}
+{"current_steps": 11100, "total_steps": 22296, "loss": 0.0048, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.519021838713791e-05, "epoch": 1.49, "percentage": 49.78, "elapsed_time": "14:24:30", "remaining_time": "14:31:59"}
+{"current_steps": 11200, "total_steps": 22296, "loss": 0.0015, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.483796168571378e-05, "epoch": 1.51, "percentage": 50.23, "elapsed_time": "14:32:24", "remaining_time": "14:24:18"}
+{"current_steps": 11300, "total_steps": 22296, "loss": 0.0053, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.4485737154682554e-05, "epoch": 1.52, "percentage": 50.68, "elapsed_time": "14:39:58", "remaining_time": "14:16:18"}
+{"current_steps": 11400, "total_steps": 22296, "loss": 0.0043, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.4133614723194047e-05, "epoch": 1.53, "percentage": 51.13, "elapsed_time": "14:47:55", "remaining_time": "14:08:39"}
+{"current_steps": 11500, "total_steps": 22296, "loss": 0.0086, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.378166430012766e-05, "epoch": 1.55, "percentage": 51.58, "elapsed_time": "14:55:23", "remaining_time": "14:00:34"}
+{"current_steps": 11600, "total_steps": 22296, "loss": 0.0077, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.3429955760212966e-05, "epoch": 1.56, "percentage": 52.03, "elapsed_time": "15:02:54", "remaining_time": "13:52:32"}
+{"current_steps": 11700, "total_steps": 22296, "loss": 0.0066, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.307855893015712e-05, "epoch": 1.57, "percentage": 52.48, "elapsed_time": "15:10:42", "remaining_time": "13:44:46"}
+{"current_steps": 11800, "total_steps": 22296, "loss": 0.0055, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.272754357478172e-05, "epoch": 1.59, "percentage": 52.92, "elapsed_time": "15:18:45", "remaining_time": "13:37:13"}
+{"current_steps": 11900, "total_steps": 22296, "loss": 0.0067, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.237697938317202e-05, "epoch": 1.6, "percentage": 53.37, "elapsed_time": "15:26:38", "remaining_time": "13:29:31"}
+{"current_steps": 12000, "total_steps": 22296, "loss": 0.0082, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.202693595484113e-05, "epoch": 1.61, "percentage": 53.82, "elapsed_time": "15:34:33", "remaining_time": "13:21:51"}
+{"current_steps": 12100, "total_steps": 22296, "loss": 0.0101, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.1680974167907623e-05, "epoch": 1.63, "percentage": 54.27, "elapsed_time": "15:42:24", "remaining_time": "13:14:07"}
+{"current_steps": 12200, "total_steps": 22296, "loss": 0.0019, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.1332173697925385e-05, "epoch": 1.64, "percentage": 54.72, "elapsed_time": "15:50:18", "remaining_time": "13:06:24"}
+{"current_steps": 12300, "total_steps": 22296, "loss": 0.0106, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.09841014224678e-05, "epoch": 1.65, "percentage": 55.17, "elapsed_time": "15:57:50", "remaining_time": "12:58:25"}
+{"current_steps": 12400, "total_steps": 22296, "loss": 0.0051, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.063682644631365e-05, "epoch": 1.67, "percentage": 55.62, "elapsed_time": "16:05:53", "remaining_time": "12:50:50"}
+{"current_steps": 12500, "total_steps": 22296, "loss": 0.0076, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.0290417715949304e-05, "epoch": 1.68, "percentage": 56.06, "elapsed_time": "16:13:36", "remaining_time": "12:42:59"}
+{"current_steps": 12600, "total_steps": 22296, "loss": 0.0037, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.994494400588042e-05, "epoch": 1.7, "percentage": 56.51, "elapsed_time": "16:21:22", "remaining_time": "12:35:11"}
+{"current_steps": 12700, "total_steps": 22296, "loss": 0.0073, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.9600473904977725e-05, "epoch": 1.71, "percentage": 56.96, "elapsed_time": "16:29:11", "remaining_time": "12:27:25"}
+{"current_steps": 12800, "total_steps": 22296, "loss": 0.0052, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.9257075802859685e-05, "epoch": 1.72, "percentage": 57.41, "elapsed_time": "16:37:08", "remaining_time": "12:19:44"}
+{"current_steps": 12900, "total_steps": 22296, "loss": 0.0038, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.89148178763147e-05, "epoch": 1.74, "percentage": 57.86, "elapsed_time": "16:44:59", "remaining_time": "12:12:00"}
+{"current_steps": 13000, "total_steps": 22296, "loss": 0.0044, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.857376807576563e-05, "epoch": 1.75, "percentage": 58.31, "elapsed_time": "16:52:54", "remaining_time": "12:04:18"}
+{"current_steps": 13100, "total_steps": 22296, "loss": 0.0031, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.8233994111779146e-05, "epoch": 1.76, "percentage": 58.75, "elapsed_time": "17:00:55", "remaining_time": "11:56:40"}
+{"current_steps": 13200, "total_steps": 22296, "loss": 0.0067, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7895563441622803e-05, "epoch": 1.78, "percentage": 59.2, "elapsed_time": "17:08:41", "remaining_time": "11:48:51"}
+{"current_steps": 13300, "total_steps": 22296, "loss": 0.0096, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7558543255872352e-05, "epoch": 1.79, "percentage": 59.65, "elapsed_time": "17:16:40", "remaining_time": "11:41:11"}
+{"current_steps": 13400, "total_steps": 22296, "loss": 0.0063, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7223000465072e-05, "epoch": 1.8, "percentage": 60.1, "elapsed_time": "17:24:46", "remaining_time": "11:33:36"}
+{"current_steps": 13500, "total_steps": 22296, "loss": 0.0064, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.68890016864503e-05, "epoch": 1.82, "percentage": 60.55, "elapsed_time": "17:32:34", "remaining_time": "11:25:48"}
+{"current_steps": 13600, "total_steps": 22296, "loss": 0.0047, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6556613230694243e-05, "epoch": 1.83, "percentage": 61.0, "elapsed_time": "17:40:18", "remaining_time": "11:17:58"}
+{"current_steps": 13700, "total_steps": 22296, "loss": 0.0023, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6225901088784207e-05, "epoch": 1.84, "percentage": 61.45, "elapsed_time": "17:48:06", "remaining_time": "11:10:10"}
+{"current_steps": 13800, "total_steps": 22296, "loss": 0.0064, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.58969309188924e-05, "epoch": 1.86, "percentage": 61.89, "elapsed_time": "17:55:57", "remaining_time": "11:02:24"}
+{"current_steps": 13900, "total_steps": 22296, "loss": 0.004, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.556976803334737e-05, "epoch": 1.87, "percentage": 62.34, "elapsed_time": "18:03:44", "remaining_time": "10:54:36"}
+{"current_steps": 14000, "total_steps": 22296, "loss": 0.0016, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5244477385667167e-05, "epoch": 1.88, "percentage": 62.79, "elapsed_time": "18:11:32", "remaining_time": "10:46:49"}
+{"current_steps": 14100, "total_steps": 22296, "loss": 0.008, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4921123557663763e-05, "epoch": 1.9, "percentage": 63.24, "elapsed_time": "18:19:12", "remaining_time": "10:38:56"}
+{"current_steps": 14200, "total_steps": 22296, "loss": 0.0047, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4599770746621238e-05, "epoch": 1.91, "percentage": 63.69, "elapsed_time": "18:26:49", "remaining_time": "10:31:03"}
+{"current_steps": 14300, "total_steps": 22296, "loss": 0.0056, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4280482752550327e-05, "epoch": 1.92, "percentage": 64.14, "elapsed_time": "18:34:32", "remaining_time": "10:23:12"}
+{"current_steps": 14400, "total_steps": 22296, "loss": 0.0065, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.396332296552188e-05, "epoch": 1.94, "percentage": 64.59, "elapsed_time": "18:42:25", "remaining_time": "10:15:27"}
+{"current_steps": 14500, "total_steps": 22296, "loss": 0.004, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3648354353081588e-05, "epoch": 1.95, "percentage": 65.03, "elapsed_time": "18:50:10", "remaining_time": "10:07:38"}
+{"current_steps": 14600, "total_steps": 22296, "loss": 0.0065, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.333563944774876e-05, "epoch": 1.96, "percentage": 65.48, "elapsed_time": "18:58:01", "remaining_time": "9:59:52"}
+{"current_steps": 14700, "total_steps": 22296, "loss": 0.0026, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3028332659827522e-05, "epoch": 1.98, "percentage": 65.93, "elapsed_time": "19:05:46", "remaining_time": "9:52:03"}
+{"current_steps": 14800, "total_steps": 22296, "loss": 0.008, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2720286886495473e-05, "epoch": 1.99, "percentage": 66.38, "elapsed_time": "19:13:28", "remaining_time": "9:44:13"}
+{"current_steps": 14900, "total_steps": 22296, "loss": 0.0053, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2414679074810755e-05, "epoch": 2.0, "percentage": 66.83, "elapsed_time": "19:21:08", "remaining_time": "9:36:21"}
+{"current_steps": 15000, "total_steps": 22296, "loss": 0.0024, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.211156989883942e-05, "epoch": 2.02, "percentage": 67.28, "elapsed_time": "19:28:51", "remaining_time": "9:28:32"}
+{"current_steps": 15100, "total_steps": 22296, "loss": 0.0014, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1811019536579015e-05, "epoch": 2.03, "percentage": 67.73, "elapsed_time": "19:36:36", "remaining_time": "9:20:43"}
+{"current_steps": 15200, "total_steps": 22296, "loss": 0.0005, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1513087658011207e-05, "epoch": 2.05, "percentage": 68.17, "elapsed_time": "19:44:32", "remaining_time": "9:12:59"}
+{"current_steps": 15300, "total_steps": 22296, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1217833413255133e-05, "epoch": 2.06, "percentage": 68.62, "elapsed_time": "19:52:11", "remaining_time": "9:05:08"}
+{"current_steps": 15400, "total_steps": 22296, "loss": 0.0023, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0925315420823949e-05, "epoch": 2.07, "percentage": 69.07, "elapsed_time": "20:00:03", "remaining_time": "8:57:22"}
+{"current_steps": 15500, "total_steps": 22296, "loss": 0.0003, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.063559175598702e-05, "epoch": 2.09, "percentage": 69.52, "elapsed_time": "20:07:36", "remaining_time": "8:49:28"}
+{"current_steps": 15600, "total_steps": 22296, "loss": 0.0009, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0348719939239885e-05, "epoch": 2.1, "percentage": 69.97, "elapsed_time": "20:15:16", "remaining_time": "8:41:37"}
+{"current_steps": 15700, "total_steps": 22296, "loss": 0.0059, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0064756924884342e-05, "epoch": 2.11, "percentage": 70.42, "elapsed_time": "20:23:05", "remaining_time": "8:33:51"}
+{"current_steps": 15800, "total_steps": 22296, "loss": 0.0005, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.783759089721054e-06, "epoch": 2.13, "percentage": 70.86, "elapsed_time": "20:30:38", "remaining_time": "8:25:58"}
+{"current_steps": 15900, "total_steps": 22296, "loss": 0.0009, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.505782221856696e-06, "epoch": 2.14, "percentage": 71.31, "elapsed_time": "20:38:26", "remaining_time": "8:18:10"}
+{"current_steps": 16000, "total_steps": 22296, "loss": 0.0013, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.230881509628037e-06, "epoch": 2.15, "percentage": 71.76, "elapsed_time": "20:46:23", "remaining_time": "8:10:27"}
+{"current_steps": 16100, "total_steps": 22296, "loss": 0.0017, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.959111530645085e-06, "epoch": 2.17, "percentage": 72.21, "elapsed_time": "20:53:56", "remaining_time": "8:02:34"}
+{"current_steps": 16200, "total_steps": 22296, "loss": 0.0025, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.690526240955454e-06, "epoch": 2.18, "percentage": 72.66, "elapsed_time": "21:01:39", "remaining_time": "7:54:45"}
+{"current_steps": 16300, "total_steps": 22296, "loss": 0.005, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.425178964332225e-06, "epoch": 2.19, "percentage": 73.11, "elapsed_time": "21:09:34", "remaining_time": "7:47:00"}
+{"current_steps": 16400, "total_steps": 22296, "loss": 0.0039, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.163122381687191e-06, "epoch": 2.21, "percentage": 73.56, "elapsed_time": "21:17:16", "remaining_time": "7:39:11"}
+{"current_steps": 16500, "total_steps": 22296, "loss": 0.0002, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.90440852061189e-06, "epoch": 2.22, "percentage": 74.0, "elapsed_time": "21:25:17", "remaining_time": "7:31:29"}
+{"current_steps": 16600, "total_steps": 22296, "loss": 0.0014, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.64908874504823e-06, "epoch": 2.23, "percentage": 74.45, "elapsed_time": "21:32:59", "remaining_time": "7:23:39"}
+{"current_steps": 16700, "total_steps": 22296, "loss": 0.0008, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.3972137450908895e-06, "epoch": 2.25, "percentage": 74.9, "elapsed_time": "21:40:48", "remaining_time": "7:15:53"}
+{"current_steps": 16800, "total_steps": 22296, "loss": 0.0031, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.148833526923606e-06, "epoch": 2.26, "percentage": 75.35, "elapsed_time": "21:48:34", "remaining_time": "7:08:05"}
+{"current_steps": 16900, "total_steps": 22296, "loss": 0.0022, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.9039974028910575e-06, "epoch": 2.27, "percentage": 75.8, "elapsed_time": "21:56:28", "remaining_time": "7:00:20"}
+{"current_steps": 17000, "total_steps": 22296, "loss": 0.0006, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.6627539817086775e-06, "epoch": 2.29, "percentage": 76.25, "elapsed_time": "22:04:32", "remaining_time": "6:52:37"}
+{"current_steps": 17100, "total_steps": 22296, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.425151158812067e-06, "epoch": 2.3, "percentage": 76.7, "elapsed_time": "22:12:29", "remaining_time": "6:44:53"}
+{"current_steps": 17200, "total_steps": 22296, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.191236106848014e-06, "epoch": 2.31, "percentage": 77.14, "elapsed_time": "22:20:16", "remaining_time": "6:37:05"}
+{"current_steps": 17300, "total_steps": 22296, "loss": 0.0055, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.961055266309076e-06, "epoch": 2.33, "percentage": 77.59, "elapsed_time": "22:28:04", "remaining_time": "6:29:18"}
+{"current_steps": 17400, "total_steps": 22296, "loss": 0.0006, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.73465433631348e-06, "epoch": 2.34, "percentage": 78.04, "elapsed_time": "22:35:52", "remaining_time": "6:21:30"}
+{"current_steps": 17500, "total_steps": 22296, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.512078265532167e-06, "epoch": 2.35, "percentage": 78.49, "elapsed_time": "22:43:35", "remaining_time": "6:13:42"}
+{"current_steps": 17600, "total_steps": 22296, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.293371243264947e-06, "epoch": 2.37, "percentage": 78.94, "elapsed_time": "22:51:20", "remaining_time": "6:05:53"}
+{"current_steps": 17700, "total_steps": 22296, "loss": 0.001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.078576690667289e-06, "epoch": 2.38, "percentage": 79.39, "elapsed_time": "22:59:11", "remaining_time": "5:58:07"}
+{"current_steps": 17800, "total_steps": 22296, "loss": 0.0003, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.8677372521297e-06, "epoch": 2.39, "percentage": 79.83, "elapsed_time": "23:06:57", "remaining_time": "5:50:19"}
+{"current_steps": 17900, "total_steps": 22296, "loss": 0.0027, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.660894786811287e-06, "epoch": 2.41, "percentage": 80.28, "elapsed_time": "23:14:48", "remaining_time": "5:42:32"}
+{"current_steps": 18000, "total_steps": 22296, "loss": 0.0027, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.4580903603291895e-06, "epoch": 2.42, "percentage": 80.73, "elapsed_time": "23:22:46", "remaining_time": "5:34:47"}
+{"current_steps": 18100, "total_steps": 22296, "loss": 0.0004, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.259364236605634e-06, "epoch": 2.44, "percentage": 81.18, "elapsed_time": "23:30:28", "remaining_time": "5:26:58"}
+{"current_steps": 18200, "total_steps": 22296, "loss": 0.0008, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.064755869874084e-06, "epoch": 2.45, "percentage": 81.63, "elapsed_time": "23:38:11", "remaining_time": "5:19:10"}
+{"current_steps": 18300, "total_steps": 22296, "loss": 0.0011, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.874303896846149e-06, "epoch": 2.46, "percentage": 82.08, "elapsed_time": "23:46:00", "remaining_time": "5:11:23"}
+{"current_steps": 18400, "total_steps": 22296, "loss": 0.0004, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.6880461290408507e-06, "epoch": 2.48, "percentage": 82.53, "elapsed_time": "23:53:39", "remaining_time": "5:03:33"}
+{"current_steps": 18500, "total_steps": 22296, "loss": 0.0015, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.5060195452776557e-06, "epoch": 2.49, "percentage": 82.97, "elapsed_time": "1 day, 0:01:05", "remaining_time": "4:55:41"}
+{"current_steps": 18600, "total_steps": 22296, "loss": 0.0003, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.3282602843348943e-06, "epoch": 2.5, "percentage": 83.42, "elapsed_time": "1 day, 0:08:50", "remaining_time": "4:47:53"}
+{"current_steps": 18700, "total_steps": 22296, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.1548036377748863e-06, "epoch": 2.52, "percentage": 83.87, "elapsed_time": "1 day, 0:16:41", "remaining_time": "4:40:07"}
+{"current_steps": 18800, "total_steps": 22296, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.9856840429373144e-06, "epoch": 2.53, "percentage": 84.32, "elapsed_time": "1 day, 0:24:26", "remaining_time": "4:32:19"}
+{"current_steps": 18900, "total_steps": 22296, "loss": 0.0021, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.820935076102205e-06, "epoch": 2.54, "percentage": 84.77, "elapsed_time": "1 day, 0:32:16", "remaining_time": "4:24:32"}
+{"current_steps": 19000, "total_steps": 22296, "loss": 0.0019, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.6621710003546298e-06, "epoch": 2.56, "percentage": 85.22, "elapsed_time": "1 day, 0:39:56", "remaining_time": "4:16:43"}
+{"current_steps": 19100, "total_steps": 22296, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.50621603456484e-06, "epoch": 2.57, "percentage": 85.67, "elapsed_time": "1 day, 0:47:40", "remaining_time": "4:08:55"}
+{"current_steps": 19200, "total_steps": 22296, "loss": 0.0013, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.356219574465643e-06, "epoch": 2.58, "percentage": 86.11, "elapsed_time": "1 day, 0:55:30", "remaining_time": "4:01:09"}
+{"current_steps": 19300, "total_steps": 22296, "loss": 0.0014, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.2091812188310717e-06, "epoch": 2.6, "percentage": 86.56, "elapsed_time": "1 day, 1:03:17", "remaining_time": "3:53:21"}
+{"current_steps": 19400, "total_steps": 22296, "loss": 0.0013, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.0666676547438866e-06, "epoch": 2.61, "percentage": 87.01, "elapsed_time": "1 day, 1:11:13", "remaining_time": "3:45:35"}
+{"current_steps": 19500, "total_steps": 22296, "loss": 0.0007, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.9287071762367076e-06, "epoch": 2.62, "percentage": 87.46, "elapsed_time": "1 day, 1:19:05", "remaining_time": "3:37:48"}
+{"current_steps": 19600, "total_steps": 22296, "loss": 0.0003, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.7953271733920502e-06, "epoch": 2.64, "percentage": 87.91, "elapsed_time": "1 day, 1:26:58", "remaining_time": "3:30:02"}
+{"current_steps": 19700, "total_steps": 22296, "loss": 0.0016, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6665541269044155e-06, "epoch": 2.65, "percentage": 88.36, "elapsed_time": "1 day, 1:34:35", "remaining_time": "3:22:13"}
+{"current_steps": 19800, "total_steps": 22296, "loss": 0.0007, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.5424136028229118e-06, "epoch": 2.66, "percentage": 88.81, "elapsed_time": "1 day, 1:42:32", "remaining_time": "3:14:27"}
+{"current_steps": 19900, "total_steps": 22296, "loss": 0.0027, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.4229302474755013e-06, "epoch": 2.68, "percentage": 89.25, "elapsed_time": "1 day, 1:49:53", "remaining_time": "3:06:36"}
+{"current_steps": 20000, "total_steps": 22296, "loss": 0.0011, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.3081277825757992e-06, "epoch": 2.69, "percentage": 89.7, "elapsed_time": "1 day, 1:58:31", "remaining_time": "2:58:55"}
+{"current_steps": 20100, "total_steps": 22296, "loss": 0.0017, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.1980290005134843e-06, "epoch": 2.7, "percentage": 90.15, "elapsed_time": "1 day, 2:06:24", "remaining_time": "2:51:08"}
+{"current_steps": 20200, "total_steps": 22296, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0926557598292002e-06, "epoch": 2.72, "percentage": 90.6, "elapsed_time": "1 day, 2:14:12", "remaining_time": "2:43:20"}
+{"current_steps": 20300, "total_steps": 22296, "loss": 0.0025, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.92028980874829e-07, "epoch": 2.73, "percentage": 91.05, "elapsed_time": "1 day, 2:22:00", "remaining_time": "2:35:33"}
+{"current_steps": 20400, "total_steps": 22296, "loss": 0.0006, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.961686416600834e-07, "epoch": 2.74, "percentage": 91.5, "elapsed_time": "1 day, 2:29:33", "remaining_time": "2:27:44"}
+{"current_steps": 20500, "total_steps": 22296, "loss": 0.0023, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 8.05093773886137e-07, "epoch": 2.76, "percentage": 91.94, "elapsed_time": "1 day, 2:37:26", "remaining_time": "2:19:57"}
+{"current_steps": 20600, "total_steps": 22296, "loss": 0.0009, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 7.18822459167165e-07, "epoch": 2.77, "percentage": 92.39, "elapsed_time": "1 day, 2:45:04", "remaining_time": "2:12:08"}
+{"current_steps": 20700, "total_steps": 22296, "loss": 0.0005, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.373718254404887e-07, "epoch": 2.78, "percentage": 92.84, "elapsed_time": "1 day, 2:52:46", "remaining_time": "2:04:20"}
+{"current_steps": 20800, "total_steps": 22296, "loss": 0.0008, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 5.607580435660759e-07, "epoch": 2.8, "percentage": 93.29, "elapsed_time": "1 day, 3:00:23", "remaining_time": "1:56:32"}
+{"current_steps": 20900, "total_steps": 22296, "loss": 0.0005, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.889963241160366e-07, "epoch": 2.81, "percentage": 93.74, "elapsed_time": "1 day, 3:08:09", "remaining_time": "1:48:45"}
+{"current_steps": 21000, "total_steps": 22296, "loss": 0.0009, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.2210091435480324e-07, "epoch": 2.83, "percentage": 94.19, "elapsed_time": "1 day, 3:15:55", "remaining_time": "1:40:57"}
+{"current_steps": 21100, "total_steps": 22296, "loss": 0.0008, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.600850954105062e-07, "epoch": 2.84, "percentage": 94.64, "elapsed_time": "1 day, 3:23:43", "remaining_time": "1:33:10"}
+{"current_steps": 21200, "total_steps": 22296, "loss": 0.0026, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 3.029611796382281e-07, "epoch": 2.85, "percentage": 95.08, "elapsed_time": "1 day, 3:31:31", "remaining_time": "1:25:22"}
+{"current_steps": 21300, "total_steps": 22296, "loss": 0.0, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.507405081755454e-07, "epoch": 2.87, "percentage": 95.53, "elapsed_time": "1 day, 3:39:19", "remaining_time": "1:17:35"}
+{"current_steps": 21400, "total_steps": 22296, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.034334486909012e-07, "epoch": 2.88, "percentage": 95.98, "elapsed_time": "1 day, 3:47:15", "remaining_time": "1:09:48"}
+{"current_steps": 21500, "total_steps": 22296, "loss": 0.0011, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.6104939332527992e-07, "epoch": 2.89, "percentage": 96.43, "elapsed_time": "1 day, 3:55:02", "remaining_time": "1:02:00"}
+{"current_steps": 21600, "total_steps": 22296, "loss": 0.0022, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.2359675682749317e-07, "epoch": 2.91, "percentage": 96.88, "elapsed_time": "1 day, 4:02:32", "remaining_time": "0:54:12"}
+{"current_steps": 21700, "total_steps": 22296, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 9.108297488358564e-08, "epoch": 2.92, "percentage": 97.33, "elapsed_time": "1 day, 4:10:29", "remaining_time": "0:46:25"}
+{"current_steps": 21800, "total_steps": 22296, "loss": 0.0001, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 6.35145026405659e-08, "epoch": 2.93, "percentage": 97.78, "elapsed_time": "1 day, 4:18:29", "remaining_time": "0:38:38"}
+{"current_steps": 21900, "total_steps": 22296, "loss": 0.0012, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 4.0896813424834426e-08, "epoch": 2.95, "percentage": 98.22, "elapsed_time": "1 day, 4:26:11", "remaining_time": "0:30:51"}
+{"current_steps": 22000, "total_steps": 22296, "loss": 0.0026, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.3234397655541584e-08, "epoch": 2.96, "percentage": 98.67, "elapsed_time": "1 day, 4:33:56", "remaining_time": "0:23:03"}
+{"current_steps": 22100, "total_steps": 22296, "loss": 0.0026, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 1.0530761953062019e-08, "epoch": 2.97, "percentage": 99.12, "elapsed_time": "1 day, 4:41:53", "remaining_time": "0:15:16"}
+{"current_steps": 22200, "total_steps": 22296, "loss": 0.0017, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": 2.788428442818125e-09, "epoch": 2.99, "percentage": 99.57, "elapsed_time": "1 day, 4:49:38", "remaining_time": "0:07:28"}
+{"current_steps": 22296, "total_steps": 22296, "loss": null, "eval_loss": null, "predict_loss": null, "reward": null, "learning_rate": null, "epoch": 3.0, "percentage": 100.0, "elapsed_time": "1 day, 4:57:17", "remaining_time": "0:00:00"}
diff --git a/LLM-Detector-V7-11w/trainer_state.json b/LLM-Detector-V7-11w/trainer_state.json
new file mode 100644
index 0000000000000000000000000000000000000000..f7acc7849b517db5c012eda5295b4b8d8984fea3
--- /dev/null
+++ b/LLM-Detector-V7-11w/trainer_state.json
@@ -0,0 +1,1362 @@
+{
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 2.999697285661431,
+ "eval_steps": 500,
+ "global_step": 22296,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.01,
+ "learning_rate": 4.9997567688496474e-05,
+ "loss": 1.6283,
+ "step": 100
+ },
+ {
+ "epoch": 0.03,
+ "learning_rate": 4.9990172715142793e-05,
+ "loss": 0.068,
+ "step": 200
+ },
+ {
+ "epoch": 0.04,
+ "learning_rate": 4.997781629993153e-05,
+ "loss": 0.0404,
+ "step": 300
+ },
+ {
+ "epoch": 0.05,
+ "learning_rate": 4.9960500896052476e-05,
+ "loss": 0.0626,
+ "step": 400
+ },
+ {
+ "epoch": 0.07,
+ "learning_rate": 4.993822994123172e-05,
+ "loss": 0.0428,
+ "step": 500
+ },
+ {
+ "epoch": 0.08,
+ "learning_rate": 4.9911007857049264e-05,
+ "loss": 0.0265,
+ "step": 600
+ },
+ {
+ "epoch": 0.09,
+ "learning_rate": 4.987884004806111e-05,
+ "loss": 0.0263,
+ "step": 700
+ },
+ {
+ "epoch": 0.11,
+ "learning_rate": 4.984173290072626e-05,
+ "loss": 0.03,
+ "step": 800
+ },
+ {
+ "epoch": 0.12,
+ "learning_rate": 4.979969378213884e-05,
+ "loss": 0.0253,
+ "step": 900
+ },
+ {
+ "epoch": 0.13,
+ "learning_rate": 4.975273103856537e-05,
+ "loss": 0.038,
+ "step": 1000
+ },
+ {
+ "epoch": 0.15,
+ "learning_rate": 4.970085399378785e-05,
+ "loss": 0.0322,
+ "step": 1100
+ },
+ {
+ "epoch": 0.16,
+ "learning_rate": 4.964407294725254e-05,
+ "loss": 0.0296,
+ "step": 1200
+ },
+ {
+ "epoch": 0.17,
+ "learning_rate": 4.958239917202523e-05,
+ "loss": 0.0295,
+ "step": 1300
+ },
+ {
+ "epoch": 0.19,
+ "learning_rate": 4.9515844912553106e-05,
+ "loss": 0.0171,
+ "step": 1400
+ },
+ {
+ "epoch": 0.2,
+ "learning_rate": 4.944442338223378e-05,
+ "loss": 0.0205,
+ "step": 1500
+ },
+ {
+ "epoch": 0.22,
+ "learning_rate": 4.9368148760792e-05,
+ "loss": 0.029,
+ "step": 1600
+ },
+ {
+ "epoch": 0.23,
+ "learning_rate": 4.9287036191464414e-05,
+ "loss": 0.0172,
+ "step": 1700
+ },
+ {
+ "epoch": 0.24,
+ "learning_rate": 4.9201101777993116e-05,
+ "loss": 0.0235,
+ "step": 1800
+ },
+ {
+ "epoch": 0.26,
+ "learning_rate": 4.911036258142853e-05,
+ "loss": 0.0168,
+ "step": 1900
+ },
+ {
+ "epoch": 0.27,
+ "learning_rate": 4.9014836616742065e-05,
+ "loss": 0.0227,
+ "step": 2000
+ },
+ {
+ "epoch": 0.28,
+ "learning_rate": 4.891454284924965e-05,
+ "loss": 0.0227,
+ "step": 2100
+ },
+ {
+ "epoch": 0.3,
+ "learning_rate": 4.8810575041794e-05,
+ "loss": 0.0173,
+ "step": 2200
+ },
+ {
+ "epoch": 0.31,
+ "learning_rate": 4.8700853511049656e-05,
+ "loss": 0.0227,
+ "step": 2300
+ },
+ {
+ "epoch": 0.32,
+ "learning_rate": 4.858642651436035e-05,
+ "loss": 0.0144,
+ "step": 2400
+ },
+ {
+ "epoch": 0.34,
+ "learning_rate": 4.846731676957191e-05,
+ "loss": 0.03,
+ "step": 2500
+ },
+ {
+ "epoch": 0.35,
+ "learning_rate": 4.834354792422293e-05,
+ "loss": 0.0189,
+ "step": 2600
+ },
+ {
+ "epoch": 0.36,
+ "learning_rate": 4.821514455084985e-05,
+ "loss": 0.0111,
+ "step": 2700
+ },
+ {
+ "epoch": 0.38,
+ "learning_rate": 4.8082132142108465e-05,
+ "loss": 0.0152,
+ "step": 2800
+ },
+ {
+ "epoch": 0.39,
+ "learning_rate": 4.794453710571272e-05,
+ "loss": 0.0182,
+ "step": 2900
+ },
+ {
+ "epoch": 0.4,
+ "learning_rate": 4.780238675919182e-05,
+ "loss": 0.0111,
+ "step": 3000
+ },
+ {
+ "epoch": 0.42,
+ "learning_rate": 4.765570932446672e-05,
+ "loss": 0.0138,
+ "step": 3100
+ },
+ {
+ "epoch": 0.43,
+ "learning_rate": 4.75045339222471e-05,
+ "loss": 0.0256,
+ "step": 3200
+ },
+ {
+ "epoch": 0.44,
+ "learning_rate": 4.734889056624983e-05,
+ "loss": 0.0199,
+ "step": 3300
+ },
+ {
+ "epoch": 0.46,
+ "learning_rate": 4.718881015724017e-05,
+ "loss": 0.0159,
+ "step": 3400
+ },
+ {
+ "epoch": 0.47,
+ "learning_rate": 4.702432447689692e-05,
+ "loss": 0.0121,
+ "step": 3500
+ },
+ {
+ "epoch": 0.48,
+ "learning_rate": 4.6855466181502544e-05,
+ "loss": 0.0181,
+ "step": 3600
+ },
+ {
+ "epoch": 0.5,
+ "learning_rate": 4.66822687954598e-05,
+ "loss": 0.0148,
+ "step": 3700
+ },
+ {
+ "epoch": 0.51,
+ "learning_rate": 4.65047667046359e-05,
+ "loss": 0.0152,
+ "step": 3800
+ },
+ {
+ "epoch": 0.52,
+ "learning_rate": 4.632299514953571e-05,
+ "loss": 0.0103,
+ "step": 3900
+ },
+ {
+ "epoch": 0.54,
+ "learning_rate": 4.613699021830524e-05,
+ "loss": 0.0217,
+ "step": 4000
+ },
+ {
+ "epoch": 0.55,
+ "learning_rate": 4.59467888395669e-05,
+ "loss": 0.0142,
+ "step": 4100
+ },
+ {
+ "epoch": 0.57,
+ "learning_rate": 4.575242877508777e-05,
+ "loss": 0.0151,
+ "step": 4200
+ },
+ {
+ "epoch": 0.58,
+ "learning_rate": 4.5553948612282607e-05,
+ "loss": 0.0164,
+ "step": 4300
+ },
+ {
+ "epoch": 0.59,
+ "learning_rate": 4.5353433433160075e-05,
+ "loss": 0.0095,
+ "step": 4400
+ },
+ {
+ "epoch": 0.61,
+ "learning_rate": 4.514687230313633e-05,
+ "loss": 0.015,
+ "step": 4500
+ },
+ {
+ "epoch": 0.62,
+ "learning_rate": 4.49363112993749e-05,
+ "loss": 0.015,
+ "step": 4600
+ },
+ {
+ "epoch": 0.63,
+ "learning_rate": 4.472179222575561e-05,
+ "loss": 0.0181,
+ "step": 4700
+ },
+ {
+ "epoch": 0.65,
+ "learning_rate": 4.4503357671976574e-05,
+ "loss": 0.0175,
+ "step": 4800
+ },
+ {
+ "epoch": 0.66,
+ "learning_rate": 4.428105100509852e-05,
+ "loss": 0.0069,
+ "step": 4900
+ },
+ {
+ "epoch": 0.67,
+ "learning_rate": 4.4054916360934957e-05,
+ "loss": 0.0108,
+ "step": 5000
+ },
+ {
+ "epoch": 0.69,
+ "learning_rate": 4.3824998635289594e-05,
+ "loss": 0.0204,
+ "step": 5100
+ },
+ {
+ "epoch": 0.7,
+ "learning_rate": 4.3591343475042946e-05,
+ "loss": 0.0192,
+ "step": 5200
+ },
+ {
+ "epoch": 0.71,
+ "learning_rate": 4.3353997269089774e-05,
+ "loss": 0.0086,
+ "step": 5300
+ },
+ {
+ "epoch": 0.73,
+ "learning_rate": 4.311300713912926e-05,
+ "loss": 0.0179,
+ "step": 5400
+ },
+ {
+ "epoch": 0.74,
+ "learning_rate": 4.286842093030963e-05,
+ "loss": 0.0142,
+ "step": 5500
+ },
+ {
+ "epoch": 0.75,
+ "learning_rate": 4.262028720172921e-05,
+ "loss": 0.0187,
+ "step": 5600
+ },
+ {
+ "epoch": 0.77,
+ "learning_rate": 4.2368655216795735e-05,
+ "loss": 0.0176,
+ "step": 5700
+ },
+ {
+ "epoch": 0.78,
+ "learning_rate": 4.211357493344573e-05,
+ "loss": 0.0145,
+ "step": 5800
+ },
+ {
+ "epoch": 0.79,
+ "learning_rate": 4.185509699422615e-05,
+ "loss": 0.0087,
+ "step": 5900
+ },
+ {
+ "epoch": 0.81,
+ "learning_rate": 4.1593272716239985e-05,
+ "loss": 0.0136,
+ "step": 6000
+ },
+ {
+ "epoch": 0.82,
+ "learning_rate": 4.132815408095797e-05,
+ "loss": 0.0137,
+ "step": 6100
+ },
+ {
+ "epoch": 0.83,
+ "learning_rate": 4.105979372389847e-05,
+ "loss": 0.011,
+ "step": 6200
+ },
+ {
+ "epoch": 0.85,
+ "learning_rate": 4.0788244924177365e-05,
+ "loss": 0.024,
+ "step": 6300
+ },
+ {
+ "epoch": 0.86,
+ "learning_rate": 4.0513561593930325e-05,
+ "loss": 0.0067,
+ "step": 6400
+ },
+ {
+ "epoch": 0.87,
+ "learning_rate": 4.023579826760924e-05,
+ "loss": 0.0102,
+ "step": 6500
+ },
+ {
+ "epoch": 0.89,
+ "learning_rate": 3.995501009115527e-05,
+ "loss": 0.013,
+ "step": 6600
+ },
+ {
+ "epoch": 0.9,
+ "learning_rate": 3.9674104896418544e-05,
+ "loss": 0.0133,
+ "step": 6700
+ },
+ {
+ "epoch": 0.91,
+ "learning_rate": 3.9387463695523555e-05,
+ "loss": 0.0087,
+ "step": 6800
+ },
+ {
+ "epoch": 0.93,
+ "learning_rate": 3.9097966069208193e-05,
+ "loss": 0.01,
+ "step": 6900
+ },
+ {
+ "epoch": 0.94,
+ "learning_rate": 3.880566949309125e-05,
+ "loss": 0.0113,
+ "step": 7000
+ },
+ {
+ "epoch": 0.96,
+ "learning_rate": 3.8513595749088484e-05,
+ "loss": 0.0101,
+ "step": 7100
+ },
+ {
+ "epoch": 0.97,
+ "learning_rate": 3.821590244319273e-05,
+ "loss": 0.0092,
+ "step": 7200
+ },
+ {
+ "epoch": 0.98,
+ "learning_rate": 3.791558530862982e-05,
+ "loss": 0.0176,
+ "step": 7300
+ },
+ {
+ "epoch": 1.0,
+ "learning_rate": 3.76127039690774e-05,
+ "loss": 0.0114,
+ "step": 7400
+ },
+ {
+ "epoch": 1.01,
+ "learning_rate": 3.7307318557299355e-05,
+ "loss": 0.0083,
+ "step": 7500
+ },
+ {
+ "epoch": 1.02,
+ "learning_rate": 3.699948970320738e-05,
+ "loss": 0.0067,
+ "step": 7600
+ },
+ {
+ "epoch": 1.04,
+ "learning_rate": 3.668927852182374e-05,
+ "loss": 0.0021,
+ "step": 7700
+ },
+ {
+ "epoch": 1.05,
+ "learning_rate": 3.637674660114774e-05,
+ "loss": 0.0087,
+ "step": 7800
+ },
+ {
+ "epoch": 1.06,
+ "learning_rate": 3.606195598992832e-05,
+ "loss": 0.005,
+ "step": 7900
+ },
+ {
+ "epoch": 1.08,
+ "learning_rate": 3.574496918534519e-05,
+ "loss": 0.014,
+ "step": 8000
+ },
+ {
+ "epoch": 1.09,
+ "learning_rate": 3.542584912060087e-05,
+ "loss": 0.0073,
+ "step": 8100
+ },
+ {
+ "epoch": 1.1,
+ "learning_rate": 3.510465915242623e-05,
+ "loss": 0.0073,
+ "step": 8200
+ },
+ {
+ "epoch": 1.12,
+ "learning_rate": 3.478146304850188e-05,
+ "loss": 0.0106,
+ "step": 8300
+ },
+ {
+ "epoch": 1.13,
+ "learning_rate": 3.4456324974798025e-05,
+ "loss": 0.0106,
+ "step": 8400
+ },
+ {
+ "epoch": 1.14,
+ "learning_rate": 3.412930948283528e-05,
+ "loss": 0.0036,
+ "step": 8500
+ },
+ {
+ "epoch": 1.16,
+ "learning_rate": 3.380048149686877e-05,
+ "loss": 0.0051,
+ "step": 8600
+ },
+ {
+ "epoch": 1.17,
+ "learning_rate": 3.3469906300998476e-05,
+ "loss": 0.0047,
+ "step": 8700
+ },
+ {
+ "epoch": 1.18,
+ "learning_rate": 3.313764952620792e-05,
+ "loss": 0.0035,
+ "step": 8800
+ },
+ {
+ "epoch": 1.2,
+ "learning_rate": 3.280377713733412e-05,
+ "loss": 0.0094,
+ "step": 8900
+ },
+ {
+ "epoch": 1.21,
+ "learning_rate": 3.246835541997116e-05,
+ "loss": 0.0077,
+ "step": 9000
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 3.213145096731021e-05,
+ "loss": 0.0076,
+ "step": 9100
+ },
+ {
+ "epoch": 1.24,
+ "learning_rate": 3.1793130666918324e-05,
+ "loss": 0.0074,
+ "step": 9200
+ },
+ {
+ "epoch": 1.25,
+ "learning_rate": 3.145346168745891e-05,
+ "loss": 0.0072,
+ "step": 9300
+ },
+ {
+ "epoch": 1.26,
+ "learning_rate": 3.111251146535634e-05,
+ "loss": 0.0043,
+ "step": 9400
+ },
+ {
+ "epoch": 1.28,
+ "learning_rate": 3.077377511336779e-05,
+ "loss": 0.0071,
+ "step": 9500
+ },
+ {
+ "epoch": 1.29,
+ "learning_rate": 3.043047683848379e-05,
+ "loss": 0.0102,
+ "step": 9600
+ },
+ {
+ "epoch": 1.31,
+ "learning_rate": 3.0086100419987084e-05,
+ "loss": 0.005,
+ "step": 9700
+ },
+ {
+ "epoch": 1.32,
+ "learning_rate": 2.974071422889685e-05,
+ "loss": 0.0155,
+ "step": 9800
+ },
+ {
+ "epoch": 1.33,
+ "learning_rate": 2.9394386836708154e-05,
+ "loss": 0.0086,
+ "step": 9900
+ },
+ {
+ "epoch": 1.35,
+ "learning_rate": 2.9047187001778132e-05,
+ "loss": 0.0048,
+ "step": 10000
+ },
+ {
+ "epoch": 1.36,
+ "learning_rate": 2.8699183655674938e-05,
+ "loss": 0.0085,
+ "step": 10100
+ },
+ {
+ "epoch": 1.37,
+ "learning_rate": 2.835044588949241e-05,
+ "loss": 0.0091,
+ "step": 10200
+ },
+ {
+ "epoch": 1.39,
+ "learning_rate": 2.8001042940132995e-05,
+ "loss": 0.0089,
+ "step": 10300
+ },
+ {
+ "epoch": 1.4,
+ "learning_rate": 2.7651044176561748e-05,
+ "loss": 0.0093,
+ "step": 10400
+ },
+ {
+ "epoch": 1.41,
+ "learning_rate": 2.7300519086034166e-05,
+ "loss": 0.0073,
+ "step": 10500
+ },
+ {
+ "epoch": 1.43,
+ "learning_rate": 2.6949537260300395e-05,
+ "loss": 0.0029,
+ "step": 10600
+ },
+ {
+ "epoch": 1.44,
+ "learning_rate": 2.6598168381788864e-05,
+ "loss": 0.0027,
+ "step": 10700
+ },
+ {
+ "epoch": 1.45,
+ "learning_rate": 2.6246482209771783e-05,
+ "loss": 0.011,
+ "step": 10800
+ },
+ {
+ "epoch": 1.47,
+ "learning_rate": 2.5894548566515485e-05,
+ "loss": 0.0108,
+ "step": 10900
+ },
+ {
+ "epoch": 1.48,
+ "learning_rate": 2.554243732341816e-05,
+ "loss": 0.0033,
+ "step": 11000
+ },
+ {
+ "epoch": 1.49,
+ "learning_rate": 2.519021838713791e-05,
+ "loss": 0.0048,
+ "step": 11100
+ },
+ {
+ "epoch": 1.51,
+ "learning_rate": 2.483796168571378e-05,
+ "loss": 0.0015,
+ "step": 11200
+ },
+ {
+ "epoch": 1.52,
+ "learning_rate": 2.4485737154682554e-05,
+ "loss": 0.0053,
+ "step": 11300
+ },
+ {
+ "epoch": 1.53,
+ "learning_rate": 2.4133614723194047e-05,
+ "loss": 0.0043,
+ "step": 11400
+ },
+ {
+ "epoch": 1.55,
+ "learning_rate": 2.378166430012766e-05,
+ "loss": 0.0086,
+ "step": 11500
+ },
+ {
+ "epoch": 1.56,
+ "learning_rate": 2.3429955760212966e-05,
+ "loss": 0.0077,
+ "step": 11600
+ },
+ {
+ "epoch": 1.57,
+ "learning_rate": 2.307855893015712e-05,
+ "loss": 0.0066,
+ "step": 11700
+ },
+ {
+ "epoch": 1.59,
+ "learning_rate": 2.272754357478172e-05,
+ "loss": 0.0055,
+ "step": 11800
+ },
+ {
+ "epoch": 1.6,
+ "learning_rate": 2.237697938317202e-05,
+ "loss": 0.0067,
+ "step": 11900
+ },
+ {
+ "epoch": 1.61,
+ "learning_rate": 2.202693595484113e-05,
+ "loss": 0.0082,
+ "step": 12000
+ },
+ {
+ "epoch": 1.63,
+ "learning_rate": 2.1680974167907623e-05,
+ "loss": 0.0101,
+ "step": 12100
+ },
+ {
+ "epoch": 1.64,
+ "learning_rate": 2.1332173697925385e-05,
+ "loss": 0.0019,
+ "step": 12200
+ },
+ {
+ "epoch": 1.65,
+ "learning_rate": 2.09841014224678e-05,
+ "loss": 0.0106,
+ "step": 12300
+ },
+ {
+ "epoch": 1.67,
+ "learning_rate": 2.063682644631365e-05,
+ "loss": 0.0051,
+ "step": 12400
+ },
+ {
+ "epoch": 1.68,
+ "learning_rate": 2.0290417715949304e-05,
+ "loss": 0.0076,
+ "step": 12500
+ },
+ {
+ "epoch": 1.7,
+ "learning_rate": 1.994494400588042e-05,
+ "loss": 0.0037,
+ "step": 12600
+ },
+ {
+ "epoch": 1.71,
+ "learning_rate": 1.9600473904977725e-05,
+ "loss": 0.0073,
+ "step": 12700
+ },
+ {
+ "epoch": 1.72,
+ "learning_rate": 1.9257075802859685e-05,
+ "loss": 0.0052,
+ "step": 12800
+ },
+ {
+ "epoch": 1.74,
+ "learning_rate": 1.89148178763147e-05,
+ "loss": 0.0038,
+ "step": 12900
+ },
+ {
+ "epoch": 1.75,
+ "learning_rate": 1.857376807576563e-05,
+ "loss": 0.0044,
+ "step": 13000
+ },
+ {
+ "epoch": 1.76,
+ "learning_rate": 1.8233994111779146e-05,
+ "loss": 0.0031,
+ "step": 13100
+ },
+ {
+ "epoch": 1.78,
+ "learning_rate": 1.7895563441622803e-05,
+ "loss": 0.0067,
+ "step": 13200
+ },
+ {
+ "epoch": 1.79,
+ "learning_rate": 1.7558543255872352e-05,
+ "loss": 0.0096,
+ "step": 13300
+ },
+ {
+ "epoch": 1.8,
+ "learning_rate": 1.7223000465072e-05,
+ "loss": 0.0063,
+ "step": 13400
+ },
+ {
+ "epoch": 1.82,
+ "learning_rate": 1.68890016864503e-05,
+ "loss": 0.0064,
+ "step": 13500
+ },
+ {
+ "epoch": 1.83,
+ "learning_rate": 1.6556613230694243e-05,
+ "loss": 0.0047,
+ "step": 13600
+ },
+ {
+ "epoch": 1.84,
+ "learning_rate": 1.6225901088784207e-05,
+ "loss": 0.0023,
+ "step": 13700
+ },
+ {
+ "epoch": 1.86,
+ "learning_rate": 1.58969309188924e-05,
+ "loss": 0.0064,
+ "step": 13800
+ },
+ {
+ "epoch": 1.87,
+ "learning_rate": 1.556976803334737e-05,
+ "loss": 0.004,
+ "step": 13900
+ },
+ {
+ "epoch": 1.88,
+ "learning_rate": 1.5244477385667167e-05,
+ "loss": 0.0016,
+ "step": 14000
+ },
+ {
+ "epoch": 1.9,
+ "learning_rate": 1.4921123557663763e-05,
+ "loss": 0.008,
+ "step": 14100
+ },
+ {
+ "epoch": 1.91,
+ "learning_rate": 1.4599770746621238e-05,
+ "loss": 0.0047,
+ "step": 14200
+ },
+ {
+ "epoch": 1.92,
+ "learning_rate": 1.4280482752550327e-05,
+ "loss": 0.0056,
+ "step": 14300
+ },
+ {
+ "epoch": 1.94,
+ "learning_rate": 1.396332296552188e-05,
+ "loss": 0.0065,
+ "step": 14400
+ },
+ {
+ "epoch": 1.95,
+ "learning_rate": 1.3648354353081588e-05,
+ "loss": 0.004,
+ "step": 14500
+ },
+ {
+ "epoch": 1.96,
+ "learning_rate": 1.333563944774876e-05,
+ "loss": 0.0065,
+ "step": 14600
+ },
+ {
+ "epoch": 1.98,
+ "learning_rate": 1.3028332659827522e-05,
+ "loss": 0.0026,
+ "step": 14700
+ },
+ {
+ "epoch": 1.99,
+ "learning_rate": 1.2720286886495473e-05,
+ "loss": 0.008,
+ "step": 14800
+ },
+ {
+ "epoch": 2.0,
+ "learning_rate": 1.2414679074810755e-05,
+ "loss": 0.0053,
+ "step": 14900
+ },
+ {
+ "epoch": 2.02,
+ "learning_rate": 1.211156989883942e-05,
+ "loss": 0.0024,
+ "step": 15000
+ },
+ {
+ "epoch": 2.03,
+ "learning_rate": 1.1811019536579015e-05,
+ "loss": 0.0014,
+ "step": 15100
+ },
+ {
+ "epoch": 2.05,
+ "learning_rate": 1.1513087658011207e-05,
+ "loss": 0.0005,
+ "step": 15200
+ },
+ {
+ "epoch": 2.06,
+ "learning_rate": 1.1217833413255133e-05,
+ "loss": 0.0002,
+ "step": 15300
+ },
+ {
+ "epoch": 2.07,
+ "learning_rate": 1.0925315420823949e-05,
+ "loss": 0.0023,
+ "step": 15400
+ },
+ {
+ "epoch": 2.09,
+ "learning_rate": 1.063559175598702e-05,
+ "loss": 0.0003,
+ "step": 15500
+ },
+ {
+ "epoch": 2.1,
+ "learning_rate": 1.0348719939239885e-05,
+ "loss": 0.0009,
+ "step": 15600
+ },
+ {
+ "epoch": 2.11,
+ "learning_rate": 1.0064756924884342e-05,
+ "loss": 0.0059,
+ "step": 15700
+ },
+ {
+ "epoch": 2.13,
+ "learning_rate": 9.783759089721054e-06,
+ "loss": 0.0005,
+ "step": 15800
+ },
+ {
+ "epoch": 2.14,
+ "learning_rate": 9.505782221856696e-06,
+ "loss": 0.0009,
+ "step": 15900
+ },
+ {
+ "epoch": 2.15,
+ "learning_rate": 9.230881509628037e-06,
+ "loss": 0.0013,
+ "step": 16000
+ },
+ {
+ "epoch": 2.17,
+ "learning_rate": 8.959111530645085e-06,
+ "loss": 0.0017,
+ "step": 16100
+ },
+ {
+ "epoch": 2.18,
+ "learning_rate": 8.690526240955454e-06,
+ "loss": 0.0025,
+ "step": 16200
+ },
+ {
+ "epoch": 2.19,
+ "learning_rate": 8.425178964332225e-06,
+ "loss": 0.005,
+ "step": 16300
+ },
+ {
+ "epoch": 2.21,
+ "learning_rate": 8.163122381687191e-06,
+ "loss": 0.0039,
+ "step": 16400
+ },
+ {
+ "epoch": 2.22,
+ "learning_rate": 7.90440852061189e-06,
+ "loss": 0.0002,
+ "step": 16500
+ },
+ {
+ "epoch": 2.23,
+ "learning_rate": 7.64908874504823e-06,
+ "loss": 0.0014,
+ "step": 16600
+ },
+ {
+ "epoch": 2.25,
+ "learning_rate": 7.3972137450908895e-06,
+ "loss": 0.0008,
+ "step": 16700
+ },
+ {
+ "epoch": 2.26,
+ "learning_rate": 7.148833526923606e-06,
+ "loss": 0.0031,
+ "step": 16800
+ },
+ {
+ "epoch": 2.27,
+ "learning_rate": 6.9039974028910575e-06,
+ "loss": 0.0022,
+ "step": 16900
+ },
+ {
+ "epoch": 2.29,
+ "learning_rate": 6.6627539817086775e-06,
+ "loss": 0.0006,
+ "step": 17000
+ },
+ {
+ "epoch": 2.3,
+ "learning_rate": 6.425151158812067e-06,
+ "loss": 0.0001,
+ "step": 17100
+ },
+ {
+ "epoch": 2.31,
+ "learning_rate": 6.191236106848014e-06,
+ "loss": 0.0001,
+ "step": 17200
+ },
+ {
+ "epoch": 2.33,
+ "learning_rate": 5.961055266309076e-06,
+ "loss": 0.0055,
+ "step": 17300
+ },
+ {
+ "epoch": 2.34,
+ "learning_rate": 5.73465433631348e-06,
+ "loss": 0.0006,
+ "step": 17400
+ },
+ {
+ "epoch": 2.35,
+ "learning_rate": 5.512078265532167e-06,
+ "loss": 0.0001,
+ "step": 17500
+ },
+ {
+ "epoch": 2.37,
+ "learning_rate": 5.293371243264947e-06,
+ "loss": 0.0,
+ "step": 17600
+ },
+ {
+ "epoch": 2.38,
+ "learning_rate": 5.078576690667289e-06,
+ "loss": 0.001,
+ "step": 17700
+ },
+ {
+ "epoch": 2.39,
+ "learning_rate": 4.8677372521297e-06,
+ "loss": 0.0003,
+ "step": 17800
+ },
+ {
+ "epoch": 2.41,
+ "learning_rate": 4.660894786811287e-06,
+ "loss": 0.0027,
+ "step": 17900
+ },
+ {
+ "epoch": 2.42,
+ "learning_rate": 4.4580903603291895e-06,
+ "loss": 0.0027,
+ "step": 18000
+ },
+ {
+ "epoch": 2.44,
+ "learning_rate": 4.259364236605634e-06,
+ "loss": 0.0004,
+ "step": 18100
+ },
+ {
+ "epoch": 2.45,
+ "learning_rate": 4.064755869874084e-06,
+ "loss": 0.0008,
+ "step": 18200
+ },
+ {
+ "epoch": 2.46,
+ "learning_rate": 3.874303896846149e-06,
+ "loss": 0.0011,
+ "step": 18300
+ },
+ {
+ "epoch": 2.48,
+ "learning_rate": 3.6880461290408507e-06,
+ "loss": 0.0004,
+ "step": 18400
+ },
+ {
+ "epoch": 2.49,
+ "learning_rate": 3.5060195452776557e-06,
+ "loss": 0.0015,
+ "step": 18500
+ },
+ {
+ "epoch": 2.5,
+ "learning_rate": 3.3282602843348943e-06,
+ "loss": 0.0003,
+ "step": 18600
+ },
+ {
+ "epoch": 2.52,
+ "learning_rate": 3.1548036377748863e-06,
+ "loss": 0.0001,
+ "step": 18700
+ },
+ {
+ "epoch": 2.53,
+ "learning_rate": 2.9856840429373144e-06,
+ "loss": 0.0,
+ "step": 18800
+ },
+ {
+ "epoch": 2.54,
+ "learning_rate": 2.820935076102205e-06,
+ "loss": 0.0021,
+ "step": 18900
+ },
+ {
+ "epoch": 2.56,
+ "learning_rate": 2.6621710003546298e-06,
+ "loss": 0.0019,
+ "step": 19000
+ },
+ {
+ "epoch": 2.57,
+ "learning_rate": 2.50621603456484e-06,
+ "loss": 0.0,
+ "step": 19100
+ },
+ {
+ "epoch": 2.58,
+ "learning_rate": 2.356219574465643e-06,
+ "loss": 0.0013,
+ "step": 19200
+ },
+ {
+ "epoch": 2.6,
+ "learning_rate": 2.2091812188310717e-06,
+ "loss": 0.0014,
+ "step": 19300
+ },
+ {
+ "epoch": 2.61,
+ "learning_rate": 2.0666676547438866e-06,
+ "loss": 0.0013,
+ "step": 19400
+ },
+ {
+ "epoch": 2.62,
+ "learning_rate": 1.9287071762367076e-06,
+ "loss": 0.0007,
+ "step": 19500
+ },
+ {
+ "epoch": 2.64,
+ "learning_rate": 1.7953271733920502e-06,
+ "loss": 0.0003,
+ "step": 19600
+ },
+ {
+ "epoch": 2.65,
+ "learning_rate": 1.6665541269044155e-06,
+ "loss": 0.0016,
+ "step": 19700
+ },
+ {
+ "epoch": 2.66,
+ "learning_rate": 1.5424136028229118e-06,
+ "loss": 0.0007,
+ "step": 19800
+ },
+ {
+ "epoch": 2.68,
+ "learning_rate": 1.4229302474755013e-06,
+ "loss": 0.0027,
+ "step": 19900
+ },
+ {
+ "epoch": 2.69,
+ "learning_rate": 1.3081277825757992e-06,
+ "loss": 0.0011,
+ "step": 20000
+ },
+ {
+ "epoch": 2.7,
+ "learning_rate": 1.1980290005134843e-06,
+ "loss": 0.0017,
+ "step": 20100
+ },
+ {
+ "epoch": 2.72,
+ "learning_rate": 1.0926557598292002e-06,
+ "loss": 0.0001,
+ "step": 20200
+ },
+ {
+ "epoch": 2.73,
+ "learning_rate": 9.92028980874829e-07,
+ "loss": 0.0025,
+ "step": 20300
+ },
+ {
+ "epoch": 2.74,
+ "learning_rate": 8.961686416600834e-07,
+ "loss": 0.0006,
+ "step": 20400
+ },
+ {
+ "epoch": 2.76,
+ "learning_rate": 8.05093773886137e-07,
+ "loss": 0.0023,
+ "step": 20500
+ },
+ {
+ "epoch": 2.77,
+ "learning_rate": 7.18822459167165e-07,
+ "loss": 0.0009,
+ "step": 20600
+ },
+ {
+ "epoch": 2.78,
+ "learning_rate": 6.373718254404887e-07,
+ "loss": 0.0005,
+ "step": 20700
+ },
+ {
+ "epoch": 2.8,
+ "learning_rate": 5.607580435660759e-07,
+ "loss": 0.0008,
+ "step": 20800
+ },
+ {
+ "epoch": 2.81,
+ "learning_rate": 4.889963241160366e-07,
+ "loss": 0.0005,
+ "step": 20900
+ },
+ {
+ "epoch": 2.83,
+ "learning_rate": 4.2210091435480324e-07,
+ "loss": 0.0009,
+ "step": 21000
+ },
+ {
+ "epoch": 2.84,
+ "learning_rate": 3.600850954105062e-07,
+ "loss": 0.0008,
+ "step": 21100
+ },
+ {
+ "epoch": 2.85,
+ "learning_rate": 3.029611796382281e-07,
+ "loss": 0.0026,
+ "step": 21200
+ },
+ {
+ "epoch": 2.87,
+ "learning_rate": 2.507405081755454e-07,
+ "loss": 0.0,
+ "step": 21300
+ },
+ {
+ "epoch": 2.88,
+ "learning_rate": 2.034334486909012e-07,
+ "loss": 0.0001,
+ "step": 21400
+ },
+ {
+ "epoch": 2.89,
+ "learning_rate": 1.6104939332527992e-07,
+ "loss": 0.0011,
+ "step": 21500
+ },
+ {
+ "epoch": 2.91,
+ "learning_rate": 1.2359675682749317e-07,
+ "loss": 0.0022,
+ "step": 21600
+ },
+ {
+ "epoch": 2.92,
+ "learning_rate": 9.108297488358564e-08,
+ "loss": 0.0001,
+ "step": 21700
+ },
+ {
+ "epoch": 2.93,
+ "learning_rate": 6.35145026405659e-08,
+ "loss": 0.0001,
+ "step": 21800
+ },
+ {
+ "epoch": 2.95,
+ "learning_rate": 4.0896813424834426e-08,
+ "loss": 0.0012,
+ "step": 21900
+ },
+ {
+ "epoch": 2.96,
+ "learning_rate": 2.3234397655541584e-08,
+ "loss": 0.0026,
+ "step": 22000
+ },
+ {
+ "epoch": 2.97,
+ "learning_rate": 1.0530761953062019e-08,
+ "loss": 0.0026,
+ "step": 22100
+ },
+ {
+ "epoch": 2.99,
+ "learning_rate": 2.788428442818125e-09,
+ "loss": 0.0017,
+ "step": 22200
+ },
+ {
+ "epoch": 3.0,
+ "step": 22296,
+ "total_flos": 6.236837368994857e+18,
+ "train_loss": 0.016247458042738187,
+ "train_runtime": 104237.6159,
+ "train_samples_per_second": 3.423,
+ "train_steps_per_second": 0.214
+ }
+ ],
+ "logging_steps": 100,
+ "max_steps": 22296,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 3000,
+ "total_flos": 6.236837368994857e+18,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+}
diff --git a/LLM-Detector-V7-11w/training_args.bin b/LLM-Detector-V7-11w/training_args.bin
new file mode 100644
index 0000000000000000000000000000000000000000..ac97b1c26d12cbfaab826f907d39398233627e61
--- /dev/null
+++ b/LLM-Detector-V7-11w/training_args.bin
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fda912c16bf2c6e8ac3158b354c96cabf8fd0eff786de104f90c23c31741ef25
+size 4856
diff --git a/LLM-Detector-V7-11w/training_loss.png b/LLM-Detector-V7-11w/training_loss.png
new file mode 100644
index 0000000000000000000000000000000000000000..19739febfc3dc55973ee32b1a9a3c0804ef68fff
Binary files /dev/null and b/LLM-Detector-V7-11w/training_loss.png differ