bombaygamercc committed
Commit c50ab00 • Parent(s): 6425c2c
Upload 13 files
Files changed:
- README.md +194 -3
- config.json +25 -0
- generation_config.json +6 -0
- gitattributes +35 -0
- model-00001-of-00003.safetensors +3 -0
- model.py +318 -0
- model.safetensors.index.json +298 -0
- params.json +11 -0
- special_tokens_map.json +23 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer.model.v3 +0 -0
- tokenizer_config.json +0 -0
README.md
CHANGED
@@ -1,3 +1,194 @@
- ---
- license:
---
license: apache-2.0
base_model: mistralai/Mistral-7B-v0.3
extra_gated_description: If you want to learn more about how we process your personal data, please read our <a href="https://mistral.ai/terms/">Privacy Policy</a>.
---

# Model Card for Mistral-7B-Instruct-v0.3

The Mistral-7B-Instruct-v0.3 Large Language Model (LLM) is an instruct fine-tuned version of Mistral-7B-v0.3.

Mistral-7B-v0.3 has the following changes compared to [Mistral-7B-v0.2](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2):

- Extended vocabulary to 32768
- Supports v3 Tokenizer
- Supports function calling

## Installation

It is recommended to use `mistralai/Mistral-7B-Instruct-v0.3` with [mistral-inference](https://github.com/mistralai/mistral-inference). For Hugging Face `transformers` code snippets, see further below.

```
pip install mistral_inference
```

## Download

```py
from huggingface_hub import snapshot_download
from pathlib import Path

mistral_models_path = Path.home().joinpath('mistral_models', '7B-Instruct-v0.3')
mistral_models_path.mkdir(parents=True, exist_ok=True)

snapshot_download(repo_id="mistralai/Mistral-7B-Instruct-v0.3", allow_patterns=["params.json", "consolidated.safetensors", "tokenizer.model.v3"], local_dir=mistral_models_path)
```
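Note that access to the upstream `mistralai/Mistral-7B-Instruct-v0.3` repository is gated, so `snapshot_download` may first need an authenticated session. A minimal sketch using `huggingface_hub` (the token string is a placeholder for your own access token):

```py
from huggingface_hub import login

login(token="hf_...")  # placeholder; paste your own Hugging Face access token
```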

### Chat

After installing `mistral_inference`, a `mistral-chat` CLI command should be available in your environment. You can chat with the model using

```
mistral-chat $HOME/mistral_models/7B-Instruct-v0.3 --instruct --max_tokens 256
```

### Instruct following

```py
from mistral_inference.transformer import Transformer
from mistral_inference.generate import generate

from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
from mistral_common.protocol.instruct.messages import UserMessage
from mistral_common.protocol.instruct.request import ChatCompletionRequest


tokenizer = MistralTokenizer.from_file(f"{mistral_models_path}/tokenizer.model.v3")
model = Transformer.from_folder(mistral_models_path)

completion_request = ChatCompletionRequest(messages=[UserMessage(content="Explain Machine Learning to me in a nutshell.")])

tokens = tokenizer.encode_chat_completion(completion_request).tokens

out_tokens, _ = generate([tokens], model, max_tokens=64, temperature=0.0, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
result = tokenizer.instruct_tokenizer.tokenizer.decode(out_tokens[0])

print(result)
```

### Function calling

```py
from mistral_common.protocol.instruct.tool_calls import Function, Tool
from mistral_inference.transformer import Transformer
from mistral_inference.generate import generate

from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
from mistral_common.protocol.instruct.messages import UserMessage
from mistral_common.protocol.instruct.request import ChatCompletionRequest


tokenizer = MistralTokenizer.from_file(f"{mistral_models_path}/tokenizer.model.v3")
model = Transformer.from_folder(mistral_models_path)

completion_request = ChatCompletionRequest(
    tools=[
        Tool(
            function=Function(
                name="get_current_weather",
                description="Get the current weather",
                parameters={
                    "type": "object",
                    "properties": {
                        "location": {
                            "type": "string",
                            "description": "The city and state, e.g. San Francisco, CA",
                        },
                        "format": {
                            "type": "string",
                            "enum": ["celsius", "fahrenheit"],
                            "description": "The temperature unit to use. Infer this from the user's location.",
                        },
                    },
                    "required": ["location", "format"],
                },
            )
        )
    ],
    messages=[
        UserMessage(content="What's the weather like today in Paris?"),
    ],
)

tokens = tokenizer.encode_chat_completion(completion_request).tokens

out_tokens, _ = generate([tokens], model, max_tokens=64, temperature=0.0, eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id)
result = tokenizer.instruct_tokenizer.tokenizer.decode(out_tokens[0])

print(result)
```

## Generate with `transformers`

If you want to use Hugging Face `transformers` to generate text, you can do something like this.

```py
from transformers import pipeline

messages = [
    {"role": "system", "content": "You are a pirate chatbot who always responds in pirate speak!"},
    {"role": "user", "content": "Who are you?"},
]
chatbot = pipeline("text-generation", model="mistralai/Mistral-7B-Instruct-v0.3")
chatbot(messages)
```
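The pipeline also forwards generation keyword arguments, so the output length can be capped much as in the CLI example above; a small sketch:

```py
chatbot(messages, max_new_tokens=256)
```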

## Function calling with `transformers`

To use this example, you'll need `transformers` version 4.42.0 or higher. Please see the
[function calling guide](https://huggingface.co/docs/transformers/main/chat_templating#advanced-tool-use--function-calling)
in the `transformers` docs for more information.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

model_id = "mistralai/Mistral-7B-Instruct-v0.3"
tokenizer = AutoTokenizer.from_pretrained(model_id)

def get_current_weather(location: str, format: str):
    """
    Get the current weather

    Args:
        location: The city and state, e.g. San Francisco, CA
        format: The temperature unit to use. Infer this from the user's location. (choices: ["celsius", "fahrenheit"])
    """
    pass

conversation = [{"role": "user", "content": "What's the weather like in Paris?"}]
tools = [get_current_weather]


# format and tokenize the tool use prompt
inputs = tokenizer.apply_chat_template(
    conversation,
    tools=tools,
    add_generation_prompt=True,
    return_dict=True,
    return_tensors="pt",
)

model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

inputs.to(model.device)
outputs = model.generate(**inputs, max_new_tokens=1000)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

Note that, for reasons of space, this example does not show a complete cycle of calling a tool and adding the tool call and tool
results to the chat history so that the model can use them in its next generation. For a full tool calling example, please
see the [function calling guide](https://huggingface.co/docs/transformers/main/chat_templating#advanced-tool-use--function-calling),
and note that Mistral **does** use tool call IDs, so these must be included in your tool calls and tool results. They should be
exactly 9 alphanumeric characters.
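As a hedged sketch of what one such round trip looks like under the `transformers` chat-template conventions (the call ID `abcdef123` and the weather reading are invented for illustration), continuing from the code above:

```python
# Append the model's tool call to the history; the ID is a made-up 9-character alphanumeric string.
tool_call = {"name": "get_current_weather", "arguments": {"location": "Paris, France", "format": "celsius"}}
conversation.append({"role": "assistant", "tool_calls": [{"type": "function", "id": "abcdef123", "function": tool_call}]})

# Append the tool's result under the same ID, then re-apply the template and generate again.
conversation.append({"role": "tool", "tool_call_id": "abcdef123", "name": "get_current_weather", "content": "22.0"})
inputs = tokenizer.apply_chat_template(conversation, tools=tools, add_generation_prompt=True, return_dict=True, return_tensors="pt")
```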

## Limitations

The Mistral 7B Instruct model is a quick demonstration that the base model can be easily fine-tuned to achieve compelling performance.
It does not have any moderation mechanisms. We're looking forward to engaging with the community on ways to
make the model finely respect guardrails, allowing for deployment in environments requiring moderated outputs.

## The Mistral AI Team

Albert Jiang, Alexandre Sablayrolles, Alexis Tacnet, Antoine Roux, Arthur Mensch, Audrey Herblin-Stoop, Baptiste Bout, Baudouin de Monicault, Blanche Savary, Bam4d, Caroline Feldman, Devendra Singh Chaplot, Diego de las Casas, Eleonore Arcelin, Emma Bou Hanna, Etienne Metzger, Gianna Lengyel, Guillaume Bour, Guillaume Lample, Harizo Rajaona, Jean-Malo Delignon, Jia Li, Justus Murke, Louis Martin, Louis Ternon, Lucile Saulnier, Lélio Renard Lavaud, Margaret Jennings, Marie Pellat, Marie Torelli, Marie-Anne Lachaux, Nicolas Schuhl, Patrick von Platen, Pierre Stock, Sandeep Subramanian, Sophia Yang, Szymon Antoniak, Teven Le Scao, Thibaut Lavril, Timothée Lacroix, Théophile Gervet, Thomas Wang, Valera Nemychnikova, William El Sayed, William Marshall
config.json
ADDED
@@ -0,0 +1,25 @@
{
  "architectures": [
    "MistralForLearnyfi"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 4096,
  "initializer_range": 0.02,
  "intermediate_size": 14336,
  "max_position_embeddings": 32768,
  "model_type": "mistral",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "rms_norm_eps": 1e-05,
  "rope_theta": 1000000.0,
  "sliding_window": null,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.42.0.dev0",
  "use_cache": true,
  "vocab_size": 32768
}
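Note that `MistralForLearnyfi` is not a model class that ships with `transformers`; the upstream Mistral checkpoints declare `MistralForCausalLM` here. Loading through the auto classes should still resolve correctly, since they key off `"model_type": "mistral"` rather than `architectures`; a quick sketch of checking the loaded config:

```py
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("mistralai/Mistral-7B-Instruct-v0.3")
print(cfg.model_type)     # "mistral" drives the AutoModelForCausalLM mapping
print(cfg.architectures)  # the upstream repo lists ["MistralForCausalLM"]
```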
generation_config.json
ADDED
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "transformers_version": "4.42.0.dev0"
}
gitattributes
ADDED
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
model-00001-of-00003.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ce6fb6f6f4d0183f4813cbf4ece24109da629a08d4210da46f77e1d8b0bd5c19
size 4949453792
model.py
ADDED
@@ -0,0 +1,318 @@
import os
import time
import json
import uuid
import requests
import whisper
import cv2
import pytesseract
import re
import torch
import boto3
from moviepy.editor import VideoFileClip
from flask import Flask, request, jsonify
from flask_apscheduler import APScheduler
from flask_cors import CORS
from werkzeug.utils import secure_filename
from transformers import AutoTokenizer, AutoModelForCausalLM, Trainer, TrainingArguments
from sentence_transformers import SentenceTransformer
from chromadb import Client as ChromaClient
from chromadb.config import Settings
from chromadb.utils import embedding_functions

app = Flask(__name__)
CORS(app)
scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()

# Load Whisper model globally to avoid redundancy
MODEL = whisper.load_model("base")
app.config['UPLOAD_FOLDER'] = "/home/ubuntu/classcut/data"
OCR_TEXT_SUFFIX = "_ocrtext.txt"
TRANSCRIPT_SUFFIX = "_transcript.txt"
DETAILS_SUFFIX = "_details.json"

ALLOWED_VIDEO_EXTENSIONS = {'mp4', 'avi', 'mov', 'mkv'}
ALLOWED_AUDIO_EXTENSIONS = {'wav', 'mp3', 'm4a', 'flac'}

# Initialize Mistral 7B model and tokenizer
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1",
    torch_dtype="auto",
    device_map="auto",
    trust_remote_code=True
)

# Initialize SentenceTransformer for embeddings
embedding_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

# Set the Chroma DB path
CHROMA_PATH = "chroma"

# Initialize Chroma vector store
chroma_client = ChromaClient(Settings(persist_directory=CHROMA_PATH))
collection = chroma_client.get_or_create_collection(name="video_transcripts")

# AWS S3 Configuration
S3_BUCKET = 'classcut-videos'
S3_REGION = 'ap-south-1'  # Asia Pacific (Mumbai)
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')

s3 = boto3.client('s3', region_name=S3_REGION,
                  aws_access_key_id=AWS_ACCESS_KEY_ID,
                  aws_secret_access_key=AWS_SECRET_ACCESS_KEY)


def upload_to_s3(file_path):
    file_name = file_path.split('/')[-1]
    # Upload the file to S3
    try:
        s3.upload_file(file_path, S3_BUCKET, file_name, ExtraArgs={
            'ContentType': 'binary/octet-stream',
            'ContentDisposition': 'inline'
        })
        # Construct the S3 URL
        s3_url = f"https://{S3_BUCKET}.s3.{S3_REGION}.amazonaws.com/{file_name}"
        print(f"Uploaded {file_name} to S3 bucket: {S3_BUCKET}")
        return s3_url
    except Exception as e:
        print(f"Error uploading {file_name} to S3: {e}")
        return None


def extract_audio(video_path):
    """
    Extracts audio from a given video file and saves it as an mp3 file.

    :param video_path: Path to the video file.
    :return: Path to the extracted audio file.
    """
    with VideoFileClip(video_path) as video:
        audio_path = f"{video_path}.mp3"
        video.audio.write_audiofile(audio_path)
        return audio_path


def transcribe_with_timestamps(audio_path):
    """
    Transcribes the given audio file using the Whisper model, including timestamps.

    :param audio_path: Path to the audio file.
    :return: A list of transcribed segments with timestamps.
    """
    result = MODEL.transcribe(audio_path, verbose=True, language='hi')
    return [f"{seg['start']} - {seg['end']}: {seg['text']}" for seg in result["segments"]]


def format_transcript(transcript_segments):
    """
    Formats transcript segments into a single string.

    :param transcript_segments: List of transcript segments.
    :return: Formatted transcript.
    """
    return "\n".join(transcript_segments).replace('\\n', ' ').strip()


def extract_text_from_video(video_path, frame_interval=30):
    """
    Extracts text from video frames using Tesseract OCR and saves unique text.

    :param video_path: Path to the video file.
    :param frame_interval: Interval to capture frames for OCR (in seconds).
    :return: List of unique text found in the video.
    """
    print(f"Attempting to extract text from {video_path}")

    unique_texts = set()
    video = VideoFileClip(video_path)
    duration = int(video.duration)

    print(f"Duration of video: {duration} seconds.")
    print(f"Frame interval: {frame_interval} seconds.")

    for time_sec in range(0, duration, frame_interval):
        frame = video.get_frame(time_sec)
        # moviepy returns RGB frames, so convert from RGB (not BGR) to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        text = pytesseract.image_to_string(gray)
        if text.strip() and text not in unique_texts:
            unique_texts.add(text.strip())

    # join with newlines; writelines() would not add any separators
    with open(f"{video_path}{OCR_TEXT_SUFFIX}", 'w') as file:
        file.write("\n".join(unique_texts))

    return list(unique_texts)


def process_video(video_path):
    # Extract audio and transcribe
    audio_path = extract_audio(video_path)
    transcript_segments = transcribe_with_timestamps(audio_path)
    with open(f"{video_path}{TRANSCRIPT_SUFFIX}", 'w') as file:
        file.write("\n".join(transcript_segments))

    # Extract text from video frames
    extract_text_from_video(video_path)

    # Fine-tune the Mistral model on the new transcript
    fine_tune_model(transcript_segments)

    # Add the transcript to ChromaDB
    add_to_chromadb(' '.join(transcript_segments))

    # You can add additional processing if needed
    print(f"Processing of {video_path} completed.")


def allowed_video_file(filename):
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_VIDEO_EXTENSIONS


def add_to_chromadb(text):
    # Generate embeddings
    embeddings = embedding_model.encode([text])

    # Add to ChromaDB; Chroma requires an id per document, so generate one
    collection.add(
        ids=[str(uuid.uuid4())],
        documents=[text],
        embeddings=embeddings.tolist(),
        metadatas=[{'source': 'video_transcript'}]
    )

    print("Text appended to ChromaDB.")


def fine_tune_model(transcript_segments):
    # Prepare data for fine-tuning
    print("Preparing data for fine-tuning...")
    dataset = [{'input_ids': tokenizer.encode(text, return_tensors='pt')[0]} for text in transcript_segments]

    # Define training arguments
    training_args = TrainingArguments(
        output_dir='./fine_tuned_model',
        num_train_epochs=1,  # Adjust as needed
        per_device_train_batch_size=1,  # Adjust based on your hardware
        save_steps=10,
        save_total_limit=2,
        logging_steps=10,
        learning_rate=5e-5,  # Hyperparameter tuning can be done here
        fp16=True,  # Enable if using compatible GPU
    )

    # Define a data collator that stacks (and, for batch sizes > 1, pads)
    # the sequences and reuses the inputs as labels (causal LM objective)
    def data_collator(features):
        input_ids = torch.nn.utils.rnn.pad_sequence(
            [f['input_ids'] for f in features],
            batch_first=True,
            padding_value=tokenizer.eos_token_id,
        )
        return {'input_ids': input_ids, 'labels': input_ids.clone()}

    # Initialize Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset,
        data_collator=data_collator,
    )

    # Fine-tune the model
    print("Starting fine-tuning...")
    trainer.train()
    print("Fine-tuning completed.")

    # Save the fine-tuned model
    model.save_pretrained('./fine_tuned_model')
    tokenizer.save_pretrained('./fine_tuned_model')
    print("Fine-tuned model saved.")


def query_chatbot(query_text):
    # Retrieve relevant documents from ChromaDB
    query_embedding = embedding_model.encode([query_text])
    results = collection.query(query_embeddings=query_embedding.tolist(), n_results=5)
    context_text = " ".join(results['documents'][0])

    # Prepare input for the model
    prompt = f"Context: {context_text}\n\nQuestion: {query_text}\n\nAnswer:"

    inputs = tokenizer(prompt, return_tensors='pt').to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=150)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Extract the answer part
    answer = response.split("Answer:")[-1].strip()
    return answer


@app.route('/hello', methods=['GET'])
def hello():
    return jsonify({'message': 'Hello, World!'})


@app.route('/upload', methods=['POST'])
def upload_file():
    print("Request received.")
    if 'file' not in request.files:
        return jsonify({'error': 'No file part'}), 400

    file = request.files['file']
    if file.filename == '':
        return jsonify({'error': 'No selected file'}), 400

    if file and allowed_video_file(file.filename):
        filename = secure_filename(file.filename)
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)

        if not os.path.exists(file_path):
            print(f"Saving {file.filename} to {file_path}")
            try:
                file.save(file_path)
                # Use a per-file job id; a fixed id would conflict on the second upload
                scheduler.add_job(func=process_file, args=[file_path], trigger='date', id=f'process_{filename}')
                file_name = file_path.split('/')[-1]
                return jsonify({'filename': f"{file_name}"}), 200
            except Exception as e:
                return jsonify({'error': str(e)}), 502
        else:
            print(f"We have already processed this file - {filename}. Skipping processing.")
            return jsonify({'filename': f"{filename}"}), 200
    else:
        return jsonify({'error': 'File type not allowed'}), 400


def process_file(file_path):
    # Your file processing logic here
    print(f'Processing file: {file_path}')
    process_video(file_path)
    # Simulate a long processing task
    time.sleep(10)
    print('File processed!')


@app.route('/details', methods=['POST'])
def get_details():
    data = request.get_json()
    filename = data.get('filename') if data else None
    if filename:
        print(f"Received request for details of filename: {filename}")

        details_json = f"{app.config['UPLOAD_FOLDER']}/{filename}{DETAILS_SUFFIX}"
        print(f"Details JSON path: {details_json}")
        if os.path.exists(details_json):
            with open(details_json, 'r') as file:
                details = json.load(file)
            return jsonify(details)
        else:
            return jsonify({'error': 'Details not found'}), 404
    else:
        return jsonify({'error': 'No filename provided'}), 400


@app.route('/chat', methods=['POST'])
def chat():
    chat_msg = request.form.get('chat_msg')
    if chat_msg:
        print(f"Received chat message: {chat_msg}")
        resp = query_chatbot(chat_msg)
        return jsonify({"status": "success", "response": f"{resp}"})
    else:
        return jsonify({"status": "error", "message": "No chat message received"}), 400


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)
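A minimal sketch of exercising the endpoints above, assuming the server is running locally on port 5000; the video file name and the question are placeholders:

```py
import requests

BASE = "http://localhost:5000"

# Upload a video; processing is scheduled in the background.
with open("lecture.mp4", "rb") as f:
    print(requests.post(f"{BASE}/upload", files={"file": f}).json())

# Once background processing has finished, query the transcript-backed chatbot.
print(requests.post(f"{BASE}/chat", data={"chat_msg": "Summarize the video in one line."}).json())
```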
model.safetensors.index.json
ADDED
@@ -0,0 +1,298 @@
{
  "metadata": {
    "total_size": 14496047104
  },
  "weight_map": {
    "lm_head.weight": "model-00003-of-00003.safetensors",
    "model.embed_tokens.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.11.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.12.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.13.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.14.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.15.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.2.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.20.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.input_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.mlp.down_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.mlp.up_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.22.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00003.safetensors",
    "model.layers.23.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.23.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.3.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.30.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.input_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.mlp.down_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.mlp.up_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00003.safetensors",
    "model.layers.4.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.input_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.mlp.down_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.mlp.up_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00003.safetensors",
    "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00003.safetensors",
    "model.norm.weight": "model-00003-of-00003.safetensors"
  }
}
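As a sanity check, the declared `total_size` is consistent with a roughly 7.25B-parameter model stored in bfloat16 (2 bytes per parameter):

```py
total_size = 14_496_047_104  # bytes, from the metadata above
params = total_size // 2     # bfloat16 stores each parameter in 2 bytes
print(params)                # 7_248_023_552, i.e. ~7.25B parameters
```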
params.json
ADDED
@@ -0,0 +1,11 @@
{
  "dim": 4096,
  "n_layers": 32,
  "head_dim": 128,
  "hidden_dim": 14336,
  "n_heads": 32,
  "n_kv_heads": 8,
  "norm_eps": 1e-05,
  "vocab_size": 32768,
  "rope_theta": 1000000.0
}
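These shape parameters are mutually consistent; a quick check of the arithmetic:

```py
params = {"dim": 4096, "head_dim": 128, "n_heads": 32, "n_kv_heads": 8}

# 32 attention heads of width 128 reassemble the 4096-wide hidden state
assert params["n_heads"] * params["head_dim"] == params["dim"]

# grouped-query attention: 32 query heads share 8 KV heads, i.e. 4 query heads per KV head
assert params["n_heads"] % params["n_kv_heads"] == 0
```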
special_tokens_map.json
ADDED
@@ -0,0 +1,23 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:37f00374dea48658ee8f5d0f21895b9bc55cb0103939607c8185bfd1c6ca1f89
size 587404
tokenizer.model.v3
ADDED
Binary file (587 kB).
tokenizer_config.json
ADDED
The diff for this file is too large to render.