Update app.py
app.py CHANGED
@@ -6,14 +6,9 @@ import gradio as gr
 import sentencepiece
 
 
-# Load the model and tokenizer
-
-
-tokenizer_dir = "Tonic1/YiTonic"
-vocab_file = os.path.join(tokenizer_dir, "tokenizer.model")
-tokenizer_json = os.path.join(tokenizer_dir, "tokenizer.json")
-tokenizer_config = os.path.join(tokenizer_dir, "tokenizer_config.json")
-tokenizer = YiTokenizer(vocab_file=vocab_file)
+# Load the model and tokenizer using transformers
+model = AutoModelForCausalLM.from_pretrained("01-ai/Yi-34B", device_map="auto", torch_dtype="auto", trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained("01-ai/Yi-34B", trust_remote_code=True)
 
 def run(message, chat_history, system_prompt, max_new_tokens=1024, temperature=0.3, top_p=0.9, top_k=50):
     prompt = get_prompt(message, chat_history, system_prompt)
@@ -58,7 +53,7 @@ MAX_MAX_NEW_TOKENS = 200000
 DEFAULT_MAX_NEW_TOKENS = 100000
 MAX_INPUT_TOKEN_LENGTH = 100000
 
-DESCRIPTION = "#
+DESCRIPTION = "# 👋🏻Welcome to 🙋🏻‍♂️Tonic's🧑🏻‍🚀YI-200K🚀"
 
 def clear_and_save_textbox(message): return '', message
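For readers reproducing this change, below is a minimal sketch of how the transformers-loaded model and tokenizer could back the run() function shown in the diff. It is an illustration under stated assumptions, not the Space's actual code: the get_prompt() placeholder stands in for the real prompt builder in app.py, and the generation call simply forwards the sampling parameters from run()'s signature.

# Illustrative sketch only; get_prompt() is a simplified stand-in for the helper in app.py.
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "01-ai/Yi-34B", device_map="auto", torch_dtype="auto", trust_remote_code=True
)
tokenizer = AutoTokenizer.from_pretrained("01-ai/Yi-34B", trust_remote_code=True)

def get_prompt(message, chat_history, system_prompt):
    # Placeholder prompt builder; the real formatting lives in app.py.
    turns = "".join(f"User: {u}\nAssistant: {a}\n" for u, a in chat_history)
    return f"{system_prompt}\n{turns}User: {message}\nAssistant:"

def run(message, chat_history, system_prompt, max_new_tokens=1024,
        temperature=0.3, top_p=0.9, top_k=50):
    prompt = get_prompt(message, chat_history, system_prompt)
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    output_ids = model.generate(
        **inputs,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
    )
    # Drop the prompt tokens and decode only the newly generated continuation.
    new_tokens = output_ids[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

Note that device_map="auto" and torch_dtype="auto" let accelerate shard the 34B checkpoint across available devices and pick the checkpoint's native dtype; both options, like the model ID, are taken directly from the added lines in the diff.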