Spaces: Runtime error
Minh Nguyen committed
Commit · 747e34b
1 Parent(s): 92842d3
update
Browse files
app.py
CHANGED
@@ -1,11 +1,29 @@
import gradio as gr
from huggingface_hub import InferenceClient
+from unsloth import FastLanguageModel
+max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
+dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
+load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.

+model, tokenizer = FastLanguageModel.from_pretrained(
+    model_name = "minhnguyen5293/lora_model", # YOUR MODEL YOU USED FOR TRAINING
+    max_seq_length = max_seq_length,
+    dtype = dtype,
+    load_in_4bit = load_in_4bit,
+)
+FastLanguageModel.for_inference(model) # Enable native 2x faster inference

+max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
+dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
+load_in_4bit = True
+
+model, tokenizer = FastLanguageModel.from_pretrained(
+    model_name = "lora_model", # YOUR MODEL YOU USED FOR TRAINING
+    max_seq_length = max_seq_length,
+    dtype = dtype,
+    load_in_4bit = load_in_4bit,
+)
+FastLanguageModel.for_inference(model) # Enable native 2x faster inference

def respond(
    message,
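The new module-level code loads the fine-tuned adapter twice: first from the Hub repo minhnguyen5293/lora_model and then again from a local path "lora_model", so the second call overwrites the first. One load is enough; the sketch below keeps a single call, using the Hub repo name from the first block and only API calls that already appear in the diff. Note that the 4-bit load and the later .to("cuda") both assume the Space runs on GPU hardware.

from unsloth import FastLanguageModel

max_seq_length = 2048   # unsloth supports RoPE scaling, so longer contexts also work
dtype = None            # auto-detect: float16 on T4/V100, bfloat16 on Ampere+
load_in_4bit = True     # 4-bit quantization to reduce VRAM usage

# Load the adapter once at import time so every chat request reuses the same model.
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "minhnguyen5293/lora_model",   # Hub repo from the first block (assumed to be the intended checkpoint)
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
)
FastLanguageModel.for_inference(model)   # enable unsloth's faster inference path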
@@ -25,19 +43,27 @@ def respond(

    messages.append({"role": "user", "content": message})

+    from transformers import TextIteratorStreamer

+    inputs = tokenizer.apply_chat_template(
        messages,
+        tokenize = True,
+        add_generation_prompt = True, # Must add for generation
+        return_tensors = "pt",
+    ).to("cuda")
+
+    text_streamer = TextIteratorStreamer(tokenizer, skip_prompt = True)
+    _ = model.generate(input_ids = inputs, streamer = text_streamer, max_new_tokens = 128,
+                       use_cache = True, temperature = 1.5, min_p = 0.1)
+
+
+    response = ""

+    for message in text_streamer:
+        # do not append if the message contains <|eot_id|>
+        if "<|eot_id|>" not in message:
+            response += message
+        yield response


    """
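As committed, model.generate() runs to completion before the loop over text_streamer starts: TextIteratorStreamer only buffers text into an internal queue, and nothing consumes it until generate() returns, so the chat UI receives the reply only after generation has finished. The documented transformers pattern is to run generate() in a background thread and iterate the streamer while it fills. A minimal sketch of that pattern for the body of respond(), reusing the inputs tensor, sampling settings, and <|eot_id|> filtering from the commit (indented as it would appear inside the function):

    from threading import Thread   # like the TextIteratorStreamer import, this would normally sit at the top of the file

    text_streamer = TextIteratorStreamer(tokenizer, skip_prompt = True)
    generation_kwargs = dict(
        input_ids = inputs,         # built with tokenizer.apply_chat_template(...) as above
        streamer = text_streamer,
        max_new_tokens = 128,
        use_cache = True,
        do_sample = True,           # not in the commit; without it generate() ignores temperature and min_p
        temperature = 1.5,
        min_p = 0.1,
    )
    # Generate in a background thread so tokens can be yielded as they arrive.
    Thread(target = model.generate, kwargs = generation_kwargs).start()

    response = ""
    for chunk in text_streamer:
        if "<|eot_id|>" not in chunk:   # drop the end-of-turn marker instead of showing it
            response += chunk
        yield response

Because respond() yields partial strings, gr.ChatInterface can update the chat bubble as each chunk arrives.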
@@ -61,4 +87,4 @@ demo = gr.ChatInterface(


if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
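The last hunk touches only the demo.launch() line; the gr.ChatInterface(...) call it belongs to is outside the diff. For reference, a minimal hypothetical wiring for a generator respond looks like the sketch below; the real file may pass additional controls through ChatInterface's additional_inputs if respond() takes more parameters than (message, history).

demo = gr.ChatInterface(respond)   # hypothetical minimal call; the actual arguments are not shown in this diff

if __name__ == "__main__":
    demo.launch()   # start the Gradio server for the Space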