Spaces (Space status: Runtime error)

Rohan5manza committed · Commit dd9194f · Parent(s): 40617c0

Update app.py

app.py CHANGED
@@ -1,63 +1,44 @@
-import gradio as gr
-from huggingface_hub import InferenceClient
-
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
-    messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
-
-    response = ""
-
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
-
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a financial advisor.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
-
-
-if __name__ == "__main__":
-    demo.launch()
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from peft import PeftModel
+from unsloth import FastLanguageModel
+import torch
+
+
+max_seq_length = 4096 # Choose any! We auto support RoPE Scaling internally!
+dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
+load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
+
+# 4bit pre quantized models we support for 4x faster downloading + no OOMs.
+fourbit_models = [
+    "unsloth/llama-3-8b-Instruct-bnb-4bit",
+]
+
+model, tokenizer = FastLanguageModel.from_pretrained(
+    model_name = "unsloth/llama-3-8b-Instruct-bnb-4bit",
+    max_seq_length = max_seq_length,
+    dtype = dtype,
+    load_in_4bit = load_in_4bit,
+    # token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
+)
+
+
+# Load the base model and apply LoRA adapters
+from transformers import AutoModel
+adapter_model = AutoModel.from_pretrained("Rohan5manza/sentiment_analysis")
+
+model = PeftModel.from_pretrained(model, adapter_model)
+
+def generate_response(prompt):
+    inputs = tokenizer(prompt, return_tensors="pt")
+    outputs = model.generate(**inputs)
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+# Example Gradio or Streamlit interface for deploying
+import gradio as gr
+
+def gradio_interface(prompt):
+    response = generate_response(prompt)
+    return response
+
+iface = gr.Interface(fn=gradio_interface, inputs="text", outputs="text")
+iface.launch()
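
As committed, this app.py will crash at startup, which is consistent with the Space's Runtime error status: PeftModel.from_pretrained expects an adapter repo id or local path as its second argument, not a model object, and AutoModel.from_pretrained will typically fail on an adapter-only repo anyway, since such repos ship adapter_config.json rather than a config.json. The tokenized inputs are also never moved to the model's device. Below is a minimal corrected sketch, not the committed code: it assumes Rohan5manza/sentiment_analysis is a LoRA adapter trained on top of unsloth/llama-3-8b-Instruct-bnb-4bit, that the Space has GPU hardware, and that max_new_tokens=256 is an acceptable generation budget.

import gradio as gr
from peft import PeftModel
from unsloth import FastLanguageModel

# Load the 4-bit base model with unsloth (dtype=None auto-detects:
# float16 on T4/V100, bfloat16 on Ampere+).
model, tokenizer = FastLanguageModel.from_pretrained(
    model_name="unsloth/llama-3-8b-Instruct-bnb-4bit",
    max_seq_length=4096,
    dtype=None,
    load_in_4bit=True,
)

# Attach the LoRA adapter by repo id; PeftModel downloads and applies it.
model = PeftModel.from_pretrained(model, "Rohan5manza/sentiment_analysis")

# Enable unsloth's faster inference path (assumed here to compose with the
# PEFT-wrapped model, as in unsloth's own inference examples).
FastLanguageModel.for_inference(model)

def generate_response(prompt):
    # Llama-3 Instruct models expect the chat template, not raw text.
    messages = [{"role": "user", "content": prompt}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)
    outputs = model.generate(input_ids=input_ids, max_new_tokens=256)
    # Decode only the newly generated tokens, not the echoed prompt.
    new_tokens = outputs[0][input_ids.shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)

iface = gr.Interface(fn=generate_response, inputs="text", outputs="text")
iface.launch()

If the adapter was trained with unsloth, passing model_name="Rohan5manza/sentiment_analysis" directly to FastLanguageModel.from_pretrained should also work, since unsloth resolves the base model from the adapter's adapter_config.json, which would remove the peft dependency from the app entirely.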