Update app.py
app.py
CHANGED
@@ -13,17 +13,17 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 
 
-
+chatbot = pipeline(model="microsoft/Phi-3.5-mini-instruct")
 #token = os.getenv("HF_TOKEN")
-login(token = os.getenv('HF_TOKEN'))
+#login(token = os.getenv('HF_TOKEN'))
 #chatbot = pipeline(model="meta-llama/Llama-3.2-1B")
 
-tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct")
-model = AutoModelForCausalLM.from_pretrained(
-    "meta-llama/Llama-3.2-1B-Instruct",
-    device_map="auto",
-    torch_dtype="auto",
-)
+#tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B-Instruct")
+#model = AutoModelForCausalLM.from_pretrained(
+#    "meta-llama/Llama-3.2-1B-Instruct",
+#    device_map="auto",
+#    torch_dtype="auto",
+#)
 
 #chatbot = pipeline(model="facebook/blenderbot-400M-distill")
 
@@ -32,13 +32,13 @@ response_list = []
 
 
 def vanilla_chatbot(message, history):
-    inputs = tokenizer(message, return_tensors="pt").to("cpu")
-    with torch.no_grad():
-
-    return tokenizer.decode(outputs[0], skip_special_tokens=True)
-
+    #inputs = tokenizer(message, return_tensors="pt").to("cpu")
+    #with torch.no_grad():
+    #    outputs = model.generate(inputs.input_ids, max_length=100)
+    #return tokenizer.decode(outputs[0], skip_special_tokens=True)
+    conversation = chatbot(message)
 
-
+    return conversation[0]['generated_text']
 
 demo_chatbot = gr.ChatInterface(vanilla_chatbot, title="Vanilla Chatbot", description="Enter text to start chatting.")
 
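For reference, a minimal sketch of app.py as it stands after this commit, assembled from the new side of the diff. Only the changed and context lines shown above are confirmed; the import block, the closing launch() call, and the omission of the unused response_list and of the commented-out code are assumptions made to keep the sketch self-contained.

import gradio as gr
import torch  # still imported at line 13 of the file, though unused now that the pipeline handles inference
from transformers import pipeline

# Public (ungated) checkpoint, so the HF_TOKEN login is commented out in the commit.
chatbot = pipeline(model="microsoft/Phi-3.5-mini-instruct")


def vanilla_chatbot(message, history):
    # The text-generation pipeline returns a list of dicts,
    # each carrying a 'generated_text' field.
    conversation = chatbot(message)
    return conversation[0]['generated_text']


demo_chatbot = gr.ChatInterface(
    vanilla_chatbot,
    title="Vanilla Chatbot",
    description="Enter text to start chatting.",
)

demo_chatbot.launch()  # assumed: the diff ends at line 44, before any launch call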
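One behavioral note on the new return path, hedged against current transformers defaults rather than anything stated in this commit: when a text-generation pipeline is called with a plain string, 'generated_text' contains the prompt plus the continuation (return_full_text defaults to True), so the chat window will echo the user's message at the start of each reply. Recent transformers releases also accept chat-format messages, in which case the model's chat template is applied and 'generated_text' comes back as the full message list with the assistant reply last. A hypothetical variant along those lines:

def vanilla_chatbot(message, history):
    # Chat-format input; max_new_tokens is an illustrative choice, not taken from the commit.
    out = chatbot([{"role": "user", "content": message}], max_new_tokens=256)
    return out[0]["generated_text"][-1]["content"]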