Update app.py
app.py CHANGED
@@ -1,5 +1,5 @@
 import streamlit as st
-from transformers import pipeline
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 import torch
 
 #Sidebar menu
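One practical note on the new imports: google/gemma-2-9b-it is a gated checkpoint on the Hub, so from_pretrained succeeds only after the license has been accepted and an access token is visible to the Space. A minimal sketch, assuming the token is stored in an HF_TOKEN secret (the variable name is this sketch's assumption, not something the commit sets up):

import os
from transformers import AutoTokenizer

# gated repo: pass an access token explicitly if the Space is not
# already authenticated via huggingface_hub
tokenizer = AutoTokenizer.from_pretrained(
    "google/gemma-2-9b-it",
    token=os.environ.get("HF_TOKEN"),
)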
@@ -20,13 +20,37 @@ if time_series:
 
 
 if chatbot:
+
     st.header("Chat with me.")
+    text = st.text_area("Food security is a global challenge. Let's work together to find solutions. How can I help you today?")
+
+
+    tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b-it")
+    model = AutoModelForCausalLM.from_pretrained(
+        "google/gemma-2-9b-it",
+        device_map="auto",
+        torch_dtype=torch.bfloat16)
 
-
+
 
+    if text:
+        input_ids = tokenizer(text, return_tensors="pt").to(model.device)
+        outputs = model.generate(**input_ids)
+        st.write(tokenizer.decode(outputs[0], skip_special_tokens=True))
+
+
+
+
+'''
+if chatbot:
+
+    st.header("Chat with me.")
     text = st.text_area("Food security is a global challenge. Let's work together to find solutions. How can I help you today?")
 
+    pipe = pipeline("question-answering", model=model)
+
     if text:
         out = pipe(text)
         st.write(out)
 
+'''
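Two caveats about the new branch are worth sketching. Streamlit re-runs the whole script on every widget interaction, so an uncached from_pretrained would reload the 9B weights each time the user types; and gemma-2-9b-it is instruction-tuned, so its chat template usually gives better replies than raw text. A rough sketch combining both (st.cache_resource and apply_chat_template are standard APIs; the helper name load_gemma and the max_new_tokens value are this sketch's choices):

import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

@st.cache_resource  # load the 9B weights once per process, not on every rerun
def load_gemma(model_id: str = "google/gemma-2-9b-it"):
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id, device_map="auto", torch_dtype=torch.bfloat16)
    return tokenizer, model

tokenizer, model = load_gemma()

text = st.text_area("How can I help you today?")
if text:
    # wrap the raw prompt in Gemma's chat template
    input_ids = tokenizer.apply_chat_template(
        [{"role": "user", "content": text}],
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)
    outputs = model.generate(input_ids, max_new_tokens=256)
    # decode only the newly generated tokens, not the echoed prompt
    st.write(tokenizer.decode(outputs[0][input_ids.shape[-1]:],
                              skip_special_tokens=True))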
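For reference, the disabled block at the bottom wires the text box to a question-answering pipeline. That pipeline type is extractive: it expects a question plus a context passage, and a QA-architecture model rather than a causal LM, so pipe(text) with a single string would not work if the block were revived. A sketch of the expected call shape, using the task's stock default model rather than Gemma (the example strings are illustrative):

from transformers import pipeline

# extractive QA: the answer is a span copied out of `context`
qa = pipeline("question-answering")  # falls back to the task's default SQuAD model
out = qa(
    question="What drives food insecurity?",
    context="Conflict, climate shocks, and economic instability drive food insecurity.",
)
print(out["answer"], out["score"])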