Spaces:
Sleeping
Sleeping
File size: 1,910 Bytes
3d2ccf5 050bcdd 39fa1f9 3252678 3d2ccf5 1b8a80e 296d90c 3d2ccf5 0b96e65 296d90c 5d2b7f0 296d90c c52039d 1b8a80e 38f88ff 06d9d25 38f88ff 89c8fdb d08c883 5d2b7f0 1d1f5ef 5d2b7f0 3252678 3d2ccf5 1b8a80e 3d2ccf5 3252678 c52039d 3252678 c52039d 3252678 c52039d 7c00320 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 |
import os
import random

import requests
import streamlit as st
from streamlit_chat import message
@st.cache
def query(payload):
    """Send *payload* to the HF Inference API (roberta-base-squad2 QA model).

    Args:
        payload: JSON-serializable dict, e.g. {"inputs": {"question": ..., "context": ...}}.

    Returns:
        tuple: (parsed JSON body, the raw requests.Response) so the caller can
        check ``status_code`` before trusting the body.

    NOTE(review): ``@st.cache`` is deprecated in modern Streamlit, and caching a
    live ``Response`` object may not serialize cleanly — consider returning only
    (json, status_code) and switching to ``st.cache_data``; verify against the
    installed Streamlit version.
    """
    api_token = os.getenv("api_token")  # HF token supplied via environment
    model_id = "deepset/roberta-base-squad2"
    headers = {"Authorization": f"Bearer {api_token}"}
    API_URL = f"https://api-inference.huggingface.co/models/{model_id}"
    # timeout added so a stalled inference endpoint can't hang the app forever
    response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
    return response.json(), response
# Static context handed to the extractive-QA model: it lists which HF task
# fits which use case, so the model's extracted span names a task.
context = "To extract information from documents, use sentence similarity task. To do sentiment analysis from tweets, use text classification task. To detect masks from images, use object detection task. To extract information from invoices, use named entity recognition from token classification task."

# Conversation log; seeded with the bot's opening prompt.
message_history = [{"text": "Let's find out the best task for your use case! Tell me about your use case :)", "is_user": False}]

# Replay every message recorded so far.
for msg in message_history:
    message(msg["text"], is_user=msg["is_user"])

placeholder = st.empty()  # placeholder for the latest message

# Renamed from `input` to avoid shadowing the builtin.
user_input = st.text_input("Ask me 🤗")

# Guard: on the initial render the text box is empty — don't query the API
# with an empty question (the original did, wasting a round-trip).
if user_input:
    message(user_input, is_user=True)  # aligns the message to the right
    message_history.append({"text": user_input, "is_user": True})

    data, resp = query(
        {
            "inputs": {
                "question": user_input,
                "context": context,
            }
        }
    )

    if resp.status_code == 200:
        model_answer = data["answer"]
        response_templates = [
            f"{model_answer} is the best task for this 🤩",
            f"I think you should use {model_answer} 🪄",
            f"I think {model_answer} should work for you 🤓",
        ]
        bot_answer = random.choice(response_templates)
        message_history.append({"text": bot_answer, "is_user": False})
        # Bug fix: the original appended the bot's answer AFTER the replay
        # loop had already run, so it was never displayed — render it now.
        message(bot_answer, is_user=False)
|