# "Spaces: Sleeping" — Hugging Face Spaces status banner captured when this file
# was exported from the web UI; kept as a comment so it does not break parsing.
# Stdlib imports, then third-party (streamlit), then local (menu).
# Trailing " | |" copy/paste artifacts removed so the module parses.
import streamlit as st
import os
import urllib.request
import json
import ssl

from menu import menu

# Render the shared sidebar/navigation for every page of the app.
menu()
# Retrieve Answer through API ----------------------------------------------- | |
def chat_api(history, question):
    """Call the hosted model endpoint and return its answer.

    Parameters
    ----------
    history : list[dict]
        Prior chat messages as ``{"role": ..., "content": ...}`` dicts
        (the shape stored in ``st.session_state.messages``).
    question : str
        The user's new question.

    Returns
    -------
    str
        The ``"answer"`` field of the endpoint's JSON response.

    Raises
    ------
    Exception
        If no API key is configured in Streamlit secrets.
    urllib.error.URLError
        If the HTTP request to the endpoint fails.
    """
    data = {'chat_history': history, "question": question}
    body = str.encode(json.dumps(data))
    url = st.secrets["url"]
    api_key = st.secrets["ms_api_key"]
    # Replace this with the primary/secondary key, AMLToken, or Microsoft
    # Entra ID token for the endpoint.
    if not api_key:
        raise Exception("A key should be provided to invoke the endpoint")
    # NOTE(review): the original printed the URL, API key, headers, and body to
    # stdout — that leaks the bearer token into server logs, so the debug
    # prints have been removed.
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
        'Authorization': 'Bearer ' + api_key,
    }
    req = urllib.request.Request(url, body, headers)
    # Use the response as a context manager so the HTTP connection is closed
    # even if .read() or JSON decoding raises (the original leaked the socket).
    with urllib.request.urlopen(req) as response:
        result = response.read()
    answer = json.loads(result)["answer"]
    return answer
# Header UI ------------------------------------------------------------------
# st.image('./Cognizant_Logo.jpg', width=300)
st.header("RCM Copilot", divider='blue')

# Chat UI --------------------------------------------------------------------
# Fixed user-facing typos: "as access" -> "has access", "quesitons" -> "questions".
st.write("This chat has access to search the web to answer your questions.")
st.write("Model: gpt-4o-mini")
st.write("Version: V0.1")
st.header("", divider='blue')
# Initialize chat history once per session; Streamlit reruns the script on
# every interaction, so guard against clobbering existing messages.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Accept user input; chat_input returns None until the user submits.
if prompt := st.chat_input("What is your question?"):
    # Add the user message to chat history so it survives the next rerun.
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Echo the user message in a chat bubble.
    with st.chat_message("user"):
        st.markdown(prompt)
    # Display the assistant response in its own chat bubble.
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        with st.status("Getting the answer...") as status:
            # Send the accumulated history (which already includes the new
            # prompt) plus the question to the endpoint. Debug print of the
            # full chat history removed — it spammed server logs.
            chat_history = st.session_state.messages
            assistant_response = chat_api(history=chat_history, question=prompt)
            full_response = assistant_response
            status.update(label="Done.", state="complete")
        message_placeholder.markdown(full_response)
    # Persist the assistant response so it is replayed on the next rerun.
    st.session_state.messages.append({"role": "assistant", "content": full_response})