# NOTE: "Spaces: Sleeping" was a Hugging Face Spaces status banner captured
# when this file was scraped; it is not part of the application code.
import streamlit as st
from openai import OpenAI
from app_config import *
from app_access_db import *
# --- LLM configuration -------------------------------------------------------
# Active model; earlier experiments kept below for quick switching.
# model = "gpt-3.5-turbo"
# model = "gpt-4-turbo"
model = "gpt-4o"

# Default OpenAI endpoint. To use a local server instead, set e.g.:
# model = "lmstudio-community/Meta-Llama-3-8B-Instruct-GGUF"
# gpt_base_url = "http://localhost:1234/v1/"
gpt_base_url = None
# ------------------------------------------------------------------------------------------------
# SIDEBAR
# ------------------------------------------------------------------------------------------------
# Static sidebar content: app title, a short description, and the schema of the
# demo database the assistant can answer questions about.
st.sidebar.title('OpenAI Business Chat')
st.sidebar.write('This chat bot is built with the Tools and Function feature of OpenAI to be able to answer questions regarding applications and performance of officers')
st.sidebar.markdown("""
### Having a sample database with a structure
- application
  - app_number
  - amount
  - amount_paid
  - state (APPROVED, REJECTED, PENDING_PAYMENT, PAID)
  - office_code [FK]
  - service_code [FK]
  - date_created
  - date_paid
  - date_processed
- office
  - office_name
  - office_location_code [FK]
- location
  - location_name
  - location_code
- service
  - service_code
  - service_name
### The chatbot will provide answers from that database
- The number of applications rejected in a location during the current month
- The trend of applications in particular states, for a location
- Any question you think relevant from this DB
""")
def onchange_openai_key():
    """Callback fired whenever the OpenAI-key text input changes; logs the value."""
    current_key = st.session_state.openai_key
    print(current_key)
# API-key input; the widget value is mirrored into st.session_state['openai_key'].
openai_key = st.sidebar.text_input('OpenAI key', key='openai_key', on_change=onchange_openai_key)
def submit_openai_key(model=model):
    """Validate the provided OpenAI key by running a trivial completion.

    Writes the model's answer to a simple counting prompt into the sidebar,
    or a reminder message when no key has been entered.

    Args:
        model: Model name to use for the test completion (defaults to the
            module-level ``model``).
    """
    # Covers both the empty-string and None cases of the text input.
    if not openai_key:
        st.sidebar.write('Please provide the key before')
        return
    client = OpenAI(api_key=openai_key, base_url=gpt_base_url)
    completion = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": "You are an assistant giving simple and short answer for question of child. No questions, and no explanations"},
            {"role": "user", "content": "count from 0 to 10"}
        ],
        temperature=0
    )
    st.sidebar.write(f'Simple count : {completion.choices[0].message.content}')
# Button that triggers the key-validation call above.
submit_key = st.sidebar.button('Submit', on_click=submit_openai_key)
# ------------------------------------------------------------------------------------------------
# CHAT
# ------------------------------------------------------------------------------------------------
# Main chat area: title plus a hint about which model is backing the chat.
st.title('OpenAI Business Chat')
st.write(f'Ask any question that can be answered by the LLM {model}.')
def askQuestion(model=model, question=''):
    """Send *question* to the LLM primed with the SQL-generation system prompt.

    Args:
        model: Model name to query (defaults to the module-level ``model``).
        question: The user's natural-language question.

    Returns:
        The raw model response (expected to be a SQL query), or an error
        string when no API key has been provided.
    """
    # Covers both the empty-string and None cases of the text input.
    if not openai_key:
        print('Please provide the key before')
        return 'LLM API is not defined. Please provide the key before'
    client = OpenAI(api_key=openai_key, base_url=gpt_base_url)
    completion = client.chat.completions.create(
        model=model,
        messages=[
            # query_context comes from app_config and is presumably the DB
            # schema / instructions prompt — confirm in app_config.
            {"role": "system", "content": f'{query_context}'},
            {"role": "user", "content": f'{question}'}
        ],
        temperature=0
    )
    return completion.choices[0].message.content
class AssistantMessage:
    """Container for one assistant reply: the generated SQL and its result set.

    Attributes are assigned by the chat handler after construction:
    ``sql`` holds the LLM-generated query (markdown fences stripped) and
    ``response_data`` holds the DataFrame returned by ``run_query``.
    """

    def __init__(self):
        # The original used bare annotations (self.sql : str), which never
        # create the attributes — and evaluating the DataFrame annotation at
        # runtime could raise NameError. Initialize explicitly instead.
        self.sql: str = ''
        self.response_data = None  # DataFrame once populated by the caller
def displayAssistantMessage(assistantMessage: AssistantMessage):
    """Render one assistant reply: its SQL, the raw result, and a visual view."""
    with st.chat_message("assistant"):
        st.code(assistantMessage.sql, language='sql')
        st.code(assistantMessage.response_data, language='markdown')
        columns = assistantMessage.response_data.columns
        if columns.size == 2:
            # Two-column result: first column as x axis, second as y axis.
            st.bar_chart(assistantMessage.response_data, x=columns[0], y=columns[1])
        if columns.size == 1:
            # Single-column result: show the first value as a metric.
            st.metric(label=columns[0], value=f'{assistantMessage.response_data.values[0][0]}')
# --- Chat history -------------------------------------------------------------
# Create the history container on the first run of the script.
if "messages" not in st.session_state:
    st.session_state.messages = []
# Streamlit reruns the whole script on every interaction, so replay the
# stored conversation each time.
for message in st.session_state.messages:
    role = message["role"]
    if role == "user":
        with st.chat_message(role):
            st.markdown(message["content"])
    elif role == "assistant":
        # Assistant entries store an AssistantMessage, not plain text.
        displayAssistantMessage(message["content"])
# React to user input.
if prompt := st.chat_input("What is up?"):
    with st.status('Running', expanded=True) as status:
        # Display the user message and record it in the history.
        st.chat_message("user").markdown(prompt)
        st.session_state.messages.append({"role": "user", "content": prompt})
        # Ask the LLM for a SQL query answering the question, then strip the
        # markdown code fences it tends to wrap the query in. Computed once
        # here instead of the original's duplicated replace() calls.
        response = askQuestion(question=prompt)
        sql = response.replace('```', '')
        # SECURITY NOTE(review): the LLM output is executed against the
        # database verbatim; run_query should use a read-only connection or
        # validate that the statement is a SELECT.
        response_data = run_query(sql)
        # Build, render, and record the assistant reply.
        assistant_msg = AssistantMessage()
        assistant_msg.sql = sql
        assistant_msg.response_data = response_data
        displayAssistantMessage(assistant_msg)
        st.session_state.messages.append({"role": "assistant", "content": assistant_msg})
        status.update(label='Response of last question', state="complete", expanded=True)