# Spaces:
# Sleeping
# Sleeping
# (Hugging Face Spaces status banner captured with the source; kept as comments
# so the file parses as Python.)
from llama_index.core.agent import ReActAgent | |
from llama_index.llms.openai import OpenAI | |
from llama_index.core.tools import FunctionTool | |
from opensearchpy import OpenSearch | |
from gradio_client import Client | |
import streamlit as st | |
import json | |
import openai | |
import warnings | |
warnings.filterwarnings('ignore') | |
import os | |
# --- Runtime configuration pulled from the environment ---
# API key for the OpenAI client used by query_generator below.
openai_api_key = os.getenv("OPENAI_API_KEY")
# Fine-tuning job whose resulting model translates NL -> OpenSearch queries.
job_id = os.getenv('job_id')
# OpenSearch connection settings; currently only consumed by the
# commented-out os_client block below.
user = os.getenv('USERNAME')
password = os.getenv('PASSWORD')
host = os.getenv('HOST')
# NOTE(review): int() raises TypeError when PORT is unset — confirm the
# deployment environment always provides PORT.
port = int(os.getenv('PORT'))
auth = (user,password)
# Shared OpenAI client (fine-tuning lookup + chat completions).
client = openai.OpenAI(api_key=openai_api_key)
# os_client = OpenSearch( | |
# hosts = [{'host': host, 'port': port}], | |
# http_auth = auth, | |
# use_ssl = True, | |
# verify_certs = False | |
# ) | |
# indices = os_client.cat.indices(format="json") | |
# list_of_indeces = [] | |
# for index in indices: | |
# list_of_indeces.append(index['index']) | |
def rag_app(user_input: str) -> str:
    """Answer a question via the hosted Qdrant/LlamaIndex RAG Space.

    Opens a gradio client against the public Space and invokes its
    /chat_with_ai endpoint with the raw user text, returning the
    Space's response unchanged.
    """
    space = Client("anasmkh/QdrantVectorStore_Llamaindex")
    answer = space.predict(
        user_input=user_input,
        api_name="/chat_with_ai",
    )
    return answer
# Expose rag_app to the ReAct agent as a callable LlamaIndex tool.
rag_tool = FunctionTool.from_defaults(fn=rag_app)
def query_generator(user_input: str) -> str:
    """Translate a natural-language request into an OpenSearch JSON query.

    Resolves the fine-tuned model attached to the configured fine-tuning
    job, then asks it to produce the query. Returns the raw chat message
    object (callers read its .content attribute for the query text).
    """
    ft_job = client.fine_tuning.jobs.retrieve(job_id)
    conversation = [
        {
            "role": "system",
            "content": """don't add any other text to sql query;You are a highly skilled assistant trained to translate natural language requests
into accurate and efficient OpenSearch JSON queries """,
        },
        {"role": "user", "content": user_input},
    ]
    chat = client.chat.completions.create(
        model=ft_job.fine_tuned_model,
        messages=conversation,
    )
    return chat.choices[0].message
# Expose the query generator as a second agent tool.
query_tool = FunctionTool.from_defaults(fn=query_generator)
# Deterministic (temperature=0) LLM driving the ReAct reasoning loop.
llm = OpenAI(model="gpt-3.5-turbo", temperature=0)
# ReAct agent that chooses between query generation and RAG per request.
agent = ReActAgent.from_tools([query_tool,rag_tool], llm=llm, verbose=True)
def implement_query(generated_query):
    """Validate a generated OpenSearch query and return it as a dict.

    Accepts either a JSON string — single quotes are normalized to double
    quotes first, a best-effort fix for model output that will corrupt
    values legitimately containing apostrophes — or an already-parsed
    mapping, which is passed through unchanged.

    Fix: the original built ``query`` and then fell off the end,
    returning None, so the caller displayed nothing; the parsed query is
    now returned.

    Raises json.JSONDecodeError on unparseable strings; the Streamlit
    submit handler catches it upstream.
    """
    if isinstance(generated_query, str):
        # Models frequently emit single-quoted pseudo-JSON.
        generated_query = generated_query.replace("'", '"')
        query = json.loads(generated_query)
    else:
        query = generated_query
    return query
    # The OpenSearch execution path is disabled (os_client is commented out
    # at module level); re-enable the call below when the cluster
    # connection is restored.
    # response = os_client.search(body=query)
    # return response
# --- Streamlit UI: take a question, run the agent, render tool outputs ---
st.subheader('OpenSearch Assistant')
user_input = st.text_input("Enter your query:", "")
if st.button("Submit"):
    if user_input:
        with st.spinner("Processing..."):
            try:
                # Let the ReAct agent pick a tool and produce an answer.
                response = agent.chat(user_input)
                st.success("Query Processed Successfully!")
                st.subheader("Agent Response:")
                # Inspect which tools the agent actually invoked.
                sources = response.sources
                for source in sources:
                    st.write('Used Tool: ',source.tool_name)
                    if source.tool_name =='query_generator':
                        # query_generator returns a chat message object;
                        # .content holds the generated query text.
                        st.write(source.raw_output.content)
                        os_response = implement_query(source.raw_output.content)
                        st.subheader('OS Response')
                        st.write(os_response)
                    else:
                        # RAG tool output — presumably a nested gradio result
                        # structure; indexing shape TODO confirm against the Space.
                        st.write(source.raw_output[0][0][1])
            except Exception as e:
                # Single top-level boundary: surface any agent/parse failure.
                st.error(f"Error: {e}")
    else:
        st.warning("Please enter a query to process.")