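"""Gradio app for the KadiAPY AI coding assistant.

Wires a Chroma vectorstore (SFR-Code embeddings) and a Groq-hosted LLM into
KadiAPYBot and serves the bot through a Gradio chat UI.
"""
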
import os
import json

import gradio as gr
from huggingface_hub import HfApi, login
from dotenv import load_dotenv

from llm import get_groq_llm
from vectorstore import get_chroma_vectorstore
from embeddings import get_SFR_Code_embedding_model
from kadi_apy_bot import KadiAPYBot

# Load environment variables from .env file
load_dotenv()
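
# Expected .env contents (illustrative placeholders):
#   GROQ_API_KEY=<your Groq API key>
#   HF_Token=<your Hugging Face access token>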

vectorstore_path = "data/vectorstore"

GROQ_API_KEY = os.environ["GROQ_API_KEY"]
HF_TOKEN = os.environ["HF_Token"]

with open("config.json", "r") as file:
    config = json.load(file)

login(HF_TOKEN)
hf_api = HfApi()

# Read the model settings from config.json
LLM_MODEL_NAME = config["llm_model_name"]
LLM_MODEL_TEMPERATURE = float(config["llm_model_temperature"])
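
# A matching config.json might look like this (the model name is an
# illustrative placeholder):
# {
#     "llm_model_name": "llama-3.1-70b-versatile",
#     "llm_model_temperature": "0.1"
# }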

def initialize():
    global kadiAPY_bot

    vectorstore = get_chroma_vectorstore(get_SFR_Code_embedding_model(), vectorstore_path)
    llm = get_groq_llm(LLM_MODEL_NAME, LLM_MODEL_TEMPERATURE, GROQ_API_KEY)
    kadiAPY_bot = KadiAPYBot(llm, vectorstore)

initialize()

def bot_kadi(history, state):
    """
    Handle user input and generate the bot response using gr.State().
    """
    user_query = history[-1][0]

    # Add the user query to the bot's state for session-specific history
    state["history"].append({"query": user_query, "response": None})

    # Process the query with the bot and generate a response
    response = kadiAPY_bot.process_query(user_query)

    # Save the response back to the session state
    state["history"][-1]["response"] = response
    history[-1] = (user_query, response)
    yield history

# Gradio UI
def add_text(history, text, state):
    """
    Add the user text to the history and initialize the state if needed.
    """
    if "history" not in state:
        state["history"] = []  # Initialize session-specific state
    history = history + [(text, None)]
    yield history, ""

def check_input_text(text):
    if not text:
        gr.Warning("Please input a question.")
        # Raising aborts the event chain, so the .success() handlers never run
        raise TypeError
    return True

def main():
    with gr.Blocks() as demo:
        gr.Markdown("## KadiAPY - AI Coding-Assistant")
        gr.Markdown("AI assistant for KadiAPY, based on a RAG architecture and powered by an LLM")

        with gr.Tab("KadiAPY - AI Assistant"):
            with gr.Row():
                with gr.Column(scale=10):
                    chatbot = gr.Chatbot([], elem_id="chatbot", label="Kadi Bot", bubble_full_width=False, show_copy_button=True, height=600)
                    user_txt = gr.Textbox(label="Question", placeholder="Type in your question and press Enter or click Submit")

                    # Session-specific state; the default dict is copied for each new session
                    session_state = gr.State({})

                    with gr.Row():
                        with gr.Column(scale=1):
                            submit_btn = gr.Button("Submit", variant="primary")
                        with gr.Column(scale=1):
                            clear_btn = gr.Button("Clear", variant="stop")

                    gr.Examples(
                        examples=[
                            "Write me a Python script which can convert plain JSON to a Kadi4Mat-compatible extra metadata structure",
                            "I need a method to upload a file to a record. The id of the record is 3",
                        ],
                        inputs=user_txt,
                        outputs=chatbot,
                        fn=add_text,
                        label="Try asking...",
                        cache_examples=False,
                        examples_per_page=3,
                    )

        user_txt.submit(check_input_text, user_txt, None).success(
            add_text, [chatbot, user_txt, session_state], [chatbot, user_txt]
        ).then(bot_kadi, [chatbot, session_state], [chatbot])
        submit_btn.click(check_input_text, user_txt, None).success(
            add_text, [chatbot, user_txt, session_state], [chatbot, user_txt]
        ).then(bot_kadi, [chatbot, session_state], [chatbot])

        # Clear both the visible chat and the session history
        clear_btn.click(lambda: (None, {}), None, [chatbot, session_state], queue=False)

    demo.launch()

if __name__ == "__main__":
    main()
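
# To try this locally (assuming this file is saved as app.py and the local
# modules llm, vectorstore, embeddings, and kadi_apy_bot are importable,
# with a populated Chroma store under data/vectorstore):
#   python app.py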