import os
import json

import streamlit as st

from huggingface_hub import HfApi, login
from dotenv import load_dotenv

from download_repo import download_gitlab_repo_to_hfspace
from process_repo import extract_repo_files
from chunking import chunk_pythoncode_and_add_metadata, chunk_text_and_add_metadata
from vectorstore import setup_vectorstore
from llm import get_groq_llm
from kadi_apy_bot import KadiAPYBot
from repo_versions import store_message_from_json
# Load environment variables from the .env file
load_dotenv()

# Load configuration from the JSON file
with open("config.json", "r") as file:
    config = json.load(file)
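
# For reference, a minimal sketch of the config.json shape this script expects,
# inferred from the keys read below (all values are illustrative placeholders,
# not the project's real settings):
#
# {
#     "vectorstore_directory": "vectorstore",
#     "chunking": {"chunk_size": 512, "chunk_overlap": 32},
#     "embedding_model": {"name": "...", "version": "..."},
#     "llm_model": {"name": "...", "temperature": 0.0},
#     "gitlab": {"api_url": "...", "project id": "...", "project version": "..."},
#     "data_dir": "data",
#     "hf_space_name": "..."
# }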
GROQ_API_KEY = os.environ["GROQ_API_KEY"]
HF_TOKEN = os.environ["HF_Token"]

VECTORSTORE_DIRECTORY = config["vectorstore_directory"]

CHUNK_SIZE = config["chunking"]["chunk_size"]
CHUNK_OVERLAP = config["chunking"]["chunk_overlap"]

EMBEDDING_MODEL_NAME = config["embedding_model"]["name"]
EMBEDDING_MODEL_VERSION = config["embedding_model"]["version"]

LLM_MODEL_NAME = config["llm_model"]["name"]
LLM_MODEL_TEMPERATURE = config["llm_model"]["temperature"]

GITLAB_API_URL = config["gitlab"]["api_url"]
GITLAB_PROJECT_ID = config["gitlab"]["project id"]
GITLAB_PROJECT_VERSION = config["gitlab"]["project version"]

DATA_DIR = config["data_dir"]
HF_SPACE_NAME = config["hf_space_name"]

login(HF_TOKEN)
hf_api = HfApi()
def initialize():
    """Download the KadiAPY repo, chunk its code and docs, and build the RAG bot."""
    global kadiAPY_bot

    download_gitlab_repo_to_hfspace(
        GITLAB_API_URL, GITLAB_PROJECT_ID, GITLAB_PROJECT_VERSION, DATA_DIR, hf_api, HF_SPACE_NAME
    )

    code_texts, code_references = extract_repo_files(DATA_DIR, ["kadi_apy"], [])
    doc_texts, doc_references = extract_repo_files(DATA_DIR, ["docs"], [])

    print("Length of code_texts:", len(code_texts))
    print("Length of doc_texts:", len(doc_texts))

    code_chunks = chunk_pythoncode_and_add_metadata(code_texts, code_references)
    doc_chunks = chunk_text_and_add_metadata(doc_texts, doc_references, CHUNK_SIZE, CHUNK_OVERLAP)
    print(f"Total number of code_chunks: {len(code_chunks)}")
    print(f"Total number of doc_chunks: {len(doc_chunks)}")

    vectorstore = setup_vectorstore(doc_chunks + code_chunks, EMBEDDING_MODEL_NAME, VECTORSTORE_DIRECTORY)
    llm = get_groq_llm(LLM_MODEL_NAME, LLM_MODEL_TEMPERATURE, GROQ_API_KEY)
    kadiAPY_bot = KadiAPYBot(llm, vectorstore)


initialize()
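
# Note: Streamlit reruns this whole script on every user interaction, so the
# module-level initialize() above rebuilds the vectorstore and bot each time.
# One possible guard (a sketch, not the original design) is to wrap the setup
# in st.cache_resource and return the bot instead of mutating a global:
#
# @st.cache_resource
# def get_bot():
#     ...  # same steps as initialize(), ending with `return KadiAPYBot(llm, vectorstore)`
#
# kadiAPY_bot = get_bot()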
def main():
    st.set_page_config(page_title="KadiAPY - AI Coding Assistant", layout="wide")
    st.title("KadiAPY - AI Coding Assistant")
    st.markdown("An AI assistant for KadiAPY, built on a RAG architecture and powered by an LLM.")

    tab1, tab2 = st.tabs(["KadiAPY - AI Assistant", "Try Asking"])

    with tab1:
        st.write("### Kadi Bot")

        if "chat_history" not in st.session_state:
            st.session_state.chat_history = []

        question = st.text_input("Question", placeholder="Type in your question and press Enter")

        if st.button("Submit"):
            if question:
                st.session_state.chat_history.append({"User": question})
                response = bot_kadi(question)  # Replace `bot_kadi` with your response generation function
                st.session_state.chat_history.append({"Kadi Bot": response})

        if st.button("Clear"):
            st.session_state.chat_history = []

        # Render the conversation so far
        for exchange in st.session_state.chat_history:
            for speaker, text in exchange.items():
                st.write(f"**{speaker}:** {text}")

    with tab2:
        st.write("### Try Asking...")
        examples = [
            "Write me a Python script which can convert plain JSON to a Kadi4Mat-compatible extra metadata structure",
            "I need a method to upload a file to a record. The id of the record is 3",
        ]
        for example in examples:
            st.write(f"- {example}")
def bot_kadi(question):
    # Dummy response function; replace with the real response generation logic
    return "This is a placeholder response from Kadi Bot."
if __name__ == "__main__":
    main()