import os
import json
import gradio as gr
import zipfile
import tempfile
import requests
import urllib.parse
import io
from huggingface_hub import HfApi, login
from PyPDF2 import PdfReader
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_groq import ChatGroq
from dotenv import load_dotenv
from langchain.schema import Document
from chunk_python_code import chunk_python_code_with_metadata

# Load environment variables from .env file
load_dotenv()

# Load configuration from JSON files
with open('config.json') as config_file:
    config = json.load(config_file)

with open('config2.json') as config2_file:
    config2 = json.load(config2_file)

PERSIST_DOC_DIRECTORY = config["persist_doc_directory"]
PERSIST_CODE_DIRECTORY = config["persist_code_directory"]
CHUNK_SIZE = config["chunk_size"]
CHUNK_OVERLAP = config["chunk_overlap"]
EMBEDDING_MODEL_NAME = config["embedding_model"]
LLM_MODEL_NAME = config["llm_model"]
LLM_TEMPERATURE = config["llm_temperature"]
GITLAB_API_URL = config["gitlab_api_url"]
HF_SPACE_NAME = config["hf_space_name"]
DATA_DIR = config["data_dir"]

GROQ_API_KEY = os.environ["GROQ_API_KEY"]
HF_TOKEN = os.environ["HF_Token"]

login(HF_TOKEN)
api = HfApi()

def load_project_id(json_file):
    with open(json_file, 'r') as f:
        data = json.load(f)
    return data['project_id']

def download_gitlab_project_by_version():
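    """Download a specific version of the GitLab project as a zip archive and
    upload it into the data directory of this Hugging Face space.

    The GitLab API URL, project id, and version are read from config2.json.
    """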
    try:
        # Extract GitLab project information from the config
        api_url = config2['gitlab']['api_url']
        project_id = urllib.parse.quote(config2['gitlab']['project']['id'], safe="")
        version = config2['gitlab']['project']['version']

        # Construct the URL for the release's zip archive
        url = f"{api_url}/projects/{project_id}/repository/archive.zip?sha={version}"

        # Send a GET request to download the zip file
        response = requests.get(url, stream=True)
        if response.status_code == 200:
            archive_bytes = io.BytesIO(response.content)

            # Extract the filename from the content-disposition header,
            # falling back to a default name if the header is missing
            content_disposition = response.headers.get("content-disposition")
            if content_disposition and "filename=" in content_disposition:
                filename = content_disposition.split("filename=")[-1].strip('"')
            else:
                filename = f"archive_{version}.zip"

            # Upload the archive into the data directory of the Hugging Face space
            api.upload_file(
                path_or_fileobj=archive_bytes,
                path_in_repo=f"{DATA_DIR}/{filename}",
                repo_id=HF_SPACE_NAME,
                repo_type='space'
            )
            print(f"Release {version} downloaded and uploaded as {DATA_DIR}/{filename}.")
        else:
            print(f"Failed to download the release: {response.status_code} - {response.reason}")
            print(response.text)
    except FileNotFoundError:
        print("The config file was not found. Please ensure it exists in the project directory.")
    except json.JSONDecodeError:
        print("Failed to parse the config file. Please ensure it contains valid JSON.")
    except Exception as e:
        print(f"An error occurred: {e}")

def download_gitlab_repo():
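    """Download the repository archive of the GitLab project listed in
    repository_ids.json and upload it to the space's data directory,
    skipping the upload if the file already exists there.
    """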
print("Start the upload_gitRepository function") | |
project_id = load_project_id('repository_ids.json') | |
encoded_project_id = urllib.parse.quote_plus(project_id) | |
# Define the URL to download the repository archive | |
archive_url = f"{GITLAB_API_URL}/projects/{encoded_project_id}/repository/archive.zip" | |
# Download the repository archive | |
response = requests.get(archive_url) | |
archive_bytes = io.BytesIO(response.content) | |
# Retrieve the original file name from the response headers | |
content_disposition = response.headers.get('content-disposition') | |
if content_disposition: | |
filename = content_disposition.split('filename=')[-1].strip('\"') | |
else: | |
filename = 'archive.zip' # Fallback to a default name if not found | |
# Check if the file already exists in the repository | |
existing_files = api.list_repo_files(repo_id=HF_SPACE_NAME, repo_type='space') | |
target_path = f"{DATA_DIR}/{filename}" | |
print(f"Target Path: '{target_path}'") | |
print(f"Existing Files: {[repr(file) for file in existing_files]}") | |
if target_path in existing_files: | |
print(f"File '{target_path}' already exists in the repository. Skipping upload...") | |
else: | |
# Upload the ZIP file to the new folder in the Hugging Face space repository | |
print("Uploading File to directory:") | |
print(f"Archive Bytes: {repr(archive_bytes.getvalue())[:100]}") # Show a preview of bytes | |
print(f"Target Path in Repo: '{target_path}'") | |
api.upload_file( | |
path_or_fileobj=archive_bytes, | |
path_in_repo=target_path, | |
repo_id=HF_SPACE_NAME, | |
repo_type='space' | |
) | |
print("Upload complete") | |
def get_all_files_in_folder(temp_dir, folder_path):
    # Recursively collect all file paths below temp_dir/folder_path
    all_files = []
    target_dir = os.path.join(temp_dir, folder_path)
    for root, dirs, files in os.walk(target_dir):
        for file in files:
            all_files.append(os.path.join(root, file))
    return all_files

def get_file(temp_dir, file_path):
    return os.path.join(temp_dir, file_path)

def process_directory(directory, folder_paths, file_paths):
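    """Extract the zip archive found in `directory` and collect the contents
    of the selected .rst and .py files.

    Returns a tuple (all_texts, file_references): the file contents and their
    paths relative to the archive root.
    """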
    all_texts = []
    file_references = []

    # Find the first zip archive in the data directory,
    # e.g. kadi-apy-master-<commit>.zip
    zip_filename = next((file for file in os.listdir(directory) if file.endswith('.zip')), None)
    if zip_filename is None:
        print(f"No zip archive found in {directory}")
        return all_texts, file_references
    zip_file_path = os.path.join(directory, zip_filename)
    print("zip_file_path:", zip_file_path)

    with tempfile.TemporaryDirectory() as tmpdirname:
        # Unzip the archive into the temporary directory
        with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
            zip_ref.extractall(tmpdirname)

        files = []
        # The archive extracts into a single top-level folder
        unzipped_root = os.listdir(tmpdirname)
        tmpsubdirpath = os.path.join(tmpdirname, unzipped_root[0])

        if folder_paths:
            for folder_path in folder_paths:
                files += get_all_files_in_folder(tmpsubdirpath, folder_path)
        if file_paths:
            files += [get_file(tmpsubdirpath, file_path) for file_path in file_paths]

        print(f"Total number of files: {len(files)}")

        for file_path in files:
            file_ext = os.path.splitext(file_path)[1]
            if os.path.getsize(file_path) == 0:
                print(f"Skipping an empty file: {file_path}")
                continue
            with open(file_path, 'rb') as f:
                if file_ext in ['.rst', '.py']:
                    text = f.read().decode('utf-8')
                    all_texts.append(text)
                    relative_path = os.path.relpath(file_path, tmpsubdirpath)
                    file_references.append(relative_path)
    return all_texts, file_references

def split_python_code_into_chunks(texts, file_paths):
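    """Chunk Python source files with chunk_python_code_with_metadata,
    which attaches file-level metadata to each resulting Document.
    """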
    chunks = []
    for text, file_path in zip(texts, file_paths):
        document_chunks = chunk_python_code_with_metadata(text, file_path)
        chunks.extend(document_chunks)
    return chunks

# Split text into chunks
def split_into_chunks(texts, references, chunk_size, chunk_overlap):
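    """Split plain-text documents into overlapping chunks, tagging each chunk
    with its source file in the metadata.
    """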
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    chunks = []
    for text, reference in zip(texts, references):
        chunks.extend([
            Document(page_content=chunk, metadata={"source": reference})
            for chunk in text_splitter.split_text(text)
        ])
    return chunks

# Setup Vectorstore
def setup_vectorstore(chunks, model_name, persist_directory):
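    """Embed the chunks with a Hugging Face model and persist them in a
    Chroma vectorstore.
    """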
print("Start setup_vectorstore_function") | |
embedding_model = HuggingFaceEmbeddings(model_name=model_name) | |
vectorstore = Chroma.from_documents(chunks, embedding=embedding_model, persist_directory=persist_directory) | |
return vectorstore | |
# Setup LLM | |
def setup_llm(model_name, temperature, api_key): | |
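    """Create the Groq chat model used to answer queries."""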
    llm = ChatGroq(model=model_name, temperature=temperature, api_key=api_key)
    return llm

def retrieve_from_vectorstore(vectorstore, query, k):
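    """Return the top-k chunks for `query` as (content, source) tuples."""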
    results = vectorstore.similarity_search(query, k=k)
    chunks_with_references = [(result.page_content, result.metadata["source"]) for result in results]

    # Print the chosen chunks and their sources to the console
    print("\nChosen chunks and their sources for the query:")
    for chunk, source in chunks_with_references:
        print(f"Source: {source}\nChunk: {chunk}\n")
        print("-" * 50)
    return chunks_with_references

def retrieve_docs_from_vectorstore(vectorstore, query, k):
    return vectorstore.similarity_search(query, k=k)

def format_doc_context(docs):
    doc_context = "\n\n".join(doc.page_content for doc in docs)
    print("\nDocument Context for LLM:\n")
    print(doc_context)  # Optional: print the context for verification
    return doc_context

def rag_workflow(query):
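    """Answer a user query by retrieving documentation and code chunks and
    passing them as context to the LLM.
    """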
    # Retrieve the top-k chunks from the documentation and code vectorstores
    retrieved_doc_chunks = retrieve_from_vectorstore(docstore, query, k=5)
    retrieved_code_chunks = retrieve_from_vectorstore(codestore, query, k=5)

    doc_context = "\n\n".join([doc_chunk for doc_chunk, _ in retrieved_doc_chunks])
    code_context = "\n\n".join([code_chunk for code_chunk, _ in retrieved_code_chunks])

    # Numbered source references for the retrieved chunks
    doc_references = "\n".join([f"[{i+1}] {ref}" for i, (_, ref) in enumerate(retrieved_doc_chunks)])
    code_references = "\n".join([f"[{i+1}] {ref}" for i, (_, ref) in enumerate(retrieved_code_chunks)])

    print(doc_context)
    print(code_context)
    print(doc_references)
    print(code_references)

prompt = f"""You are an expert python developer. You are assisting in generating code for users who wants to make use of "kadi-apy", an API library. | |
"Doc-context:" provides you with information how to use this API library by givnig code examples and code documentation. | |
"Code-context:" provides you information of API methods and classes from the "kadi-apy" library. | |
Based on the retrieved contexts and the guidelines answer the query. | |
General Guidelines: | |
- If no related information is found from the contexts to answer the query, reply that you do not know. | |
Guidelines when generating code: | |
- First display the full code and then follow with a well structured explanation of the generated code. | |
Doc-context: | |
{doc_context} | |
Code-context: | |
{code_context} | |
Query: | |
{query} | |
""" | |
    response = llm.invoke(prompt)
    return response.content

def initialize():
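    """Download the kadi-apy sources, build the documentation and code
    vectorstores, and set up the LLM.
    """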
    global docstore, codestore, chunks, llm

    download_gitlab_project_by_version()
    # download_gitlab_repo()

    code_partial_paths = ['kadi_apy/lib/']
    code_file_paths = []
    doc_partial_paths = ['docs/source/setup/']
    doc_file_paths = ['docs/source/usage/lib.rst']

    kadiAPY_code_texts, kadiAPY_code_references = process_directory(DATA_DIR, code_partial_paths, code_file_paths)
    print("Number of code texts:", len(kadiAPY_code_texts))
    kadiAPY_doc_texts, kadiAPY_doc_references = process_directory(DATA_DIR, doc_partial_paths, doc_file_paths)
    print("Number of doc texts:", len(kadiAPY_doc_texts))

    kadiAPY_code_chunks = split_python_code_into_chunks(kadiAPY_code_texts, kadiAPY_code_references)
    kadiAPY_doc_chunks = split_into_chunks(kadiAPY_doc_texts, kadiAPY_doc_references, CHUNK_SIZE, CHUNK_OVERLAP)
    print(f"Total number of code_chunks: {len(kadiAPY_code_chunks)}")
    print(f"Total number of doc_chunks: {len(kadiAPY_doc_chunks)}")

    # Build separate vectorstores for the documentation and code chunks
    docstore = setup_vectorstore(kadiAPY_doc_chunks, EMBEDDING_MODEL_NAME, PERSIST_DOC_DIRECTORY)
    codestore = setup_vectorstore(kadiAPY_code_chunks, EMBEDDING_MODEL_NAME, PERSIST_CODE_DIRECTORY)

    llm = setup_llm(LLM_MODEL_NAME, LLM_TEMPERATURE, GROQ_API_KEY)

initialize()

# Gradio utils
def check_input_text(text):
    # Block empty questions; raising stops the downstream event chain
    if not text:
        gr.Warning("Please input a question.")
        raise TypeError
    return True

def add_text(history, text):
    history = history + [(text, None)]
    yield history, ""

def bot_kadi(history):
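    """Generate an answer for the latest user message via the RAG workflow."""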
    user_query = history[-1][0]
    response = rag_workflow(user_query)
    history[-1] = (user_query, response)
    yield history

def main():
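    """Build and launch the Gradio chat interface."""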
    with gr.Blocks() as demo:
        gr.Markdown("## Kadi4Mat - AI Chat-Bot")
        gr.Markdown("AI assistant for Kadi4Mat based on a RAG architecture powered by an LLM")
        with gr.Tab("Kadi4Mat - AI Assistant"):
            with gr.Row():
                with gr.Column(scale=10):
                    chatbot = gr.Chatbot([], elem_id="chatbot", label="Kadi Bot", bubble_full_width=False, show_copy_button=True, height=600)
                    user_txt = gr.Textbox(label="Question", placeholder="Type in your question and press Enter or click Submit")

                    with gr.Row():
                        with gr.Column(scale=1):
                            submit_btn = gr.Button("Submit", variant="primary")
                        with gr.Column(scale=1):
                            clear_btn = gr.Button("Clear", variant="stop")

                    gr.Examples(
                        examples=[
                            "Who is working on Kadi4Mat?",
                            "How do I install the Kadi-Apy library?",
                            "How do I install the Kadi-Apy library for development?",
                            "I need a method to upload a file to a record",
                        ],
                        inputs=user_txt,
                        outputs=chatbot,
                        fn=add_text,
                        label="Try asking...",
                        cache_examples=False,
                        examples_per_page=3,
                    )

                    user_txt.submit(check_input_text, user_txt, None).success(add_text, [chatbot, user_txt], [chatbot, user_txt]).then(bot_kadi, [chatbot], [chatbot])
                    submit_btn.click(check_input_text, user_txt, None).success(add_text, [chatbot, user_txt], [chatbot, user_txt]).then(bot_kadi, [chatbot], [chatbot])
                    clear_btn.click(lambda: None, None, chatbot, queue=False)

    demo.launch()

if __name__ == "__main__":
    main()