|
import os |
|
import json |
|
import gradio as gr |
|
import zipfile |
|
import tempfile |
|
import requests |
|
import urllib.parse |
|
import io |
|
|
|
from huggingface_hub import HfApi, login |
|
from PyPDF2 import PdfReader |
|
from langchain_huggingface import HuggingFaceEmbeddings |
|
from langchain_community.vectorstores import Chroma |
|
from langchain.text_splitter import RecursiveCharacterTextSplitter |
|
from langchain_groq import ChatGroq |
|
from dotenv import load_dotenv |
|
from langchain.docstore.document import Document |
|
|
|
|
|
load_dotenv() |
|
|
|
|
|
with open('config.json') as config_file: |
|
config = json.load(config_file) |
|
|
|
|
|
PERSIST_DIRECTORY = config["persist_directory"] |
|
CHUNK_SIZE = config["chunk_size"] |
|
CHUNK_OVERLAP = config["chunk_overlap"] |
|
EMBEDDING_MODEL_NAME = config["embedding_model"] |
|
LLM_MODEL_NAME = config["llm_model"] |
|
LLM_TEMPERATURE = config["llm_temperature"] |
|
GITLAB_API_URL = config["gitlab_api_url"] |
|
HF_SPACE_NAME = config["hf_space_name"] |
|
REPOSITORY_DIRECTORY = config["repository_directory"] |
|
|
|
GROQ_API_KEY = os.environ["GROQ_API_KEY"] |
|
HF_TOKEN = os.environ["HF_Token"] |
|
|
|
|
|
|
|
login(HF_TOKEN) |
|
api = HfApi() |
|
|
|
def load_project_id(json_file): |
|
with open(json_file, 'r') as f: |
|
data = json.load(f) |
|
return data['project_id'] |
|
|
|
|
|
def download_gitlab_repo(): |
|
print("Start the upload_gitRepository function") |
|
project_id = load_project_id('repository_ids.json') |
|
encoded_project_id = urllib.parse.quote_plus(project_id) |
|
|
|
|
|
archive_url = f"{GITLAB_API_URL}/projects/{encoded_project_id}/repository/archive.zip" |
|
|
|
|
|
response = requests.get(archive_url) |
|
archive_bytes = io.BytesIO(response.content) |
|
|
|
|
|
content_disposition = response.headers.get('content-disposition') |
|
if content_disposition: |
|
filename = content_disposition.split('filename=')[-1].strip('\"') |
|
else: |
|
filename = 'archive.zip' |
|
|
|
|
|
existing_files = api.list_repo_files(repo_id=HF_SPACE_NAME, repo_type='space') |
|
target_path = f"{REPOSITORY_DIRECTORY}/{filename}" |
|
|
|
print(f"Target Path: '{target_path}'") |
|
print(f"Existing Files: {[repr(file) for file in existing_files]}") |
|
|
|
if target_path in existing_files: |
|
print(f"File '{target_path}' already exists in the repository. Skipping upload...") |
|
else: |
|
|
|
print("Uploading File to directory:") |
|
print(f"Archive Bytes: {repr(archive_bytes.getvalue())[:100]}") |
|
print(f"Target Path in Repo: '{target_path}'") |
|
|
|
api.upload_file( |
|
path_or_fileobj=archive_bytes, |
|
path_in_repo=target_path, |
|
repo_id=HF_SPACE_NAME, |
|
repo_type='space' |
|
) |
|
print("Upload complete") |
|
|
|
|
|
def get_all_files_in_folder(temp_dir, partial_path): |
|
|
|
all_files = [] |
|
print("inner method of get all files in folder") |
|
target_dir = os.path.join(temp_dir, partial_path) |
|
print(target_dir) |
|
|
|
for root, dirs, files in os.walk(target_dir): |
|
print(f"Files in current directory ({root}): {files}") |
|
for file in files: |
|
print(f"Processing file: {file}") |
|
all_files.append(os.path.join(root, file)) |
|
|
|
return all_files |
|
|
|
def get_file(temp_dir, file_path): |
|
full_path = os.path.join(temp_dir, file_path) |
|
return full_path |
|
|
|
|
|
def process_directory(directory, partial_paths=None, file_paths=None): |
|
all_texts = [] |
|
file_references = [] |
|
|
|
zip_files = [file for file in os.listdir(directory) if file.endswith('.zip')] |
|
|
|
if not zip_files: |
|
print("No zip file found in the directory.") |
|
return all_texts, file_references |
|
|
|
if len(zip_files) > 1: |
|
print("More than one zip file found.") |
|
return all_texts, file_references |
|
else: |
|
zip_file_path = os.path.join(directory, zip_files[0]) |
|
|
|
|
|
with tempfile.TemporaryDirectory() as tmpdirname: |
|
|
|
with zipfile.ZipFile(zip_file_path, 'r') as zip_ref: |
|
zip_ref.extractall(tmpdirname) |
|
print(f"Extracted {zip_file_path} to {tmpdirname}") |
|
|
|
files = [] |
|
|
|
unzipped_root = os.listdir(tmpdirname) |
|
if len(unzipped_root) == 1 and os.path.isdir(os.path.join(tmpdirname, unzipped_root[0])): |
|
tmpsubdirpath= os.path.join(tmpdirname, unzipped_root[0]) |
|
else: |
|
tmpsubdirpath = tmpdirname |
|
|
|
if partial_paths: |
|
print("Go in partial_paths") |
|
for partial_path in partial_paths: |
|
files += get_all_files_in_folder(tmpsubdirpath, partial_path) |
|
|
|
else: |
|
print("wtf") |
|
for root, _, files_list in os.walk(tmpdirname): |
|
for file in files_list: |
|
files.append(os.path.join(root, file)) |
|
|
|
if file_paths: |
|
print("Go in normal paths") |
|
files += [get_file(tmpsubdirpath, file_path) for file_path in file_paths] |
|
|
|
print(f"Total number of files: {len(files)}") |
|
for file_path in files: |
|
print(f"Paths of files: {files}") |
|
file_ext = os.path.splitext(file_path)[1] |
|
|
|
if os.path.getsize(file_path) == 0: |
|
print(f"Skipping an empty file: {file_path}") |
|
continue |
|
|
|
with open(file_path, 'rb') as f: |
|
if file_ext in ['.rst', '.md', '.txt', '.html', '.json', '.yaml', '.py']: |
|
text = f.read().decode('utf-8') |
|
print(f"Extracted text from {file_path}:\n{text[:50]}...\n") |
|
elif file_ext in ['.svg']: |
|
text = f"SVG file content from {file_path}" |
|
elif file_ext in ['.png', '.ico']: |
|
text = f"Image metadata from {file_path}" |
|
else: |
|
continue |
|
|
|
all_texts.append(text) |
|
file_references.append(file_path) |
|
|
|
print(f"Print the text for testing broooo {all_texts}") |
|
return all_texts, file_references |
|
|
|
import ast |
|
|
|
def get_source_segment(source_lines, node): |
|
start_line, start_col = node.lineno - 1, node.col_offset |
|
end_line = node.end_lineno - 1 if hasattr(node, 'end_lineno') else node.lineno - 1 |
|
end_col = node.end_col_offset if hasattr(node, 'end_col_offset') else len(source_lines[end_line]) |
|
|
|
lines = source_lines[start_line:end_line + 1] |
|
lines[0] = lines[0][start_col:] |
|
lines[-1] = lines[-1][:end_col] |
|
|
|
return ''.join(lines) |
|
|
|
from langchain.schema import Document |
|
|
|
def chunk_python_file_content(content, char_limit=1572): |
|
source_lines = content.splitlines(keepends=True) |
|
|
|
|
|
tree = ast.parse(content) |
|
|
|
chunks = [] |
|
current_chunk = "" |
|
current_chunk_size = 0 |
|
|
|
|
|
class_nodes = [node for node in ast.walk(tree) if isinstance(node, ast.ClassDef)] |
|
function_nodes = [node for node in ast.walk(tree) if isinstance(node, ast.FunctionDef) and not isinstance(node, ast.ClassDef)] |
|
|
|
for class_node in class_nodes: |
|
method_nodes = [node for node in class_node.body if isinstance(node, ast.FunctionDef)] |
|
|
|
if method_nodes: |
|
first_method_start_line = method_nodes[0].lineno - 1 |
|
class_def_lines = source_lines[class_node.lineno - 1:first_method_start_line] |
|
else: |
|
class_def_lines = source_lines[class_node.lineno - 1:class_node.end_lineno] |
|
|
|
class_def = ''.join(class_def_lines) |
|
class_def_size = len(class_def) |
|
|
|
|
|
if current_chunk_size + class_def_size <= char_limit: |
|
current_chunk += f"{class_def.strip()}\n" |
|
current_chunk_size += class_def_size |
|
else: |
|
|
|
if current_chunk: |
|
chunks.append(current_chunk.strip()) |
|
current_chunk = "" |
|
current_chunk_size = 0 |
|
current_chunk += f"{class_def.strip()}\n" |
|
current_chunk_size = class_def_size |
|
|
|
for method_node in method_nodes: |
|
method_def = get_source_segment(source_lines, method_node) |
|
method_def_size = len(method_def) |
|
|
|
|
|
if current_chunk_size + method_def_size <= char_limit: |
|
current_chunk += f"{method_def.strip()}\n" |
|
current_chunk_size += method_def_size |
|
else: |
|
|
|
if current_chunk: |
|
chunks.append(current_chunk.strip()) |
|
current_chunk = "" |
|
current_chunk_size = 0 |
|
current_chunk += f"# This is a class method of class: {class_node.name}\n{method_def.strip()}\n" |
|
current_chunk_size = method_def_size |
|
|
|
for function_node in function_nodes: |
|
function_def = get_source_segment(source_lines, function_node) |
|
function_def_size = len(function_def) |
|
|
|
|
|
if current_chunk_size + function_def_size <= char_limit: |
|
current_chunk += f"{function_def.strip()}\n" |
|
current_chunk_size += function_def_size |
|
else: |
|
|
|
if current_chunk: |
|
chunks.append(current_chunk.strip()) |
|
current_chunk = "" |
|
current_chunk_size = 0 |
|
current_chunk += f"{function_def.strip()}\n" |
|
current_chunk_size = function_def_size |
|
|
|
if current_chunk: |
|
chunks.append(current_chunk.strip()) |
|
|
|
return chunks |
|
|
|
|
|
|
|
|
|
def split_pythoncode_into_chunks(texts, references, chunk_size, chunk_overlap): |
|
chunks = [] |
|
|
|
for text, reference in zip(texts, references): |
|
file_chunks = chunk_python_file_content(text, char_limit=chunk_size) |
|
|
|
for chunk in file_chunks: |
|
document = Document(page_content=chunk, metadata={"source": reference}) |
|
chunks.append(document) |
|
|
|
print(f"Total number of chunks: {len(chunks)}") |
|
return chunks |
|
|
|
|
|
|
|
def split_into_chunks(texts, references, chunk_size, chunk_overlap): |
|
text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap) |
|
chunks = [] |
|
|
|
for text, reference in zip(texts, references): |
|
chunks.extend([Document(page_content=chunk, metadata={"source": reference}) for chunk in text_splitter.split_text(text)]) |
|
print(f"Total number of chunks: {len(chunks)}") |
|
return chunks |
|
|
|
|
|
def setup_chroma(chunks, model_name="sentence-transformers/all-mpnet-base-v2", persist_directory="chroma_data"): |
|
embedding_model = HuggingFaceEmbeddings(model_name=model_name) |
|
vectorstore = Chroma.from_documents(chunks, embedding=embedding_model, persist_directory=persist_directory) |
|
return vectorstore |
|
|
|
|
|
def setup_llm(model_name, temperature, api_key): |
|
llm = ChatGroq(model=model_name, temperature=temperature, api_key=api_key) |
|
return llm |
|
|
|
def query_chroma(vectorstore, query, k): |
|
results = vectorstore.similarity_search(query, k=k) |
|
chunks_with_references = [(result.page_content, result.metadata["source"]) for result in results] |
|
|
|
print("\nChosen chunks and their sources for the query:") |
|
for chunk, source in chunks_with_references: |
|
print(f"Source: {source}\nChunk: {chunk}\n") |
|
print("-" * 50) |
|
return chunks_with_references |
|
|
|
def rag_workflow(query): |
|
docs = query_chroma(vectorstore, query, k=10) |
|
context = "\n\n".join([doc for doc, _ in docs]) |
|
references = "\n".join([f"[{i+1}] {ref}" for i, (_, ref) in enumerate(docs)]) |
|
print(f"Context for the query:\n{context}\n") |
|
print(f"References for the query:\n{references}\n") |
|
|
|
prompt = f"""You are an expert python developer. Provide a clear and consice answer based only on the information in the retrieved context. |
|
The retrieved context contains source code and documenation of an api library. |
|
If no related Information is found from the context to answer the query, reply that you do not know. |
|
|
|
Context: |
|
{context} |
|
|
|
Query: |
|
{query} |
|
""" |
|
|
|
|
|
response = llm.invoke(prompt) |
|
return response.content, references |
|
|
|
|
|
def initialize(): |
|
global vectorstore, chunks, llm |
|
|
|
partial_paths = ['kadi_apy/lib/ressources'] |
|
file_paths = [] |
|
|
|
|
|
all_texts, file_references = process_directory(REPOSITORY_DIRECTORY, partial_paths, file_paths) |
|
chunks = split_pythoncode_into_chunks(all_texts, file_references, 512, 0) |
|
|
|
print(f"Total number of chunks: {len(chunks)}") |
|
|
|
|
|
|
|
|
|
initialize() |
|
|
|
|
|
def check_input_text(text): |
|
if not text: |
|
gr.Warning("Please input a question.") |
|
raise TypeError |
|
return True |
|
|
|
def add_text(history, text): |
|
history = history + [(text, None)] |
|
yield history, "" |
|
|
|
|
|
|
|
import gradio as gr |
|
|
|
|
|
def bot_kadi(history): |
|
user_query = history[-1][0] |
|
response, references = rag_workflow(user_query) |
|
history[-1] = (user_query, response) |
|
|
|
|
|
formatted_references = "" |
|
docs = query_chroma(vectorstore, user_query, k=5) |
|
for i, (doc, ref) in enumerate(docs): |
|
formatted_references += f""" |
|
<div style="border: 1px solid #ddd; padding: 10px; margin-bottom: 10px; border-radius: 5px;"> |
|
<h3 style="margin-top: 0;">Reference {i+1}</h3> |
|
<p><strong>Source:</strong> {ref}</p> |
|
<button onclick="var elem = document.getElementById('text-{i}'); var button = this; if (elem.style.display === 'block') {{ elem.style.display = 'none'; button.innerHTML = '▶ show source text'; }} else {{ elem.style.display = 'block'; button.innerHTML = '▼ hide source text'; }}">{{'▶ show source text'}}</button> |
|
<div id="text-{i}" style="display: none;"> |
|
<p><strong>Text:</strong> {doc}</p> |
|
</div> |
|
</div> |
|
""" |
|
|
|
yield history, formatted_references |
|
|
|
def main(): |
|
with gr.Blocks() as demo: |
|
gr.Markdown("## Kadi4Mat - AI Chat-Bot") |
|
gr.Markdown("AI assistant for Kadi4Mat based on RAG architecture powered by LLM") |
|
|
|
with gr.Tab("Kadi4Mat - AI Assistant"): |
|
with gr.Row(): |
|
with gr.Column(scale=10): |
|
chatbot = gr.Chatbot([], elem_id="chatbot", label="Kadi Bot", bubble_full_width=False, show_copy_button=True) |
|
user_txt = gr.Textbox(label="Question", placeholder="Type in your question and press Enter or click Submit") |
|
|
|
with gr.Row(): |
|
with gr.Column(scale=1): |
|
submit_btn = gr.Button("Submit", variant="primary") |
|
with gr.Column(scale=1): |
|
clear_btn = gr.Button("Clear", variant="stop") |
|
|
|
gr.Examples( |
|
examples=[ |
|
"Who is working on Kadi4Mat?", |
|
"How do i install the Kadi-Apy library?", |
|
"How do i install the Kadi-Apy library for development?", |
|
"I need a method to upload a file to a record", |
|
], |
|
inputs=user_txt, |
|
outputs=chatbot, |
|
fn=add_text, |
|
label="Try asking...", |
|
cache_examples=False, |
|
examples_per_page=3, |
|
) |
|
|
|
with gr.Column(scale=3): |
|
with gr.Tab("References"): |
|
doc_citation = gr.HTML("<p>References used in answering the question will be displayed below.</p>") |
|
|
|
|
|
|
|
user_txt.submit(check_input_text, user_txt, None).success(add_text, [chatbot, user_txt], [chatbot, user_txt]).then(bot_kadi, [chatbot], [chatbot, doc_citation]) |
|
submit_btn.click(check_input_text, user_txt, None).success(add_text, [chatbot, user_txt], [chatbot, user_txt]).then(bot_kadi, [chatbot], [chatbot, doc_citation]) |
|
clear_btn.click(lambda: None, None, chatbot, queue=False) |
|
|
|
demo.launch() |
|
|
|
|
|
if __name__ == "__main__": |
|
main() |