import os
import tempfile
import uuid
import pandas as pd
import logging
import json
import yaml
import time
import datetime
import asyncio
import warnings
from pathlib import Path

# Logging configuration
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(message)s'
)

# LLM and indexing
from llama_index.core import (
    Settings,
    VectorStoreIndex,
    SimpleDirectoryReader,
    PromptTemplate,
)
from llama_index.llms.cerebras import Cerebras
from llama_index.embeddings.nomic import NomicEmbedding
from llama_index.core.node_parser import MarkdownNodeParser
from llama_index.readers.docling import DoclingReader

# Speech-to-text and text-to-speech via Groq
from groq import Groq

# Gradio
import gradio as gr
import gradio.themes as themes

# Suppress tokenization warning
warnings.filterwarnings("ignore", message=".*clean_up_tokenization_spaces.*")

# --- API Keys & Global Variables ---
CEREBRAS_API_KEY = os.getenv("CEREBRAS_API_KEY")
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
NOMIC_API_KEY = os.getenv("NOMIC_API_KEY")

if not CEREBRAS_API_KEY:
    raise ValueError("CEREBRAS_API_KEY belum diset.")
if not GROQ_API_KEY:
    raise ValueError("GROQ_API_KEY belum diset.")
if not NOMIC_API_KEY:
    raise ValueError("NOMIC_API_KEY belum diset.")

# Global cache: file_key -> query engine for the uploaded documents.
global_file_cache = {}

# Initialize the Groq client (used for both STT and TTS).
logging.info("Inisialisasi Groq client")
groq_client = Groq(api_key=GROQ_API_KEY)


# --- Model and indexing helpers ---
def load_cerebras_llm():
    """Load and return the Cerebras LLM (llama-3.3-70b).

    Raises:
        Exception: re-raised from the Cerebras client on failure.
    """
    logging.info("Memuat Cerebras LLM")
    try:
        llm = Cerebras(model="llama-3.3-70b", api_key=CEREBRAS_API_KEY)
        logging.debug("Cerebras LLM berhasil dimuat")
        return llm
    except Exception as e:
        logging.error(f"Error load_cerebras_llm: {e}")
        raise


def create_embedding():
    """Initialise the Nomic embedding model and register it globally.

    Returns:
        NomicEmbedding: the embedding model (also set as
        ``Settings.embed_model`` as a side effect).

    Raises:
        Exception: re-raised from the Nomic client on failure.
    """
    logging.info("Menginisialisasi embedding model dengan NomicEmbedding")
    try:
        embed_model = NomicEmbedding(
            model_name="nomic-embed-text-v1.5",
            vision_model_name="nomic-embed-vision-v1.5",
            api_key=NOMIC_API_KEY
        )
        Settings.embed_model = embed_model
        logging.debug("Embedding model berhasil di-set")
        return embed_model
    except Exception as e:
        logging.error(f"Error create_embedding: {e}")
        raise


# --- Document loading and query-engine construction ---
def load_documents(file_list):
    """Index the uploaded files and cache a query engine for them.

    Args:
        file_list: list of uploaded file objects (each with a ``.name``
            path attribute, as provided by ``gr.File``).

    Returns:
        tuple[str, str | None]: (status message, cache key into
        ``global_file_cache``) — the key is ``None`` when loading failed.
    """
    logging.info("Memuat dokumen yang diunggah")
    if not file_list:
        logging.error("Tidak ada file yang diunggah.")
        return "Error: Tidak ada file yang diunggah.", None

    documents = []
    doc_names = []
    try:
        for file_obj in file_list:
            file_name = os.path.basename(file_obj.name)
            doc_names.append(file_name)
            logging.debug(f"Memuat file: {file_name}")
            # Prefer DoclingReader for .xlsx files; fall back to the plain
            # reader if the extractor cannot be constructed.
            try:
                loader = SimpleDirectoryReader(
                    input_files=[file_obj.name],
                    file_extractor={".xlsx": DoclingReader()}
                )
            except Exception:
                loader = SimpleDirectoryReader(input_files=[file_obj.name])
            docs = loader.load_data()
            for doc in docs:
                # Record which uploaded file each document came from so the
                # chat can cite its sources.
                doc.metadata["source"] = file_name
                documents.append(doc)

        if not documents:
            logging.error("Tidak ditemukan dokumen yang valid.")
            return "Tidak ditemukan dokumen yang valid.", None

        llm = load_cerebras_llm()
        create_embedding()
        node_parser = MarkdownNodeParser()

        # Custom prompt that restricts answers to the document context only.
        custom_prompt = """
You are a helpful assistant that can only answer questions based solely on the provided document context. If the answer is not contained within the document context, respond with "I don't have enough information about that aspect of the document."

Context: {context_str}
Query: {query_str}
Answer:"""
        qa_prompt_tmpl = PromptTemplate(custom_prompt)

        index = VectorStoreIndex.from_documents(
            documents, transformations=[node_parser], show_progress=True
        )
        Settings.llm = llm
        query_engine = index.as_query_engine(streaming=True)
        query_engine.update_prompts(
            {"response_synthesizer:text_qa_template": qa_prompt_tmpl}
        )

        file_key = f"doc-{uuid.uuid4()}"
        global_file_cache[file_key] = query_engine
        logging.info(f"Berhasil memuat {len(documents)} dokumen: {', '.join(doc_names)} dengan file_key: {file_key}")
        return f"Berhasil memuat {len(documents)} dokumen: {', '.join(doc_names)}.", file_key
    except Exception as e:
        logging.error(f"Error loading documents: {e}")
        return f"Error loading documents: {str(e)}", None


# --- Document chat ---
async def document_chat(file_key: str, prompt: str, audio_file=None,
                        translate_audio: bool = False, history=None):
    """Answer *prompt* against the query engine cached under *file_key*.

    Args:
        file_key: key into ``global_file_cache`` returned by load_documents.
        prompt: the user's text question (may be empty if audio is given).
        audio_file: optional path to a recorded audio file; its
            transcription is appended to the prompt.
        translate_audio: when True, translate the audio to English instead
            of transcribing it.
        history: existing chat history; a new (prompt, answer) pair is
            appended (the input list itself is not mutated).

    Returns:
        list: ``history`` plus the new (prompt, answer) tuple.
    """
    # FIX: avoid a shared mutable default argument (was ``history=[]``).
    history = history if history is not None else []
    logging.info(f"Memproses dokumen chat untuk file_key: {file_key} dengan prompt: {prompt}")
    if file_key not in global_file_cache:
        logging.error("File key dokumen tidak ditemukan pada cache global.")
        return history + [("Error", "Silakan muat dokumen terlebih dahulu.")]
    query_engine = global_file_cache[file_key]
    try:
        if audio_file:
            logging.info("Audio file diterima, memulai transkripsi/terjemahan")
            transcription = transcribe_or_translate_audio(audio_file, translate=translate_audio)
            logging.debug(f"Hasil transkripsi: {transcription}")
            prompt = f"{prompt} {transcription}".strip()
        # Run the blocking query off the event loop.
        response = await asyncio.to_thread(query_engine.query, prompt)
        answer = str(response)
        # Append source-document info when available.
        # FIX: llama_index responses expose their retrieved context via
        # ``source_nodes``; the original probed a non-existent
        # ``get_documents`` attribute, so sources were never appended.
        source_nodes = getattr(response, "source_nodes", None)
        if source_nodes:
            sources = "\n\n".join(
                f"Source: {node.metadata.get('source', 'No source')}"
                for node in source_nodes
            )
            answer = answer + "\n\n" + sources
        return history + [(prompt, answer)]
    except Exception as e:
        logging.error(f"Error processing document_chat: {e}")
        return history + [(prompt, f"Error processing query: {str(e)}")]


# --- Speech-to-Text functions ---
def transcribe_or_translate_audio(audio_file, translate=False):
    """Transcribe (or translate to English) an audio file via Groq Whisper.

    Args:
        audio_file: path to the recorded audio file.
        translate: when True use the translation endpoint (output in
            English); otherwise plain transcription.

    Returns:
        str: the transcribed/translated text, or an error message string
        on failure (best-effort — callers append it to the prompt).
    """
    logging.info(f"Memulai proses {'terjemahan' if translate else 'transkripsi'} audio")
    try:
        with open(audio_file, "rb") as file:
            file_content = file.read()
        logging.debug("File audio berhasil dibaca")
        if translate:
            result = groq_client.audio.translations.create(
                file=(audio_file, file_content),
                model="whisper-large-v3",
                response_format="json",
                temperature=0.0
            )
            logging.debug("Terjemahan audio berhasil diproses")
        else:
            result = groq_client.audio.transcriptions.create(
                file=(audio_file, file_content),
                model="whisper-large-v3",
                response_format="json",
                temperature=0.0
            )
            logging.debug("Transkripsi audio berhasil diproses")
        return result.text
    except Exception as e:
        logging.error(f"Error processing audio: {e}")
        return f"Error processing audio: {str(e)}"


# --- Text-to-Speech (TTS) ---
def convert_text_to_speech(text, voice):
    """Convert *text* to a WAV file using Groq's PlayAI TTS.

    Args:
        text: text to speak; empty/None skips TTS.
        voice: PlayAI voice name (e.g. "Fritz-PlayAI").

    Returns:
        str | None: path to the generated .wav file, or None when there is
        no text or the conversion failed.
    """
    logging.info("Memulai konversi teks ke suara dengan TTS")
    model = "playai-tts"
    response_format = "wav"
    try:
        if not text:
            logging.warning("Input teks kosong, TTS tidak dijalankan.")
            return None
        logging.debug(f"Parameter TTS: model={model}, voice={voice}, panjang teks={len(text)} karakter")
        response = groq_client.audio.speech.create(
            model=model,
            voice=voice,
            input=text,
            response_format=response_format
        )
        logging.debug("Response TTS diterima dari Groq API")
        # Write to a named temp file (kept on disk) so Gradio can serve it
        # by filepath.
        temp_wav = tempfile.NamedTemporaryFile(delete=False, suffix=".wav")
        temp_wav_path = temp_wav.name
        temp_wav.close()
        response.write_to_file(temp_wav_path)
        if os.path.exists(temp_wav_path):
            logging.info(f"Audio TTS berhasil disimpan di {temp_wav_path}")
        else:
            logging.error("File audio TTS tidak ditemukan setelah disimpan.")
        return temp_wav_path
    except Exception as e:
        logging.error(f"Error converting text to speech: {e}")
        # FIX: return None instead of an error string — the caller feeds
        # this value straight into a gr.Audio(type="filepath") output, and
        # a non-path string there is invalid.
        return None


# --- Callback wrapper: document chat with optional TTS ---
def doc_chat_with_tts(prompt, history, file_key, audio_file, translate,
                      voice, enable_tts):
    """Run one chat turn and optionally synthesise the answer as audio.

    Args:
        prompt: user text input.
        history: current gr.Chatbot history (messages-format dicts and/or
            legacy (user, assistant) tuples).
        file_key: cache key of the loaded documents.
        audio_file: optional recorded audio path.
        translate: translate audio to English before querying.
        voice: TTS voice name.
        enable_tts: when True, speak the last assistant message.

    Returns:
        tuple[list[dict], str | None]: (messages-format history, audio path
        or None).
    """
    logging.info("Memproses document chat dengan TTS (opsional)")
    # FIX: when the incoming history is in Gradio "messages" (dict) format
    # — which it always is here, since the Chatbot uses type="messages" —
    # the original reset it to [], wiping the conversation on every turn.
    # Keep the existing entries; document_chat only appends to them.
    tuple_history = list(history) if history else []
    try:
        updated_history = asyncio.run(
            document_chat(file_key, prompt, audio_file, translate, tuple_history)
        )
        logging.debug("Updated history dari document_chat diterima")
    except Exception as e:
        logging.error(f"Error dalam document_chat: {e}")
        updated_history = tuple_history

    # Normalise everything into the "messages" (dict) format for gr.Chatbot.
    new_messages = []
    for entry in updated_history:
        if isinstance(entry, dict):
            new_messages.append(entry)
        elif isinstance(entry, (list, tuple)):
            if len(entry) == 2:
                user_msg, assistant_msg = entry
                new_messages.append({"role": "user", "content": user_msg})
                new_messages.append({"role": "assistant", "content": assistant_msg})
            else:
                logging.warning("Entry in history does not have exactly 2 elements, skipping.")
        else:
            logging.warning("Unexpected entry type in history, skipping.")

    # If TTS is enabled, speak the most recent assistant message.
    audio_path = None
    if enable_tts:
        last_assistant = next(
            (msg.get("content", "") for msg in reversed(new_messages)
             if msg.get("role") == "assistant"),
            "",
        )
        if not last_assistant or not last_assistant.strip():
            logging.warning("Tidak ada pesan asisten yang valid untuk TTS.")
        else:
            logging.info("Memulai konversi jawaban akhir ke audio dengan TTS")
            audio_path = convert_text_to_speech(last_assistant, voice)
            logging.info(f"Audio output dihasilkan: {audio_path}")
    else:
        logging.info("TTS tidak diaktifkan, sehingga tidak menghasilkan audio.")
    return new_messages, audio_path


# --- Gradio interface ---
with gr.Blocks(theme=themes.Base(primary_hue="teal", secondary_hue="teal",
                                 neutral_hue="slate")) as demo:
    # Chat interface
    doc_chat_history = gr.Chatbot(label="Riwayat Chat", type="messages")
    doc_audio_output = gr.Audio(label="Audio Output", type="filepath")
    doc_voice = gr.Dropdown(
        label="Pilih Suara untuk TTS",
        choices=[
            "Arista-PlayAI", "Atlas-PlayAI", "Basil-PlayAI", "Briggs-PlayAI",
            "Calum-PlayAI", "Celeste-PlayAI", "Cheyenne-PlayAI", "Chip-PlayAI",
            "Cillian-PlayAI", "Deedee-PlayAI", "Fritz-PlayAI", "Gail-PlayAI",
            "Indigo-PlayAI", "Mamaw-PlayAI", "Mason-PlayAI", "Mikail-PlayAI",
            "Mitch-PlayAI", "Quinn-PlayAI", "Thunder-PlayAI",
        ],
        value="Fritz-PlayAI",
    )
    # Checkbox enabling TTS output
    enable_tts = gr.Checkbox(label="Aktifkan TTS", value=True)

    # Document upload controls
    with gr.Row():
        doc_file_input = gr.File(label="Unggah Dokumen", file_count="multiple")
        load_doc_btn = gr.Button("Muat Dokumen")
    doc_load_status = gr.Textbox(label="Status Dokumen")
    doc_file_key = gr.State()
    with gr.Row():
        doc_chat_input = gr.Textbox(label="Masukkan Pertanyaan")
        doc_audio_input = gr.Microphone(label="Record", type="filepath")
        doc_translate = gr.Checkbox(label="Terjemahkan Audio ke Bahasa Inggris", value=False)

    # Callback: load the uploaded documents and remember the cache key.
    def load_doc(files):
        logging.info("Callback load_doc dipanggil")
        status, file_key = load_documents(files)
        return status, file_key

    load_doc_btn.click(load_doc, inputs=[doc_file_input],
                       outputs=[doc_load_status, doc_file_key])

    # Callback: one chat turn (text and/or audio) with optional TTS.
    def process_doc_chat(prompt, history, file_key, audio_file, translate,
                         voice, enable_tts):
        logging.info("Callback process_doc_chat dipanggil")
        return doc_chat_with_tts(prompt, history, file_key, audio_file,
                                 translate, voice, enable_tts)

    doc_chat_input.submit(
        process_doc_chat,
        inputs=[doc_chat_input, doc_chat_history, doc_file_key,
                doc_audio_input, doc_translate, doc_voice, enable_tts],
        outputs=[doc_chat_history, doc_audio_output],
    )
    doc_audio_input.change(
        process_doc_chat,
        inputs=[doc_chat_input, doc_chat_history, doc_file_key,
                doc_audio_input, doc_translate, doc_voice, enable_tts],
        outputs=[doc_chat_history, doc_audio_output],
    )

    # Button to empty the global query-engine cache.
    clear_btn = gr.Button("Clear All")

    def clear_all():
        global global_file_cache
        global_file_cache = {}
        return "Cache dikosongkan."

    clear_status = gr.Textbox(label="Clear Status")
    clear_btn.click(clear_all, outputs=[clear_status])

demo.queue()
demo.launch(debug=True)