import os
import tempfile
import uuid
import base64
import io
import json
import re
from datetime import datetime, timedelta

# Third-party imports
import gradio as gr
import groq
import numpy as np
import pandas as pd
import openpyxl
import requests
import fitz  # PyMuPDF
from PIL import Image
from dotenv import load_dotenv
from transformers import AutoProcessor, AutoModelForVision2Seq
import torch
import speech_recognition as sr  # required by speech_to_text (was missing)
import pyttsx3  # required by text_to_speech (was missing)

# LangChain imports
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.text_splitter import RecursiveCharacterTextSplitter

# Load environment variables
load_dotenv()
client = groq.Client(api_key=os.getenv("GROQ_TECH_API_KEY"))
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
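# A minimal .env for local runs (variable names taken from the os.getenv calls
# in this script; GITHUB_TOKEN is optional and only raises the GitHub rate limit):
#
#   GROQ_TECH_API_KEY=gsk_...
#   GITHUB_TOKEN=ghp_...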
# Directory to store FAISS indexes
FAISS_INDEX_DIR = "faiss_indexes_tech"
if not os.path.exists(FAISS_INDEX_DIR):
    os.makedirs(FAISS_INDEX_DIR)

# Dictionary to store user-specific vectorstores
user_vectorstores = {}
# Load SmolDocling model for image analysis
def load_docling_model():
    try:
        processor = AutoProcessor.from_pretrained("ds4sd/SmolDocling-256M-preview")
        model = AutoModelForVision2Seq.from_pretrained("ds4sd/SmolDocling-256M-preview")
        return processor, model
    except Exception as e:
        print(f"Error loading SmolDocling model: {e}")
        return None, None

# Initialize SmolDocling model
docling_processor, docling_model = load_docling_model()
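# Note: the model weights (~256M parameters, per the checkpoint name) are
# downloaded from the Hugging Face Hub on first run and loaded once at import
# time; a failed load degrades gracefully because analyze_image checks for None.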
# Custom CSS for Tech theme
custom_css = """
:root {
    --primary-color: #4285F4; /* Google Blue */
    --secondary-color: #34A853; /* Google Green */
    --light-background: #F8F9FA;
    --dark-text: #202124;
    --white: #FFFFFF;
    --border-color: #DADCE0;
    --code-bg: #F1F3F4;
    --code-text: #37474F;
    --error-color: #EA4335; /* Google Red */
    --warning-color: #FBBC04; /* Google Yellow */
}
body { background-color: var(--light-background); font-family: 'Google Sans', 'Roboto', sans-serif; }
.container { max-width: 1200px !important; margin: 0 auto !important; padding: 10px; }
.header { background-color: var(--white); border-bottom: 1px solid var(--border-color); padding: 15px 0; margin-bottom: 20px; border-radius: 12px 12px 0 0; box-shadow: 0 1px 2px rgba(0,0,0,0.05); }
.header-title { color: var(--primary-color); font-size: 1.8rem; font-weight: 700; text-align: center; }
.header-subtitle { color: var(--dark-text); font-size: 1rem; text-align: center; margin-top: 5px; }
.chat-container { border-radius: 8px !important; box-shadow: 0 1px 3px rgba(0,0,0,0.1) !important; background-color: var(--white) !important; border: 1px solid var(--border-color) !important; min-height: 500px; }
.message-user { background-color: var(--primary-color) !important; color: var(--white) !important; border-radius: 18px 18px 4px 18px !important; padding: 12px 16px !important; margin-left: auto !important; max-width: 80% !important; }
.message-bot { background-color: #F1F3F4 !important; color: var(--dark-text) !important; border-radius: 18px 18px 18px 4px !important; padding: 12px 16px !important; margin-right: auto !important; max-width: 80% !important; }
.input-area { background-color: var(--white) !important; border-top: 1px solid var(--border-color) !important; padding: 12px !important; border-radius: 0 0 12px 12px !important; }
.input-box { border: 1px solid var(--border-color) !important; border-radius: 24px !important; padding: 12px 16px !important; box-shadow: 0 1px 2px rgba(0,0,0,0.05) !important; }
.send-btn { background-color: var(--primary-color) !important; border-radius: 24px !important; color: var(--white) !important; padding: 10px 20px !important; font-weight: 500 !important; }
.clear-btn { background-color: #F1F3F4 !important; border: 1px solid var(--border-color) !important; border-radius: 24px !important; color: var(--dark-text) !important; padding: 8px 16px !important; font-weight: 500 !important; }
.pdf-viewer-container { border-radius: 8px !important; box-shadow: 0 1px 3px rgba(0,0,0,0.1) !important; background-color: var(--white) !important; border: 1px solid var(--border-color) !important; padding: 20px; }
.pdf-viewer-image { max-width: 100%; height: auto; border: 1px solid var(--border-color); border-radius: 8px; box-shadow: 0 1px 2px rgba(0,0,0,0.05); }
.stats-box { background-color: #E8F0FE; padding: 10px; border-radius: 8px; margin-top: 10px; }
.tool-container { background-color: var(--white); border-radius: 8px; box-shadow: 0 1px 3px rgba(0,0,0,0.1); padding: 15px; margin-bottom: 20px; border: 1px solid var(--border-color); }
.code-block { background-color: var(--code-bg); color: var(--code-text); padding: 12px; border-radius: 8px; font-family: 'Roboto Mono', monospace; overflow-x: auto; margin: 10px 0; border-left: 3px solid var(--primary-color); }
.repo-card { border: 1px solid var(--border-color); padding: 15px; margin: 10px 0; border-radius: 8px; background-color: var(--white); }
.repo-name { color: var(--primary-color); font-weight: bold; font-size: 1.1rem; margin-bottom: 5px; }
.repo-description { color: var(--dark-text); font-size: 0.9rem; margin-bottom: 10px; }
.repo-stats { display: flex; gap: 15px; color: #5F6368; font-size: 0.85rem; }
.repo-stat { display: flex; align-items: center; gap: 5px; }
.qa-card { border-left: 3px solid var(--secondary-color); padding: 10px 15px; margin: 15px 0; background-color: #F8F9FA; border-radius: 0 8px 8px 0; }
.qa-title { font-weight: bold; color: var(--dark-text); margin-bottom: 5px; }
.qa-body { color: var(--dark-text); font-size: 0.95rem; margin-bottom: 10px; }
.qa-meta { display: flex; justify-content: space-between; color: #5F6368; font-size: 0.85rem; }
.tag { background-color: #E8F0FE; color: var(--primary-color); padding: 4px 8px; border-radius: 4px; font-size: 0.8rem; margin-right: 5px; display: inline-block; }
.toggle-container { display: flex; align-items: center; margin-bottom: 15px; }
.toggle-label { margin-right: 10px; font-weight: 500; }
.search-toggle { margin-left: 5px; }
.voice-btn { background-color: var(--primary-color) !important; border-radius: 50% !important; width: 44px !important; height: 44px !important; display: flex !important; align-items: center !important; justify-content: center !important; color: var(--white) !important; box-shadow: 0 2px 5px rgba(0,0,0,0.2) !important; }
.speak-btn { background-color: var(--secondary-color) !important; border-radius: 24px !important; color: var(--white) !important; padding: 8px 16px !important; font-weight: 500 !important; margin-left: 10px !important; }
.audio-controls { display: flex; align-items: center; margin-top: 10px; }
/* Audio Visualization Elements */
.audio-visualization {
    display: flex;
    align-items: center;
    justify-content: center;
    gap: 4px;
    height: 40px;
    padding: 10px;
    background-color: rgba(0,0,0,0.05);
    border-radius: 12px;
    margin: 10px 0;
}
.audio-bar {
    width: 3px;
    background-color: var(--primary-color); /* --accent-color was never defined */
    border-radius: 2px;
    height: 5px;
    transition: height 0.1s ease;
}
.audio-status {
    font-size: 0.85rem;
    color: var(--secondary-color);
    text-align: center;
    margin-top: 5px;
    font-style: italic;
}
.recording-indicator {
    width: 12px;
    height: 12px;
    border-radius: 50%;
    background-color: #ff4b4b;
    margin-right: 8px;
    animation: blink 1s infinite;
}
.playing-indicator {
    width: 12px;
    height: 12px;
    border-radius: 50%;
    background-color: #4bff4b;
    margin-right: 8px;
    animation: pulse 1s infinite;
}
@keyframes blink {
    0% { opacity: 1; }
    50% { opacity: 0.4; }
    100% { opacity: 1; }
}
@keyframes pulse {
    0% { transform: scale(1); }
    50% { transform: scale(1.2); }
    100% { transform: scale(1); }
}
.file-upload-enhancement .file-preview {
    max-height: 200px;
    overflow: auto;
    border: 1px solid var(--border-color);
    border-radius: 8px;
    padding: 10px;
    margin-top: 10px;
    background-color: rgba(0,0,0,0.02);
}
.excel-preview-table {
    width: 100%;
    border-collapse: collapse;
    font-size: 0.85rem;
}
.excel-preview-table th, .excel-preview-table td {
    border: 1px solid #ddd;
    padding: 4px 8px;
    text-align: left;
}
.excel-preview-table th {
    background-color: var(--secondary-color);
    color: white;
}
.excel-preview-table tr:nth-child(even) {
    background-color: rgba(0,0,0,0.03);
}
"""
# Function to process PDF files
def process_pdf(pdf_file):
    if pdf_file is None:
        return None, "No file uploaded", {"page_images": [], "total_pages": 0, "total_words": 0}
    try:
        session_id = str(uuid.uuid4())
        with tempfile.NamedTemporaryFile(suffix=".pdf", delete=False) as temp_file:
            temp_file.write(pdf_file)
            pdf_path = temp_file.name
        doc = fitz.open(pdf_path)
        texts = [page.get_text() for page in doc]
        page_images = []
        for page in doc:
            pix = page.get_pixmap()
            img_bytes = pix.tobytes("png")
            img_base64 = base64.b64encode(img_bytes).decode("utf-8")
            page_images.append(img_base64)
        total_pages = len(doc)
        total_words = sum(len(text.split()) for text in texts)
        doc.close()
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
        chunks = text_splitter.create_documents(texts)
        if not chunks:  # e.g. a scanned PDF with no extractable text; FAISS.from_documents would raise
            os.unlink(pdf_path)
            return None, "No extractable text found in this PDF", {"page_images": page_images, "total_pages": total_pages, "total_words": 0}
        vectorstore = FAISS.from_documents(chunks, embeddings)
        index_path = os.path.join(FAISS_INDEX_DIR, session_id)
        vectorstore.save_local(index_path)
        user_vectorstores[session_id] = vectorstore
        os.unlink(pdf_path)
        pdf_state = {"page_images": page_images, "total_pages": total_pages, "total_words": total_words}
        return session_id, f"✅ Successfully processed {len(chunks)} text chunks from your PDF", pdf_state
    except Exception as e:
        if "pdf_path" in locals() and os.path.exists(pdf_path):
            os.unlink(pdf_path)
        return None, f"Error processing PDF: {str(e)}", {"page_images": [], "total_pages": 0, "total_words": 0}
# Function to process Excel files
def process_excel(excel_file):
    if excel_file is None:
        return None, "No file uploaded", {"data_preview": "", "total_sheets": 0, "total_rows": 0}
    try:
        session_id = str(uuid.uuid4())
        with tempfile.NamedTemporaryFile(suffix=".xlsx", delete=False) as temp_file:
            temp_file.write(excel_file)
            excel_path = temp_file.name
        # Read Excel file with pandas
        excel_data = pd.ExcelFile(excel_path)
        sheet_names = excel_data.sheet_names
        all_texts = []
        total_rows = 0
        # Process each sheet
        for sheet in sheet_names:
            df = pd.read_excel(excel_path, sheet_name=sheet)
            total_rows += len(df)
            # Convert dataframe to text for vectorization
            sheet_text = f"Sheet: {sheet}\n"
            sheet_text += df.to_string(index=False)
            all_texts.append(sheet_text)
        # Generate HTML preview of first sheet
        first_df = pd.read_excel(excel_path, sheet_name=0)
        preview_rows = min(10, len(first_df))
        data_preview = first_df.head(preview_rows).to_html(classes="excel-preview-table", index=False)
        # Process for vectorstore
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
        chunks = text_splitter.create_documents(all_texts)
        vectorstore = FAISS.from_documents(chunks, embeddings)
        index_path = os.path.join(FAISS_INDEX_DIR, session_id)
        vectorstore.save_local(index_path)
        user_vectorstores[session_id] = vectorstore
        os.unlink(excel_path)
        excel_state = {"data_preview": data_preview, "total_sheets": len(sheet_names), "total_rows": total_rows}
        return session_id, f"✅ Successfully processed {len(chunks)} text chunks from Excel file", excel_state
    except Exception as e:
        if "excel_path" in locals() and os.path.exists(excel_path):
            os.unlink(excel_path)
        return None, f"Error processing Excel file: {str(e)}", {"data_preview": "", "total_sheets": 0, "total_rows": 0}
# Function to analyze image using SmolDocling
def analyze_image(image_file):
    if image_file is None:
        return "No image uploaded. Please upload an image to analyze."
    if docling_processor is None or docling_model is None:
        return "SmolDocling model not loaded. Please check your installation."
    try:
        # Process the image - image_file is a filepath string from Gradio
        image = Image.open(image_file)
        # Use the SmolDocling model (greedy decoding, so no sampling temperature needed)
        inputs = docling_processor(images=image, return_tensors="pt")
        with torch.no_grad():
            outputs = docling_model.generate(
                **inputs,
                max_new_tokens=512,
                do_sample=False
            )
        # Decode the output
        result = docling_processor.batch_decode(outputs, skip_special_tokens=True)[0]
        # Format the result for display with technical emphasis
        analysis = f"## Technical Document Analysis Results\n\n{result}\n\n"
        analysis += "### Technical Insights\n\n"
        analysis += "* The analysis provides technical information extracted from the document image.\n"
        analysis += "* Consider this information as a starting point for further technical investigation.\n"
        analysis += "* For code snippets or technical specifications, verify accuracy before implementation.\n"
        return analysis
    except Exception as e:
        return f"Error analyzing image: {str(e)}"
# Function to handle different file types
def process_file(file_data, file_type):
    if file_data is None:
        return None, "No file uploaded", None
    if file_type == "pdf":
        return process_pdf(file_data)
    elif file_type == "excel":
        return process_excel(file_data)
    elif file_type == "image":
        # For image files, we'll just use them directly for analysis
        # But we'll return a session ID to maintain consistency
        session_id = str(uuid.uuid4())
        return session_id, "✅ Image file ready for analysis", None
    else:
        return None, "Unsupported file type", None
# Function for speech-to-text conversion; the return triple matches the
# (audio_status_display, audio_vis, msg) outputs wired up below
def speech_to_text(current_status):
    try:
        r = sr.Recognizer()
        with sr.Microphone() as source:
            r.adjust_for_ambient_noise(source)
            audio = r.listen(source)
        text = r.recognize_google(audio)
        return "Transcription complete", gr.update(visible=False), text
    except sr.UnknownValueError:
        return "Could not understand audio. Please try again.", gr.update(visible=False), ""
    except sr.RequestError as e:
        return f"Error with speech recognition service: {e}", gr.update(visible=False), ""
    except Exception as e:
        return f"Error converting speech to text: {str(e)}", gr.update(visible=False), ""
# Function for text-to-speech conversion; the return triple matches the
# (audio_status_display, audio_vis, audio_player) outputs wired up below
def text_to_speech(status, history):
    if not history:
        return "Nothing to speak yet", gr.update(visible=False), None
    try:
        # Get the last bot response
        last_response = history[-1][1]
        # Convert text to speech
        engine = pyttsx3.init()
        engine.setProperty('rate', 150)
        engine.setProperty('volume', 0.9)
        engine.save_to_file(last_response, "temp_output.mp3")
        engine.runAndWait()
        return "Playing response", gr.update(visible=True), "temp_output.mp3"
    except Exception as e:
        print(f"Error in text-to-speech: {e}")
        return f"Error in text-to-speech: {e}", gr.update(visible=False), None
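# Caveat: pyttsx3 writes through the platform TTS driver (SAPI5,
# NSSpeechSynthesizer, or espeak), so despite the .mp3 name the file may
# actually contain WAV/AIFF data on some systems; if playback fails, saving
# to "temp_output.wav" is the safer choice.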
# Function to generate chatbot responses with Tech theme
def generate_response(message, session_id, model_name, history, web_search_enabled=True):
    if not message:
        return history
    try:
        context = ""
        if session_id and session_id in user_vectorstores:
            vectorstore = user_vectorstores[session_id]
            docs = vectorstore.similarity_search(message, k=3)
            if docs:
                context = "\n\nRelevant information from uploaded document:\n" + "\n".join(f"- {doc.page_content}" for doc in docs)
        # Check if it's a GitHub repo search and web search is enabled
        if web_search_enabled and re.match(r'^/github\s+.+', message, re.IGNORECASE):
            query = re.sub(r'^/github\s+', '', message, flags=re.IGNORECASE)
            repo_results = search_github_repos(query)
            if repo_results:
                response = "**GitHub Repository Search Results:**\n\n"
                for repo in repo_results[:3]:  # Limit to top 3 results
                    response += f"**[{repo['name']}]({repo['html_url']})**\n"
                    if repo['description']:
                        response += f"{repo['description']}\n"
                    response += f"⭐ {repo['stargazers_count']} | 🍴 {repo['forks_count']} | Language: {repo['language'] or 'Not specified'}\n"
                    response += f"Updated: {repo['updated_at'][:10]}\n\n"
                history.append((message, response))
                return history
            else:
                history.append((message, "No GitHub repositories found for your query."))
                return history
        # Check if it's a Stack Overflow search and web search is enabled
        if web_search_enabled and re.match(r'^/stack\s+.+', message, re.IGNORECASE):
            query = re.sub(r'^/stack\s+', '', message, flags=re.IGNORECASE)
            qa_results = search_stackoverflow(query)
            if qa_results:
                response = "**Stack Overflow Search Results:**\n\n"
                for qa in qa_results[:3]:  # Limit to top 3 results
                    response += f"**[{qa['title']}]({qa['link']})**\n"
                    response += f"Score: {qa['score']} | Answers: {qa['answer_count']}\n"
                    if 'tags' in qa and qa['tags']:
                        response += f"Tags: {', '.join(qa['tags'][:5])}\n"
                    response += f"Asked: {qa['creation_date']}\n\n"
                history.append((message, response))
                return history
            else:
                history.append((message, "No Stack Overflow questions found for your query."))
                return history
        # Check if it's a code explanation request
        code_match = re.search(r'/explain\s+```(?:.+?)?\n(.+?)```', message, re.DOTALL)
        if code_match:
            code = code_match.group(1).strip()
            explanation = explain_code(code)
            history.append((message, explanation))
            return history
        system_prompt = "You are a technical assistant specializing in software development, programming, and IT topics."
        system_prompt += " Format code snippets with proper markdown code blocks with language specified."
        system_prompt += " For technical explanations, be precise and include examples where helpful."
        if context:
            system_prompt += " Use the following context to answer the question if relevant: " + context
        completion = client.chat.completions.create(
            model=model_name,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": message}
            ],
            temperature=0.7,
            max_tokens=1024
        )
        response = completion.choices[0].message.content
        history.append((message, response))
        return history
    except Exception as e:
        history.append((message, f"Error generating response: {str(e)}"))
        return history
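# Chat commands recognized by the regexes above:
#   /github <query>          - search GitHub repositories
#   /stack <query>           - search Stack Overflow questions
#   /explain ```<code>```    - explain a fenced code block
# Anything else goes to the selected Groq model, with retrieved document
# context prepended to the system prompt when a file has been indexed.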
# Functions to update PDF viewer
def update_pdf_viewer(pdf_state):
    if not pdf_state["total_pages"]:
        return gr.update(maximum=1, value=1), None, "No PDF uploaded yet"
    try:
        img_data = base64.b64decode(pdf_state["page_images"][0])
        img = Image.open(io.BytesIO(img_data))
        # Raise the slider's maximum so every page is reachable (returning a
        # bare int would only set the slider's value, leaving maximum at 1)
        return (
            gr.update(maximum=pdf_state["total_pages"], value=1),
            img,
            f"**Total Pages:** {pdf_state['total_pages']}\n**Total Words:** {pdf_state['total_words']}",
        )
    except Exception as e:
        print(f"Error decoding image: {e}")
        return gr.update(maximum=1, value=1), None, "Error displaying PDF"

def update_image(page_num, pdf_state):
    if not pdf_state["total_pages"] or page_num < 1 or page_num > pdf_state["total_pages"]:
        return None
    try:
        img_data = base64.b64decode(pdf_state["page_images"][page_num - 1])
        img = Image.open(io.BytesIO(img_data))
        return img
    except Exception as e:
        print(f"Error decoding image: {e}")
        return None
# GitHub API integration
def search_github_repos(query, sort="stars", order="desc", per_page=10):
    """Search for GitHub repositories"""
    try:
        github_token = os.getenv("GITHUB_TOKEN", "")
        headers = {}
        if github_token:
            headers["Authorization"] = f"token {github_token}"
        params = {
            "q": query,
            "sort": sort,
            "order": order,
            "per_page": per_page
        }
        response = requests.get(
            "https://api.github.com/search/repositories",
            headers=headers,
            params=params,
            timeout=10  # avoid hanging the UI on a slow network
        )
        if response.status_code != 200:
            print(f"GitHub API Error: {response.status_code} - {response.text}")
            return []
        data = response.json()
        return data.get("items", [])
    except Exception as e:
        print(f"Error in GitHub search: {e}")
        return []
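# Unauthenticated calls to the GitHub search API are heavily rate-limited
# (on the order of ten requests per minute at the time of writing), which is
# why the optional GITHUB_TOKEN above is worth setting for real use.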
# Stack Overflow API integration
def search_stackoverflow(query, sort="votes", site="stackoverflow", pagesize=10, tagged=None):
    """Search for questions on Stack Overflow"""
    try:
        params = {
            "order": "desc",
            "sort": sort,
            "site": site,
            "pagesize": pagesize,
            "intitle": query
        }
        if tagged:
            params["tagged"] = tagged  # the API expects tags here, not embedded in the title text
        response = requests.get(
            "https://api.stackexchange.com/2.3/search/advanced",
            params=params,
            timeout=10
        )
        if response.status_code != 200:
            print(f"Stack Exchange API Error: {response.status_code} - {response.text}")
            return []
        data = response.json()
        # Process results to convert Unix timestamps to readable dates
        for item in data.get("items", []):
            if "creation_date" in item:
                item["creation_date"] = datetime.fromtimestamp(item["creation_date"]).strftime("%Y-%m-%d")
        return data.get("items", [])
    except Exception as e:
        print(f"Error in Stack Overflow search: {e}")
        return []
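# The Stack Exchange API works without a key but grants only a small daily
# quota per IP (a few hundred requests); registering an app key raises it
# substantially if this search sees real traffic.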
def get_stackoverflow_answers(question_id, site="stackoverflow"):
    """Get answers for a specific question on Stack Overflow"""
    try:
        params = {
            "order": "desc",
            "sort": "votes",
            "site": site,
            "filter": "withbody"  # Include the answer body in the response
        }
        response = requests.get(
            f"https://api.stackexchange.com/2.3/questions/{question_id}/answers",
            params=params,
            timeout=10
        )
        if response.status_code != 200:
            print(f"Stack Exchange API Error: {response.status_code} - {response.text}")
            return []
        data = response.json()
        # Process results
        for item in data.get("items", []):
            if "creation_date" in item:
                item["creation_date"] = datetime.fromtimestamp(item["creation_date"]).strftime("%Y-%m-%d")
        return data.get("items", [])
    except Exception as e:
        print(f"Error getting Stack Overflow answers: {e}")
        return []
def explain_code(code):
    """Explain code using LLM"""
    try:
        system_prompt = "You are an expert programmer and code reviewer. Your task is to explain the provided code in a clear, concise manner. Include:"
        system_prompt += "\n1. What the code does (high-level overview)"
        system_prompt += "\n2. Key functions/components and their purposes"
        system_prompt += "\n3. Potential issues or optimization opportunities"
        system_prompt += "\n4. Any best practices that are followed or violated"
        completion = client.chat.completions.create(
            model="llama3-70b-8192",  # Using more capable model for code explanation
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": f"Explain this code:\n```\n{code}\n```"}
            ],
            temperature=0.3,
            max_tokens=1024
        )
        explanation = completion.choices[0].message.content
        return f"**Code Explanation:**\n\n{explanation}"
    except Exception as e:
        return f"Error explaining code: {str(e)}"
def perform_repo_search(query, language, sort_by, min_stars):
    """Perform GitHub repository search with UI parameters"""
    try:
        if not query:
            return "Please enter a search query"
        # Build the search query with filters
        search_query = query
        if language and language != "any":
            search_query += f" language:{language}"
        if min_stars and min_stars != "0":
            search_query += f" stars:>={min_stars}"
        # Map sort_by to GitHub API parameters
        sort_param = "stars"
        if sort_by == "updated":
            sort_param = "updated"
        elif sort_by == "forks":
            sort_param = "forks"
        results = search_github_repos(search_query, sort=sort_param)
        if not results:
            return "No repositories found. Try different search terms."
        # Format results as markdown
        markdown = "## GitHub Repository Search Results\n\n"
        for i, repo in enumerate(results, 1):
            markdown += f"### {i}. [{repo['full_name']}]({repo['html_url']})\n\n"
            if repo['description']:
                markdown += f"{repo['description']}\n\n"
            markdown += f"**Language:** {repo['language'] or 'Not specified'}\n"
            markdown += f"**Stars:** {repo['stargazers_count']} | **Forks:** {repo['forks_count']} | **Watchers:** {repo['watchers_count']}\n"
            markdown += f"**Created:** {repo['created_at'][:10]} | **Updated:** {repo['updated_at'][:10]}\n\n"
            if repo.get('topics'):
                markdown += f"**Topics:** {', '.join(repo['topics'])}\n\n"
            if repo.get('license') and repo['license'].get('name'):
                markdown += f"**License:** {repo['license']['name']}\n\n"
            markdown += f"[View Repository]({repo['html_url']}) | [Clone URL]({repo['clone_url']})\n\n"
            markdown += "---\n\n"
        return markdown
    except Exception as e:
        return f"Error searching for repositories: {str(e)}"
def perform_stack_search(query, tag, sort_by):
    """Perform Stack Overflow search with UI parameters"""
    try:
        if not query:
            return "Please enter a search query"
        # Pass the tag through the API's tagged parameter if specified
        tagged = tag if tag and tag != "any" else None
        # Map sort_by to Stack Exchange API parameters
        sort_param = "votes"
        if sort_by == "newest":
            sort_param = "creation"
        elif sort_by == "activity":
            sort_param = "activity"
        results = search_stackoverflow(query, sort=sort_param, tagged=tagged)
        if not results:
            return "No questions found. Try different search terms."
        # Format results as markdown
        markdown = "## Stack Overflow Search Results\n\n"
        for i, question in enumerate(results, 1):
            markdown += f"### {i}. [{question['title']}]({question['link']})\n\n"
            # Score and answer stats
            markdown += f"**Score:** {question['score']} | **Answers:** {question['answer_count']}"
            if question.get('is_answered'):
                markdown += " ✅ (Accepted answer available)"
            markdown += "\n\n"
            # Tags
            if question.get('tags'):
                markdown += "**Tags:** "
                for question_tag in question['tags']:
                    markdown += f"`{question_tag}` "
                markdown += "\n\n"
            # Asked info
            markdown += f"**Asked:** {question['creation_date']} | **Views:** {question.get('view_count', 'N/A')}\n\n"
            markdown += f"[View Question]({question['link']})\n\n"
            markdown += "---\n\n"
        return markdown
    except Exception as e:
        return f"Error searching Stack Overflow: {str(e)}"
# Gradio interface
with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
    current_session_id = gr.State(None)
    pdf_state = gr.State({"page_images": [], "total_pages": 0, "total_words": 0})
    excel_state = gr.State({"data_preview": "", "total_sheets": 0, "total_rows": 0})
    file_type = gr.State("none")
    audio_status = gr.State("Ready")
    gr.HTML("""
    <div class="header">
        <div class="header-title">Tech-Vision Enhanced</div>
        <div class="header-subtitle">Analyze technical documents, spreadsheets, and images with AI</div>
    </div>
    """)
    with gr.Row(elem_classes="container"):
        with gr.Column(scale=1, min_width=300):
            with gr.Tabs():
                with gr.TabItem("PDF"):
                    pdf_file = gr.File(label="Upload PDF Document", file_types=[".pdf"], type="binary")
                    pdf_upload_button = gr.Button("Process PDF", variant="primary")
                with gr.TabItem("Excel"):
                    excel_file = gr.File(label="Upload Excel File", file_types=[".xlsx", ".xls"], type="binary")
                    excel_upload_button = gr.Button("Process Excel", variant="primary")
                with gr.TabItem("Image"):
                    image_input = gr.File(
                        label="Upload Image",
                        file_types=["image"],
                        type="filepath"
                    )
                    analyze_btn = gr.Button("Analyze Image")
            file_status = gr.Markdown("No file uploaded yet")
            # Model selector
            model_dropdown = gr.Dropdown(
                choices=["llama3-70b-8192", "llama3-8b-8192", "mixtral-8x7b-32768", "gemma-7b-it"],
                value="llama3-70b-8192",
                label="Select Groq Model"
            )
        with gr.Column(scale=2, min_width=600):
            with gr.Tabs():
                with gr.TabItem("PDF Viewer"):
                    with gr.Column(elem_classes="pdf-viewer-container"):
                        page_slider = gr.Slider(minimum=1, maximum=1, step=1, label="Page Number", value=1)
                        pdf_image = gr.Image(label="PDF Page", type="pil", elem_classes="pdf-viewer-image")
                        pdf_stats = gr.Markdown("No PDF uploaded yet", elem_classes="stats-box")
                with gr.TabItem("Excel Viewer"):
                    excel_preview = gr.HTML(label="Excel Preview", elem_classes="file-preview")
                    excel_stats = gr.Markdown("No Excel file uploaded yet", elem_classes="stats-box")
                with gr.TabItem("Image Analysis"):
                    image_preview = gr.Image(label="Image Preview", type="pil")
                    image_analysis_results = gr.Markdown("Upload an image and click 'Analyze Image' to see analysis results")
    # Audio visualization elements
    with gr.Row(elem_classes="container"):
        with gr.Column():
            audio_vis = gr.HTML("""
            <div class="audio-visualization">
                <div class="audio-bar" style="height: 5px;"></div>
                <div class="audio-bar" style="height: 12px;"></div>
                <div class="audio-bar" style="height: 18px;"></div>
                <div class="audio-bar" style="height: 15px;"></div>
                <div class="audio-bar" style="height: 10px;"></div>
                <div class="audio-bar" style="height: 20px;"></div>
                <div class="audio-bar" style="height: 14px;"></div>
                <div class="audio-bar" style="height: 8px;"></div>
            </div>
            """, visible=False)
            audio_status_display = gr.Markdown("", elem_classes="audio-status")
    # Chat interface
    with gr.Row(elem_classes="container"):
        with gr.Column(scale=2, min_width=600):
            chatbot = gr.Chatbot(
                height=400,
                show_copy_button=True,
                elem_classes="chat-container",
                type="tuples"  # generate_response appends (user, bot) pairs, not messages dicts
            )
            with gr.Row():
                msg = gr.Textbox(
                    show_label=False,
                    placeholder="Ask about your document or click the microphone to speak...",
                    scale=5
                )
                voice_btn = gr.Button("🎤", elem_classes="voice-btn")
                send_btn = gr.Button("Send", scale=1)
            with gr.Row(elem_classes="audio-controls"):
                clear_btn = gr.Button("Clear Conversation")
                speak_btn = gr.Button("🔊 Speak Response", elem_classes="speak-btn")
            audio_player = gr.Audio(label="Response Audio", type="filepath", visible=False)
    # Event Handlers for PDF processing
    pdf_upload_button.click(
        lambda _: ("pdf", "Processing PDF..."),  # raw file bytes must not land in the Markdown status
        inputs=[pdf_file],
        outputs=[file_type, file_status]
    ).then(
        process_pdf,
        inputs=[pdf_file],
        outputs=[current_session_id, file_status, pdf_state]
    ).then(
        update_pdf_viewer,
        inputs=[pdf_state],
        outputs=[page_slider, pdf_image, pdf_stats]
    )
    # Event Handlers for Excel processing
    def update_excel_preview(state):
        if not state:
            return "", "No Excel file uploaded yet"
        preview = state.get("data_preview", "")
        sheets = state.get("total_sheets", 0)
        rows = state.get("total_rows", 0)
        stats = f"**Excel Statistics:**\nSheets: {sheets}\nTotal Rows: {rows}"
        return preview, stats

    excel_upload_button.click(
        lambda _: ("excel", "Processing Excel file..."),
        inputs=[excel_file],
        outputs=[file_type, file_status]
    ).then(
        process_excel,
        inputs=[excel_file],
        outputs=[current_session_id, file_status, excel_state]
    ).then(
        update_excel_preview,
        inputs=[excel_state],
        outputs=[excel_preview, excel_stats]
    )
    # Event Handlers for Image Analysis
    analyze_btn.click(
        lambda _: ("image", "Analyzing image..."),
        inputs=[image_input],
        outputs=[file_type, file_status]
    ).then(
        analyze_image,
        inputs=[image_input],
        outputs=[image_analysis_results]
    ).then(
        lambda x: Image.open(x) if x else None,
        inputs=[image_input],
        outputs=[image_preview]
    )
    # Chat message handling
    msg.submit(
        generate_response,
        inputs=[msg, current_session_id, model_dropdown, chatbot],
        outputs=[chatbot]
    ).then(lambda: "", None, [msg])
    send_btn.click(
        generate_response,
        inputs=[msg, current_session_id, model_dropdown, chatbot],
        outputs=[chatbot]
    ).then(lambda: "", None, [msg])
    # Speech-to-text with visual feedback
    voice_btn.click(
        speech_to_text,
        inputs=[audio_status],
        outputs=[audio_status_display, audio_vis, msg]
    )
    # Text-to-speech with visual feedback
    speak_btn.click(
        text_to_speech,
        inputs=[audio_status, chatbot],
        outputs=[audio_status_display, audio_vis, audio_player]
    ).then(
        lambda x: gr.update(visible=True) if x else gr.update(visible=False),
        inputs=[audio_player],
        outputs=[audio_player]
    )
    # Page navigation for PDF
    page_slider.change(
        update_image,
        inputs=[page_slider, pdf_state],
        outputs=[pdf_image]
    )
    # Clear conversation and reset UI
    clear_btn.click(
        lambda: (
            [], None, "No file uploaded yet",
            {"page_images": [], "total_pages": 0, "total_words": 0},
            {"data_preview": "", "total_sheets": 0, "total_rows": 0},
            "none", gr.update(maximum=1, value=1), None, "No PDF uploaded yet", "",
            "No Excel file uploaded yet", None,
            "Upload an image and click 'Analyze Image' to see analysis results", None,
            gr.update(visible=False), "Ready"
        ),
        None,
        [chatbot, current_session_id, file_status, pdf_state, excel_state,
         file_type, page_slider, pdf_image, pdf_stats, excel_preview,
         excel_stats, image_preview, image_analysis_results, audio_player,
         audio_vis, audio_status_display]
    )
    # Add footer with creator attribution
    gr.HTML("""
    <div style="text-align: center; margin-top: 20px; padding: 10px; color: #666; font-size: 0.8rem; border-top: 1px solid #eee;">
        Created by Calvin Allen Crawford
    </div>
    """)

# Launch the app
if __name__ == "__main__":
    demo.launch()
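# On Hugging Face Spaces the bare launch() above is all that's needed; for a
# local container, demo.launch(server_name="0.0.0.0") binds all interfaces,
# and demo.launch(share=True) creates a temporary public URL for quick demos.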