"""Gradio app for semantic search over the "LuxData" Weaviate collection.

A free-text query is embedded locally with a SentenceTransformer model and
matched against the collection via a near-vector search on a Weaviate Cloud
instance; hits are rendered as an HTML table in a Gradio Blocks UI.

Required environment variables (loaded from a local ``.env`` file):
    WEAVIATE_URL      -- cluster URL of the Weaviate Cloud instance
    WEAVIATE_API_KEY  -- API key for that cluster
"""

import os

import gradio as gr
import weaviate
from dotenv import load_dotenv
from sentence_transformers import SentenceTransformer
from weaviate.classes.init import Auth
from weaviate.classes.query import MetadataQuery

# Load WEAVIATE_URL / WEAVIATE_API_KEY from a local .env file.
load_dotenv()

# Query-embedding model, loaded once at import time (384-dim MiniLM).
model = SentenceTransformer('all-MiniLM-L6-v2')


def get_client():
    """Create a fresh Weaviate Cloud client from environment credentials.

    A new connection is made per call so each search manages (and closes)
    its own client instead of sharing one long-lived connection.
    """
    return weaviate.connect_to_weaviate_cloud(
        cluster_url=os.getenv("WEAVIATE_URL"),
        auth_credentials=Auth.api_key(os.getenv("WEAVIATE_API_KEY")),
    )


def search_images(query, limit=20):
    """Run a near-vector search for *query* against the LuxData collection.

    Parameters
    ----------
    query : str
        Free-text query; embedded locally with the module-level model.
    limit : int, optional
        Maximum number of hits to return. Defaults to 20, matching the
        previous hard-coded value, so existing callers are unaffected.

    Returns
    -------
    list[dict]
        One dict per hit with keys ``distance`` (rounded to 2 decimals),
        ``label``, ``lux_url`` and ``image_url`` (falls back to
        "No image available" when the property is absent).
    """
    client = get_client()
    try:
        lux_collection = client.collections.get("LuxData")

        # Embed the query and search by vector similarity.
        query_vector = model.encode(query)
        results = lux_collection.query.near_vector(
            near_vector=query_vector.tolist(),
            limit=limit,
            return_metadata=MetadataQuery(distance=True),
        )

        return [
            {
                "distance": round(obj.metadata.distance, 2),
                "label": obj.properties['label'],
                "lux_url": obj.properties['lux_url'],
                "image_url": obj.properties.get('image_url', "No image available"),
            }
            for obj in results.objects
        ]
    finally:
        # BUGFIX: close() previously ran only on the success path, leaking
        # the connection whenever encoding or the query raised.
        client.close()


def create_interface():
    """Build and return the Gradio Blocks UI for the search demo.

    NOTE(review): the original source is truncated inside this function's
    HTML template; everything from the table markup onward is reconstructed
    from the surviving fragment
    ``Distance | Label | Lux URL | Image`` /
    ``{r["distance"]} | {r["label"]} | {r["lux_url"]} | {image_cell}``
    and should be confirmed against the original file.
    """
    with gr.Blocks() as demo:
        gr.Markdown("# Lux Semantic Search")
        with gr.Row():
            query_input = gr.Textbox(label="Enter your search query")
            search_button = gr.Button("Search")
        results_table = gr.HTML(label="Search Results")

        def on_search(query):
            # Fetch hits, then render them as an HTML table with images.
            results = search_images(query)
            rows = [
                "<table>",
                "<tr><th>Distance</th><th>Label</th><th>Lux URL</th>"
                "<th>Image</th></tr>",
            ]
            for r in results:
                # Show an inline thumbnail only when an image URL exists.
                if r["image_url"] != "No image available":
                    image_cell = f'<img src="{r["image_url"]}" width="100">'
                else:
                    image_cell = r["image_url"]
                rows.append(
                    f'<tr><td>{r["distance"]}</td><td>{r["label"]}</td>'
                    f'<td><a href="{r["lux_url"]}">{r["lux_url"]}</a></td>'
                    f'<td>{image_cell}</td></tr>'
                )
            rows.append("</table>")
            return "".join(rows)

        search_button.click(
            fn=on_search, inputs=query_input, outputs=results_table
        )

    return demo