Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from sentence_transformers import SentenceTransformer
|
3 |
+
from transformers import pipeline
|
4 |
+
import faiss
|
5 |
+
import numpy as np
|
6 |
+
|
7 |
+
# Knowledge base for retrieval (customize these entries).
documents = [
    "Hugging Face provides a platform called Spaces for deploying ML apps.",
    "RAG combines document retrieval with generative models.",
    "FAISS enables efficient similarity search in dense vectors.",
    "Gradio allows quick creation of ML UIs.",
]

# Embedding model for retrieval and a seq2seq model for answer generation.
embedder = SentenceTransformer('all-MiniLM-L6-v2')
generator = pipeline('text2text-generation', model='google/flan-t5-base')

# Embed every document once and load the vectors into an exact
# (brute-force) L2 FAISS index.
doc_vectors = embedder.encode(documents)
embed_dim = doc_vectors.shape[1]
index = faiss.IndexFlatL2(embed_dim)
index.add(doc_vectors)
|
24 |
+
|
25 |
+
def answer_question(question):
    """Answer a question with retrieval-augmented generation.

    Embeds the question, retrieves the nearest document chunks from the
    module-level FAISS index, and feeds them as context to the seq2seq
    generator.

    Args:
        question: Free-text question from the user.

    Returns:
        The generated answer string (or a prompt to type something when
        the input is empty).
    """
    question = (question or "").strip()
    if not question:
        return "Please enter a question."

    # Retrieve relevant chunks. Never ask FAISS for more neighbors than
    # there are documents, and pass k positionally — the keyword form is
    # not stable across faiss versions.
    k = min(2, len(documents))
    question_embed = embedder.encode([question])
    distances, indices = index.search(question_embed, k)
    # FAISS pads with -1 when fewer than k neighbors exist; skip those.
    context = "\n".join(documents[i] for i in indices[0] if i >= 0)

    # Generate the answer conditioned on the retrieved context.
    prompt = f"Answer based on context: {context}\nQuestion: {question}\nAnswer:"
    return generator(prompt, max_length=200)[0]['generated_text']
|
34 |
+
|
35 |
+
# Gradio interface: one textbox in, one textbox out.
interface = gr.Interface(
    fn=answer_question,
    inputs=gr.Textbox(label="Ask a question"),
    outputs=gr.Textbox(label="Answer"),
    title="RAG Demo",
)

# Launch only when executed as a script, so importing this module
# (e.g. in tests) does not start a web server.
if __name__ == "__main__":
    interface.launch()
|