# RAG demo app: FAISS retrieval + FLAN-T5 generation behind a Gradio UI.
import gradio as gr
from sentence_transformers import SentenceTransformer
from transformers import pipeline
import faiss
import numpy as np

# Sample documents (customize these).
# Tiny in-memory corpus used as the retrieval source; each string is one
# retrievable "chunk" that can be returned as context by the RAG pipeline.
documents = [
    "Hugging Face provides a platform called Spaces for deploying ML apps.",
    "RAG combines document retrieval with generative models.",
    "FAISS enables efficient similarity search in dense vectors.",
    "Gradio allows quick creation of ML UIs."
]

# Initialize models and FAISS index.
embedder = SentenceTransformer('all-MiniLM-L6-v2')  # maps text to dense vectors
generator = pipeline('text2text-generation', model='google/flan-t5-base')

# Embed every document once, then build an exact (brute-force) L2
# nearest-neighbour index over those vectors.
doc_vectors = embedder.encode(documents)
index = faiss.IndexFlatL2(doc_vectors.shape[1])
index.add(doc_vectors)

def answer_question(question, k=2):
    """Answer *question* with retrieval-augmented generation.

    Embeds the question, retrieves the ``k`` nearest document chunks from
    the module-level FAISS index, and feeds them as context to the
    text2text generator.

    Args:
        question: Free-text question from the user.
        k: Number of chunks to retrieve (default 2, kept for backward
           compatibility; clamped to the corpus size).

    Returns:
        The generated answer string, or a short hint when the question
        is empty.
    """
    # Guard: an empty/whitespace question would still trigger a full
    # embed + generate round-trip on meaningless input.
    if not question or not question.strip():
        return "Please enter a question."

    # Retrieve the k most similar chunks (L2 distance in embedding space).
    # Clamp k: FAISS pads with -1 indices when k exceeds index.ntotal,
    # which would make documents[i] raise IndexError.
    question_embed = embedder.encode([question])
    k = min(k, len(documents))
    _, indices = index.search(question_embed, k=k)
    context = "\n".join(documents[i] for i in indices[0])

    # Generate an answer grounded in the retrieved context.
    prompt = f"Answer based on context: {context}\nQuestion: {question}\nAnswer:"
    return generator(prompt, max_length=1000)[0]['generated_text']

# Wire the RAG pipeline into a minimal Gradio UI: one question box in,
# one answer box out.
interface = gr.Interface(
    fn=answer_question,
    inputs=gr.Textbox(label="Ask a question"),
    outputs=gr.Textbox(label="Answer"),
    title="RAG Demo",
)

# Start the local web server (blocks until the app is stopped).
interface.launch()