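"""Minimal RAG pipeline: retrieve passages from a FAISS index using
sentence-transformers embeddings, then generate an answer with
Mistral-7B-Instruct. Assumes the index and document pickle were built
beforehand (the paths below are the defaults used by load_faiss_index)."""
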
import pickle

import faiss
import numpy as np
import torch
from sentence_transformers import SentenceTransformer
from transformers import AutoModelForCausalLM, AutoTokenizer


def load_faiss_index(index_path="faiss_index/faiss_index.faiss", doc_path="faiss_index/documents.pkl"):
    """Load the prebuilt FAISS index and the pickled list of source documents."""
    index = faiss.read_index(index_path)
    with open(doc_path, "rb") as f:
        documents = pickle.load(f)
    return index, documents


def get_embedding_model():
    """Return the sentence encoder; it must match the model used to build the index."""
    return SentenceTransformer("all-MiniLM-L6-v2")


def query_index(question, index, documents, model, k=3):
    """Embed the question and return the k nearest documents from the index."""
    question_embedding = model.encode([question])
    # FAISS expects a float32 matrix of shape (n_queries, dim).
    _, indices = index.search(np.asarray(question_embedding, dtype="float32"), k)
    return [documents[i] for i in indices[0]]


def generate_answer(question, context):
    """Answer the question with Mistral-7B-Instruct, grounded in the retrieved context.

    Note: the tokenizer and model are reloaded on every call; cache them at
    module level if this function is called repeatedly.
    """
    model_id = "mistralai/Mistral-7B-Instruct-v0.1"
    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.float16)
    # French prompt: "Here is some context: ... Question: ... Answer:"
    prompt = f"Voici un contexte :\n{context}\n\nQuestion : {question}\nRéponse :"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=256)
    # Decode only the newly generated tokens so the prompt is not echoed back.
    return tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
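

# A minimal usage sketch under stated assumptions: the index files above exist,
# the pickled documents are plain strings, and there is enough GPU memory for
# Mistral-7B in float16. The question below is purely illustrative.
if __name__ == "__main__":
    index, documents = load_faiss_index()
    embedder = get_embedding_model()

    question = "Que contient le document ?"  # illustrative query ("What does the document contain?")
    retrieved = query_index(question, index, documents, embedder, k=3)

    # Concatenate the retrieved passages into one context block for the prompt.
    context = "\n\n".join(retrieved)
    print(generate_answer(question, context))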