Commit d6c7dc0 · verified · 1 Parent(s): 8ca422c

Delete rag_utils_unsloth.py

Files changed (1)
  1. rag_utils_unsloth.py +0 -42
rag_utils_unsloth.py DELETED
@@ -1,42 +0,0 @@
-import faiss
-import pickle
-import numpy as np
-import torch
-import os
-
-from sentence_transformers import SentenceTransformer
-from unsloth import FastLanguageModel
-
-def load_faiss_index(index_path="faiss_index/faiss_index.faiss", doc_path="faiss_index/documents.pkl"):
-    index = faiss.read_index(index_path)
-    with open(doc_path, "rb") as f:
-        documents = pickle.load(f)
-    return index, documents
-
-def get_embedding_model():
-    return SentenceTransformer("sentence-transformers/multi-qa-MiniLM-L6-cos-v1")
-
-def query_index(question, index, documents, model, k=3):
-    question_embedding = model.encode([question])
-    _, indices = index.search(np.array(question_embedding).astype("float32"), k)
-    results = [documents[i] for i in indices[0]]
-    return results
-
-def generate_answer(question, context):
-    model_id = "unsloth/mistral-7b-instruct-v0.1-bnb-4bit"
-
-    model, tokenizer = FastLanguageModel.from_pretrained(
-        model_name=model_id,
-        max_seq_length=4096,
-        dtype="float32",  # for CPU only
-        load_in_4bit=True,
-        device_map="auto"
-    )
-
-    tokenizer.pad_token = tokenizer.eos_token
-
-    prompt = f"Voici un contexte :\n{context}\n\nQuestion : {question}\nRéponse :"
-    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True).to(model.device)
-
-    outputs = model.generate(**inputs, max_new_tokens=256, pad_token_id=tokenizer.eos_token_id)
-    return tokenizer.decode(outputs[0], skip_special_tokens=True)
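For context, the deleted module wired a FAISS retriever to an Unsloth-loaded 4-bit Mistral model: load_faiss_index restores the index and pickled documents, query_index embeds a question and returns the top-k passages, and generate_answer prompts the model with that context. A minimal usage sketch, assuming the defaults from the deleted file; this driver code is not part of the repo and the question string is a placeholder:

    # Hypothetical driver for the deleted helpers (illustrative only).
    index, documents = load_faiss_index()        # default paths under faiss_index/
    embedder = get_embedding_model()             # multi-qa-MiniLM-L6-cos-v1 encoder

    question = "..."                             # any user question
    passages = query_index(question, index, documents, embedder, k=3)

    # Concatenate retrieved passages into a single context block and generate.
    answer = generate_answer(question, "\n\n".join(passages))
    print(answer)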