Yoxas committed
Commit 716f829 · verified · 1 Parent(s): 9cc49fe

Update app.py

Files changed (1):
  1. app.py +28 -110

app.py CHANGED
@@ -1,126 +1,44 @@
-import gradio as gr
 import pandas as pd
-import numpy as np
-from transformers import pipeline, BertTokenizer, BertModel
-import faiss
 import torch
 import json
 import spaces
-import logging
-
-# Set up logging
-logging.basicConfig(level=logging.DEBUG)
-
-# Load CSV data
-data = pd.read_csv('RBDx10kstats.csv')
-
-# Function to safely convert JSON strings to numpy arrays
-def safe_json_loads(x):
-    try:
-        return np.array(json.loads(x), dtype=np.float32)  # Ensure the array is of type float32
-    except json.JSONDecodeError as e:
-        logging.error(f"Error decoding JSON: {e}")
-        return np.array([], dtype=np.float32)  # Return an empty array or handle it as appropriate
-
-# Apply the safe_json_loads function to the embedding column
-data['embedding'] = data['embedding'].apply(safe_json_loads)
-
-# Filter out any rows with empty embeddings
-data = data[data['embedding'].apply(lambda x: x.size > 0)]
-
-# Initialize FAISS index
-dimension = len(data['embedding'].iloc[0])
-res = faiss.StandardGpuResources()  # use a single GPU
-
-# Create FAISS index
-if faiss.get_num_gpus() > 0:
-    gpu_index = faiss.IndexFlatL2(dimension)
-    gpu_index = faiss.index_cpu_to_gpu(res, 0, gpu_index)  # move to GPU
-else:
-    gpu_index = faiss.IndexFlatL2(dimension)  # fall back to CPU
-
-# Ensure embeddings are stacked as float32
-embeddings = np.vstack(data['embedding'].values).astype(np.float32)
-logging.debug(f"Embeddings shape: {embeddings.shape}, dtype: {embeddings.dtype}")
-gpu_index.add(embeddings)
-
-# Check if GPU is available
-device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
-# Load QA model
-qa_model = pipeline("question-answering", model="distilbert-base-uncased-distilled-squad", device=0 if torch.cuda.is_available() else -1)
-
-# Load BERT model and tokenizer
-tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
-model = BertModel.from_pretrained('bert-base-uncased').to(device)
-
-# Function to embed the question using BERT
-def embed_question(question, model, tokenizer):
-    try:
-        inputs = tokenizer(question, return_tensors='pt').to(device)
-        logging.debug(f"Tokenized inputs: {inputs}")
-        with torch.no_grad():
-            outputs = model(**inputs)
-        embedding = outputs.last_hidden_state.mean(dim=1).cpu().numpy().astype(np.float32)
-        logging.debug(f"Question embedding shape: {embedding.shape}")
-        logging.debug(f"Question embedding content: {embedding}")
-        return embedding
-    except Exception as e:
-        logging.error(f"Error embedding question: {e}")
-        raise
-
-# Function to retrieve the relevant document and generate a response
 @spaces.GPU(duration=120)
-def retrieve_and_generate(question):
-    logging.debug(f"Received question: {question}")
-    try:
-        # Embed the question
-        question_embedding = embed_question(question, model, tokenizer)
-
-        # Ensure the embedding is in the correct format for FAISS search
-        question_embedding = question_embedding.astype(np.float32)
-
-        # Search in FAISS index
-        try:
-            logging.debug(f"Searching FAISS index with question embedding: {question_embedding}")
-            distances, indices = gpu_index.search(question_embedding, k=1)
-            if len(indices) == 0:
-                logging.error("No results found in FAISS search.")
-                return "No relevant document found."
-            logging.debug(f"Indices found: {indices}, Distances: {distances}")
-        except Exception as e:
-            logging.error(f"Error during FAISS search: {e}")
-            return f"An error occurred during search: {e}"
-
-        # Retrieve the most relevant document
-        try:
-            relevant_doc = data.iloc[indices[0][0]]
-            logging.debug(f"Relevant document: {relevant_doc}")
-        except Exception as e:
-            logging.error(f"Error retrieving document: {e}")
-            return "An error occurred while retrieving the document. Please try again."
-
-        # Use the QA model to generate the answer
-        try:
-            context = relevant_doc['Abstract']
-            response = qa_model(question=question, context=context)
-            logging.debug(f"Response: {response}")
-            return response['answer']
-        except Exception as e:
-            logging.error(f"Error generating answer: {e}")
-            return "An error occurred while generating the answer. Please try again."
-    except Exception as e:
-        logging.error(f"Error during retrieval and generation: {e}")
-        return "An error occurred. Please try again."

 # Create a Gradio interface
-interface = gr.Interface(
-    fn=retrieve_and_generate,
-    inputs=gr.Textbox(lines=2, placeholder="Ask a question about the documents..."),
     outputs="text",
     title="RAG Chatbot",
-    description="Ask questions about the documents in the CSV file."
 )

-# Launch the Gradio app
-interface.launch()
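For reference, the deleted pipeline embeds each question by mean-pooling BERT's last hidden state into a single 768-dimensional float32 vector, then runs an exact nearest-neighbour lookup in FAISS. A minimal, self-contained sketch of that FAISS pattern, with random vectors standing in for the CSV embeddings (the (1, dim) query shape is exactly what the mean-pooled BERT output provides):

import numpy as np
import faiss

dim = 768                                               # BERT-base hidden size
corpus = np.random.rand(1000, dim).astype(np.float32)   # FAISS expects float32

index = faiss.IndexFlatL2(dim)   # exact (brute-force) L2 search; no training step
index.add(corpus)                # index all corpus vectors at once

query = np.random.rand(1, dim).astype(np.float32)   # queries are 2-D: (n_queries, dim)
distances, indices = index.search(query, 1)         # k = 1 nearest neighbour
print(indices[0][0], distances[0][0])               # best row and its squared L2 distance

The added side of the diff, below, drops FAISS, BERT, and the extractive QA model in favour of a single Sentence Transformer and a cosine-similarity lookup: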
 
 
 import pandas as pd
 import torch
+from sentence_transformers import SentenceTransformer, util
+import gradio as gr
 import json
 import spaces
+
+# Load the CSV file with embeddings
+df = pd.read_csv('RBDx10kstats.csv')
+df['embedding'] = df['embedding'].apply(json.loads)  # Convert JSON string back to list
+
+# Convert embeddings to tensor for efficient retrieval
+embeddings = torch.tensor(df['embedding'].tolist())
+
+# Load the same Sentence Transformer model
+model = SentenceTransformer('all-MiniLM-L6-v2')
+
+# Define the function to find the most relevant document
 @spaces.GPU(duration=120)
+def retrieve_relevant_doc(query):
+    query_embedding = model.encode(query, convert_to_tensor=True)
+    similarities = util.pytorch_cos_sim(query_embedding, embeddings)[0]
+    best_match_idx = torch.argmax(similarities).item()
+    return df.iloc[best_match_idx]['Abstract']
+
+# Define the function to generate a response (for simplicity, echo the retrieved doc)
+@spaces.GPU(duration=120)
+def generate_response(query):
+    relevant_doc = retrieve_relevant_doc(query)
+    # Here you could use a more sophisticated language model to generate a response
+    # For now, we will just return the relevant document as the response
+    return relevant_doc
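Taken on its own, the retrieval these two functions implement is cosine similarity between sentence-transformer embeddings. A small sketch with a toy in-memory corpus standing in for the CSV's Abstract column; note that util.cos_sim is the current name for the util.pytorch_cos_sim alias used above:

from sentence_transformers import SentenceTransformer, util

docs = [
    "Sleep quality statistics for the study cohort.",
    "Heart-rate variability across participants.",
    "Step counts aggregated by week.",
]

model = SentenceTransformer('all-MiniLM-L6-v2')
doc_embeddings = model.encode(docs, convert_to_tensor=True)   # (3, 384) for this model

query_embedding = model.encode("How well did people sleep?", convert_to_tensor=True)
scores = util.cos_sim(query_embedding, doc_embeddings)[0]     # one score per document
best = int(scores.argmax())
print(docs[best], float(scores[best]))

The rest of the diff wires the response function into Gradio: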

 # Create a Gradio interface
+iface = gr.Interface(
+    fn=generate_response,
+    inputs=gr.inputs.Textbox(lines=2, placeholder="Enter your query here..."),
     outputs="text",
     title="RAG Chatbot",
+    description="This chatbot retrieves relevant documents based on your query."
 )

+# Launch the Gradio interface
+iface.launch()
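One regression worth flagging: the removed version already used the current top-level gr.Textbox component, while the new code switches to gr.inputs.Textbox, a legacy namespace that was deprecated in Gradio 3.x and removed in 4.x, where it raises AttributeError. A minimal sketch of the same wiring against the current API, with a placeholder standing in for the real generate_response defined above:

import gradio as gr

def generate_response(query: str) -> str:
    # Placeholder for the retrieval function defined in app.py above.
    return f"(most relevant abstract for: {query})"

# Same interface as the committed file, with gr.Textbox in place of the
# legacy gr.inputs namespace.
iface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(lines=2, placeholder="Enter your query here..."),
    outputs="text",
    title="RAG Chatbot",
    description="This chatbot retrieves relevant documents based on your query.",
)

if __name__ == "__main__":
    iface.launch()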