arjunanand13 committed on
Commit 7f2869e
1 Parent(s): e430c35

Create app.py

Files changed (1):
  app.py +191 -0
app.py ADDED
@@ -0,0 +1,191 @@
import concurrent.futures
import threading
import torch
from datetime import datetime
import json
import gradio as gr
import re
import faiss
import numpy as np
from sentence_transformers import SentenceTransformer
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, BitsAndBytesConfig
# Document loaders and text splitter used by load_documents()
from langchain_community.document_loaders import DirectoryLoader, TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter

class DocumentRetrievalAndGeneration:
    def __init__(self, embedding_model_name, lm_model_id, data_folder):
        self.all_splits = self.load_documents(data_folder)
        self.embeddings = SentenceTransformer(embedding_model_name)
        self.gpu_index = self.create_faiss_index()
        self.llm = self.initialize_llm(lm_model_id)
        self.cancel_flag = threading.Event()

    def load_documents(self, folder_path):
        loader = DirectoryLoader(folder_path, loader_cls=TextLoader)
        documents = loader.load()
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=5000, chunk_overlap=250)
        all_splits = text_splitter.split_documents(documents)
        print('Length of documents:', len(documents))
        print('Length of all_splits:', len(all_splits))
        # Preview the first few chunks as a sanity check
        for i in range(min(5, len(all_splits))):
            print(all_splits[i].page_content)
        return all_splits

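    # Note: RecursiveCharacterTextSplitter counts characters, not tokens, so
    # chunk_size=5000 corresponds to very roughly 1,000-1,500 English tokens per chunk.
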
    def create_faiss_index(self):
        all_texts = [split.page_content for split in self.all_splits]
        embeddings = self.embeddings.encode(all_texts, convert_to_tensor=True).cpu().numpy()
        # Exact L2 search over the chunk embeddings, moved to GPU 0 for speed
        index = faiss.IndexFlatL2(embeddings.shape[1])
        index.add(embeddings)
        gpu_resource = faiss.StandardGpuResources()
        gpu_index = faiss.index_cpu_to_gpu(gpu_resource, 0, index)
        return gpu_index

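    # faiss.StandardGpuResources exists only in the faiss-gpu build. A minimal
    # CPU fallback sketch (an assumption, not part of the original code):
    #
    #     if not hasattr(faiss, "StandardGpuResources"):
    #         return index  # keep the flat CPU index; search() is identical
    #
    # IndexFlatL2.search() has the same signature on CPU and GPU.
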
    def initialize_llm(self, model_id):
        # 4-bit NF4 quantization keeps the 7B model within a single-GPU memory budget
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16
        )
        model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config)
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        generate_text = pipeline(
            model=model,
            tokenizer=tokenizer,
            return_full_text=True,
            task='text-generation',
            do_sample=True,  # required for temperature to take effect
            temperature=0.6,
            max_new_tokens=256,
        )
        return generate_text

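    # The transformers pipeline object exposes .model, .tokenizer and .device,
    # which the methods below rely on instead of storing separate references.
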
    def generate_response_with_timeout(self, model_inputs):
        def target(future):
            if self.cancel_flag.is_set():
                return
            generated_ids = self.llm.model.generate(model_inputs, max_new_tokens=1000, do_sample=True)
            if not self.cancel_flag.is_set():
                future.set_result(generated_ids)
            else:
                future.set_exception(TimeoutError("Text generation process was canceled"))

        # Run generation in a worker thread so the caller can enforce a timeout
        future = concurrent.futures.Future()
        thread = threading.Thread(target=target, args=(future,))
        thread.start()

        try:
            generated_ids = future.result(timeout=60)  # timeout set to 60 seconds
            return generated_ids
        except concurrent.futures.TimeoutError:
            self.cancel_flag.set()
            raise TimeoutError("Text generation process timed out")

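    # Design note: Python threads cannot be forcibly killed, so on timeout the
    # generate() call keeps running in the background; the cancel flag only
    # prevents its result from being delivered (or skips generation entirely
    # if it is set before the thread reaches generate()).
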
    def qa_infer_gradio(self, query):
        # Clear the cancel flag for the new query
        self.cancel_flag.clear()

        try:
            # Retrieve the 5 nearest chunks for the query
            query_embedding = self.embeddings.encode(query, convert_to_tensor=True).cpu().numpy()
            distances, indices = self.gpu_index.search(np.array([query_embedding]), k=5)

            content = ""
            for idx in indices[0]:
                content += "-" * 50 + "\n"
                content += self.all_splits[idx].page_content + "\n"

            # Include the retrieved chunks in the prompt so the model can ground its answer
            prompt = f"""<s>
            Here are the retrieved chunks:
            {content}
            Here's my question:
            Query: {query}
            Solution:
            RETURN ONLY SOLUTION. IF THERE IS NO ANSWER RELATABLE IN RETRIEVED CHUNKS, RETURN "NO SOLUTION AVAILABLE"
            </s>
            """
            messages = [{"role": "user", "content": prompt}]
            encodeds = self.llm.tokenizer.apply_chat_template(messages, return_tensors="pt")
            model_inputs = encodeds.to(self.llm.device)

            start_time = datetime.now()
            generated_ids = self.generate_response_with_timeout(model_inputs)
            elapsed_time = datetime.now() - start_time

            # Decode only the newly generated tokens: the echoed prompt also
            # contains "Solution:" and would otherwise be parsed as the answer
            decoded = self.llm.tokenizer.batch_decode(generated_ids[:, model_inputs.shape[1]:])
            generated_response = decoded[0]

            match = re.search(r'(.*?)</s>', generated_response, re.DOTALL)
            if match:
                solution_text = match.group(1).strip()
            else:
                solution_text = generated_response.strip() or "NO SOLUTION AVAILABLE"

            print("Generated response:", generated_response)
            print("Time elapsed:", elapsed_time)
            print("Device in use:", self.llm.device)

            return solution_text, content

        except TimeoutError:
            return "timeout", content

if __name__ == "__main__":
    # Example usage
    embedding_model_name = 'flax-sentence-embeddings/all_datasets_v3_MiniLM-L12'
    lm_model_id = "mistralai/Mistral-7B-Instruct-v0.2"
    data_folder = 'sample_embedding_folder2'

    doc_retrieval_gen = DocumentRetrievalAndGeneration(embedding_model_name, lm_model_id, data_folder)

    def launch_interface():
        css_code = """
        .gradio-container {
            background-color: #daccdb;
        }
        /* Button styling for all buttons */
        button {
            background-color: #927fc7; /* Default color for all other buttons */
            color: black;
            border: 1px solid black;
            padding: 10px;
            margin-right: 10px;
            font-size: 16px; /* Increase font size */
            font-weight: bold; /* Make text bold */
        }
        """
        EXAMPLES = [
            "On which devices can the VIP and CSI2 modules operate simultaneously?",
            "I'm using Code Composer Studio 5.4.0.00091 and enabled FPv4SPD16 floating point support for CortexM4 in TDA2. However, after building the project, the .asm file shows --float_support=vfplib instead of FPv4SPD16. Why is this happening?",
            "Could you clarify the maximum number of cameras that can be connected simultaneously to the video input ports on the TDA2x SoC, considering it supports up to 10 multiplexed input ports and includes 3 dedicated video input modules?"
        ]

        # ticketNames.txt must contain a JSON array of query strings
        file_path = "ticketNames.txt"
        with open(file_path, "r") as file:
            ticket_names = json.loads(file.read())
        dropdown = gr.Dropdown(label="Sample queries", choices=ticket_names)

        tab1 = gr.Interface(
            fn=doc_retrieval_gen.qa_infer_gradio,
            inputs=[gr.Textbox(label="QUERY", placeholder="Enter your query here")],
            outputs=[gr.Textbox(label="SOLUTION"), gr.Textbox(label="RELATED QUERIES")],
            examples=EXAMPLES,
            cache_examples=False,
            allow_flagging='never',
            css=css_code
        )
        tab2 = gr.Interface(
            fn=doc_retrieval_gen.qa_infer_gradio,
            inputs=[dropdown],
            outputs=[gr.Textbox(label="SOLUTION"), gr.Textbox(label="RELATED QUERIES")],
            allow_flagging='never',
            css=css_code
        )

        gr.TabbedInterface(
            [tab1, tab2],
            ["Textbox Input", "FAQs"],
            title="TI E2E FORUM",
            css=css_code
        ).launch(debug=True)

    launch_interface()
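
Note: the FAQs tab reads ticketNames.txt with json.loads, so the file must hold a JSON array of query strings. A minimal hypothetical example of its contents:

    ["On which devices can the VIP and CSI2 modules operate simultaneously?", "Why does my TDA2 build fall back to vfplib?"]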