Spaces: arjunanand13 committed
Commit b4b2324
Parent(s): af8f2bb

Update app.py

Files changed (1): app.py (+62 -7)
app.py CHANGED
@@ -9,8 +9,8 @@ import faiss
 import numpy as np
 from sentence_transformers import SentenceTransformer
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, BitsAndBytesConfig
-from langchain.document_loaders import DirectoryLoader, TextLoader  # Import these from langchain
-from langchain.text_splitter import RecursiveCharacterTextSplitter  # Import the text splitter
+from langchain.document_loaders import DirectoryLoader, TextLoader
+from langchain.text_splitter import RecursiveCharacterTextSplitter

 class DocumentRetrievalAndGeneration:
     def __init__(self, embedding_model_name, lm_model_id, data_folder):
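Note: on LangChain 0.1 and later these import paths are deprecated in favor of the langchain-community package (and the splitter later moved again, to langchain-text-splitters). A minimal sketch of the equivalent imports, assuming the Space pins a newer LangChain and has those packages installed:

    # Equivalent imports on newer LangChain releases (assumes the
    # langchain-community and langchain-text-splitters packages are installed)
    from langchain_community.document_loaders import DirectoryLoader, TextLoader
    from langchain_text_splitters import RecursiveCharacterTextSplitter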
@@ -48,7 +48,7 @@ class DocumentRetrievalAndGeneration:
             bnb_4bit_compute_dtype=torch.bfloat16
         )
         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-        model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config)
+        model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config).to(device)
         tokenizer = AutoTokenizer.from_pretrained(model_id)
         generate_text = pipeline(
             model=model,
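Note: the change adds .to(device) on a 4-bit bitsandbytes model, which recent transformers versions reject with a ValueError, since quantized weights are already placed on a device at load time. A minimal sketch of the more conventional pattern, assuming accelerate is installed:

    # Let accelerate place the quantized weights instead of calling .to()
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        quantization_config=bnb_config,
        device_map="auto",  # dispatches the 4-bit weights to the available GPU(s)
    )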
@@ -94,7 +94,8 @@ class DocumentRetrievalAndGeneration:
                 content += "-" * 50 + "\n"
                 content += self.all_splits[idx].page_content + "\n"

-            prompt = f"""<s>
+            prompt = f"""
+<s>
 Here's my question:
 Query: {query}
 Solution:
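Note: apply_chat_template already prepends Mistral's BOS token, so keeping a literal <s> inside the message body risks a duplicated BOS in the encoded input; it is usually safe to drop it entirely. A minimal sketch of the template-only form, assuming nothing downstream parses the marker out of the prompt text:

    # Let the chat template supply the special tokens
    prompt = f"Here's my question:\nQuery: {query}\nSolution:\n"
    messages = [{"role": "user", "content": prompt}]
    encodeds = tokenizer.apply_chat_template(messages, return_tensors="pt")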
@@ -103,7 +104,7 @@ class DocumentRetrievalAndGeneration:
 """
             messages = [{"role": "user", "content": prompt}]
             encodeds = self.llm.tokenizer.apply_chat_template(messages, return_tensors="pt")
-            model_inputs = encodeds.to(self.llm.device)
+            model_inputs = encodeds.to(self.llm.model.device)

             start_time = datetime.now()
             generated_ids = self.generate_response_with_timeout(model_inputs)
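Note: self.llm appears to be a transformers pipeline. A pipeline does expose a .device attribute, but when the underlying model has been dispatched by accelerate it is .model.device that reliably reports where the weights live, so moving the input ids there is the safer choice. A hypothetical probe, outside the Space's actual code, comparing the two attributes:

    generate_text = pipeline("text-generation", model=model, tokenizer=tokenizer)
    print(generate_text.device)        # device the pipeline was constructed with
    print(generate_text.model.device)  # device of the underlying model's weights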
@@ -120,9 +121,63 @@ class DocumentRetrievalAndGeneration:

             print("Generated response:", generated_response)
             print("Time elapsed:", elapsed_time)
-            print("Device in use:", self.llm.device)
+            print("Device in use:", self.llm.model.device)

             return solution_text, content

         except TimeoutError:
-            return "timeout", content
+            return "timeout", content
+
+if __name__ == "__main__":
+    # Example usage
+    embedding_model_name = 'flax-sentence-embeddings/all_datasets_v3_MiniLM-L12'
+    lm_model_id = "mistralai/Mistral-7B-Instruct-v0.2"
+    data_folder = 'sample_embedding_folder2'
+
+    doc_retrieval_gen = DocumentRetrievalAndGeneration(embedding_model_name, lm_model_id, data_folder)
+
+    # Define Gradio interface function
+    def launch_interface():
+        css_code = """
+            .gradio-container {
+                background-color: #daccdb;
+            }
+            /* Button styling for all buttons */
+            button {
+                background-color: #927fc7; /* Default color for all other buttons */
+                color: black;
+                border: 1px solid black;
+                padding: 10px;
+                margin-right: 10px;
+                font-size: 16px; /* Increase font size */
+                font-weight: bold; /* Make text bold */
+            }
+        """
+        EXAMPLES = ["Can the VIP and CSI2 modules operate simultaneously?",
+                    "I'm using Code Composer Studio 5.4.0.00091 and enabled FPv4SPD16 floating point support for CortexM4 in TDA2. However, after building the project, the .asm file shows --float_support=vfplib instead of FPv4SPD16. Why is this happening?",
+                    "Could you clarify the maximum number of cameras that can be connected simultaneously to the video input ports on the TDA2x SoC, considering it supports up to 10 multiplexed input ports and includes 3 dedicated video input modules?"]
+
+        file_path = "ticketNames.txt"
+
+        # Read the file content
+        with open(file_path, "r") as file:
+            content = file.read()
+        ticket_names = json.loads(content)
+        dropdown = gr.Dropdown(label="Sample queries", choices=ticket_names)
+
+        # Define Gradio interface
+        interface = gr.Interface(
+            fn=doc_retrieval_gen.qa_infer_gradio,
+            inputs=[gr.Textbox(label="QUERY", placeholder="Enter your query here")],
+            allow_flagging='never',
+            examples=EXAMPLES,
+            cache_examples=False,
+            outputs=[gr.Textbox(label="SOLUTION"), gr.Textbox(label="RELATED QUERIES")],
+            css=css_code
+        )
+
+        # Launch Gradio interface
+        interface.launch(debug=True)
+
+    # Launch the interface
+    launch_interface()
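Note: the diff calls self.generate_response_with_timeout() and catches TimeoutError, but the wrapper itself lies outside the changed lines. A minimal sketch of one common way to implement it, assuming a thread-pool approach; the method name comes from the diff, while the body below is an assumption, not the Space's actual code:

    from concurrent.futures import ThreadPoolExecutor
    from concurrent.futures import TimeoutError as FuturesTimeout

    def generate_response_with_timeout(self, model_inputs, timeout_s=60):
        # Run generation in a worker thread and bound the wait.
        # future.result() raises FuturesTimeout when the deadline passes;
        # on Python 3.11+ that is the builtin TimeoutError, which the
        # except clause in the diff would catch. The worker thread itself
        # still runs generation to completion in the background.
        with ThreadPoolExecutor(max_workers=1) as executor:
            future = executor.submit(
                self.llm.model.generate, model_inputs, max_new_tokens=512
            )
            return future.result(timeout=timeout_s)

Also worth flagging for a follow-up commit: launch_interface() builds a gr.Dropdown of sample queries from ticketNames.txt but never passes it to gr.Interface, so the dropdown is currently dead code.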