Reyad-Ahmmed committed
Commit 722a491 · verified · 1 Parent: 2e6b2e9

Update app.py

Files changed (1)
  1. app.py +29 -35
app.py CHANGED
@@ -194,49 +194,43 @@ label_mapping = {
 
 #Function to classify user input
 def classifyTimeFrame(user_input):
-    while True:
-        #user_input = input("Enter a command (or type 'q' to quit): ")
-        if user_input.lower() == 'q':
-            print("Exiting...")
-            break
-
-        # Tokenize and predict
-        input_encoding = tokenizer(user_input, padding=True, truncation=True, return_tensors="pt").to('cpu')
-
-        with torch.no_grad():
-            attention_mask = input_encoding['attention_mask'].clone()
-
-
-
-            # Modify the attention mask to emphasize certain key tokens
-            # for idx, token_id in enumerate(input_encoding['input_ids'][0]):
-            #     word = tokenizer.decode([token_id])
-            #     print(word)
-            #     if word.strip() in ["now", "same", "continue", "again", "also"]: # Target key tokens
-            #         attention_mask[0, idx] = 3  # Increase attention weight for these words
-            #     else:
-            #         attention_mask[0, idx] = 0
-            # print (attention_mask)
-            # input_encoding['attention_mask'] = attention_mask
-            # print (input_encoding)
-            output = model(**input_encoding, output_hidden_states=True)
-
-            probabilities = F.softmax(output.logits, dim=-1)
-
-            prediction = torch.argmax(output.logits, dim=1).cpu().numpy()
-
-            # Map prediction back to label
-            print(prediction)
-            predicted_label = label_mapping[prediction[0]]
-
-
-            print(f"Predicted intent: {predicted_label}\n")
-            # Print the confidence for each label
-            print("\nLabel Confidence Scores:")
-            for i, label in label_mapping.items():
-                confidence = probabilities[0][i].item()  # Get confidence score for each label
-                print(f"{label}: {confidence:.4f}")
-            print("\n")
+    # Tokenize and predict
+    input_encoding = tokenizer(user_input, padding=True, truncation=True, return_tensors="pt").to('cpu')
+
+    with torch.no_grad():
+        attention_mask = input_encoding['attention_mask'].clone()
+
+
+
+        # Modify the attention mask to emphasize certain key tokens
+        # for idx, token_id in enumerate(input_encoding['input_ids'][0]):
+        #     word = tokenizer.decode([token_id])
+        #     print(word)
+        #     if word.strip() in ["now", "same", "continue", "again", "also"]: # Target key tokens
+        #         attention_mask[0, idx] = 3  # Increase attention weight for these words
+        #     else:
+        #         attention_mask[0, idx] = 0
+        # print (attention_mask)
+        # input_encoding['attention_mask'] = attention_mask
+        # print (input_encoding)
+        output = model(**input_encoding, output_hidden_states=True)
+
+        probabilities = F.softmax(output.logits, dim=-1)
+
+        prediction = torch.argmax(output.logits, dim=1).cpu().numpy()
+
+        # Map prediction back to label
+        print(prediction)
+        predicted_label = label_mapping[prediction[0]]
+
+
+        print(f"Predicted intent: {predicted_label}\n")
+        # Print the confidence for each label
+        print("\nLabel Confidence Scores:")
+        for i, label in label_mapping.items():
+            confidence = probabilities[0][i].item()  # Get confidence score for each label
+            print(f"{label}: {confidence:.4f}")
+        print("\n")
 
 iface = gr.Interface(fn=classifyTimeFrame, inputs="text", outputs="text")
 iface.launch(share=True)
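
Note that the committed classifyTimeFrame still prints its results to stdout and returns nothing, so the Gradio text output box will stay empty (gr.Interface displays whatever the wrapped function returns). Below is a minimal sketch of the same flow that returns the prediction and per-label confidences as a string instead; it assumes tokenizer, model, and label_mapping (an int-to-label dict) are created earlier in app.py, as the hunk header suggests, and it drops the unused attention-mask experimentation.

import torch
import torch.nn.functional as F
import gradio as gr

# Sketch only: tokenizer, model, and label_mapping are assumed to be defined
# earlier in app.py (the hunk header shows label_mapping just above this code).
def classifyTimeFrame(user_input):
    # Tokenize the raw text and keep the tensors on CPU, as in the commit.
    input_encoding = tokenizer(
        user_input, padding=True, truncation=True, return_tensors="pt"
    ).to("cpu")

    # No gradients are needed for inference.
    with torch.no_grad():
        output = model(**input_encoding)

    # Softmax over the logits gives one confidence score per label.
    probabilities = F.softmax(output.logits, dim=-1)
    predicted_label = label_mapping[torch.argmax(output.logits, dim=1).item()]

    # Build the text that Gradio will render in the output box.
    lines = [f"Predicted intent: {predicted_label}", "", "Label Confidence Scores:"]
    for i, label in label_mapping.items():
        lines.append(f"{label}: {probabilities[0][i].item():.4f}")
    return "\n".join(lines)

iface = gr.Interface(fn=classifyTimeFrame, inputs="text", outputs="text")
iface.launch(share=True)

Returning the string, rather than printing it, is what lets outputs="text" show anything in the browser; the tokenization, softmax, and label lookup otherwise mirror the committed version.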