FridayMaster committed (verified)
Commit 947c082 · 1 Parent(s): 67be4ed

Update app.py

Files changed (1): app.py (+10 -7)

app.py CHANGED
@@ -9,11 +9,19 @@ model = AutoModelForSequenceClassification.from_pretrained(model_name) # Use th
 
 # Define a function to generate responses
 def generate_response(prompt):
+    # Tokenize the input prompt
     inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=512)
     with torch.no_grad():
+        # Get the model output
         outputs = model(**inputs)
-    # Customize the response generation as per your model's output
-    response = tokenizer.decode(outputs.logits.argmax(dim=-1), skip_special_tokens=True)
+
+    # Process the output logits
+    logits = outputs.logits
+    predicted_class_id = logits.argmax().item()
+
+    # Generate a response based on the predicted class
+    response = f"Predicted class ID: {predicted_class_id}"
+
     return response
 
 # Create a Gradio interface
@@ -28,8 +36,3 @@ iface = gr.Interface(
 
 # Launch the Gradio app
 if __name__ == "__main__":
     iface.launch()
-
-
-
-
-
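
For context, a minimal sketch of what app.py might look like after this commit. The diff does not show the imports, the actual model_name, or the gr.Interface arguments (the second hunk truncates at `iface = gr.Interface(`), so the checkpoint name and the interface inputs/outputs below are assumptions rather than the repository's actual values.

import gradio as gr
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Assumed setup: the real app.py defines its own model_name earlier in the file
model_name = "distilbert-base-uncased-finetuned-sst-2-english"  # placeholder checkpoint, not from the diff
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)  # Use the appropriate model class

# Define a function to generate responses
def generate_response(prompt):
    # Tokenize the input prompt
    inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=512)
    with torch.no_grad():
        # Get the model output
        outputs = model(**inputs)

    # Process the output logits
    logits = outputs.logits
    predicted_class_id = logits.argmax().item()

    # Generate a response based on the predicted class
    response = f"Predicted class ID: {predicted_class_id}"

    return response

# Create a Gradio interface (arguments assumed; the real ones are not shown in this diff)
iface = gr.Interface(fn=generate_response, inputs="text", outputs="text")

# Launch the Gradio app
if __name__ == "__main__":
    iface.launch()

If the underlying checkpoint defines label names, model.config.id2label[predicted_class_id] would return a human-readable label instead of the raw class ID.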