Dileep7729 committed on
Commit cb84f56 · verified · 1 Parent(s): 9303fde

Update app.py

Files changed (1)
  1. app.py +22 -33
app.py CHANGED
@@ -1,87 +1,75 @@
 import gradio as gr
 from transformers import CLIPModel, CLIPProcessor
-from PIL import Image

 # Step 1: Load Fine-Tuned Model from Hugging Face Model Hub
 model_name = "quadranttechnologies/retail-content-safety-clip-finetuned"

-print("Initializing the application...")
-
+print("Loading the fine-tuned model from Hugging Face Model Hub...")
 try:
-    print("Loading the model from Hugging Face Model Hub...")
     model = CLIPModel.from_pretrained(model_name, trust_remote_code=True)
     processor = CLIPProcessor.from_pretrained(model_name)
     print("Model and processor loaded successfully.")
 except Exception as e:
-    print(f"Error loading the model or processor: {e}")
+    print(f"Error loading model or processor: {e}")
     raise RuntimeError(f"Failed to load model: {e}")

 # Step 2: Define the Inference Function
 def classify_image(image):
     """
-    Classify an image as 'safe' or 'unsafe' and return probabilities.
+    Classify an image as 'safe' or 'unsafe' and display category and probabilities.

     Args:
         image (PIL.Image.Image): Uploaded image.

     Returns:
-        dict: Classification results or an error message.
+        str: Predicted category ("safe" or "unsafe").
+        dict: Probabilities for "safe" and "unsafe".
     """
     try:
-        print("Starting image classification...")
-
-        # Validate input
+        # Validate image input
        if image is None:
             raise ValueError("No image provided. Please upload a valid image.")

-        # Validate image format
-        if not hasattr(image, "convert"):
-            raise ValueError("Invalid image format. Please upload a valid image (JPEG, PNG, etc.).")
-
         # Define categories
         categories = ["safe", "unsafe"]

-        # Process the image with the processor
-        print("Processing the image...")
+        # Process the image
         inputs = processor(text=categories, images=image, return_tensors="pt", padding=True)
-        print(f"Processed inputs: {inputs}")
-
-        # Run inference with the model
-        print("Running model inference...")
         outputs = model(**inputs)
-        print(f"Model outputs: {outputs}")

-        # Extract logits and probabilities
+        # Get logits and probabilities
         logits_per_image = outputs.logits_per_image  # Image-text similarity scores
         probs = logits_per_image.softmax(dim=1)  # Convert logits to probabilities
-        print(f"Calculated probabilities: {probs}")

-        # Extract probabilities for each category
+        # Extract probabilities
         safe_prob = probs[0][0].item() * 100  # Safe percentage
         unsafe_prob = probs[0][1].item() * 100  # Unsafe percentage

-        # Return results
-        return {
-            "safe": f"{safe_prob:.2f}%",
-            "unsafe": f"{unsafe_prob:.2f}%"
-        }
+        # Determine the predicted category
+        predicted_category = "safe" if safe_prob > unsafe_prob else "unsafe"
+
+        # Return the predicted category and probabilities
+        return predicted_category, {"safe": f"{safe_prob:.2f}%", "unsafe": f"{unsafe_prob:.2f}%"}

     except Exception as e:
-        print(f"Error during classification: {e}")
-        return {"Error": str(e)}
+        print(f"Error during inference: {e}")
+        return f"Error: {str(e)}", {}

 # Step 3: Set Up Gradio Interface
 iface = gr.Interface(
     fn=classify_image,
     inputs=gr.Image(type="pil"),
-    outputs=gr.Textbox(label="Output (Debug Mode)"),  # Use Textbox to display errors if any occur
+    outputs=[
+        gr.Textbox(label="Predicted Category"),  # Display the predicted category prominently
+        gr.Label(label="Probabilities"),  # Display probabilities with a progress bar
+    ],
     title="Content Safety Classification",
     description="Upload an image to classify it as 'safe' or 'unsafe' with corresponding probabilities.",
 )

 # Step 4: Launch Gradio Interface
 if __name__ == "__main__":
-    print("Launching the Gradio interface...")
+    print("Launching Gradio interface...")
     iface.launch()


@@ -102,4 +90,5 @@ if __name__ == "__main__":




+
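
For reviewers who want to exercise the new classify_image contract outside of Gradio, a minimal smoke test could look like the sketch below. It is not part of the commit and assumes that app.py sits in the working directory, that the model download in Step 1 succeeds, and that a sample image exists at the hypothetical path "test.jpg"; the tuple it prints is what gr.Interface passes to the two output components.

# Minimal smoke test for the updated classify_image contract (illustrative sketch, not part of this commit).
# Assumptions: app.py is importable from the current directory and "test.jpg" is a hypothetical sample image.
from PIL import Image

from app import classify_image  # importing app.py runs Step 1 and loads the model/processor

if __name__ == "__main__":
    image = Image.open("test.jpg").convert("RGB")
    predicted_category, probabilities = classify_image(image)
    print(f"Predicted category: {predicted_category}")  # "safe" or "unsafe"
    print(f"Probabilities: {probabilities}")            # e.g. {"safe": "xx.xx%", "unsafe": "xx.xx%"}

With a tuple return, the first element feeds gr.Textbox(label="Predicted Category") and the second feeds gr.Label(label="Probabilities"), matching the outputs list configured in Step 3.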