Dileep7729 committed
Commit 52ea34b · verified · 1 Parent(s): d237a07

Update app.py

Files changed (1):
  1. app.py +25 -18
app.py CHANGED
@@ -1,13 +1,19 @@
 import gradio as gr
 from transformers import CLIPModel, CLIPProcessor
+from PIL import Image
+import torch

 # Step 1: Load Fine-Tuned Model from Hugging Face Model Hub
 model_name = "quadranttechnologies/retail-content-safety-clip-finetuned"

 print("Loading the fine-tuned model from Hugging Face Model Hub...")
-model = CLIPModel.from_pretrained(model_name, trust_remote_code=True)
-processor = CLIPProcessor.from_pretrained(model_name)
-print("Model loaded successfully.")
+try:
+    model = CLIPModel.from_pretrained(model_name, trust_remote_code=True)
+    processor = CLIPProcessor.from_pretrained(model_name)
+    print("Model loaded successfully.")
+except Exception as e:
+    print(f"Error loading model or processor: {str(e)}")
+    raise

 # Step 2: Define the Inference Function
 def classify_image(image):
@@ -18,40 +24,43 @@ def classify_image(image):
         image (PIL.Image.Image): The input image.

     Returns:
-        dict: A dictionary containing probabilities for 'safe' and 'unsafe'.
+        dict: A dictionary containing probabilities for 'safe' and 'unsafe' or an error message.
     """
     try:
-        # Debug: Check if the image is loaded
+        # Check if the image is valid
         if image is None:
             raise ValueError("No image provided. Please upload an image.")
+        if not hasattr(image, "convert"):
+            raise ValueError("Uploaded file is not a valid image. Please upload a valid image (JPEG, PNG).")

-        # Define the main categories
+        # Define main categories
         main_categories = ["safe", "unsafe"]

-        # Process the image with the model processor
+        # Process the image
         print("Processing the image...")
         inputs = processor(text=main_categories, images=image, return_tensors="pt", padding=True)
-        print(f"Inputs processed: {inputs}")
+        print("Inputs processed successfully.")

-        # Perform inference using the model
+        # Perform inference
         outputs = model(**inputs)
-        print(f"Model outputs: {outputs}")
+        print("Model inference completed.")

-        # Extract probabilities for each category
+        # Calculate probabilities
         logits_per_image = outputs.logits_per_image  # Image-text similarity scores
         probs = logits_per_image.softmax(dim=1)  # Convert logits to probabilities

-        # Safe and unsafe probabilities
+        # Extract probabilities for "safe" and "unsafe"
         safe_probability = probs[0][0].item() * 100  # Convert to percentage
         unsafe_probability = probs[0][1].item() * 100  # Convert to percentage

         print(f"Safe: {safe_probability:.2f}%, Unsafe: {unsafe_probability:.2f}%")

-        # Return the results as a dictionary for display in Gradio
+        # Return results
         return {
             "safe": f"{safe_probability:.2f}%",
             "unsafe": f"{unsafe_probability:.2f}%"
         }
+
     except Exception as e:
         print(f"Error during inference: {str(e)}")
         return {"Error": str(e)}
@@ -60,12 +69,9 @@ def classify_image(image):
 iface = gr.Interface(
     fn=classify_image,
     inputs=gr.Image(type="pil"),
-    outputs=gr.Label(label="Output"),  # Use Gradio's Label component for progress bar display
+    outputs=gr.Label(label="Output"),  # Use Gradio's Label component for user-friendly display
     title="Content Safety Classification",
-    description=(
-        "Upload an image to classify it as 'safe' or 'unsafe' with corresponding probabilities. "
-        "The model will analyze the image and provide probabilities for each category."
-    ),
+    description="Upload an image to classify it as 'safe' or 'unsafe' with corresponding probabilities.",
 )

 # Step 4: Launch Gradio Interface
@@ -89,3 +95,4 @@ if __name__ == "__main__":



+
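For a quick check outside the Gradio UI, the inference path used by classify_image can be exercised directly. A minimal sketch, reusing the repo id and the processor/model calls from the diff; the test image path is a hypothetical placeholder:

from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model_name = "quadranttechnologies/retail-content-safety-clip-finetuned"
model = CLIPModel.from_pretrained(model_name, trust_remote_code=True)
processor = CLIPProcessor.from_pretrained(model_name)

# Same pipeline as classify_image: score the image against both text labels
image = Image.open("example.jpg")  # hypothetical local test image
inputs = processor(text=["safe", "unsafe"], images=image, return_tensors="pt", padding=True)
outputs = model(**inputs)
probs = outputs.logits_per_image.softmax(dim=1)  # shape [1, 2]; the row sums to 1
print(f"safe={probs[0][0].item():.4f}, unsafe={probs[0][1].item():.4f}")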
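One design note on the output component: gr.Label draws its confidence bars from a dict of floats in [0, 1], so the pre-formatted percentage strings returned by classify_image will likely display as plain text (or fail validation, depending on the Gradio version). If the bar display is wanted, returning raw probabilities would suffice; a sketch against the same probs tensor as above:

return {"safe": probs[0][0].item(), "unsafe": probs[0][1].item()}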