Dileep7729 committed
Commit 790f088 · verified · 1 Parent(s): ca0f653

Update app.py

Files changed (1)
  1. app.py (+76, -27)
app.py CHANGED
@@ -1,55 +1,103 @@
 import gradio as gr
 from transformers import CLIPModel, CLIPProcessor
+from PIL import Image
+import requests
 
 # Step 1: Load Fine-Tuned Model from Hugging Face Model Hub
 model_name = "quadranttechnologies/retail-content-safety-clip-finetuned"
 
-print("Loading the fine-tuned model from Hugging Face Model Hub...")
-model = CLIPModel.from_pretrained(model_name, trust_remote_code=True)
-processor = CLIPProcessor.from_pretrained(model_name)
-print("Model loaded successfully.")
+print("Initializing the application...")
+
+try:
+    print("Loading the model from Hugging Face Model Hub...")
+    model = CLIPModel.from_pretrained(model_name, trust_remote_code=True)
+    processor = CLIPProcessor.from_pretrained(model_name)
+    print("Model and processor loaded successfully.")
+except Exception as e:
+    print(f"Error loading the model or processor: {e}")
+    raise RuntimeError(f"Failed to load model: {e}")
 
 # Step 2: Define the Inference Function
 def classify_image(image):
     """
-    Classify an image as 'safe' or 'unsafe' with probabilities and display as a progress bar.
+    Classify an image as 'safe' or 'unsafe' and return probabilities.
 
     Args:
-        image (PIL.Image.Image): The input image.
+        image (PIL.Image.Image): Uploaded image.
 
     Returns:
-        dict: A dictionary containing probabilities for 'safe' and 'unsafe'.
+        dict: Classification results or an error message.
     """
-    # Define the main categories
-    main_categories = ["safe", "unsafe"]
-
-    # Process the image with the main categories
-    inputs = processor(text=main_categories, images=image, return_tensors="pt", padding=True)
-    outputs = model(**inputs)
-    logits_per_image = outputs.logits_per_image  # Image-text similarity scores
-    probs = logits_per_image.softmax(dim=1)  # Convert logits to probabilities
-
-    # Extract the probabilities
-    safe_probability = probs[0][0].item() * 100  # Safe percentage
-    unsafe_probability = probs[0][1].item() * 100  # Unsafe percentage
-
-    # Return probabilities as a dictionary for display in Gradio's Label component
-    return {
-        "safe": f"{safe_probability:.2f}%",
-        "unsafe": f"{unsafe_probability:.2f}%"
-    }
+    try:
+        print("Starting image classification...")
+
+        # Validate input
+        if image is None:
+            raise ValueError("No image provided. Please upload a valid image.")
+
+        # Validate image format
+        if not hasattr(image, "convert"):
+            raise ValueError("Invalid image format. Please upload a valid image (JPEG, PNG, etc.).")
+
+        # Define categories
+        categories = ["safe", "unsafe"]
+
+        # Process the image with the processor
+        print("Processing the image...")
+        inputs = processor(text=categories, images=image, return_tensors="pt", padding=True)
+        print(f"Processed inputs: {inputs}")
+
+        # Run inference with the model
+        print("Running model inference...")
+        outputs = model(**inputs)
+        print(f"Model outputs: {outputs}")
+
+        # Extract logits and probabilities
+        logits_per_image = outputs.logits_per_image  # Image-text similarity scores
+        probs = logits_per_image.softmax(dim=1)  # Convert logits to probabilities
+        print(f"Calculated probabilities: {probs}")
+
+        # Extract probabilities for each category
+        safe_prob = probs[0][0].item() * 100  # Safe percentage
+        unsafe_prob = probs[0][1].item() * 100  # Unsafe percentage
+
+        # Return results
+        return {
+            "safe": f"{safe_prob:.2f}%",
+            "unsafe": f"{unsafe_prob:.2f}%"
+        }
+
+    except Exception as e:
+        # Log and return detailed error messages
+        print(f"Error during classification: {e}")
+        return {"Error": str(e)}
 
 # Step 3: Set Up Gradio Interface
 iface = gr.Interface(
     fn=classify_image,
     inputs=gr.Image(type="pil"),
-    outputs=gr.Label(label="Output"),
+    outputs=gr.Label(label="Output"),  # Display probabilities as progress bars
     title="Content Safety Classification",
     description="Upload an image to classify it as 'safe' or 'unsafe' with corresponding probabilities.",
 )
 
-# Step 4: Launch Gradio Interface
+# Step 4: Test Before Launch
 if __name__ == "__main__":
+    print("Testing model locally with a sample image...")
+    try:
+        # Test with a sample image
+        url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"
+        test_image = Image.open(requests.get(url, stream=True).raw)
+
+        # Run the classification function
+        print("Running local test...")
+        result = classify_image(test_image)
+        print(f"Local Test Result: {result}")
+    except Exception as e:
+        print(f"Error during local test: {e}")
+
+    # Launch Gradio Interface
+    print("Launching the Gradio interface...")
     iface.launch()
 
 
@@ -79,5 +127,6 @@ if __name__ == "__main__":
 
 
 
+
 
 
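For a quick sanity check outside the Gradio UI, the sketch below reproduces the same safe/unsafe scoring path used in the updated app.py (CLIPProcessor with text and image inputs, logits_per_image, softmax). It is only a sketch under assumptions: the image path "my_image.jpg" is a hypothetical placeholder, and it presumes the quadranttechnologies/retail-content-safety-clip-finetuned checkpoint loads with the same from_pretrained calls shown in the diff above.

# Minimal sketch: the app's classification path without Gradio.
# "my_image.jpg" is a hypothetical placeholder path.
from PIL import Image
from transformers import CLIPModel, CLIPProcessor

model_name = "quadranttechnologies/retail-content-safety-clip-finetuned"
model = CLIPModel.from_pretrained(model_name, trust_remote_code=True)
processor = CLIPProcessor.from_pretrained(model_name)

labels = ["safe", "unsafe"]
image = Image.open("my_image.jpg")  # placeholder; any RGB image works

# Same call pattern as classify_image(): text+image inputs, then softmax
# over the image-text similarity logits.
inputs = processor(text=labels, images=image, return_tensors="pt", padding=True)
probs = model(**inputs).logits_per_image.softmax(dim=1)[0]

for label, p in zip(labels, probs.tolist()):
    print(f"{label}: {p * 100:.2f}%")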