Dileep7729 committed on
Commit
ca0f653
·
verified ·
1 Parent(s): c2ae397

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -76
app.py CHANGED
@@ -1,104 +1,55 @@
1
  import gradio as gr
2
  from transformers import CLIPModel, CLIPProcessor
3
- from PIL import Image
4
 
5
# Step 1: Load Fine-Tuned Model from Hugging Face Model Hub
model_name = "quadranttechnologies/retail-content-safety-clip-finetuned"

print("Initializing the application...")

try:
    print("Loading the model from Hugging Face Model Hub...")
    # NOTE(review): trust_remote_code executes code shipped with the repo —
    # confirm the model source is trusted.
    model = CLIPModel.from_pretrained(model_name, trust_remote_code=True)
    processor = CLIPProcessor.from_pretrained(model_name)
    print("Model and processor loaded successfully.")
except Exception as load_error:
    # Fail fast and loudly: nothing downstream works without the model.
    print(f"Error loading the model or processor: {load_error}")
    raise RuntimeError(f"Failed to load model: {load_error}")
18
 
19
# Step 2: Define the Inference Function
def classify_image(image):
    """
    Classify an image as 'safe' or 'unsafe' and return probabilities.

    Args:
        image (PIL.Image.Image): Uploaded image.

    Returns:
        dict: Classification results or an error message.
    """
    try:
        print("Starting image classification...")

        # Reject missing or non-PIL inputs before touching the model.
        if image is None:
            raise ValueError("No image provided. Please upload a valid image.")
        if not hasattr(image, "convert"):
            raise ValueError("Invalid image format. Please upload a valid image (JPEG, PNG, etc.).")

        labels = ["safe", "unsafe"]

        # Tokenize the labels and preprocess the image in one call.
        print("Processing the image...")
        inputs = processor(text=labels, images=image, return_tensors="pt", padding=True)
        print(f"Processed inputs: {inputs}")

        print("Running model inference...")
        outputs = model(**inputs)
        print(f"Model outputs: {outputs}")

        # Image-text similarity scores, normalized into probabilities.
        probs = outputs.logits_per_image.softmax(dim=1)
        print(f"Calculated probabilities: {probs}")

        # Percentages for each label, in the same order as `labels`.
        percentages = [p.item() * 100 for p in probs[0]]

        return {
            "safe": f"{percentages[0]:.2f}%",
            "unsafe": f"{percentages[1]:.2f}%"
        }

    except Exception as e:
        # Surface the failure in the UI instead of crashing the request.
        print(f"Error during classification: {e}")
        return {"Error": str(e)}
73
 
74
# Step 3: Set Up Gradio Interface
image_input = gr.Image(type="pil")
label_output = gr.Label(label="Output")  # Display probabilities as progress bars

iface = gr.Interface(
    fn=classify_image,
    inputs=image_input,
    outputs=label_output,
    title="Content Safety Classification",
    description="Upload an image to classify it as 'safe' or 'unsafe' with corresponding probabilities.",
)
82
 
 
83
if __name__ == "__main__":
    print("Testing model locally with a sample image...")
    from PIL import Image
    import requests

    # Use a sample image
    url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"
    try:
        response = requests.get(url, stream=True)
        sample_image = Image.open(response.raw)

        # Smoke-test the classification function before serving.
        print("Running local test...")
        print(f"Local Test Result: {classify_image(sample_image)}")
    except Exception as e:
        # Best-effort test only: a failure here must not block the app launch.
        print(f"Error during local test: {e}")

    # Launch Gradio Interface
    print("Launching the Gradio interface...")
    iface.launch()
103
 
104
 
@@ -127,5 +78,6 @@ if __name__ == "__main__":
127
 
128
 
129
 
 
130
 
131
 
 
1
  import gradio as gr
2
  from transformers import CLIPModel, CLIPProcessor
 
3
 
4
# Step 1: Load Fine-Tuned Model from Hugging Face Model Hub
model_name = "quadranttechnologies/retail-content-safety-clip-finetuned"

print("Loading the fine-tuned model from Hugging Face Model Hub...")
# NOTE(review): trust_remote_code runs code shipped with the repo — confirm
# the model source is trusted. No error handling here: a download/load
# failure aborts app startup with the raw exception.
model = CLIPModel.from_pretrained(model_name, trust_remote_code=True)
processor = CLIPProcessor.from_pretrained(model_name)
print("Model loaded successfully.")
 
 
 
 
 
 
11
 
12
# Step 2: Define the Inference Function
def classify_image(image):
    """
    Classify an image as 'safe' or 'unsafe' with probabilities and display as a progress bar.

    Args:
        image (PIL.Image.Image): The input image.

    Returns:
        dict: A dictionary containing probabilities for 'safe' and 'unsafe',
            or {"Error": message} if the input is missing/invalid or
            inference fails.
    """
    # Guard against Gradio submitting an empty input: without this check the
    # processor call below raises and the user sees a raw stack trace.
    if image is None:
        return {"Error": "No image provided. Please upload a valid image."}

    try:
        # Define the main categories
        main_categories = ["safe", "unsafe"]

        # Process the image with the main categories
        inputs = processor(text=main_categories, images=image, return_tensors="pt", padding=True)
        outputs = model(**inputs)
        logits_per_image = outputs.logits_per_image  # Image-text similarity scores
        probs = logits_per_image.softmax(dim=1)  # Convert logits to probabilities

        # Extract the probabilities
        safe_probability = probs[0][0].item() * 100  # Safe percentage
        unsafe_probability = probs[0][1].item() * 100  # Unsafe percentage

        # Return probabilities as a dictionary for display in Gradio's Label component
        return {
            "safe": f"{safe_probability:.2f}%",
            "unsafe": f"{unsafe_probability:.2f}%"
        }
    except Exception as e:
        # Surface the failure in the UI (matching the app's earlier
        # error-reporting style) instead of crashing the request.
        return {"Error": str(e)}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
41
 
42
# Step 3: Set Up Gradio Interface
_image_input = gr.Image(type="pil")
_result_label = gr.Label(label="Output")

iface = gr.Interface(
    fn=classify_image,
    inputs=_image_input,
    outputs=_result_label,
    title="Content Safety Classification",
    description="Upload an image to classify it as 'safe' or 'unsafe' with corresponding probabilities.",
)
50
 
51
# Step 4: Launch Gradio Interface
# Only start the web server when run as a script, not when imported.
if __name__ == "__main__":
    iface.launch()
54
 
55
 
 
78
 
79
 
80
 
81
+
82
 
83