sigyllly committed
Commit 35b65a4 · verified · 1 Parent(s): 55b3155

Update app.py

Files changed (1):
  1. app.py +35 -37
app.py CHANGED
@@ -1,8 +1,11 @@
+from flask import Flask, request, jsonify
 from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
-import gradio as gr
 from PIL import Image
 import torch
 import numpy as np
+import io
+import base64
+
 
 # Load CLIPSeg processor and model
 processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
@@ -41,6 +44,10 @@ def get_masks(prompts, img, threshold):
 
     return masks
 
+@app.route('/')
+def home():
+    return 'Server started. Verify it is running by visiting http://0.0.0.0:7860/'
+
 # Function to extract image using positive and negative prompts
 def extract_image(pos_prompts, neg_prompts, img, threshold):
     positive_masks = get_masks(pos_prompts, img, 0.5)
@@ -56,39 +63,30 @@ def extract_image(pos_prompts, neg_prompts, img, threshold):
 
     return output_image, final_mask
 
-# Gradio UI
-iface_ui = gr.Interface(
-    fn=extract_image,
-    inputs=[
-        gr.Textbox(
-            label="Please describe what you want to identify (comma separated)",
-            key="pos_prompts",
-        ),
-        gr.Textbox(
-            label="Please describe what you want to ignore (comma separated)",
-            key="neg_prompts",
-        ),
-        gr.Image(type="pil", label="Input Image", key="img"),
-        gr.Slider(minimum=0, maximum=1, default=0.4, label="Threshold", key="threshold"),
-    ],
-    outputs=[
-        gr.Image(label="Result", key="output_image"),
-        gr.Image(label="Mask", key="output_mask"),
-    ],
-)
-
-# Launch Gradio UI
-# Launch Gradio UI
-iface_ui.launch(share=True)
-
-# Non-UI Version
-def run_non_ui(image_path, pos_prompts, neg_prompts, threshold):
-    img = Image.open(image_path)
-    output_image, output_mask = extract_image(pos_prompts, neg_prompts, img, threshold)
-
-    # Save or use the output_image and output_mask as needed
-    output_image.show()  # For demonstration purposes, opens the image with the default image viewer
-    output_mask.show()  # For demonstration purposes, opens the mask with the default image viewer
-
-# Example of using non-UI version
-# run_non_ui("path/to/your/image.jpg", "positive prompt", "negative prompt", 0.5)
+@app.route('/api', methods=['POST'])
+def process_request():
+    data = request.json
+
+    # Convert base64 image to PIL Image
+    base64_image = data.get('image')
+    image_data = base64.b64decode(base64_image.split(',')[1])
+    img = Image.open(io.BytesIO(image_data))
+
+    # Get other parameters
+    pos_prompts = data.get('positive_prompts', '')
+    neg_prompts = data.get('negative_prompts', '')
+    threshold = float(data.get('threshold', 0.4))
+
+    # Perform image segmentation
+    output_image, final_mask = extract_image(pos_prompts, neg_prompts, img, threshold)
+
+    # Convert result to base64 for response
+    buffered = io.BytesIO()
+    output_image.save(buffered, format="PNG")
+    result_image_base64 = base64.b64encode(buffered.getvalue()).decode("utf-8")
+
+    return jsonify({'result_image_base64': result_image_base64})
+
+if __name__ == '__main__':
+    print("Server starting. Verify it is running by visiting http://0.0.0.0:7860/")
+    app.run(host='0.0.0.0', port=7860, debug=True)
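
Below is a minimal, hypothetical client sketch (not part of this commit) showing how the new /api endpoint could be called. It assumes the server is running locally on port 7860, that the requests package is installed, and that app = Flask(__name__) is defined somewhere in app.py outside the hunks shown above. Because the handler decodes base64_image.split(',')[1], the image is sent as a data URI; the file names, prompts, and threshold below are placeholders.

# Hypothetical client for the /api endpoint added in this commit.
import base64
import requests

# Encode a local image as a data URI; the server splits on ',' and
# base64-decodes the second part, so the prefix is expected.
with open("input.png", "rb") as f:  # "input.png" is a placeholder path
    encoded = base64.b64encode(f.read()).decode("utf-8")

payload = {
    "image": "data:image/png;base64," + encoded,
    "positive_prompts": "a dog",       # comma-separated things to identify
    "negative_prompts": "background",  # comma-separated things to ignore
    "threshold": 0.4,
}

resp = requests.post("http://0.0.0.0:7860/api", json=payload)
resp.raise_for_status()

# The server returns the segmented image as a base64-encoded PNG.
result_b64 = resp.json()["result_image_base64"]
with open("result.png", "wb") as out:
    out.write(base64.b64decode(result_b64))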