|
import base64 |
|
import io |
|
import cv2 |
|
import requests |
|
import json |
|
import gradio as gr |
|
import os |
|
from PIL import Image |
|
import numpy as np |
|
from PIL import ImageOps |
|
|
|
|
|
# Endpoint for the Tuba auto-annotation API, supplied via the 'devisionx'
# environment variable. NOTE(review): despite the name, this value is used
# as the URL passed to requests.put() below — confirm it is a URL, not a key.
api_key = os.environ.get('devisionx')

if not api_key:
    # Abort start-up: SystemExit with a message writes it to stderr and
    # exits with status 1 (the original print + exit() exited with 0,
    # which hides the failure from callers/scripts).
    raise SystemExit("devisionx environment variable is not set.")
|
|
|
|
|
|
|
def base64str_to_PILImage(base64str):
    """Decode a base64-encoded image string into a PIL Image.

    EXIF orientation metadata is applied via ``ImageOps.exif_transpose``
    so the returned image is upright regardless of how the camera
    recorded it.

    Parameters
    ----------
    base64str : str
        Base64-encoded image bytes (e.g. JPEG/PNG content).

    Returns
    -------
    PIL.Image.Image
        The decoded, orientation-corrected image.
    """
    raw = base64.b64decode(base64str.encode('utf-8'))
    buffer = io.BytesIO(raw)
    decoded = Image.open(buffer)
    return ImageOps.exif_transpose(decoded)
|
|
|
def get_results(image, prompt, segment):
    """Send an image and a class prompt to the Tuba annotation API and
    return the annotated image.

    Parameters
    ----------
    image : numpy.ndarray
        Input image as delivered by the Gradio ``Image`` component.
    prompt : str
        Comma-separated class names to annotate (e.g. ``"person,car"``).
    segment : str
        ``"Segmentation"`` for polygon output; anything else requests
        bounding-box detection.

    Returns
    -------
    numpy.ndarray
        The annotated image decoded from the API response.

    Raises
    ------
    requests.HTTPError
        If the API replies with a non-2xx status code.
    """
    # Serialize the input as base64-encoded JPEG for the API payload.
    # JPEG has no alpha channel, so normalize to RGB first — RGBA or
    # palette inputs would otherwise make Image.save raise.
    pil_image = Image.fromarray(image).convert("RGB")
    with io.BytesIO() as output:
        pil_image.save(output, format="JPEG")
        base64str = base64.b64encode(output.getvalue()).decode("utf-8")

    # The API encodes the task as a string flag: "1" = segmentation,
    # "0" = detection.
    task_ = "1" if segment == "Segmentation" else "0"
    payload = json.dumps({
        "base64str": base64str,
        "classes": prompt,
        "segment": task_,
    })

    # api_key holds the endpoint URL (see module top). A timeout keeps the
    # UI from hanging indefinitely if the service stalls.
    response = requests.put(api_key, data=payload, timeout=60)
    print(response.status_code)
    # Fail fast with a clear HTTP error instead of a confusing KeyError
    # on the response body below.
    response.raise_for_status()

    data = response.json()
    print(data)

    # The annotated image comes back base64-encoded under the
    # 'firstName' key of the JSON response.
    output_image = base64str_to_PILImage(data['firstName'])
    return np.array(output_image)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Static HTML product description rendered below the demo UI via
# gr.Markdown(description_html) at the bottom of the page.
# NOTE(review): the stray 'π' in the <h1> looks like a mis-encoded
# character (possibly an emoji lost in transit) — confirm the intended
# glyph before changing this runtime string.
description_html = """

<!DOCTYPE html>

<html>

<head>

<title>Tuba AI Auto-Annotation </title>

</head>

<body>

<h1>Tuba AI Auto-Annotation π</h1>

<h2>Saving Time, Bounding Boxes & Polygons at a Time </h2>

<h2>Introduction</h2>

<p>Welcome to the world of DevisionX, where AI meets vision to revolutionize annotation. Our mission is to make computer vision accessible to all, and this README is your gateway to understanding how our auto-annotation model can change the way you work.</p>

<h2>Meet Tuba.AI - Your Partner in Vision</h2>

<h3>What is Tuba?</h3>

<p>Tuba is the secret sauce behind DevisionX, your no-code/low-code companion for all things computer vision. It's your toolkit for labeling, training data, and deploying AI-vision applications faster and easier than ever before.</p>

<ul>

<li>No-Code/Low-Code: Say goodbye to complex coding. Tuba's user-friendly interface makes it accessible to everyone.</li>

<li>Labeling Made Easy: Annotate your data effortlessly with Tuba's intuitive tools.</li>

<li>Faster Deployment: Deploy your AI models with ease, whether you're building a standalone app or integrating within an existing one.</li>

<li>State-of-the-Art Technology: Tuba is powered by the latest AI tech and follows production-ready standards.</li>

</ul>

<h2>The DevisionX Auto-Annotation</h2>

<p>Our auto-annotation model is a game-changer. It takes input text and images, weaving them together to generate precise bounding boxes. This AI marvel comes with a plethora of benefits:</p>

<ul>

<li>Time Saver: Say goodbye to hours of manual annotation. Let our model do the heavy lifting.</li>

<li>Annotation Formats: It speaks the language of YOLO and COCO, making it versatile for various projects.</li>

<li>Human Assistance: While it's incredibly efficient, it also respects human creativity and can be your reliable assistant.</li>

</ul>

<h2>Let's Build Together</h2>

<p>We are here to redefine the way you approach computer vision. Join us in this exciting journey, where AI meets creativity, and innovation knows no bounds.</p>

<p>Get started today and be a part of the future of vision.</p>

</body>

</html>







"""
|
# UI metadata and clickable gallery examples for the Gradio demo.
# (The redundant `import os` that sat here was removed — os is already
# imported at the top of the file.)
title = "autoannotation"

description = "This is a project description. It demonstrates how to use Gradio with an image and text input to interact with an API."

# Each example row is [image path, prompt, task]; the image files are
# expected to sit next to this script.
examples = [
    ["traffic.jpg", 'person,car,traffic sign,traffic light', "Segmentation"],
    ["3000.jpeg", 'person,car,traffic sign,traffic light', "Detection"],
]
|
|
|
|
|
|
|
|
|
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks() as demo:
    # Page header / banner.
    gr.Markdown(
        """

<div style="text-align: center;">

<h1>Tuba Autoannotation Demo</h1>

<h3>A prompt based controllable model for auto annotation (Detection and Segmentation) </h3>

<h3>Saving Time, Bounding Boxes & Polygons at a Time </h3>

Powered by <a href="https://Tuba.ai">Tuba</a>

</div>

"""
    )

    # Input image (left) and annotated result (right).
    with gr.Row():
        image_input = gr.Image()
        output = gr.Image(type="numpy", label="Output Image")

    with gr.Row():
        text_input = gr.Textbox(label="Prompt")

    with gr.Row():
        # BUG FIX: the original used value="file", which is not one of the
        # Radio choices and left the component in an invalid initial state.
        # Default to "Detection" (a valid choice; the API treats any
        # non-"Segmentation" value as detection).
        # NOTE(review): "of of" in the info text looks like a typo in the
        # original user-facing string; left unchanged here.
        segment_checkbox = gr.Radio(
            ["Segmentation", "Detection"],
            value="Detection",
            label="Select Detection or Segmentation",
            info="Select Segmentation to extract Polygons or Detection to extract only the bounding boxes of of the desired objects automatically",
        )

    with gr.Row():
        button = gr.Button("Run")

    # Wire the Run button to the API call; api_name exposes it as an
    # HTTP endpoint of the Gradio app as well.
    button.click(
        fn=get_results,
        inputs=[image_input, text_input, segment_checkbox],
        outputs=output,
        api_name="get_results",
    )

    # Clickable examples that prefill the inputs and run the model.
    gr.Examples(
        fn=get_results,
        examples=examples,
        inputs=[image_input, text_input, segment_checkbox],
        outputs=[output],
    )

    gr.Markdown(description_html)

demo.launch(share=False)