import base64
import io
import json
import os

import gradio as gr
import numpy as np
import requests
from PIL import Image, ImageOps
# Read the API endpoint from the 'devisionx' environment variable
api_url = os.environ.get('devisionx')

# Exit early if the environment variable is not set
if not api_url:
    print("devisionx environment variable is not set.")
    exit()
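# Note: 'devisionx' is expected to contain the full API endpoint URL, e.g. set as a
# Hugging Face Space secret or exported before launching locally (the URL below is
# a hypothetical placeholder, not the real endpoint):
#
#   export devisionx="https://example.com/autoannotate"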
# Helper: decode a base64 string into a PIL image, respecting EXIF orientation
def base64str_to_PILImage(base64str):
    base64_img_bytes = base64str.encode('utf-8')
    base64bytes = base64.b64decode(base64_img_bytes)
    bytesObj = io.BytesIO(base64bytes)
    return ImageOps.exif_transpose(Image.open(bytesObj))
# Call the annotation API with an image and a comma-separated class prompt
def get_results(image, prompt):
    threshold = 0.5  # Detection threshold (currently unused in the request)
    # Convert the NumPy array to a PIL image
    image = Image.fromarray(image)
    # Encode the image as a base64 JPEG string
    with io.BytesIO() as output:
        image.save(output, format="JPEG")
        base64str = base64.b64encode(output.getvalue()).decode("utf-8")
    # Prepare the payload (adjust this part according to the API requirements)
    payload = json.dumps({"base64str": base64str, "classes": prompt})
    # Send the request to the API endpoint
    response = requests.put(api_url, data=payload)
    # Parse the JSON response
    data = response.json()
    print(response.status_code)
    print(data)
    # Access the values (adjust this part according to the API response format)
    output_image_base64 = data['firstName']  # Assuming the API returns the output image as base64 under this key
    # Convert the output image from base64 to PIL and then to a NumPy array
    output_image = base64str_to_PILImage(output_image_base64)
    output_image = np.array(output_image)
    return output_image
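# A minimal, commented-out local test sketch for get_results (not executed on
# Space startup; "sample.jpg" is a placeholder path and the class list is illustrative):
#
#   test_image = np.array(Image.open("sample.jpg"))
#   annotated = get_results(test_image, "person,car")
#   Image.fromarray(annotated).save("annotated.jpg")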
# The Gradio input and output components are defined inside the Blocks layout below.
# Define the text description within an HTML <div> element
description_html = """
<div>
<h1>Tuba AI Auto-Annotation Model 🚀</h1>
<h2>Saving Time, Bounding Boxes at a Time</h2>
<h2>Introduction</h2>
<p>Welcome to the world of DevisionX, where AI meets vision to revolutionize annotation. Our mission is to make computer vision accessible to all, and this README is your gateway to understanding how our auto-annotation model can change the way you work.</p>
<h2>Meet Tuba.AI - Your Partner in Vision</h2>
<h3>What is Tuba?</h3>
<p>Tuba is the secret sauce behind DevisionX, your no-code/low-code companion for all things computer vision. It's your toolkit for labeling, training data, and deploying AI-vision applications faster and easier than ever before.</p>
<ul>
<li>No-Code/Low-Code: Say goodbye to complex coding. Tuba's user-friendly interface makes it accessible to everyone.</li>
<li>Labeling Made Easy: Annotate your data effortlessly with Tuba's intuitive tools.</li>
<li>Faster Deployment: Deploy your AI models with ease, whether you're building a standalone app or integrating within an existing one.</li>
<li>State-of-the-Art Technology: Tuba is powered by the latest AI tech and follows production-ready standards.</li>
</ul>
<h2>The DevisionX Auto-Annotation</h2>
<p>Our auto-annotation model is a game-changer. It takes input text and images, weaving them together to generate precise bounding boxes. This AI marvel comes with a plethora of benefits:</p>
<ul>
<li>Time Saver: Say goodbye to hours of manual annotation. Let our model do the heavy lifting.</li>
<li>Annotation Formats: It speaks the language of YOLO and COCO, making it versatile for various projects.</li>
<li>Human Assistance: While it's incredibly efficient, it also respects human creativity and can be your reliable assistant.</li>
</ul>
<h2>Let's Build Together</h2>
<p>DevisionX and Tuba are here to redefine the way you approach computer vision. Join us in this exciting journey, where AI meets creativity, and innovation knows no bounds.</p>
<p>Get started today and be a part of the future of vision.</p>
</div>
"""
title = "autoannotation"
description = "This is a project description. It demonstrates how to use Gradio with an image and text input to interact with an API."
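# Example inputs for gr.Examples below; these image files are assumed to ship
# with the Space repository alongside app.py.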
examples = [
    ["3000.jpeg", 'person,car,traffic sign,traffic light'],
    ["traffic.jpg", 'person,car,traffic sign,traffic light'],
]
# Create a Blocks object and use it as a context manager
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Tuba Autoannotation Demo

        This is your private demo for [Tuba Autoannotation](https://Tuba.ai),
        a prompt-based, controllable model for auto-annotation.
        """
    )
    # Define the input and output image components and add them to the layout
    with gr.Row():
        image_input = gr.Image()
        output = gr.Image(type="numpy", label="Output Image")
    # Define the prompt input and the run button and add them to the layout
    with gr.Row():
        text_input = gr.Textbox(label="Prompt")
    with gr.Row():
        button = gr.Button("Run")
    # Event listener that connects the inputs to the output and triggers the function
    button.click(fn=get_results, inputs=[image_input, text_input], outputs=output, api_name="get_results")
    # Clickable examples that run the same function
    gr.Examples(
        fn=get_results,
        examples=examples,
        inputs=[image_input, text_input],
        outputs=[output],
    )
    # Add the project description below the layout
    gr.Markdown(description_html)

# Launch the app
demo.launch(share=False)
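# Once the Space is running, the click handler is also exposed over the Gradio API
# (api_name="get_results"). A rough client-side sketch using gradio_client -- the
# URL and file path are placeholders, and the exact argument handling for image
# inputs may vary with the gradio_client version:
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860")
#   result = client.predict("traffic.jpg", "person,car", api_name="/get_results")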