import base64
import io
import json
import os

import gradio as gr
import numpy as np
import requests
from PIL import Image, ImageOps
# Read the API endpoint from the environment.
# Note: despite the name, the 'devisionx' variable is expected to hold the
# full URL of the annotation endpoint (it is passed to requests.put below).
api_key = os.environ.get('devisionx')

# Abort early if the environment variable is not set
if not api_key:
    print("devisionx environment variable is not set.")
    exit()
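# A minimal sketch of how the variable might be provided before launching the
# app (the URL below is a hypothetical placeholder, not the real endpoint):
#
#   export devisionx="https://example.com/annotate"
#   python app.py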
# Decode a base64 string into a PIL image, applying any EXIF orientation tag
def base64str_to_PILImage(base64str):
    base64_img_bytes = base64str.encode('utf-8')
    base64bytes = base64.b64decode(base64_img_bytes)
    bytesObj = io.BytesIO(base64bytes)
    return ImageOps.exif_transpose(Image.open(bytesObj))
# Call the annotation API with an image and a text prompt, and return the
# annotated image as a NumPy array
def get_results(image, prompt):
    threshold = 0.5  # Confidence threshold (currently unused in the request payload)

    # Convert the NumPy array coming from Gradio to a PIL image
    image = Image.fromarray(image)

    # Encode the image as a base64 JPEG string
    with io.BytesIO() as output:
        image.save(output, format="JPEG")
        base64str = base64.b64encode(output.getvalue()).decode("utf-8")

    # Prepare the payload (adjust this part according to the API requirements)
    payload = json.dumps({"base64str": base64str, "classes": prompt})

    # Send the request to the API (the endpoint URL comes from the environment)
    response = requests.put(api_key, data=payload)

    # Parse the JSON response
    data = response.json()
    print(response.status_code)
    print(data)

    # Access the values (adjust this part according to the API response format)
    output_image_base64 = data['firstName']  # The API returns the output image as base64 under this key

    # Convert the output image from base64 to PIL and then to a NumPy array
    output_image = base64str_to_PILImage(output_image_base64)
    output_image = np.array(output_image)
    return output_image
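# A hedged sketch of exercising get_results locally, assuming the environment
# variable is set and a test image "wheel.jpg" (a hypothetical filename) sits
# next to this script:
#
#   img = np.array(Image.open("wheel.jpg"))
#   annotated = get_results(img, "wheel")
#   Image.fromarray(annotated).save("wheel_annotated.jpg")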
# Define the text description within an HTML <div> element
description_html = """
<html>
<head>
<style>
.description {
    margin: 20px;
    padding: 10px;
    border: 1px solid #ccc;
}
</style>
</head>
<body>
<div class="description">
<p><strong>Description:</strong></p>
<p>We present a demo for performing object segmentation with a YOLOv8-seg model trained on a wheel image dataset. The model was trained on 696 training images and validated on 199 images.</p>
<p><strong>Usage:</strong></p>
<p>You can upload wheel images, and the demo will return your segmented image.</p>
<p><strong>Dataset:</strong></p>
<p>This dataset comprises a total of 994 images, which are divided into three distinct sets for various purposes:</p>
<ul>
<li><strong>Training Set:</strong> It includes 696 images and is intended for training the model.</li>
<li><strong>Validation Set:</strong> There are 199 images in the validation set, which is used for optimizing model parameters during development.</li>
<li><strong>Test Set:</strong> This set consists of 99 images and serves as a separate evaluation dataset to assess the performance of trained models.</li>
</ul>
<p><strong>License:</strong> This dataset is made available under the Creative Commons Attribution 4.0 International License (CC BY 4.0).</p>
<p>To access and download this dataset, please follow this link: <a href="https://universe.roboflow.com/project-wce7s/1000_seg_wheel" target="_blank">Dataset Download</a></p>
<p><strong>Download Dataset:</strong></p>
<p>To download the dataset we used, you can run the following command in Colab:</p>
<pre>!wget https://universe.roboflow.com/ds/OPPOJjnJPs?key=5yzDMD610e</pre>
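<p>Note: the link serves a ZIP archive, so you will likely want to name the download and extract it afterwards. A hedged example (the output filename and folder are our choice, not part of the dataset):</p>
<pre>!wget "https://universe.roboflow.com/ds/OPPOJjnJPs?key=5yzDMD610e" -O dataset.zip
!unzip dataset.zip -d dataset</pre>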
<p>Feel free to explore and use this repository for your object segmentation needs. If you have any questions or need assistance, please don't hesitate to reach out.</p>
</div>
</body>
</html>
"""
# Title and description strings (not used by the Blocks layout below)
title = "autoannotation"
description = "This is a project description. It demonstrates how to use Gradio with an image and text input to interact with an API."
# Create a Blocks object and use it as a context manager
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Tuba Autoannotation Demo

        This is your private demo for [Tuba Autoannotation](https://Tuba.ai),
        a simple and controllable tool for automatic image annotation
        """
    )
    # Define the input and output image components side by side
    with gr.Row():
        image_input = gr.Image(label="Input Image")
        output = gr.Image(type="numpy", label="Output Image")

    # Define the prompt input and the run button
    with gr.Row():
        text_input = gr.Textbox(label="Prompt")
    with gr.Row():
        button = gr.Button("Run")

    # Define the event listener that connects the input and output components and triggers the function
    button.click(fn=get_results, inputs=[image_input, text_input], outputs=output, api_name="get_results")
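    # Hedged usage sketch: because api_name="get_results" is set above, the
    # endpoint can also be called programmatically with gradio_client once the
    # app is running (the Space name below is a hypothetical placeholder, and
    # newer gradio_client versions may require wrapping the image path with
    # handle_file):
    #
    #   from gradio_client import Client
    #   client = Client("user/space-name")
    #   result = client.predict("wheel.jpg", "wheel", api_name="/get_results")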
    # Add the description below the layout
    gr.Markdown(description_html)
# Launch the app
demo.launch(share=False)