import base64
import io
import json
import os

import gradio as gr
import numpy as np
import requests
from PIL import Image, ImageOps

# The 'devisionx' environment variable holds the API endpoint URL
# (it is passed to requests.put() as the request URL below)
api_url = os.environ.get('devisionx')

# Exit early if the endpoint is not configured
if not api_url:
    print("devisionx environment variable is not set.")
    exit()
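# For local testing, the endpoint can be exported before launching, e.g.:
#   export devisionx="https://example.com/annotate"  # hypothetical endpoint URL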

def base64str_to_PILImage(base64str):
    """Decode a base64 string into a PIL image, applying any EXIF orientation."""
    base64bytes = base64.b64decode(base64str.encode('utf-8'))
    bytesObj = io.BytesIO(base64bytes)
    return ImageOps.exif_transpose(Image.open(bytesObj))
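# For reference, the inverse direction (PIL image -> base64 string) is done
# inline inside get_results() below; a minimal standalone sketch:
#
#   with io.BytesIO() as buf:
#       Image.open("3000.jpeg").save(buf, format="JPEG")  # one of the bundled examples
#       b64 = base64.b64encode(buf.getvalue()).decode("utf-8")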

# Call the API with the image and prompt, and return the annotated image
def get_results(image, prompt):
    # Convert the NumPy array coming from Gradio into a PIL image
    image = Image.fromarray(image)

    # Encode the image as a base64 JPEG string
    with io.BytesIO() as buffer:
        image.save(buffer, format="JPEG")
        base64str = base64.b64encode(buffer.getvalue()).decode("utf-8")

    # Prepare the payload (adjust this part according to the API requirements)
    payload = json.dumps({"base64str": base64str, "classes": prompt})

    # Send the request to the API endpoint
    response = requests.put(api_url, data=payload)

    # Parse the JSON response
    data = response.json()
    print(response.status_code)
    print(data)

    # The annotated image comes back as a base64 string under this key
    # (adjust this part according to the API response format)
    output_image_base64 = data['firstName']

    # Convert the output image from base64 to PIL and then to a NumPy array
    output_image = base64str_to_PILImage(output_image_base64)
    output_image = np.array(output_image)

    return output_image
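# A more defensive version of the request above would surface HTTP errors
# before parsing the body; a minimal sketch, not wired into the demo:
#
#   response = requests.put(api_url, data=payload, timeout=60)
#   response.raise_for_status()  # raises on 4xx/5xx instead of failing later
#   data = response.json()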

    
# Define the text description within an HTML <div> element
description_html = """
<html>
<head>
    <style>
        .description {
            margin: 20px;
            padding: 10px;
            border: 1px solid #ccc;
        }
    </style>
</head>
<body>
    <div class="description">
        <p><strong>Description:</strong></p>
        <p>We present a demo for performing object segmentation by training a YOLOv8-seg model on a wheel image dataset. The model was trained on 696 training images and validated on 199 images.</p>
        <p><strong>Usage:</strong></p>
        <p>You can upload wheel images, and the demo will return the segmented image.</p>
        <p><strong>Dataset:</strong></p>
        <p>This dataset comprises a total of 994 images, which are divided into three distinct sets for various purposes:</p>
        <ul>
            <li><strong>Training Set:</strong> It includes 696 images and is intended for training the model.</li>
            <li><strong>Validation Set:</strong> There are 199 images in the validation set, which is used for optimizing model parameters during development.</li>
            <li><strong>Test Set:</strong> This set consists of 99 images and serves as a separate evaluation dataset to assess the performance of trained models.</li>
        </ul>
        <p><strong>License:</strong> This dataset is made available under the Creative Commons Attribution 4.0 International License (CC BY 4.0).</p>
        <p>To access and download this dataset, please follow this link: <a href="https://universe.roboflow.com/project-wce7s/1000_seg_wheel" target="_blank">Dataset Download</a></p>
        <p><strong>Download Dataset:</strong></p>
        <p>To download the dataset we used, you can run the following command in Colab:</p>
        <pre>!wget https://universe.roboflow.com/ds/OPPOJjnJPs?key=5yzDMD610e</pre>
        <p>Feel free to explore and use this repository for your object segmentation needs. If you have any questions or need assistance, please don't hesitate to reach out.</p>
    </div>
</body>
</html>
"""

examples = [
    ["3000.jpeg",'person,car,traffic sign,traffic light'],
    ["original (8).jpg", 'person,car,traffic sign,traffic light'],
    ["traffic.jpg", 'person,car,traffic sign,traffic light'],
]

# Create a Blocks object and use it as a context manager
with gr.Blocks() as demo:
    gr.Markdown(
            """
            # Tuba Autoannotation Demo

            This is your private demo for [Tuba Autoannotation](https://Tuba.ai)

            A prompt based controllable model for auto annotation
            """
        )
    # Define the input and output image components and add them to the layout
    with gr.Row():
        image_input = gr.Image(label="Input Image")
        output = gr.Image(type="numpy", label="Output Image")

    # Define the prompt input and the run button
    with gr.Row():
        text_input = gr.Textbox(label="Prompt")
    with gr.Row():
        button = gr.Button("Run")
    # Define the event listener that connects the input and output components and triggers the function
    button.click(fn=get_results, inputs=[image_input, text_input], outputs=output, api_name="get_results")
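    # api_name="get_results" also exposes this function over the Gradio API, so it
    # can be called programmatically; a hypothetical client-side sketch:
    #   from gradio_client import Client
    #   Client("<space-or-url>").predict(image_path, prompt, api_name="/get_results")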
    # Add clickable examples that reuse the same inputs and function
    gr.Examples(
            fn=get_results,
            examples=examples,
            inputs=[image_input, text_input],
            outputs=output,
        )
    # Add the description below the layout
    gr.Markdown(description_html)
# Launch the app
demo.launch(share=False)
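# Setting share=True instead would create a temporary public gradio.live link,
# which can be useful when running inside a notebook or behind a firewall.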