# import gradio as gr
# import numpy as np
# import cv2 as cv
# import requests
# import time
# import os
# host = os.environ.get("host")
# code = os.environ.get("code")
# model_llm = os.environ.get("model")
# content = os.environ.get("content")
# state = os.environ.get("state")
# system = os.environ.get("system")
# auth = os.environ.get("auth")
# data = None
# model = None
# image = None
# prediction = None
# labels = None
# print('START')
# np.set_printoptions(suppress=True)
# data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
# with open("labels.txt", "r") as file:
# labels = file.read().splitlines()
# messages = [
# {"role": "system", "content": system}
# ]
# def classify(UserInput, Image, Textbox2, Textbox3):
# if Textbox3 == code:
# print("Image: ", Image)
# if Image is not None:
# output = []
# image_data = np.array(Image)
# image_data = cv.resize(image_data, (224, 224))
# image_array = np.asarray(image_data)
# normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# data[0] = normalized_image_array
# import tensorflow as tf
# model = tf.keras.models.load_model('keras_model.h5')
# prediction = model.predict(data)
# max_label_index = None
# max_prediction_value = -1
# print('Prediction')
# Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
# Textbox2 = Textbox2.split(",")
# Textbox2_edited = [x.strip() for x in Textbox2]
# Textbox2_edited = list(Textbox2_edited)
# Textbox2_edited.append(UserInput)
# messages.append({"role": "user", "content": UserInput})
# for i, label in enumerate(labels):
# prediction_value = float(prediction[0][i])
# rounded_value = round(prediction_value, 2)
# print(f'{label}: {rounded_value}')
# if prediction_value > max_prediction_value:
# max_label_index = i
# max_prediction_value = prediction_value
# if max_label_index is not None:
# max_label = labels[max_label_index].split(' ', 1)[1]
# max_rounded_prediction = round(max_prediction_value, 2)
# print(f'Maximum Prediction: {max_label} with a value of {max_rounded_prediction}')
# time.sleep(1)
# if max_rounded_prediction > 0.5:
# print("\nWays to dispose of this waste: " + max_label)
# messages.append({"role": "user", "content": content + " " + max_label})
# headers = {
# "Content-Type": "application/json",
# "Authorization": f"Bearer {auth}"
# }
# response = requests.post(host, headers=headers, json={
# "messages": messages,
# "model": model_llm
# }).json()
# reply = response["choices"][0]["message"]["content"]
# messages.append({"role": "assistant", "content": reply})
# output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": reply})
# elif max_rounded_prediction < 0.5:
# output.append({"Mode": "Image", "type": "Not predictable", "prediction_value": max_rounded_prediction, "content": "Seems like the prediction rate is too low due to that won't be able to predict the type of material. Try again with a cropped image or different one."})
# return output
# else:
# output = []
# Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
# Textbox2 = Textbox2.split(",")
# Textbox2_edited = [x.strip() for x in Textbox2]
# Textbox2_edited = list(Textbox2_edited)
# Textbox2_edited.append(UserInput)
# for i in Textbox2_edited:
# messages.append(
# {"role": "user", "content": i}
# )
# print("messages after appending:", messages)
# time.sleep(1)
# messages.append({"role": "user", "content": UserInput})
# headers = {
# "Content-Type": "application/json",
# "Authorization": f"Bearer {auth}"
# }
# response = requests.post(host, headers=headers, json={
# "messages": messages,
# "model": model_llm
# }).json()
# reply = response["choices"][0]["message"]["content"]
# messages.append({"role": "assistant", "content": reply})
# output.append({"Mode": "Chat", "content": reply})
# return output
# else:
# return "Unauthorized"
# user_inputs = [
# gr.Textbox(label="User Input", type="text"),
# gr.Image(),
# gr.Textbox(label="Textbox2", type="text"),
# gr.Textbox(label="Textbox3", type="password")
# ]
# iface = gr.Interface(
# fn=classify,
# inputs=user_inputs,
# outputs=gr.outputs.JSON(),
# title="Classifier",
# )
# iface.launch()
# import gradio as gr
# import numpy as np
# import cv2 as cv
# import requests
# import time
# import os
# host = os.environ.get("host")
# code = os.environ.get("code")
# model_llm = os.environ.get("model")
# content = os.environ.get("content")
# state = os.environ.get("state")
# system = os.environ.get("system")
# auth = os.environ.get("auth")
# auth2 = os.environ.get("auth2")
# data = None
# model = None
# image = None
# prediction = None
# labels = None
# print('START')
# np.set_printoptions(suppress=True)
# data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
# with open("labels.txt", "r") as file:
# labels = file.read().splitlines()
# messages = [
# {"role": "system", "content": system}
# ]
# def classify(platform,UserInput, Image, Textbox2, Textbox3):
# if Textbox3 == code:
# if Image is not None:
# output = []
# headers = {
# "Authorization": f"Bearer {auth2}"
# }
# if platform == "wh":
# get_image = requests.get(Image, headers=headers)
# print(get_image.content)
# elif platform == "web":
# print("WEB")
# else:
# pass
# image_data = np.array(get_image)
# image_data = cv.resize(image_data, (224, 224))
# image_array = np.asarray(image_data)
# normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# data[0] = normalized_image_array
# import tensorflow as tf
# model = tf.keras.models.load_model('keras_model.h5')
# prediction = model.predict(data)
# max_label_index = None
# max_prediction_value = -1
# print('Prediction')
# Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
# Textbox2 = Textbox2.split(",")
# Textbox2_edited = [x.strip() for x in Textbox2]
# Textbox2_edited = list(Textbox2_edited)
# Textbox2_edited.append(UserInput)
# messages.append({"role": "user", "content": UserInput})
# for i, label in enumerate(labels):
# prediction_value = float(prediction[0][i])
# rounded_value = round(prediction_value, 2)
# print(f'{label}: {rounded_value}')
# if prediction_value > max_prediction_value:
# max_label_index = i
# max_prediction_value = prediction_value
# if max_label_index is not None:
# max_label = labels[max_label_index].split(' ', 1)[1]
# max_rounded_prediction = round(max_prediction_value, 2)
# print(f'Maximum Prediction: {max_label} with a value of {max_rounded_prediction}')
# time.sleep(1)
# if max_rounded_prediction > 0.5:
# print("\nWays to dispose of this waste: " + max_label)
# messages.append({"role": "user", "content": content + " " + max_label})
# headers = {
# "Content-Type": "application/json",
# "Authorization": f"Bearer {auth}"
# }
# response = requests.post(host, headers=headers, json={
# "messages": messages,
# "model": model_llm
# }).json()
# reply = response["choices"][0]["message"]["content"]
# messages.append({"role": "assistant", "content": reply})
# output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": reply})
# elif max_rounded_prediction < 0.5:
# output.append({"Mode": "Image", "type": "Not predictable", "prediction_value": max_rounded_prediction, "content": "Seems like the prediction rate is too low due to that won't be able to predict the type of material. Try again with a cropped image or different one."})
# return output
# else:
# output = []
# Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
# Textbox2 = Textbox2.split(",")
# Textbox2_edited = [x.strip() for x in Textbox2]
# Textbox2_edited = list(Textbox2_edited)
# Textbox2_edited.append(UserInput)
# for i in Textbox2_edited:
# messages.append(
# {"role": "user", "content": i}
# )
# print("messages after appending:", messages)
# time.sleep(1)
# messages.append({"role": "user", "content": UserInput})
# headers = {
# "Content-Type": "application/json",
# "Authorization": f"Bearer {auth}"
# }
# response = requests.post(host, headers=headers, json={
# "messages": messages,
# "model": model_llm
# }).json()
# reply = response["choices"][0]["message"]["content"]
# messages.append({"role": "assistant", "content": reply})
# output.append({"Mode": "Chat", "content": reply})
# return output
# else:
# return "Unauthorized"
# user_inputs = [
# gr.Textbox(label="Platform", type="text"),
# gr.Textbox(label="User Input", type="text"),
# gr.Textbox(label="Image", type="text"),
# gr.Textbox(label="Textbox2", type="text"),
# gr.Textbox(label="Textbox3", type="password")
# ]
# iface = gr.Interface(
# fn=classify,
# inputs=user_inputs,
# outputs=gr.outputs.JSON(),
# title="Classifier",
# )
# iface.launch()
############################### MOST WORKING
# import gradio as gr
# import numpy as np
# import cv2 as cv
# import requests
# import io
# from PIL import Image
# import os
# import tensorflow as tf
# import random
# host = os.environ.get("host")
# code = os.environ.get("code")
# model_llm = os.environ.get("model")
# content = os.environ.get("content")
# state = os.environ.get("state")
# system = os.environ.get("system")
# auth = os.environ.get("auth")
# auth2 = os.environ.get("auth2")
# data = None
# np.set_printoptions(suppress=True)
# model = tf.keras.models.load_model('keras_model.h5')
# data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
# with open("labels.txt", "r") as file:
# labels = file.read().splitlines()
# messages = [
# {"role": "system", "content": system}
# ]
# def classify(platform, UserInput, Images, Textbox2, Textbox3):
# if Textbox3 == code:
# imageData = None
# if Images != "None":
# output = []
# headers = {
# "Authorization": f"Bearer {auth2}"
# }
# if platform == "wh":
# get_image = requests.get(Images, headers=headers)
# if get_image.status_code == 200:
# image_data = get_image.content
# elif platform == "web":
# print("WEB")
# else:
# pass
# image = cv.imdecode(np.frombuffer(image_data, np.uint8), cv.IMREAD_COLOR)
# image = cv.resize(image, (224, 224))
# image_array = np.asarray(image)
# normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# data[0] = normalized_image_array
# prediction = model.predict(data)
# max_label_index = None
# max_prediction_value = -1
# print('Prediction')
# Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
# Textbox2 = Textbox2.split(",")
# Textbox2_edited = [x.strip() for x in Textbox2]
# Textbox2_edited = list(Textbox2_edited)
# Textbox2_edited.append(UserInput)
# print(UserInput)
# print("appending")
# messages.append({"role": "user", "content": UserInput})
# for i, label in enumerate(labels):
# prediction_value = float(prediction[0][i])
# rounded_value = round(prediction_value, 2)
# print(f'{label}: {rounded_value}')
# if prediction_value > max_prediction_value:
# max_label_index = i
# max_prediction_value = prediction_value
# if max_label_index is not None:
# max_label = labels[max_label_index].split(' ', 1)[1]
# max_rounded_prediction = round(max_prediction_value, 2)
# print(f'Maximum Prediction: {max_label} with a value of {max_rounded_prediction}')
# if max_rounded_prediction > 0.5:
# print("\nWays to dispose of this waste: " + max_label)
# messages.append({"role": "user", "content": content + " " + max_label})
# # messages.append({"role": "user", "content": max_label})
# print("IMAGE messages after appending:", messages)
# header = {
# "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
# "Content-Type": "application/json",
# "Authorization": f"Bearer {auth}"
# }
# try:
# response = requests.post(host, headers=header, json={
# "messages": messages,
# "model": model_llm
# }).json()
# print("RESPONSE TRY",response)
# reply = response["choices"][0]["message"]["content"]
# # messages.append({"role": "assistant", "content": reply})
# output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": reply})
# except:
# print("DOESN'T WORK")
# elif max_rounded_prediction < 0.5:
# output.append({"Mode": "Image", "type": "Not predictable", "prediction_value": max_rounded_prediction, "content": "Seems like the prediction rate is too low due to that won't be able to predict the type of material. Try again with a cropped image or different one"})
# return output
# elif Images == "None":
# output = []
# Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
# Textbox2 = Textbox2.split(",")
# Textbox2_edited = [x.strip() for x in Textbox2]
# Textbox2_edited = list(Textbox2_edited)
# Textbox2_edited.append(UserInput)
# for i in Textbox2_edited:
# messages.append({"role": "user", "content": i})
# print("messages after appending:", messages)
# messages.append({"role": "user", "content": UserInput})
# headers = {
# "Content-Type": "application/json",
# "Authorization": f"Bearer {auth}"
# }
# response = requests.post(host, headers=headers, json={
# "messages": messages,
# "model": model_llm
# }).json()
# reply = response["choices"][0]["message"]["content"]
# # messages.append({"role": "assistant", "content": reply})
# output.append({"Mode": "Chat", "content": reply})
# return output
# else:
# return "Unauthorized"
# user_inputs = [
# gr.Textbox(label="Platform", type="text"),
# gr.Textbox(label="User Input", type="text"),
# gr.Textbox(label="Image", type="text"),
# gr.Textbox(label="Textbox2", type="text"),
# gr.Textbox(label="Textbox3", type="password")
# ]
# iface = gr.Interface(
# fn=classify,
# inputs=user_inputs,
# outputs=gr.outputs.JSON(),
# title="Classifier",
# )
# iface.launch()
############## TEST
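# Active version: a Gradio endpoint that classifies an input image with a local
# Keras model (keras_model.h5 + labels.txt) and asks an OpenAI-compatible chat
# endpoint (custom api_base) for a reply about the predicted material.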
import gradio as gr
import numpy as np
import cv2 as cv
import requests
import io
import time
from PIL import Image
import os
import tensorflow as tf
import random
import openai
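# Configuration and secrets are read from environment variables (e.g. Space secrets).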
host = os.environ.get("host")
code = os.environ.get("code")
model_llm = os.environ.get("model")
content = os.environ.get("content")
state = os.environ.get("state")
system = os.environ.get("system")
auth = os.environ.get("auth")
auth2 = os.environ.get("auth2")
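# Route openai calls through the configured host and key instead of the default endpoint.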
openai.api_key = os.environ.get("auth")
openai.api_base = os.environ.get("host")
data = None
np.set_printoptions(suppress=True)
model = tf.keras.models.load_model('keras_model.h5')
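# Single-image input buffer matching the model's expected 224x224x3 float32 input.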
data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
with open("labels.txt", "r") as file:
labels = file.read().splitlines()
messages = [
{"role": "system", "content": system}
]
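# Module-level conversation history; it persists across requests until "clear history" is received.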
def classify(platform, UserInput, Images, Textbox2, Textbox3):
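    # platform: "wh" (image fetched with the auth2 token) or "web" (plain fetch).
    # Images: an image URL, or the string "None" for chat-only requests.
    # Textbox2: prior user messages as a bracketed, comma-separated string; Textbox3: the access code.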
if UserInput.lower() == "clear history":
messages.clear()
messages.append(
{"role": "system", "content": system}
)
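    # Only respond when the caller supplies the correct access code.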
if Textbox3 == code:
        image_data = None  # default so a failed fetch can be detected before decoding
if Images != "None":
output = []
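            # Fetch the raw image bytes for the requested platform.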
headers = {
"Authorization": f"Bearer {auth2}"
}
if platform == "wh":
get_image = requests.get(Images, headers=headers)
if get_image.status_code == 200:
image_data = get_image.content
elif platform == "web":
# print("WEB")
                resp = requests.get(Images)
                image_data = resp.content
            else:
                pass
            if image_data is None:
                return [{"Mode": "Image", "content": "Could not fetch the image for the given platform/URL."}]
            image = cv.imdecode(np.frombuffer(image_data, np.uint8), cv.IMREAD_COLOR)
image = cv.resize(image, (224, 224))
image_array = np.asarray(image)
normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
data[0] = normalized_image_array
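            # Run the Keras classifier on the prepared image.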
prediction = model.predict(data)
max_label_index = None
max_prediction_value = -1
print('Prediction')
Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
Textbox2 = Textbox2.split(",")
Textbox2_edited = [x.strip() for x in Textbox2]
Textbox2_edited = list(Textbox2_edited)
Textbox2_edited.append(UserInput)
print(UserInput)
print("appending")
# messages.append({"role": "user", "content": UserInput})
# Pop earlier messages if there are more than 10
# if UserInput.lower() == "clear history":
# while len(messages) > 10:
# messages.pop(0)
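            # Find the label with the highest predicted probability.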
for i, label in enumerate(labels):
prediction_value = float(prediction[0][i])
rounded_value = round(prediction_value, 2)
print(f'{label}: {rounded_value}')
if prediction_value > max_prediction_value:
max_label_index = i
max_prediction_value = prediction_value
if max_label_index is not None:
max_label = labels[max_label_index].split(' ', 1)[1]
max_rounded_prediction = round(max_prediction_value, 2)
print(f'Maximum Prediction: {max_label} with a value of {max_rounded_prediction}')
if max_rounded_prediction > 0.5:
print("\nWays to dispose of this waste: " + max_label)
messages.append({"role": "user", "content": content + " " + max_label})
print("IMAGE messages after appending:", messages)
print("Message list of image:", messages)
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
"Content-Type": "application/json",
"Authorization": f"Bearer {auth}"
}
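                    # `header` is only used by the commented-out requests fallback below; the openai call does not take it.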
try:
# response = requests.post(host, headers=header, json={
# "messages": messages,
# "model": model_llm
# }).json()
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
# reply = response["choices"][0]["message"]["content"]
reply = completion.choices[0].message['content']
# # reply = response["choices"][0]["message"]["content"]
# reply = response.choices[0].message['content']
print("RESPONSE TRY", completion)
output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": reply})
                    except Exception as e:
                        print("LLM request failed:", e)
                        output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": "The language model request failed. Please try again."})
                else:
                    output.append({"Mode": "Image", "type": "Not predictable", "prediction_value": max_rounded_prediction, "content": "The prediction confidence is too low to determine the type of material. Please try again with a cropped or different image."})
return output
elif Images == "None":
output = []
Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
Textbox2 = Textbox2.split(",")
Textbox2_edited = [x.strip() for x in Textbox2]
Textbox2_edited = list(Textbox2_edited)
Textbox2_edited.append(UserInput)
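            # Replay the prior messages from Textbox2, then the current input, before calling the chat model.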
for i in Textbox2_edited:
messages.append({"role": "user", "content": i})
print("messages after appending:", messages)
messages.append({"role": "user", "content": UserInput})
# Pop earlier messages if there are more than 10
# if UserInput.lower() == "clear history":
# while len(messages) > 10:
# messages.pop(0)
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {auth}"
}
try:
# response = requests.post(host, headers=headers, json={
# "messages": messages,
# "model": model_llm
# }).json()
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=messages
)
# reply = response["choices"][0]["message"]["content"]
reply = completion.choices[0].message['content']
            except Exception as e:
                print("LLM request failed:", e)
                reply = "Maximum messages: 15. Please clear your history and try again!"
output.append({"Mode": "Chat", "content": reply})
return output
else:
return "Unauthorized"
user_inputs = [
gr.Textbox(label="Platform", type="text"),
gr.Textbox(label="User Input", type="text"),
gr.Textbox(label="Image", type="text"),
gr.Textbox(label="Textbox2", type="text"),
gr.Textbox(label="Textbox3", type="password")
]
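# The image is supplied as a URL string (not an uploaded file); classify() downloads it itself.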
iface = gr.Interface(
fn=classify,
inputs=user_inputs,
    outputs=gr.JSON(),
title="Classifier",
)
iface.launch()
# import gradio as gr
# import numpy as np
# import cv2 as cv
# import requests
# import random
# import os
# import tensorflow as tf
# import base64
# host = os.environ.get("host")
# code = os.environ.get("code")
# model_llm = os.environ.get("model")
# content = os.environ.get("content")
# state = os.environ.get("state")
# system = os.environ.get("system")
# auth = os.environ.get("auth")
# auth2 = os.environ.get("auth2")
# data = None
# np.set_printoptions(suppress=True)
# # Load the model outside of the function
# model = tf.keras.models.load_model('keras_model.h5')
# # Load labels from a file
# with open("labels.txt", "r") as file:
# labels = file.read().splitlines()
# messages = [{"role": "system", "content": system}]
# def classify(platform, UserInput, Images, Textbox2, Textbox3):
# if Textbox3 == code:
# imageData = None
# image_data_url = None # Initialize image_data_url
# if Images is not None:
# output = []
# headers = {
# "Authorization": f"Bearer {auth2}"
# }
# if platform == "wh":
# get_image = requests.get(Images, headers=headers)
# if get_image.status_code == 200:
# # Convert the image data to base64
# image_base64 = base64.b64encode(get_image.content).decode("utf-8")
# # Create a data URL
# image_data_url = f"data:image/png;base64,{image_base64}"
# elif platform == "web":
# print("WEB")
# # Handle web case if needed
# else:
# pass
# if image_data_url is not None:
# # Load the image from image_data_url
# image_data = base64.b64decode(image_base64)
# nparr = np.frombuffer(image_data, np.uint8)
# image = cv.imdecode(nparr, cv.IMREAD_COLOR)
# image = cv.resize(image, (224, 224))
# image_array = np.asarray(image)
# normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
# data[0] = normalized_image_array
# prediction = model.predict(data)
# max_label_index = None
# max_prediction_value = -1
# print('Prediction')
# Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
# Textbox2 = Textbox2.split(",")
# Textbox2_edited = [x.strip() for x in Textbox2]
# Textbox2_edited = list(Textbox2_edited)
# Textbox2_edited.append(UserInput)
# messages.append({"role": "user", "content": UserInput})
# for i, label in enumerate(labels):
# prediction_value = float(prediction[0][i])
# rounded_value = round(prediction_value, 2)
# print(f'{label}: {rounded_value}')
# if prediction_value > max_prediction_value:
# max_label_index = i
# max_prediction_value = prediction_value
# if max_label_index is not None:
# max_label = labels[max_label_index].split(' ', 1)[1]
# max_rounded_prediction = round(max_prediction_value, 2)
# print(f'Maximum Prediction: {max_label} with a value of {max_rounded_prediction}')
# if max_rounded_prediction > 0.5:
# print("\nWays to dispose of this waste: " + max_label)
# messages.append({"role": "user", "content": content + " " + max_label})
# headers = {
# "Content-Type": "application/json",
# "Authorization": f"Bearer {auth}"
# }
# response = requests.post(host, headers=headers, json={
# "messages": messages,
# "model": model_llm
# }).json()
# reply = response["choices"][0]["message"]["content"]
# messages.append({"role": "assistant", "content": reply})
# output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": reply})
# elif max_rounded_prediction < 0.5:
# output.append({"Mode": "Image", "type": "Not predictable", "prediction_value": max_rounded_prediction, "content": "Seems like the prediction rate is too low due to that won't be able to predict the type of material. Try again with a cropped image or different one"})
# output.append({"Mode": "Image", "type": "Data URL", "data_url": image_data_url})
# return output
# else:
# output = []
# Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
# Textbox2 = Textbox2.split(",")
# Textbox2_edited = [x.strip() for x in Textbox2]
# Textbox2_edited = list(Textbox2_edited)
# Textbox2_edited.append(UserInput)
# for i in Textbox2_edited:
# messages.append({"role": "user", "content": i})
# print("messages after appending:", messages)
# messages.append({"role": "user", "content": UserInput})
# headers = {
# "Content-Type": "application/json",
# "Authorization": f"Bearer {auth}"
# }
# response = requests.post(host, headers=headers, json={
# "messages": messages,
# "model": model_llm
# }).json()
# reply = response["choices"][0]["message"]["content"]
# messages.append({"role": "assistant", "content": reply})
# output.append({"Mode": "Chat", "content": reply})
# return output
# else:
# return "Unauthorized"
# user_inputs = [
# gr.Textbox(label="Platform", type="text"),
# gr.Textbox(label="User Input", type="text"),
# gr.Textbox(label="Images", type="text"),
# gr.Textbox(label="Textbox2", type="text"),
# gr.Textbox(label="Textbox3", type="password")
# ]
# iface = gr.Interface(
# fn=classify,
# inputs=user_inputs,
# outputs=gr.outputs.JSON(),
# title="Classifier",
# )
# iface.launch()