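# Gradio app exposing a single `classify` endpoint. Depending on the inputs it
# either (a) classifies an image with a local Keras model (keras_model.h5 +
# labels.txt) and asks an OpenAI-compatible chat model how to dispose of the
# detected material, (b) forwards the request to an external vision endpoint,
# or (c) runs a plain chat turn with a demo function-calling tool. All
# credentials and endpoints are read from environment variables.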
(No Image)" # output.append({"Mode": "Chat", "content": reply}) # return output # else: # return "Unauthorized" # user_inputs = [ # gr.Textbox(label="Platform", type="text"), # gr.Textbox(label="User Input", type="text"), # gr.Textbox(label="Image", type="text"), # gr.Textbox(label="Textbox2", type="text"), # gr.Textbox(label="Textbox3", type="password") # ] # iface = gr.Interface( # fn=classify, # inputs=user_inputs, # outputs=gr.outputs.JSON(), # title="Classifier", # ) # iface.launch() ############## NEW VERSION ############## import gradio as gr import numpy as np import cv2 as cv import requests import io import time from PIL import Image import base64 import os import tensorflow as tf import random import openai import json host = os.environ.get("host") code = os.environ.get("code") model_llm = os.environ.get("model") content = os.environ.get("content") state = os.environ.get("state") system = os.environ.get("system") auth = os.environ.get("auth") auth2 = os.environ.get("auth2") openai.api_key = os.environ.get("auth") openai.api_base = os.environ.get("host") vis_url = os.environ.get("vis_url") vis_auth = os.environ.get("vis_auth") data = None np.set_printoptions(suppress=True) model = tf.keras.models.load_model('keras_model.h5') data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32) with open("labels.txt", "r") as file: labels = file.read().splitlines() messages = [ {"role": "system", "content": system} ] def classify(platform, UserInput, Images, Textbox2, Textbox3): if UserInput.lower() == "clear history": messages.clear() messages.append( {"role": "system", "content": system} ) if Textbox3 == code: imageData = None if Images != "None": output = [] headers = { "Authorization": f"Bearer {auth2}" } if platform == "wh": get_image = requests.get(Images, headers=headers) if get_image.status_code == 200: image_data = get_image.content elif platform == "web": # print("WEB") url = requests.get(Images) image_data = url.content else: pass def encode_image(image: Image.Image, format: str = "PNG") -> str: with io.BytesIO() as buffer: image.save(buffer, format=format) encoded_image = buffer.getvalue().decode("latin-1") # Use "latin-1" encoding for bytes to str conversion return encoded_image def url_to_base64(image_url): try: # Download the image from the URL response = requests.get(image_url) response.raise_for_status() # Convert the image content to base64 base64_data = base64.b64encode(response.content).decode('utf-8') return base64_data except Exception as e: print(f"Error: {e}") return None def vision(): # with open("image.png", "wb") as file1_write: # file1_write.write(image_data) # Example usage # image_url = 'https://example.com/path/to/image.jpg' # base64_data = url_to_base64(image_url) # image = Image.open(io.BytesIO(file_content)) # base64_image_str = encode_image(image) if image_data: try: # Open the image directly from the image data image = Image.open(io.BytesIO(image_data)) base64_image_str = encode_image(image) payload = { "content": [ { "prompt": "What's this image about? or What does this image contains?", "image": base64_image_str, } ], "token": vis_auth, } url = vis_url headers = {"Content-Type": "application/json"} response = requests.post(url, headers=headers, data=json.dumps(payload)) results = response.json() results = results["result"] answer_index = results.find("Answer:") if answer_index != -1: try: result_text = results[answer_index + len("Answer:"):].strip() print(result_text) return result_text except: pass else: return "Answer: not found in the string." 
except: return "ERRRRRRR" else: print("Error: Image data is not available.") return None if "vision" in UserInput.lower(): vision() return "yesssss" image = cv.imdecode(np.frombuffer(image_data, np.uint8), cv.IMREAD_COLOR) image = cv.resize(image, (224, 224)) image_array = np.asarray(image) normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1 data[0] = normalized_image_array prediction = model.predict(data) max_label_index = None max_prediction_value = -1 print('Prediction') Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "") Textbox2 = Textbox2.split(",") Textbox2_edited = [x.strip() for x in Textbox2] Textbox2_edited = list(Textbox2_edited) Textbox2_edited.append(UserInput) print(UserInput) print("appending") # messages.append({"role": "user", "content": UserInput}) # Pop earlier messages if there are more than 10 # if UserInput.lower() == "clear history": # while len(messages) > 10: # messages.pop(0) for i, label in enumerate(labels): prediction_value = float(prediction[0][i]) rounded_value = round(prediction_value, 2) print(f'{label}: {rounded_value}') if prediction_value > max_prediction_value: max_label_index = i max_prediction_value = prediction_value if max_label_index is not None: max_label = labels[max_label_index].split(' ', 1)[1] max_rounded_prediction = round(max_prediction_value, 2) print(f'Maximum Prediction: {max_label} with a value of {max_rounded_prediction}') if max_rounded_prediction > 0.5: print("\nWays to dispose of this waste: " + max_label) messages.append({"role": "user", "content": content + " " + max_label}) print("IMAGE messages after appending:", messages) print("Message list of image:", messages) header = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36", "Content-Type": "application/json", "Authorization": f"Bearer {auth}" } try: # response = requests.post(host, headers=header, json={ # "messages": messages, # "model": model_llm # }).json() completion = openai.ChatCompletion.create( model="gpt-3.5-turbo", messages=messages ) # reply = response["choices"][0]["message"]["content"] reply = completion.choices[0].message['content'] # # reply = response["choices"][0]["message"]["content"] # reply = response.choices[0].message['content'] print("RESPONSE TRY", completion) output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": reply}) except: print("DOESN'T WORK") elif max_rounded_prediction < 0.5: output.append({"Mode": "Image", "type": "Not predictable", "prediction_value": max_rounded_prediction, "content": "Seems like the prediction rate is too low due to that won't be able to predict the type of material. 
        elif Images == "None":
            output = []

            Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
            Textbox2 = Textbox2.split(",")
            Textbox2_edited = [x.strip() for x in Textbox2]
            Textbox2_edited.append(UserInput)

            for i in Textbox2_edited:
                messages.append({"role": "user", "content": i})
            print("messages after appending:", messages)

            messages.append({"role": "user", "content": UserInput})

            # Pop earlier messages if there are more than 10
            # if UserInput.lower() == "clear history":
            #     while len(messages) > 10:
            #         messages.pop(0)

            try:
                def testing_this(name: str, number: str):
                    """
                    Ask the user for their name and a random number and confirm that both were received.

                    @param name: The name of the user
                    @param number: Random number entered by the user
                    """
                    if name is None and number is None:
                        return "You must provide your name and a random number to continue."
                    elif name is None:
                        return "Your name must be provided in order to run the function call"
                    elif number is None:
                        return "A random number must be provided in order to run the function call"
                    else:
                        return "Function call successful"
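                # Function-calling round trip: the `functions` schema below
                # advertises testing_this to the model. If the model replies with
                # a "function_call", its JSON-encoded arguments are parsed,
                # testing_this is executed, the result is appended back as a
                # role="function" message, and a second completion produces the
                # final reply.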
San Francisco, CA", # }, # "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, # }, # "required": ["location"], # }, # } # ] functions = [ { "name": "testing_this", "description": "This function ask user for their name and a random integer and returns it", "parameters": { "type": "object", "properties": { "name": { "type": "string", "description": "Name of the user", }, "number": {"type": "string", "description": "A random number"}, }, "required": ["name", "number"], }, } ] completion = openai.ChatCompletion.create( model="gpt-3.5-turbo", messages=messages, functions = functions, function_call = "auto", ) comp = completion.choices[0].message print("\nCOMPPP MESSAGE:", comp) # if comp.get("function_call"): # function_name = comp["function_call"]["name"] # # Convert JSON string to Python dictionary # data = json.loads(comp) # # Extract values # function_call = data["function_call"] # arguments_str = function_call["arguments"] # arguments_dict = json.loads(arguments_str) # function_response = testing_this( # name=arguments_dict["name"], # number=arguments_dict["number"] # ) # print("FUNCTION_RESPONSE:", function_response) # print("ARGUMENTS VALUES:", arguments_dict["number"], arguments_dict["number"]) # messages.append(comp) # messages.append({ # "role": "function", # "name": function_name, # "content": function_response # }) # second_response = openai.ChatCompletion.create( # model="gpt-3.5-turbo", # messages=messages # ) # print("YES_FUNCTION_CALL RESPONSE TRY (NO IMAGE)") # return second_response # else: # reply = comp['content'] # print("RESPONSE TRY (NO IMAGE, FUNCTION_CALL)") if comp.get("function_call"): function_name = comp["function_call"]["name"] # Convert JSON string to Python dictionary arguments_str = comp["function_call"]["arguments"] arguments_dict = json.loads(arguments_str) function_response = testing_this( name=arguments_dict["name"], number=arguments_dict["number"] ) print("FUNCTION_RESPONSE:", function_response) print("ARGUMENTS VALUES:", arguments_dict["name"], arguments_dict["number"]) messages.append(comp) messages.append({ "role": "function", "name": function_name, "content": function_response }) second_response = openai.ChatCompletion.create( model="gpt-3.5-turbo", messages=messages ) print("YES_FUNCTION_CALL RESPONSE TRY (NO IMAGE)") return second_response else: reply = comp['content'] print("RESPONSE TRY (NO IMAGE, FUNCTION_CALL)") # reply = comp['content'] # print("RESPONSE TRY (NO IMAGE)", completion, reply) except Exception as e: print("Error",e) reply = "Maximum messages: 15. Please clear your history and Try Again! 
(No Image)" output.append({"Mode": "Chat", "content": reply}) return output else: return "Unauthorized" user_inputs = [ gr.Textbox(label="Platform", type="text"), gr.Textbox(label="User Input", type="text"), gr.Textbox(label="Image", type="text"), gr.Textbox(label="Textbox2", type="text"), gr.Textbox(label="Textbox3", type="password") ] iface = gr.Interface( fn=classify, inputs=user_inputs, outputs=gr.outputs.JSON(), title="Classifier", ) iface.launch() # import gradio as gr # import numpy as np # import cv2 as cv # import requests # import random # import os # import tensorflow as tf # import base64 # host = os.environ.get("host") # code = os.environ.get("code") # model_llm = os.environ.get("model") # content = os.environ.get("content") # state = os.environ.get("state") # system = os.environ.get("system") # auth = os.environ.get("auth") # auth2 = os.environ.get("auth2") # data = None # np.set_printoptions(suppress=True) # # Load the model outside of the function # model = tf.keras.models.load_model('keras_model.h5') # # Load labels from a file # with open("labels.txt", "r") as file: # labels = file.read().splitlines() # messages = [{"role": "system", "content": system}] # def classify(platform, UserInput, Images, Textbox2, Textbox3): # if Textbox3 == code: # imageData = None # image_data_url = None # Initialize image_data_url # if Images is not None: # output = [] # headers = { # "Authorization": f"Bearer {auth2}" # } # if platform == "wh": # get_image = requests.get(Images, headers=headers) # if get_image.status_code == 200: # # Convert the image data to base64 # image_base64 = base64.b64encode(get_image.content).decode("utf-8") # # Create a data URL # image_data_url = f"data:image/png;base64,{image_base64}" # elif platform == "web": # print("WEB") # # Handle web case if needed # else: # pass # if image_data_url is not None: # # Load the image from image_data_url # image_data = base64.b64decode(image_base64) # nparr = np.frombuffer(image_data, np.uint8) # image = cv.imdecode(nparr, cv.IMREAD_COLOR) # image = cv.resize(image, (224, 224)) # image_array = np.asarray(image) # normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1 # data[0] = normalized_image_array # prediction = model.predict(data) # max_label_index = None # max_prediction_value = -1 # print('Prediction') # Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "") # Textbox2 = Textbox2.split(",") # Textbox2_edited = [x.strip() for x in Textbox2] # Textbox2_edited = list(Textbox2_edited) # Textbox2_edited.append(UserInput) # messages.append({"role": "user", "content": UserInput}) # for i, label in enumerate(labels): # prediction_value = float(prediction[0][i]) # rounded_value = round(prediction_value, 2) # print(f'{label}: {rounded_value}') # if prediction_value > max_prediction_value: # max_label_index = i # max_prediction_value = prediction_value # if max_label_index is not None: # max_label = labels[max_label_index].split(' ', 1)[1] # max_rounded_prediction = round(max_prediction_value, 2) # print(f'Maximum Prediction: {max_label} with a value of {max_rounded_prediction}') # if max_rounded_prediction > 0.5: # print("\nWays to dispose of this waste: " + max_label) # messages.append({"role": "user", "content": content + " " + max_label}) # headers = { # "Content-Type": "application/json", # "Authorization": f"Bearer {auth}" # } # response = requests.post(host, headers=headers, json={ # "messages": messages, # "model": model_llm # }).json() # reply = response["choices"][0]["message"]["content"] # 
messages.append({"role": "assistant", "content": reply}) # output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": reply}) # elif max_rounded_prediction < 0.5: # output.append({"Mode": "Image", "type": "Not predictable", "prediction_value": max_rounded_prediction, "content": "Seems like the prediction rate is too low due to that won't be able to predict the type of material. Try again with a cropped image or different one"}) # output.append({"Mode": "Image", "type": "Data URL", "data_url": image_data_url}) # return output # else: # output = [] # Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "") # Textbox2 = Textbox2.split(",") # Textbox2_edited = [x.strip() for x in Textbox2] # Textbox2_edited = list(Textbox2_edited) # Textbox2_edited.append(UserInput) # for i in Textbox2_edited: # messages.append({"role": "user", "content": i}) # print("messages after appending:", messages) # messages.append({"role": "user", "content": UserInput}) # headers = { # "Content-Type": "application/json", # "Authorization": f"Bearer {auth}" # } # response = requests.post(host, headers=headers, json={ # "messages": messages, # "model": model_llm # }).json() # reply = response["choices"][0]["message"]["content"] # messages.append({"role": "assistant", "content": reply}) # output.append({"Mode": "Chat", "content": reply}) # return output # else: # return "Unauthorized" # user_inputs = [ # gr.Textbox(label="Platform", type="text"), # gr.Textbox(label="User Input", type="text"), # gr.Textbox(label="Images", type="text"), # gr.Textbox(label="Textbox2", type="text"), # gr.Textbox(label="Textbox3", type="password") # ] # iface = gr.Interface( # fn=classify, # inputs=user_inputs, # outputs=gr.outputs.JSON(), # title="Classifier", # ) # iface.launch()