Update app.py
app.py CHANGED

@@ -483,7 +483,219 @@
 # )
 # iface.launch()

-##############
+############## WORKING AS OF THIS MONTH ##############
+
+# import gradio as gr
+# import numpy as np
+# import cv2 as cv
+# import requests
+# import io
+# import time
+# from PIL import Image
+# import os
+# import tensorflow as tf
+# import random
+# import openai
+
+# host = os.environ.get("host")
+# code = os.environ.get("code")
+# model_llm = os.environ.get("model")
+# content = os.environ.get("content")
+# state = os.environ.get("state")
+# system = os.environ.get("system")
+# auth = os.environ.get("auth")
+# auth2 = os.environ.get("auth2")
+# openai.api_key = os.environ.get("auth")
+# openai.api_base = os.environ.get("host")
+# data = None
+
+# np.set_printoptions(suppress=True)
+
+# model = tf.keras.models.load_model('keras_model.h5')
+# data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
+
+# with open("labels.txt", "r") as file:
+#     labels = file.read().splitlines()
+
+# messages = [
+#     {"role": "system", "content": system}
+# ]
+
+# def classify(platform, UserInput, Images, Textbox2, Textbox3):
+#     if UserInput.lower() == "clear history":
+#         messages.clear()
+#         messages.append(
+#             {"role": "system", "content": system}
+#         )
+
+#     if Textbox3 == code:
+#         imageData = None
+#         if Images != "None":
+#             output = []
+#             headers = {
+#                 "Authorization": f"Bearer {auth2}"
+#             }
+#             if platform == "wh":
+#                 get_image = requests.get(Images, headers=headers)
+#                 if get_image.status_code == 200:
+#                     image_data = get_image.content
+#             elif platform == "web":
+#                 # print("WEB")
+#                 url = requests.get(Images)
+#                 image_data = url.content
+#             else:
+#                 pass
+
+#             image = cv.imdecode(np.frombuffer(image_data, np.uint8), cv.IMREAD_COLOR)
+#             image = cv.resize(image, (224, 224))
+#             image_array = np.asarray(image)
+#             normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
+#             data[0] = normalized_image_array
+
+#             prediction = model.predict(data)
+
+#             max_label_index = None
+#             max_prediction_value = -1
+
+#             print('Prediction')
+
+#             Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
+#             Textbox2 = Textbox2.split(",")
+#             Textbox2_edited = [x.strip() for x in Textbox2]
+#             Textbox2_edited = list(Textbox2_edited)
+#             Textbox2_edited.append(UserInput)
+#             print(UserInput)
+#             print("appending")
+#             # messages.append({"role": "user", "content": UserInput})
+
+#             # Pop earlier messages if there are more than 10
+#             # if UserInput.lower() == "clear history":
+#             #     while len(messages) > 10:
+#             #         messages.pop(0)
+
+#             for i, label in enumerate(labels):
+#                 prediction_value = float(prediction[0][i])
+#                 rounded_value = round(prediction_value, 2)
+#                 print(f'{label}: {rounded_value}')
+
+#                 if prediction_value > max_prediction_value:
+#                     max_label_index = i
+#                     max_prediction_value = prediction_value
+
+#             if max_label_index is not None:
+#                 max_label = labels[max_label_index].split(' ', 1)[1]
+#                 max_rounded_prediction = round(max_prediction_value, 2)
+#                 print(f'Maximum Prediction: {max_label} with a value of {max_rounded_prediction}')
+
+#                 if max_rounded_prediction > 0.5:
+#                     print("\nWays to dispose of this waste: " + max_label)
+#                     messages.append({"role": "user", "content": content + " " + max_label})
+#                     print("IMAGE messages after appending:", messages)
+
+#                     print("Message list of image:", messages)
+
+#                     header = {
+#                         "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
+#                         "Content-Type": "application/json",
+#                         "Authorization": f"Bearer {auth}"
+#                     }
+
+#                     try:
+#                         # response = requests.post(host, headers=header, json={
+#                         #     "messages": messages,
+#                         #     "model": model_llm
+#                         # }).json()
+
+#                         completion = openai.ChatCompletion.create(
+#                             model="gpt-3.5-turbo",
+#                             messages=messages
+#                         )
+
+
+#                         # reply = response["choices"][0]["message"]["content"]
+#                         reply = completion.choices[0].message['content']
+#                         # # reply = response["choices"][0]["message"]["content"]
+#                         # reply = response.choices[0].message['content']
+#                         print("RESPONSE TRY", completion)
+
+
+#                         output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": reply})
+#                     except:
+#                         print("DOESN'T WORK")
+
+#                 elif max_rounded_prediction < 0.5:
+#                     output.append({"Mode": "Image", "type": "Not predictable", "prediction_value": max_rounded_prediction, "content": "Seems like the prediction rate is too low due to that won't be able to predict the type of material. Try again with a cropped image or different one"})
+
+#             return output
+
+#         elif Images == "None":
+#             output = []
+
+#             Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
+#             Textbox2 = Textbox2.split(",")
+#             Textbox2_edited = [x.strip() for x in Textbox2]
+#             Textbox2_edited = list(Textbox2_edited)
+#             Textbox2_edited.append(UserInput)
+
+#             for i in Textbox2_edited:
+#                 messages.append({"role": "user", "content": i})
+
+#             print("messages after appending:", messages)
+
+#             messages.append({"role": "user", "content": UserInput})
+
+#             # Pop earlier messages if there are more than 10
+#             # if UserInput.lower() == "clear history":
+#             #     while len(messages) > 10:
+#             #         messages.pop(0)
+
+#             headers = {
+#                 "Content-Type": "application/json",
+#                 "Authorization": f"Bearer {auth}"
+#             }
+
+#             try:
+#                 # response = requests.post(host, headers=headers, json={
+#                 #     "messages": messages,
+#                 #     "model": model_llm
+#                 # }).json()
+
+#                 completion = openai.ChatCompletion.create(
+#                     model="gpt-3.5-turbo",
+#                     messages=messages
+#                 )
+
+
+#                 # reply = response["choices"][0]["message"]["content"]
+#                 reply = completion.choices[0].message['content']
+#                 print("RESPONSE TRY (NO IMAGE)", completion, reply)
+
+#             except:
+#                 reply = "Maximum messages: 15. Please clear your history and Try Again! (No Image)"
+#             output.append({"Mode": "Chat", "content": reply})
+
+#             return output
+#     else:
+#         return "Unauthorized"
+
+# user_inputs = [
+#     gr.Textbox(label="Platform", type="text"),
+#     gr.Textbox(label="User Input", type="text"),
+#     gr.Textbox(label="Image", type="text"),
+#     gr.Textbox(label="Textbox2", type="text"),
+#     gr.Textbox(label="Textbox3", type="password")
+# ]
+
+# iface = gr.Interface(
+#     fn=classify,
+#     inputs=user_inputs,
+#     outputs=gr.outputs.JSON(),
+#     title="Classifier",
+# )
+# iface.launch()
+
+
+############## NEW VERSION ##############

 import gradio as gr
 import numpy as np
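For reference, the commented-out "working" version boils down to: fetch the image bytes, resize and normalize them for the Teachable-Machine-style Keras model, pick the highest-scoring label, then ask the chat model for disposal advice. A condensed, runnable sketch of just the prediction step, assuming the same keras_model.h5 and labels.txt files the Space ships with:

```python
import numpy as np
import cv2 as cv
import tensorflow as tf

model = tf.keras.models.load_model("keras_model.h5")
with open("labels.txt", "r") as file:
    labels = file.read().splitlines()

def predict_label(image_bytes: bytes) -> tuple[str, float]:
    # Decode the raw bytes, resize to the model's 224x224 input,
    # and scale pixels to [-1, 1] exactly as the original code does.
    image = cv.imdecode(np.frombuffer(image_bytes, np.uint8), cv.IMREAD_COLOR)
    image = cv.resize(image, (224, 224))
    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
    data[0] = (image.astype(np.float32) / 127.0) - 1

    prediction = model.predict(data)
    best = int(np.argmax(prediction[0]))
    # labels.txt lines look like "0 Plastic"; drop the leading index,
    # mirroring labels[max_label_index].split(' ', 1)[1] above.
    return labels[best].split(" ", 1)[1], float(prediction[0][best])
```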

@@ -492,6 +704,7 @@ import requests
 import io
 import time
 from PIL import Image
+import base64
 import os
 import tensorflow as tf
 import random
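The new base64 import supports sending the image to the vision endpoint as a string. The added code below calls an encode_image(...) helper that does not appear in these hunks; a minimal sketch of what such a helper typically looks like (the function name comes from the diff, the PNG format is an assumption):

```python
import base64
import io
from PIL import Image

def encode_image(image: Image.Image) -> str:
    # Serialize the PIL image into an in-memory buffer, then base64-encode it.
    buffer = io.BytesIO()
    image.save(buffer, format="PNG")  # format is an assumption; not shown in the diff
    return base64.b64encode(buffer.getvalue()).decode("utf-8")
```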

@@ -507,6 +720,8 @@ auth = os.environ.get("auth")
 auth2 = os.environ.get("auth2")
 openai.api_key = os.environ.get("auth")
 openai.api_base = os.environ.get("host")
+vis_url = os.environ.get("vis_url")
+vis_auth = os.environ.get("vis_auth")
 data = None

 np.set_printoptions(suppress=True)
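With vis_url and vis_auth, the Space now reads a growing list of secrets, and os.environ.get silently returns None for any that are missing. A small startup check like the following (not part of the commit) would surface a misconfigured Space immediately:

```python
import os

# Secret names taken from the assignments above.
required = ["host", "code", "model", "content", "state", "system",
            "auth", "auth2", "vis_url", "vis_auth"]
missing = [name for name in required if os.environ.get(name) is None]
if missing:
    raise RuntimeError(f"Missing Space secrets: {', '.join(missing)}")
```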

@@ -546,6 +761,49 @@ def classify(platform, UserInput, Images, Textbox2, Textbox3):
             else:
                 pass

+            def vision():
+                with open("image.png", "wb") as file1_write:
+                    file1_write.write(image_data)
+
+                with open("image.png", "rb") as file1_read:
+                    file_content = file1_read.read()
+
+                image = Image.open(io.BytesIO(file_content))
+
+                base64_image_str = encode_image(image)
+
+                payload = {
+                    "content": [
+                        {
+                            "prompt": user,
+                            "image": base64_image_str,
+                        }
+                    ],
+                    "token": vis_auth,
+                }
+
+                url = vis_url
+                headers = {"Content-Type": "application/json"}
+
+                response = requests.post(url, headers=headers, data=json.dumps(payload))
+                results = response.json()
+                results = results["result"]
+
+                answer_index = results.find("Answer:")
+
+                if answer_index != -1:
+                    try:
+                        result_text = results[answer_index + len("Answer:"):].strip()
+                        print(result_text)
+                        return result_text
+                    except:
+                        pass
+                else:
+                    return "Answer: not found in the string."
+
+            if "vision" in UserInput.lower():
+                vision()
+
             image = cv.imdecode(np.frombuffer(image_data, np.uint8), cv.IMREAD_COLOR)
             image = cv.resize(image, (224, 224))
             image_array = np.asarray(image)
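The new vision() helper writes the fetched bytes to image.png, re-reads them into a PIL image, posts a base64 payload to the vis_url endpoint, and extracts the text after an "Answer:" marker from the JSON "result" field. It leans on names defined elsewhere in the file (image_data, user, encode_image, json); a self-contained sketch of the same request/parse logic, with those dependencies passed in explicitly (function and parameter names here are illustrative, not from the commit):

```python
import json
import requests

def query_vision_api(prompt: str, base64_image: str, url: str, token: str) -> str:
    # Same payload shape as vision(): one prompt/image pair plus an auth token.
    payload = {
        "content": [{"prompt": prompt, "image": base64_image}],
        "token": token,
    }
    response = requests.post(url, headers={"Content-Type": "application/json"},
                             data=json.dumps(payload))
    response.raise_for_status()
    results = response.json()["result"]

    # The endpoint is expected to embed its reply after an "Answer:" marker.
    marker = "Answer:"
    index = results.find(marker)
    if index == -1:
        return "Answer: not found in the string."
    return results[index + len(marker):].strip()
```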
|