tommy24 committed on
Commit 206df47 · 1 Parent(s): 44d4647

Update app.py

Files changed (1)
  1. app.py +181 -6
app.py CHANGED
@@ -314,6 +314,177 @@
  # )
  # iface.launch()
 
+
+ ############################### MOST WORKING
+
+ # import gradio as gr
+ # import numpy as np
+ # import cv2 as cv
+ # import requests
+ # import io
+ # from PIL import Image
+ # import os
+ # import tensorflow as tf
+ # import random
+
+ # host = os.environ.get("host")
+ # code = os.environ.get("code")
+ # model_llm = os.environ.get("model")
+ # content = os.environ.get("content")
+ # state = os.environ.get("state")
+ # system = os.environ.get("system")
+ # auth = os.environ.get("auth")
+ # auth2 = os.environ.get("auth2")
+ # data = None
+
+ # np.set_printoptions(suppress=True)
+
+ # model = tf.keras.models.load_model('keras_model.h5')
+ # data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
+
+ # with open("labels.txt", "r") as file:
+ # labels = file.read().splitlines()
+
+ # messages = [
+ # {"role": "system", "content": system}
+ # ]
+
+ # def classify(platform, UserInput, Images, Textbox2, Textbox3):
+ # if Textbox3 == code:
+ # imageData = None
+ # if Images != "None":
+ # output = []
+ # headers = {
+ # "Authorization": f"Bearer {auth2}"
+ # }
+ # if platform == "wh":
+ # get_image = requests.get(Images, headers=headers)
+ # if get_image.status_code == 200:
+ # image_data = get_image.content
+ # elif platform == "web":
+ # print("WEB")
+ # else:
+ # pass
+
+ # image = cv.imdecode(np.frombuffer(image_data, np.uint8), cv.IMREAD_COLOR)
+ # image = cv.resize(image, (224, 224))
+ # image_array = np.asarray(image)
+ # normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
+ # data[0] = normalized_image_array
+
+ # prediction = model.predict(data)
+
+ # max_label_index = None
+ # max_prediction_value = -1
+
+ # print('Prediction')
+
+ # Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
+ # Textbox2 = Textbox2.split(",")
+ # Textbox2_edited = [x.strip() for x in Textbox2]
+ # Textbox2_edited = list(Textbox2_edited)
+ # Textbox2_edited.append(UserInput)
+ # print(UserInput)
+ # print("appending")
+ # messages.append({"role": "user", "content": UserInput})
+
+ # for i, label in enumerate(labels):
+ # prediction_value = float(prediction[0][i])
+ # rounded_value = round(prediction_value, 2)
+ # print(f'{label}: {rounded_value}')
+
+ # if prediction_value > max_prediction_value:
+ # max_label_index = i
+ # max_prediction_value = prediction_value
+
+ # if max_label_index is not None:
+ # max_label = labels[max_label_index].split(' ', 1)[1]
+ # max_rounded_prediction = round(max_prediction_value, 2)
+ # print(f'Maximum Prediction: {max_label} with a value of {max_rounded_prediction}')
+
+ # if max_rounded_prediction > 0.5:
+ # print("\nWays to dispose of this waste: " + max_label)
+ # messages.append({"role": "user", "content": content + " " + max_label})
+ # # messages.append({"role": "user", "content": max_label})
+
+ # print("IMAGE messages after appending:", messages)
+
+ # header = {
+ # "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
+ # "Content-Type": "application/json",
+ # "Authorization": f"Bearer {auth}"
+ # }
+
+ # try:
+ # response = requests.post(host, headers=header, json={
+ # "messages": messages,
+ # "model": model_llm
+ # }).json()
+ # print("RESPONSE TRY",response)
+ # reply = response["choices"][0]["message"]["content"]
+ # # messages.append({"role": "assistant", "content": reply})
+ # output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": reply})
+ # except:
+ # print("DOESN'T WORK")
+
+ # elif max_rounded_prediction < 0.5:
+ # output.append({"Mode": "Image", "type": "Not predictable", "prediction_value": max_rounded_prediction, "content": "Seems like the prediction rate is too low due to that won't be able to predict the type of material. Try again with a cropped image or different one"})
+
+ # return output
+
+ # elif Images == "None":
+ # output = []
+
+ # Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
+ # Textbox2 = Textbox2.split(",")
+ # Textbox2_edited = [x.strip() for x in Textbox2]
+ # Textbox2_edited = list(Textbox2_edited)
+ # Textbox2_edited.append(UserInput)
+
+ # for i in Textbox2_edited:
+ # messages.append({"role": "user", "content": i})
+
+ # print("messages after appending:", messages)
+
+ # messages.append({"role": "user", "content": UserInput})
+
+ # headers = {
+ # "Content-Type": "application/json",
+ # "Authorization": f"Bearer {auth}"
+ # }
+
+ # response = requests.post(host, headers=headers, json={
+ # "messages": messages,
+ # "model": model_llm
+ # }).json()
+
+ # reply = response["choices"][0]["message"]["content"]
+ # # messages.append({"role": "assistant", "content": reply})
+
+ # output.append({"Mode": "Chat", "content": reply})
+
+ # return output
+ # else:
+ # return "Unauthorized"
+
+ # user_inputs = [
+ # gr.Textbox(label="Platform", type="text"),
+ # gr.Textbox(label="User Input", type="text"),
+ # gr.Textbox(label="Image", type="text"),
+ # gr.Textbox(label="Textbox2", type="text"),
+ # gr.Textbox(label="Textbox3", type="password")
+ # ]
+
+ # iface = gr.Interface(
+ # fn=classify,
+ # inputs=user_inputs,
+ # outputs=gr.outputs.JSON(),
+ # title="Classifier",
+ # )
+ # iface.launch()
+
+ ############## TEST
+
  import gradio as gr
  import numpy as np
  import cv2 as cv
@@ -385,6 +556,10 @@ def classify(platform, UserInput, Images, Textbox2, Textbox3):
  print("appending")
  messages.append({"role": "user", "content": UserInput})
 
+ # Pop earlier messages if there are more than 10
+ while len(messages) > 10:
+ messages.pop(0)
+
  for i, label in enumerate(labels):
  prediction_value = float(prediction[0][i])
  rounded_value = round(prediction_value, 2)
@@ -402,8 +577,6 @@ def classify(platform, UserInput, Images, Textbox2, Textbox3):
  if max_rounded_prediction > 0.5:
  print("\nWays to dispose of this waste: " + max_label)
  messages.append({"role": "user", "content": content + " " + max_label})
- # messages.append({"role": "user", "content": max_label})
-
  print("IMAGE messages after appending:", messages)
 
  header = {
@@ -417,9 +590,8 @@ def classify(platform, UserInput, Images, Textbox2, Textbox3):
  "messages": messages,
  "model": model_llm
  }).json()
- print("RESPONSE TRY",response)
+ print("RESPONSE TRY", response)
  reply = response["choices"][0]["message"]["content"]
- # messages.append({"role": "assistant", "content": reply})
  output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": reply})
  except:
  print("DOESN'T WORK")
@@ -445,6 +617,10 @@ def classify(platform, UserInput, Images, Textbox2, Textbox3):
 
  messages.append({"role": "user", "content": UserInput})
 
+ # Pop earlier messages if there are more than 10
+ while len(messages) > 10:
+ messages.pop(0)
+
  headers = {
  "Content-Type": "application/json",
  "Authorization": f"Bearer {auth}"
@@ -456,8 +632,6 @@ def classify(platform, UserInput, Images, Textbox2, Textbox3):
  }).json()
 
  reply = response["choices"][0]["message"]["content"]
- # messages.append({"role": "assistant", "content": reply})
-
  output.append({"Mode": "Chat", "content": reply})
 
  return output
@@ -480,6 +654,7 @@ iface = gr.Interface(
  )
  iface.launch()
 
+
  # import gradio as gr
  # import numpy as np
  # import cv2 as cv
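
The main functional change in the active code is the rolling message window added in both the image and the chat branch: `while len(messages) > 10: messages.pop(0)` runs right after the user input is appended. A minimal standalone sketch of that behaviour, assuming the same list-of-dicts message format used in app.py; the helper name, the constant, and the demo loop below are illustrative and not part of the commit:

# Sketch of the rolling message window introduced by this commit (names are illustrative).
MAX_MESSAGES = 10

messages = [{"role": "system", "content": "system prompt"}]

def append_with_window(messages, role, content, limit=MAX_MESSAGES):
    # Append the new message, then drop the oldest entries until the list fits the window.
    messages.append({"role": role, "content": content})
    while len(messages) > limit:
        messages.pop(0)
    return messages

for i in range(15):
    append_with_window(messages, "user", f"message {i}")

print(len(messages))  # 10
print(messages[0])    # {'role': 'user', 'content': 'message 5'} -- the oldest entries are gone

In this trimming scheme the system message at index 0 is treated like any other entry, so once the window fills it is popped along with the oldest user messages.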