tommy24 committed
Commit 89b3647 · Parent(s): 6585dee

Update app.py

Files changed (1):
  1. app.py +224 -226
app.py CHANGED
@@ -315,181 +315,15 @@
  # iface.launch()


- # import gradio as gr
- # import numpy as np
- # import cv2 as cv
- # import requests
- # import io
- # from PIL import Image
- # import os
- # import tensorflow as tf
- # import random
-
- # host = os.environ.get("host")
- # code = os.environ.get("code")
- # model_llm = os.environ.get("model")
- # content = os.environ.get("content")
- # state = os.environ.get("state")
- # system = os.environ.get("system")
- # auth = os.environ.get("auth")
- # auth2 = os.environ.get("auth2")
- # data = None
-
- # np.set_printoptions(suppress=True)
-
- # # Load the model outside of the function
- # model = tf.keras.models.load_model('keras_model.h5')
-
- # # Load labels from a file
- # with open("labels.txt", "r") as file:
- # labels = file.read().splitlines()
-
- # messages = [
- # {"role": "system", "content": system}
- # ]
-
- # def classify(platform, UserInput, Images, Textbox2, Textbox3):
- # if Textbox3 == code:
- # imageData = None
- # if Images is not None:
- # output = []
- # headers = {
- # "Authorization": f"Bearer {auth2}"
- # }
- # if platform == "wh":
- # get_image = requests.get(Images, headers=headers)
- # if get_image.status_code == 200:
- # random_id = random.randint(1000, 9999)
- # file_extension = ".png"
- # filename = f"image_{random_id}{file_extension}"
- # with open(filename, "wb") as file:
- # file.write(get_image.content)
- # print(f"Saved image as: {filename}")
-
- # full_path = os.path.join(os.getcwd(), filename)
- # print(f"Saved image as: {full_path}")
- # elif platform == "web":
- # print("WEB")
- # # Handle web case if needed
- # else:
- # pass
-
- # image = cv.imread(full_path)
- # image = cv.resize(image, (224, 224))
- # image_array = np.asarray(image)
- # image_data = cv.resize(imageData, (224, 224))
- # normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
- # data[0] = normalized_image_array
-
- # prediction = model.predict(data)
-
- # max_label_index = None
- # max_prediction_value = -1
-
- # print('Prediction')
-
- # Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
- # Textbox2 = Textbox2.split(",")
- # Textbox2_edited = [x.strip() for x in Textbox2]
- # Textbox2_edited = list(Textbox2_edited)
- # Textbox2_edited.append(UserInput)
- # messages.append({"role": "user", "content": UserInput})
-
- # for i, label in enumerate(labels):
- # prediction_value = float(prediction[0][i])
- # rounded_value = round(prediction_value, 2)
- # print(f'{label}: {rounded_value}')
-
- # if prediction_value > max_prediction_value:
- # max_label_index = i
- # max_prediction_value = prediction_value
-
- # if max_label_index is not None:
- # max_label = labels[max_label_index].split(' ', 1)[1]
- # max_rounded_prediction = round(max_prediction_value, 2)
- # print(f'Maximum Prediction: {max_label} with a value of {max_rounded_prediction}')
-
- # if max_rounded_prediction > 0.5:
- # print("\nWays to dispose of this waste: " + max_label)
- # messages.append({"role": "user", "content": content + " " + max_label})
-
- # headers = {
- # "Content-Type": "application/json",
- # "Authorization": f"Bearer {auth}"
- # }
-
- # response = requests.post(host, headers=headers, json={
- # "messages": messages,
- # "model": model_llm
- # }).json()
-
- # reply = response["choices"][0]["message"]["content"]
- # messages.append({"role": "assistant", "content": reply})
-
- # output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": reply})
- # elif max_rounded_prediction < 0.5:
- # output.append({"Mode": "Image", "type": "Not predictable", "prediction_value": max_rounded_prediction, "content": "Seems like the prediction rate is too low due to that won't be able to predict the type of material. Try again with a cropped image or different one"})
-
- # return output
-
- # else:
- # output = []
-
- # Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
- # Textbox2 = Textbox2.split(",")
- # Textbox2_edited = [x.strip() for x in Textbox2]
- # Textbox2_edited = list(Textbox2_edited)
- # Textbox2_edited.append(UserInput)
-
- # for i in Textbox2_edited:
- # messages.append({"role": "user", "content": i})
-
- # print("messages after appending:", messages)
-
- # messages.append({"role": "user", "content": UserInput})
-
- # headers = {
- # "Content-Type": "application/json",
- # "Authorization": f"Bearer {auth}"
- # }
-
- # response = requests.post(host, headers=headers, json={
- # "messages": messages,
- # "model": model_llm
- # }).json()
-
- # reply = response["choices"][0]["message"]["content"]
- # messages.append({"role": "assistant", "content": reply})
-
- # output.append({"Mode": "Chat", "content": reply})
-
- # return output
- # else:
- # return "Unauthorized"
-
- # user_inputs = [
- # gr.Textbox(label="Platform", type="text"),
- # gr.Textbox(label="User Input", type="text"),
- # gr.Textbox(label="Image", type="text"),
- # gr.Textbox(label="Textbox2", type="text"),
- # gr.Textbox(label="Textbox3", type="password")
- # ]
-
- # iface = gr.Interface(
- # fn=classify,
- # inputs=user_inputs,
- # outputs=gr.outputs.JSON(),
- # title="Classifier",
- # )
- # iface.launch()
  import gradio as gr
  import numpy as np
  import cv2 as cv
  import requests
- import random
+ import io
+ from PIL import Image
  import os
  import tensorflow as tf
- import base64
+ import random

  host = os.environ.get("host")
  code = os.environ.get("code")
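This hunk drops the commented-out first draft of the app and swaps the imports: the base64 data-URL path goes away, and the new revision saves the downloaded image to a uniquely named local file, which is why `random` comes in (`io` and `PIL` are imported but not used in the hunks shown). A minimal sketch of that download step, assuming a reachable `image_url` and the bearer token the app reads from the `auth2` environment variable (`download_image` is a hypothetical helper, not a function from app.py):

```python
import os
import random
from typing import Optional

import requests


def download_image(image_url: str, token: str) -> Optional[str]:
    """Save the image behind image_url to a uniquely named local PNG.

    image_url and token are stand-ins for the Gradio "Image" input and the
    auth2 environment variable used in app.py; returns the absolute path of
    the saved file, or None if the request fails.
    """
    response = requests.get(image_url, headers={"Authorization": f"Bearer {token}"})
    if response.status_code != 200:
        return None
    filename = f"image_{random.randint(1000, 9999)}.png"
    with open(filename, "wb") as f:
        f.write(response.content)
    return os.path.join(os.getcwd(), filename)
```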
@@ -510,12 +344,13 @@ model = tf.keras.models.load_model('keras_model.h5')
  with open("labels.txt", "r") as file:
  labels = file.read().splitlines()

- messages = [{"role": "system", "content": system}]
+ messages = [
+ {"role": "system", "content": system}
+ ]

  def classify(platform, UserInput, Images, Textbox2, Textbox3):
  if Textbox3 == code:
  imageData = None
- image_data_url = None # Initialize image_data_url
  if Images is not None:
  output = []
  headers = {
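The `messages` list now starts as a multi-line literal holding only the system prompt; each call to `classify` appends user and assistant turns to this module-level list before posting it to the LLM endpoint, so the conversation grows across every request the Space serves. A minimal sketch of that request/response round trip, mirroring the payload used later in this file, with `host`, `model_llm`, and `auth` standing in for the environment-provided values (`chat` is a hypothetical helper):

```python
import requests


def chat(messages, host, model_llm, auth):
    """POST an OpenAI-style chat payload and return the assistant reply.

    host, model_llm and auth mirror the environment variables read at the top
    of app.py; the response shape assumes a chat-completions-style API with a
    choices[0].message.content field, as used below.
    """
    response = requests.post(
        host,
        headers={
            "Content-Type": "application/json",
            "Authorization": f"Bearer {auth}",
        },
        json={"messages": messages, "model": model_llm},
        timeout=60,
    ).json()
    reply = response["choices"][0]["message"]["content"]
    messages.append({"role": "assistant", "content": reply})
    return reply
```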
@@ -524,80 +359,76 @@ def classify(platform, UserInput, Images, Textbox2, Textbox3):
  if platform == "wh":
  get_image = requests.get(Images, headers=headers)
  if get_image.status_code == 200:
- # Convert the image data to base64
- image_base64 = base64.b64encode(get_image.content).decode("utf-8")
-
- # Create a data URL
- image_data_url = f"data:image/png;base64,{image_base64}"
-
+ random_id = random.randint(1000, 9999)
+ file_extension = ".png"
+ filename = f"image_{random_id}{file_extension}"
+ with open(filename, "wb") as file:
+ file.write(get_image.content)
+ print(f"Saved image as: {filename}")
  elif platform == "web":
  print("WEB")
  # Handle web case if needed
  else:
  pass

- if image_data_url is not None:
- # Load the image from image_data_url
- image_data = base64.b64decode(image_base64)
- nparr = np.frombuffer(image_data, np.uint8)
- image = cv.imdecode(nparr, cv.IMREAD_COLOR)
-
- image = cv.resize(image, (224, 224))
- image_array = np.asarray(image)
- normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
- data[0] = normalized_image_array
+ image = cv.imread("https://tommy24-classifier.hf.space/file=/tmp/",filename)
+ image = cv.resize(image, (224, 224))
+ image_array = np.asarray(image)
+ image_data = cv.resize(imageData, (224, 224))
+ normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
+ data[0] = normalized_image_array

- prediction = model.predict(data)
+ prediction = model.predict(data)

- max_label_index = None
- max_prediction_value = -1
+ max_label_index = None
+ max_prediction_value = -1

- print('Prediction')
+ print('Prediction')

- Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
- Textbox2 = Textbox2.split(",")
- Textbox2_edited = [x.strip() for x in Textbox2]
- Textbox2_edited = list(Textbox2_edited)
- Textbox2_edited.append(UserInput)
- messages.append({"role": "user", "content": UserInput})
+ Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
+ Textbox2 = Textbox2.split(",")
+ Textbox2_edited = [x.strip() for x in Textbox2]
+ Textbox2_edited = list(Textbox2_edited)
+ Textbox2_edited.append(UserInput)
+ messages.append({"role": "user", "content": UserInput})

- for i, label in enumerate(labels):
- prediction_value = float(prediction[0][i])
- rounded_value = round(prediction_value, 2)
- print(f'{label}: {rounded_value}')
+ for i, label in enumerate(labels):
+ prediction_value = float(prediction[0][i])
+ rounded_value = round(prediction_value, 2)
+ print(f'{label}: {rounded_value}')

- if prediction_value > max_prediction_value:
- max_label_index = i
- max_prediction_value = prediction_value
+ if prediction_value > max_prediction_value:
+ max_label_index = i
+ max_prediction_value = prediction_value

- if max_label_index is not None:
- max_label = labels[max_label_index].split(' ', 1)[1]
- max_rounded_prediction = round(max_prediction_value, 2)
- print(f'Maximum Prediction: {max_label} with a value of {max_rounded_prediction}')
+ if max_label_index is not None:
+ max_label = labels[max_label_index].split(' ', 1)[1]
+ max_rounded_prediction = round(max_prediction_value, 2)
+ print(f'Maximum Prediction: {max_label} with a value of {max_rounded_prediction}')

- if max_rounded_prediction > 0.5:
- print("\nWays to dispose of this waste: " + max_label)
- messages.append({"role": "user", "content": content + " " + max_label})
+ if max_rounded_prediction > 0.5:
+ print("\nWays to dispose of this waste: " + max_label)
+ messages.append({"role": "user", "content": content + " " + max_label})

- headers = {
- "Content-Type": "application/json",
- "Authorization": f"Bearer {auth}"
- }
+ headers = {
+ "Content-Type": "application/json",
+ "Authorization": f"Bearer {auth}"
+ }

- response = requests.post(host, headers=headers, json={
- "messages": messages,
- "model": model_llm
- }).json()
+ response = requests.post(host, headers=headers, json={
+ "messages": messages,
+ "model": model_llm
+ }).json()

- reply = response["choices"][0]["message"]["content"]
- messages.append({"role": "assistant", "content": reply})
+ reply = response["choices"][0]["message"]["content"]
+ messages.append({"role": "assistant", "content": reply})

- output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": reply})
- elif max_rounded_prediction < 0.5:
- output.append({"Mode": "Image", "type": "Not predictable", "prediction_value": max_rounded_prediction, "content": "Seems like the prediction rate is too low due to that won't be able to predict the type of material. Try again with a cropped image or different one"})
+ output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": reply})
+ elif max_rounded_prediction < 0.5:
+ output.append({"Mode": "Image", "type": "Not predictable", "prediction_value": max_rounded_prediction, "content": "Seems like the prediction rate is too low due to that won't be able to predict the type of material. Try again with a cropped image or different one"})

- output.append({"Mode": "Image", "type": "Data URL", "data_url": image_data_url})
  return output
+
  else:
  output = []
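In the added block above, `cv.imread` is given a URL string as its first argument and the saved filename as its second, whereas OpenCV expects a local path plus an optional read flag; `imageData` is still `None` when it is passed to `cv.resize`; and `data` is never allocated as an array in the hunks shown before `data[0] = ...` is assigned. A hedged sketch of the preprocessing and prediction step that reads the locally saved file instead, assuming the usual Teachable-Machine-style (1, 224, 224, 3) input and the "<index> <name>" format of labels.txt used elsewhere in this file (`predict_material` is a hypothetical helper):

```python
import cv2 as cv
import numpy as np


def predict_material(model, labels, image_path):
    """Classify a saved image with the Keras model and return (label, score).

    image_path is the file written by the download step; labels are the lines
    of labels.txt, each assumed to look like "0 plastic".
    """
    image = cv.imread(image_path)           # read from disk, not from a URL
    image = cv.resize(image, (224, 224))
    image_array = np.asarray(image, dtype=np.float32)
    normalized = (image_array / 127.0) - 1   # scale pixels to roughly [-1, 1]

    data = np.ndarray((1, 224, 224, 3), dtype=np.float32)  # batch of one
    data[0] = normalized

    prediction = model.predict(data)[0]
    best = int(np.argmax(prediction))
    label = labels[best].split(" ", 1)[1]    # drop the leading class index
    return label, round(float(prediction[best]), 2)
```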
@@ -636,7 +467,7 @@ def classify(platform, UserInput, Images, Textbox2, Textbox3):
  user_inputs = [
  gr.Textbox(label="Platform", type="text"),
  gr.Textbox(label="User Input", type="text"),
- gr.Textbox(label="Images", type="text"),
+ gr.Textbox(label="Image", type="text"),
  gr.Textbox(label="Textbox2", type="text"),
  gr.Textbox(label="Textbox3", type="password")
  ]
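The only change here renames the third input's label from "Images" to "Image"; the function signature and the JSON output stay the same. For reference, a hedged sketch of equivalent interface wiring, using a stub in place of the real `classify` and `gr.JSON`, which replaces the legacy `gr.outputs.JSON()` used in app.py on newer Gradio releases:

```python
import gradio as gr


def classify_stub(platform, user_input, image_url, textbox2, textbox3):
    # Placeholder for the real classify() defined in app.py.
    return [{"Mode": "Chat", "content": f"echo: {user_input}"}]


iface = gr.Interface(
    fn=classify_stub,
    inputs=[
        gr.Textbox(label="Platform"),
        gr.Textbox(label="User Input"),
        gr.Textbox(label="Image"),
        gr.Textbox(label="Textbox2"),
        gr.Textbox(label="Textbox3", type="password"),
    ],
    outputs=gr.JSON(),
    title="Classifier",
)

if __name__ == "__main__":
    iface.launch()
```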
@@ -648,3 +479,170 @@ iface = gr.Interface(
  title="Classifier",
  )
  iface.launch()
+
+ # import gradio as gr
+ # import numpy as np
+ # import cv2 as cv
+ # import requests
+ # import random
+ # import os
+ # import tensorflow as tf
+ # import base64
+
+ # host = os.environ.get("host")
+ # code = os.environ.get("code")
+ # model_llm = os.environ.get("model")
+ # content = os.environ.get("content")
+ # state = os.environ.get("state")
+ # system = os.environ.get("system")
+ # auth = os.environ.get("auth")
+ # auth2 = os.environ.get("auth2")
+ # data = None
+
+ # np.set_printoptions(suppress=True)
+
+ # # Load the model outside of the function
+ # model = tf.keras.models.load_model('keras_model.h5')
+
+ # # Load labels from a file
+ # with open("labels.txt", "r") as file:
+ # labels = file.read().splitlines()
+
+ # messages = [{"role": "system", "content": system}]
+
+ # def classify(platform, UserInput, Images, Textbox2, Textbox3):
+ # if Textbox3 == code:
+ # imageData = None
+ # image_data_url = None # Initialize image_data_url
+ # if Images is not None:
+ # output = []
+ # headers = {
+ # "Authorization": f"Bearer {auth2}"
+ # }
+ # if platform == "wh":
+ # get_image = requests.get(Images, headers=headers)
+ # if get_image.status_code == 200:
+ # # Convert the image data to base64
+ # image_base64 = base64.b64encode(get_image.content).decode("utf-8")
+
+ # # Create a data URL
+ # image_data_url = f"data:image/png;base64,{image_base64}"
+
+ # elif platform == "web":
+ # print("WEB")
+ # # Handle web case if needed
+ # else:
+ # pass
+
+ # if image_data_url is not None:
+ # # Load the image from image_data_url
+ # image_data = base64.b64decode(image_base64)
+ # nparr = np.frombuffer(image_data, np.uint8)
+ # image = cv.imdecode(nparr, cv.IMREAD_COLOR)
+
+ # image = cv.resize(image, (224, 224))
+ # image_array = np.asarray(image)
+ # normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1
+ # data[0] = normalized_image_array
+
+ # prediction = model.predict(data)
+
+ # max_label_index = None
+ # max_prediction_value = -1
+
+ # print('Prediction')
+
+ # Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
+ # Textbox2 = Textbox2.split(",")
+ # Textbox2_edited = [x.strip() for x in Textbox2]
+ # Textbox2_edited = list(Textbox2_edited)
+ # Textbox2_edited.append(UserInput)
+ # messages.append({"role": "user", "content": UserInput})
+
+ # for i, label in enumerate(labels):
+ # prediction_value = float(prediction[0][i])
+ # rounded_value = round(prediction_value, 2)
+ # print(f'{label}: {rounded_value}')
+
+ # if prediction_value > max_prediction_value:
+ # max_label_index = i
+ # max_prediction_value = prediction_value
+
+ # if max_label_index is not None:
+ # max_label = labels[max_label_index].split(' ', 1)[1]
+ # max_rounded_prediction = round(max_prediction_value, 2)
+ # print(f'Maximum Prediction: {max_label} with a value of {max_rounded_prediction}')
+
+ # if max_rounded_prediction > 0.5:
+ # print("\nWays to dispose of this waste: " + max_label)
+ # messages.append({"role": "user", "content": content + " " + max_label})
+
+ # headers = {
+ # "Content-Type": "application/json",
+ # "Authorization": f"Bearer {auth}"
+ # }
+
+ # response = requests.post(host, headers=headers, json={
+ # "messages": messages,
+ # "model": model_llm
+ # }).json()
+
+ # reply = response["choices"][0]["message"]["content"]
+ # messages.append({"role": "assistant", "content": reply})
+
+ # output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": reply})
+ # elif max_rounded_prediction < 0.5:
+ # output.append({"Mode": "Image", "type": "Not predictable", "prediction_value": max_rounded_prediction, "content": "Seems like the prediction rate is too low due to that won't be able to predict the type of material. Try again with a cropped image or different one"})
+
+ # output.append({"Mode": "Image", "type": "Data URL", "data_url": image_data_url})
+ # return output
+ # else:
+ # output = []
+
+ # Textbox2 = Textbox2.replace("[", "").replace("]", "").replace("'", "")
+ # Textbox2 = Textbox2.split(",")
+ # Textbox2_edited = [x.strip() for x in Textbox2]
+ # Textbox2_edited = list(Textbox2_edited)
+ # Textbox2_edited.append(UserInput)
+
+ # for i in Textbox2_edited:
+ # messages.append({"role": "user", "content": i})
+
+ # print("messages after appending:", messages)
+
+ # messages.append({"role": "user", "content": UserInput})
+
+ # headers = {
+ # "Content-Type": "application/json",
+ # "Authorization": f"Bearer {auth}"
+ # }
+
+ # response = requests.post(host, headers=headers, json={
+ # "messages": messages,
+ # "model": model_llm
+ # }).json()
+
+ # reply = response["choices"][0]["message"]["content"]
+ # messages.append({"role": "assistant", "content": reply})
+
+ # output.append({"Mode": "Chat", "content": reply})
+
+ # return output
+ # else:
+ # return "Unauthorized"
+
+ # user_inputs = [
+ # gr.Textbox(label="Platform", type="text"),
+ # gr.Textbox(label="User Input", type="text"),
+ # gr.Textbox(label="Images", type="text"),
+ # gr.Textbox(label="Textbox2", type="text"),
+ # gr.Textbox(label="Textbox3", type="password")
+ # ]
+
+ # iface = gr.Interface(
+ # fn=classify,
+ # inputs=user_inputs,
+ # outputs=gr.outputs.JSON(),
+ # title="Classifier",
+ # )
+ # iface.launch()
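The block appended at the end of the file preserves the previous approach as comments: instead of writing to disk, it base64-encodes the downloaded bytes into a data URL and decodes them back into an OpenCV image in memory. A minimal sketch of that round trip, assuming PNG bytes from the same `requests.get` call (`bytes_to_bgr_image` is a hypothetical helper):

```python
import base64

import cv2 as cv
import numpy as np


def bytes_to_bgr_image(image_bytes: bytes):
    """Round-trip raw image bytes through a data URL and back to a BGR array,
    mirroring the commented-out variant kept at the end of app.py."""
    image_base64 = base64.b64encode(image_bytes).decode("utf-8")
    data_url = f"data:image/png;base64,{image_base64}"

    # Strip the data-URL header and decode the payload into an OpenCV image.
    payload = data_url.split(",", 1)[1]
    nparr = np.frombuffer(base64.b64decode(payload), np.uint8)
    return cv.imdecode(nparr, cv.IMREAD_COLOR)
```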