Update app.py

app.py
CHANGED
@@ -504,6 +504,8 @@ state = os.environ.get("state")
 system = os.environ.get("system")
 auth = os.environ.get("auth")
 auth2 = os.environ.get("auth2")
+openai.api_key = os.environ.get("auth")
+openai.api_base = os.environ.get("host")
 data = None
 
 np.set_printoptions(suppress=True)
@@ -598,13 +600,23 @@ def classify(platform, UserInput, Images, Textbox2, Textbox3):
         }
 
         try:
-            response = requests.post(host, headers=header, json={
-                "messages": messages,
-                "model": model_llm
-            }).json()
-
+            # response = requests.post(host, headers=header, json={
+            #     "messages": messages,
+            #     "model": model_llm
+            # }).json()
+
+            completion = openai.ChatCompletion.create(
+                model="gpt-3.5-turbo",
+                messages=messages
+            )
+
+
             # reply = response["choices"][0]["message"]["content"]
-            reply =
+            reply = completion.choices[0].message['content']
+            # # reply = response["choices"][0]["message"]["content"]
+            # reply = response.choices[0].message['content']
+            print("RESPONSE TRY", completion)
+
 
             output.append({"Mode": "Image", "type": max_label, "prediction_value": max_rounded_prediction, "content": reply})
         except:
@@ -642,13 +654,19 @@ def classify(platform, UserInput, Images, Textbox2, Textbox3):
         }
 
         try:
-            response = requests.post(host, headers=headers, json={
-                "messages": messages,
-                "model": model_llm
-            }).json()
+            # response = requests.post(host, headers=headers, json={
+            #     "messages": messages,
+            #     "model": model_llm
+            # }).json()
+
+            completion = openai.ChatCompletion.create(
+                model="gpt-3.5-turbo",
+                messages=messages
+            )
+
 
             # reply = response["choices"][0]["message"]["content"]
-            reply =
+            reply = completion.choices[0].message['content']
         except:
             reply = "Maximum messages: 15. Please clear your history and Try Again!"
         output.append({"Mode": "Chat", "content": reply})
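For reference, below is a minimal, self-contained sketch of the call pattern this commit switches to. It assumes the pre-1.0 openai Python package (where openai.ChatCompletion.create and the module-level api_key/api_base settings exist) and that the auth and host environment variables hold the API key and base URL, as the diff suggests; the message list and the printed output are illustrative only and are not taken from app.py.

# Sketch of the openai.ChatCompletion pattern introduced by this commit.
# Assumption: openai < 1.0 is installed and the "auth" / "host" environment
# variables carry the API key and (optionally) a custom API base URL.
import os
import openai

openai.api_key = os.environ.get("auth")
openai.api_base = os.environ.get("host")  # only needed for a non-default endpoint

# Illustrative conversation; the real app builds `messages` from user input.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize what this Space does in one sentence."},
]

try:
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
    )
    reply = completion.choices[0].message["content"]
except Exception:
    # Mirrors the app's fallback message when the request fails.
    reply = "Maximum messages: 15. Please clear your history and Try Again!"

print(reply)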