Commit · 1492293
Parent(s): 17d9293
open ai support
Browse files
- app.py +8 -3
- openai_api.py +3 -3
- requirements.txt +2 -1
app.py
CHANGED
@@ -2,6 +2,7 @@ import gradio as gr
 from datetime import datetime, timedelta
 
 from gemini_api import model_api, sentiment, category, ord_num, NO_ORDER, NO_ITEM, food_return, cloth_return, item_identy, item_match
+from openai_api import model_api as openai_model_api, sentiment as openai_sentiment
 
 cust_qry_resp = {"senti":"", "cat":"", "num":""}
 
@@ -17,6 +18,7 @@ with gr.Blocks(title="Customer Support Assistant",
     state_match_item = gr.State([None, None])
 
     gr.Markdown("# Customer Support Assistant")
+    llm_api = gr.Radio(["gemini-1.0-pro", "gpt-3.5-turbo"], label="Choose LLM", value="gemini-1.0-pro")
     # Inputs from user
     with gr.Row():
         cust_qry = gr.Textbox(lines=5, type="text", label="Customer Query")
@@ -24,13 +26,16 @@
         btn_cust_qry = gr.Button("Analyze Query")
 
     # Model Output
-    @gr.render(inputs=[cust_qry], triggers=[btn_cust_qry.click])
+    @gr.render(inputs=[cust_qry, llm_api], triggers=[btn_cust_qry.click])
     # Function for prediction
-    def invoke_model(user_input):
+    def invoke_model(user_input, llm_api):
         if len(user_input) == 0:
             gr.Markdown("## No Customer Query Provided")
         else:
-            senti = model_api(user_input, sentiment)
+            if llm_api == "gemini-1.0-pro":
+                senti = model_api(user_input, sentiment)
+            else:
+                senti = openai_model_api(user_input, openai_sentiment)
             cat = model_api(user_input, category)
             num = model_api(user_input, ord_num)
             item = model_api(user_input, item_identy)
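For reference, the change relies on Gradio's gr.render decorator re-running on the button click with both the query and the new radio value as inputs. Below is a minimal, self-contained sketch of that pattern with the model calls stubbed out; it is an illustration of the routing only, not the Space's real code.

# Minimal sketch of the gr.Radio + gr.render pattern used in app.py.
# The backend call is stubbed; only the routing on the radio value is shown.
import gradio as gr

with gr.Blocks(title="Customer Support Assistant") as demo:
    llm_api = gr.Radio(["gemini-1.0-pro", "gpt-3.5-turbo"],
                       label="Choose LLM", value="gemini-1.0-pro")
    cust_qry = gr.Textbox(lines=5, label="Customer Query")
    btn_cust_qry = gr.Button("Analyze Query")

    @gr.render(inputs=[cust_qry, llm_api], triggers=[btn_cust_qry.click])
    def invoke_model(user_input, llm_choice):
        if len(user_input) == 0:
            gr.Markdown("## No Customer Query Provided")
        else:
            # In the real app this branches to the Gemini or OpenAI helper.
            gr.Markdown(f"Sentiment would be computed via **{llm_choice}**.")

if __name__ == "__main__":
    demo.launch()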
openai_api.py
CHANGED
@@ -1,6 +1,5 @@
 import openai
 from openai import OpenAI
-import json
 import os
 
 # Set your OpenAI API key (replace with your actual key)
@@ -13,10 +12,11 @@ def model_api(input, prompt_type):
     return prompt_type(input)
 
 def sentiment(text):
+    print(text)
     # Create a prompt for the model
     prompt = f"""You are trained to analyze and detect the sentiment of the given text.
     If you are unsure of an answer, you can say "not sure" and recommend the user review manually.
-    Analyze the following text and determine if the sentiment is:
+    Analyze the following text and determine if the sentiment is: POSITIVE, NEGATIVE, NEUTRAL or MIXED.
     {text}"""
 
     # Call the OpenAI API to generate a response
@@ -29,7 +29,7 @@ def sentiment(text):
         max_tokens=1, # Limit response to a single word
         temperature=0 # Keep response consistent
     )
-
+    print(response)
     # Extract the sentiment from the response
     sentiment = response.choices[0].message.content.strip().lower()
 
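Only fragments of the sentiment() helper are visible in this commit, so the following is a hedged sketch of what the full function plausibly looks like with the OpenAI v1 Python client. The client initialisation and the model name are assumptions (the model name is taken from the gpt-3.5-turbo option added in app.py); the prompt text, max_tokens=1, temperature=0, and the final .strip().lower() are copied from the diff.

# Hedged sketch of openai_api.py's sentiment() helper, assembled from the
# fragments shown in this commit; client setup and model name are assumptions.
import os
from openai import OpenAI

client = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))  # assumed initialisation

def sentiment(text):
    print(text)
    # Create a prompt for the model
    prompt = f"""You are trained to analyze and detect the sentiment of the given text.
    If you are unsure of an answer, you can say "not sure" and recommend the user review manually.
    Analyze the following text and determine if the sentiment is: POSITIVE, NEGATIVE, NEUTRAL or MIXED.
    {text}"""

    # Call the OpenAI API to generate a response
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",  # assumed; matches the Radio option in app.py
        messages=[{"role": "user", "content": prompt}],
        max_tokens=1,           # limit response to a single word
        temperature=0,          # keep response consistent
    )
    print(response)
    # Extract the sentiment from the response
    return response.choices[0].message.content.strip().lower()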
requirements.txt
CHANGED
@@ -1,2 +1,3 @@
 gradio
-google-generativeai
+google-generativeai
+langchain_openai