Update app.py
app.py
CHANGED
@@ -1,64 +1,156 @@
-# [previous 64-line implementation of app.py removed]
+import dotenv
+from openai import OpenAI
+import os
+from pypdf import PdfReader
import gradio as gr
+import json
+import requests
+
+dotenv.load_dotenv(override=True)
+
+# Read the Gemini API key from the environment instead of hardcoding a secret in source
+# (GOOGLE_API_KEY is the assumed variable name here).
+openai_api_key = os.getenv("GOOGLE_API_KEY")
+pushover_user = os.getenv("PUSHOVER_USER")
+pushover_token = os.getenv("PUSHOVER_TOKEN")
+
+if pushover_user and pushover_token:
+    print("Pushover user and token found")
+else:
+    print("Pushover user and token not found")
+
+def send_pushover_notification(message):
+    url = "https://api.pushover.net/1/messages.json"
+    data = {
+        "token": os.getenv("PUSHOVER_TOKEN"),
+        "user": os.getenv("PUSHOVER_USER"),
+        "message": message
+    }
+    response = requests.post(url, data=data)
+    if response.status_code == 200:
+        print("Pushover notification sent successfully")
+    else:
+        print("Failed to send Pushover notification")
+
+def get_pdf_text(pdf_path):
+    reader = PdfReader(pdf_path)
+    text = ""
+    for page in reader.pages:
+        text += page.extract_text()
+    return text
+
+def record_user_details(email, name="Not provided", notes="Not provided"):
+    print(f"User details recorded: Name: {name}, Email: {email}, Notes: {notes}")
+    send_pushover_notification(f"Recording interest from: Name: {name}, Email: {email}, Notes: {notes}")
+    return {"recorded": "ok"}
+
+def record_unknown_question(question):
+    print(f"Unknown question recorded: {question}")
+    send_pushover_notification(f"Unknown question recorded: {question}")
+    return {"recorded": "ok"}
+
+record_user_details_json = {
+    "name": "record_user_details",
+    "description": "Record that a user is interested in getting in touch, along with their email address",
+    "parameters": {
+        "type": "object",
+        "properties": {
+            "email": {"type": "string", "description": "The email of the user"},
+            "name": {"type": "string", "description": "The name of the user, if they provided it"},
+            "notes": {"type": "string", "description": "Any additional information about the conversation that's worth recording for context"}
+        },
+        "required": ["email"],
+        "additionalProperties": False
+    }
+}
+
+record_unknown_question_json = {
+    "name": "record_unknown_question",
+    "description": "Record any question that couldn't be answered",
+    "parameters": {
+        "type": "object",
+        "properties": {"question": {"type": "string", "description": "The question that the user asked"}}
+    }
+}
+
+tools = [{"type": "function", "function": record_user_details_json},
+         {"type": "function", "function": record_unknown_question_json}]
+
|
76 |
+
class Me :
|
77 |
+
def __init__(self):
|
78 |
+
self.openai = OpenAI()
|
79 |
+
self.name = "Ravi Prakash Kewat"
|
80 |
+
self.linked_profile = get_pdf_text("me/Ravi_LinkedInProfile.pdf")
|
81 |
+
with open("me/Ravi_summary.txt", "r", encoding="utf-8") as file:
|
82 |
+
self.summary = file.read()
|
83 |
+
self.name = "Ravi Prakash Kewat"
|
84 |
+
self.client = OpenAI(api_key=openai_api_key, base_url="https://generativelanguage.googleapis.com/v1beta/openai/")
|
85 |
+
|
86 |
+
def handle_tool_calls(self, tool_calls):
|
87 |
+
results = []
|
88 |
+
for tool_call in tool_calls :
|
89 |
+
tool_name = tool_call.function.name
|
90 |
+
print(f"Tool called: {tool_name}", flush=True)
|
91 |
+
|
92 |
+
tool = globals().get(tool_name)
|
93 |
+
arguments = json.loads(tool_call.function.arguments)
|
94 |
+
|
95 |
+
result = tool(**arguments) if tool else {}
|
96 |
+
results.append({"role" : "tool", "content" : json.dumps(result), "tool_call_id" : tool_call.id})
|
97 |
+
return results
|
98 |
+
|
99 |
+
def system_prompt(self):
|
100 |
+
system_prompt = f"You are acting as Ravi Prakash Kewat. You are answering questions on Ravi Prakash Kewat's website, \
|
101 |
+
particularly questions related to Ravi Prakash Kewat's career, background, skills and experience. \
|
102 |
+
Your responsibility is to represent Ravi Prakash Kewat for interactions on the website as faithfully as possible. \
|
103 |
+
You are given a summary of Ravi Prakash Kewat's background and LinkedIn profile which you can use to answer questions. \
|
104 |
+
Be professional and engaging, as if talking to a potential client or future employer who came across the website. \
|
105 |
+
If you don't know the answer to any question, use your record_unknown_question tool to record the question that you couldn't answer, even if it's about something trivial or unrelated to career. \
|
106 |
+
If the user is engaging in discussion, try to steer them towards getting in touch via email; ask for their email and record it using your record_user_details tool. "
|
107 |
+
|
108 |
+
system_prompt += f"\n\n## Summary:\n{self.summary}\n\n## LinkedIn Profile:\n{self.linked_profile}\n\n"
|
109 |
+
system_prompt += f"With this context, please chat with the user, always staying in character as {self.name}."
|
110 |
+
return system_prompt
|
111 |
+
|
112 |
+
|
113 |
+
|
+    def chat_with_me(self, message, history):
+        messages = [{"role": "system", "content": self.system_prompt()}] + history + [{"role": "user", "content": message}]
+        done = False
+        while not done:
+
+            # Call the LLM, passing the tool definitions
+            response = self.client.chat.completions.create(model="gemini-1.5-flash", messages=messages, tools=tools)
+
+            finish_reason = response.choices[0].finish_reason
+
+            # If the model asked for tool calls, run them and loop again with the results appended
+            if finish_reason == "tool_calls":
+                message = response.choices[0].message
+                tool_calls = message.tool_calls
+                results = self.handle_tool_calls(tool_calls)
+                messages.append(message)
+                messages.extend(results)
+            else:
+                done = True
+        return response.choices[0].message.content


if __name__ == "__main__":
+    me = Me()
+    gr.ChatInterface(me.chat_with_me, type="messages").launch()
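For local testing, the sketch below shows one way to exercise the new chat loop before launching the Gradio UI. It assumes the me/Ravi_LinkedInProfile.pdf and me/Ravi_summary.txt files that Me.__init__ reads, plus environment variables PUSHOVER_USER, PUSHOVER_TOKEN, and GOOGLE_API_KEY (the variable name assumed above for the Gemini key) in a .env file; importing from "app" assumes the Space keeps this code in app.py.

# Hypothetical smoke test (not part of app.py): run one chat turn directly,
# assuming the .env variables and me/ files described above are in place.
from app import Me

if __name__ == "__main__":
    me = Me()
    reply = me.chat_with_me("What kind of work does Ravi do?", history=[])
    print(reply)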