pvanand committed on
Commit f9cd0c5 · 1 Parent(s): 5b2a137

Update actions/actions.py

Files changed (1)
  1. actions/actions.py +55 -4
actions/actions.py CHANGED
@@ -31,15 +31,19 @@ secret_value_0 = os.environ.get("openai")
 openai.api_key = secret_value_0
 # Provide your OpenAI API key

-def generate_openai_response(query, model_engine="text-davinci-002", max_tokens=124, temperature=0.8):
+def generate_openai_response(query, model_engine="text-davinci-002", max_tokens=256, temperature=0.8):
     """Generate a response using the OpenAI API."""

     # Run the main function from search_content.py and store the results in a variable
-    results = main_search(query)
+
+    #results = main_search(query)
+    results = main_search(query["current_user_query"])

     # Create context from the results
     context = "".join([f"#{str(i)}" for i in results])[:2014] # Trim the context to 2014 characters - Modify as necessory
-    prompt_template = f"Relevant context: {context}\n\n Answer the question in detail: {query}"
+
+    #prompt_template = f"Relevant context: {context}\n\n Answer the question in detail: {query}"
+    prompt_template = f"Relevant context: {context}\n\nPrevious User Query: {conversation_data['previous_user_query']}\nPrevious Bot Response: {conversation_data['previous_bot_response']}\n\n Answer the current_user_query in detail:Current User Query: {conversation_data['current_user_query']}"

     # Generate a response using the OpenAI API
     response = openai.Completion.create(
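The updated function now reads the current query from a dictionary (query["current_user_query"]) and interpolates fields from a conversation_data dictionary into the prompt; conversation_data is not a parameter of the function, so it has to be available in the surrounding scope when this code runs. For reference, a self-contained sketch of the resulting prompt layout, with placeholder strings standing in for the search context and the conversation turns:

# Illustrative only: placeholder values showing the prompt layout introduced above.
conversation_data = {
    "previous_user_query": "What file formats can I upload?",
    "previous_bot_response": "CSV and XLSX files are supported.",
    "current_user_query": "Is there a size limit?",
}
context = "#result snippet 1#result snippet 2"  # would normally come from main_search()

prompt_template = (
    f"Relevant context: {context}\n\n"
    f"Previous User Query: {conversation_data['previous_user_query']}\n"
    f"Previous Bot Response: {conversation_data['previous_bot_response']}\n\n"
    f" Answer the current_user_query in detail:"
    f"Current User Query: {conversation_data['current_user_query']}"
)
print(prompt_template)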
@@ -64,7 +68,8 @@ class GetOpenAIResponse(Action):
             domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:

         # Use OpenAI API to generate a response
-        query = tracker.latest_message.get('text')
+        #query = tracker.latest_message.get('text')
+        query = [FollowupAction("action_extract_history")]
         response = generate_openai_response(query)

         # Output the generated response to user
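In the Rasa SDK, FollowupAction is an event: it only queues another action when it is returned from a custom action's run() method as part of the event list, so assigning it to query and passing that list into generate_openai_response does not by itself run action_extract_history. A minimal sketch of the return-the-event pattern, assuming rasa_sdk is installed (the class and action name below are illustrative; only action_extract_history comes from this diff):

from typing import Any, Dict, List, Text

from rasa_sdk import Action, Tracker
from rasa_sdk.events import FollowupAction
from rasa_sdk.executor import CollectingDispatcher


class ActionRouteToHistory(Action):
    """Illustrative action that hands control to action_extract_history."""

    def name(self) -> Text:
        return "action_route_to_history"

    def run(self, dispatcher: CollectingDispatcher, tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        # Returning the FollowupAction event is what makes Rasa execute
        # action_extract_history as the next action.
        return [FollowupAction("action_extract_history")]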
@@ -211,3 +216,49 @@ class SayHelloWorld(Action):
         # Output the generated response to user
         generated_text = response.choices[0].text
         dispatcher.utter_message(text=generated_text)
+
+class ExtractConversationhistory(Action):
+    def name(self) -> Text:
+        return "action_extract_history"
+
+    def run(self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
+        conversation_history = tracker.events
+
+        user_queries = []
+        bot_responses = []
+        current_user_query = ""
+        previous_user_query = None
+        previous_bot_response = None
+
+        for event in conversation_history:
+            if event.get("event") == "user":
+                user_queries.append(event.get("text"))
+            elif event.get("event") == "bot":
+                bot_responses.append(event.get("text"))
+
+        if user_queries:
+            if len(user_queries) >= 2:
+                previous_user_query = user_queries[-2]
+            else:
+                pass
+
+            try:
+                current_user_query = user_queries[-1]
+            except:
+                pass
+
+        if bot_responses:
+            if len(bot_responses) >= 2:
+                previous_bot_response = bot_responses[-2]
+            else:
+                pass
+        else:
+            pass
+
+        conversation_data = {
+            "previous_user_query": previous_user_query,
+            "previous_bot_response": previous_bot_response,
+            "current_user_query": current_user_query
+        }
+        # Now you can use the conversation_data dictionary as needed.
+        return conversation_data
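The new action returns the conversation_data dict directly, while the Rasa SDK expects run() to return a list of events. One common way to hand the extracted history to other actions is to persist it in slots instead; a minimal sketch of that variant, assuming rasa_sdk is available (the class name and slot names are hypothetical and would also need to be declared in the domain):

from typing import Any, Dict, List, Text

from rasa_sdk import Action, Tracker
from rasa_sdk.events import SlotSet
from rasa_sdk.executor import CollectingDispatcher


class ActionExtractHistoryToSlots(Action):
    """Illustrative variant that stores the extracted turns in slots."""

    def name(self) -> Text:
        return "action_extract_history_to_slots"

    def run(self, dispatcher: CollectingDispatcher, tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        user_queries = [e.get("text") for e in tracker.events if e.get("event") == "user"]
        bot_responses = [e.get("text") for e in tracker.events if e.get("event") == "bot"]

        # SlotSet events are what persist values on the tracker for later actions.
        return [
            SlotSet("previous_user_query", user_queries[-2] if len(user_queries) >= 2 else None),
            SlotSet("previous_bot_response", bot_responses[-2] if len(bot_responses) >= 2 else None),
            SlotSet("current_user_query", user_queries[-1] if user_queries else ""),
        ]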