Update app.py
app.py CHANGED
@@ -39,55 +39,43 @@ def duckduckgo_search(query):
         logging.error(f"Error during DuckDuckGo search: {str(e)}")
         return []
 
-async def rephrase_query(query,
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-Previous conversation history:
-{history}
-
-Analyze the query and provide a rephrased version suitable for web search."""
+async def rephrase_query(query, context, model):
+    # Log the original query for debugging
+    logging.info(f"Original query: {query}")
+
+    prompt = f"""You are a highly intelligent conversational chatbot. Your task is to analyze the given context and new query, then decide whether to rephrase the query with or without incorporating the context. Follow these steps:
+1. Determine if the new query is a continuation of the previous conversation or an entirely new topic.
+2. If it's a continuation, rephrase the query by incorporating relevant information from the context to make it more specific and contextual.
+3. If it's a new topic, rephrase the query to make it more appropriate for a web search, focusing on clarity and accuracy without using the previous context.
+4. Provide ONLY the rephrased query without any additional explanation or reasoning.
+
+Context: {context}
+
+New query: {query}
+
+Rephrased query:"""
 
     client = InferenceClient(model, token=huggingface_token)
-
+
     try:
-        logging.info(f"Sending rephrase request to model {model}.")
         response = await asyncio.to_thread(
             client.text_generation,
-            prompt=
-            max_new_tokens=
+            prompt=prompt,
+            max_new_tokens=100,
             temperature=0.2,
         )
-
-
-
-
-
-
-
-
-
-            analysis = response[analysis_start + 10:analysis_end].strip()
-            rephrased_query = response[rephrased_start + 17:rephrased_end].strip()
-            logging.info(f"Rephrased query: {rephrased_query}")
-            return analysis, rephrased_query
-        else:
-            logging.error("Failed to parse the rephrase response.")
-            return None, query
+
+        # The response should be the rephrased query as per your prompt
+        rephrased_query = response.strip()
+
+        # Log the rephrased query
+        logging.info(f"Rephrased query: {rephrased_query}")
+
+        return rephrased_query
+
     except Exception as e:
         logging.error(f"Error in rephrase_query: {str(e)}")
-        return
+        return query  # Fallback to the original query if there's an error
 
 def create_web_search_vectors(search_results):
     logging.info(f"Creating web search vectors from {len(search_results)} search results.")