Update app.py
app.py CHANGED
@@ -1,6 +1,7 @@
 import os
 import logging
 import json
+import time
 import gradio as gr
 from huggingface_hub import InferenceClient
 from langchain.embeddings import HuggingFaceEmbeddings
@@ -9,6 +10,7 @@ from langchain.schema import Document
 from duckduckgo_search import DDGS
 from dotenv import load_dotenv
 from functools import lru_cache
+from tenacity import retry, stop_after_attempt, wait_fixed
 
 # Load environment variables
 load_dotenv()
@@ -70,7 +72,11 @@ def create_web_search_vectors(search_results):
     logger.info(f"Created vectors for {len(documents)} search results.")
     return FAISS.from_documents(documents, embed)
 
-def get_response_with_search(query, system_prompt, model, use_embeddings, num_calls=3, temperature=0.2):
+@retry(stop=stop_after_attempt(3), wait=wait_fixed(2))
+def make_api_call(client, api_params):
+    return client.chat_completion(**api_params)
+
+def get_response_with_search(query, system_prompt, model, use_embeddings, history, num_calls=3, temperature=0.2):
     searcher = WebSearcher()
     search_results = searcher.search(query)
 
@@ -91,7 +97,11 @@ def get_response_with_search(query, system_prompt, model, use_embeddings, num_ca
 
     logger.info(f"Context created for query: {query}")
 
-    user_message = f"""Using the following context from web search results:
+    chat_history = "\n".join([f"Human: {h[0]}\nAI: {h[1]}" for h in history])
+    user_message = f"""Chat history:
+{chat_history}
+
+Using the following context from web search results:
 {context}
 
 Write a detailed and complete research document that fulfills the following user request: '{query}'."""
@@ -110,7 +120,7 @@ Write a detailed and complete research document that fulfills the following user
             "top_p": 0.8,
         }
        logger.info(f"Sending request to API with params: {json.dumps(api_params, indent=2, default=str)}")
-        response = client.chat_completion(**api_params)
+        response = make_api_call(client, api_params)
         logger.info(f"Raw response from model: {response}")
 
         if isinstance(response, dict):
@@ -128,6 +138,8 @@ Write a detailed and complete research document that fulfills the following user
         else:
             logger.error(f"Unexpected response format from the model: {type(response)}")
             return "Unexpected response format from the model. Please try again.", ""
+
+        time.sleep(1)  # Add a 1-second delay between API calls
     except Exception as e:
         logger.error(f"Error in get_response_with_search: {str(e)}")
         logger.info(f"Attempting fallback to {FALLBACK_MODEL}")
@@ -149,9 +161,10 @@ def respond(message, system_prompt, history, model, temperature, num_calls, use_
     logger.info(f"Number of API Calls: {num_calls}")
     logger.info(f"Use Embeddings: {use_embeddings}")
    logger.info(f"System Prompt: {system_prompt}")
+    logger.info(f"History: {history}")
 
     try:
-        main_content, sources = get_response_with_search(message, system_prompt, model, use_embeddings, num_calls=num_calls, temperature=temperature)
+        main_content, sources = get_response_with_search(message, system_prompt, model, use_embeddings, history, num_calls=num_calls, temperature=temperature)
         return main_content
     except Exception as e:
         logger.error(f"Error in respond function: {str(e)}")
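The retry wrapper added here is plain tenacity: stop_after_attempt(3) allows at most three tries and wait_fixed(2) sleeps two seconds between them. By default tenacity retries on any exception and raises tenacity.RetryError once the attempts are exhausted (passing reraise=True would surface the original error instead). A minimal sketch of that behavior, with a hypothetical FlakyClient stub standing in for InferenceClient:

from tenacity import retry, stop_after_attempt, wait_fixed

@retry(stop=stop_after_attempt(3), wait=wait_fixed(2))
def make_api_call(client, api_params):
    return client.chat_completion(**api_params)

class FlakyClient:
    """Hypothetical stand-in for InferenceClient: fails twice, then succeeds."""
    def __init__(self):
        self.attempts = 0

    def chat_completion(self, **api_params):
        self.attempts += 1
        if self.attempts < 3:
            raise RuntimeError("simulated 503 from the inference endpoint")
        return {"choices": [{"message": {"content": "ok"}}]}

client = FlakyClient()
print(make_api_call(client, {"messages": []}))  # succeeds on the third attempt, after two 2-second waits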
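The new chat_history line assumes Gradio's tuple-style history, a list of (user_message, bot_reply) pairs, which it flattens into alternating Human:/AI: lines before prepending them to the prompt. A quick illustration with made-up messages:

history = [
    ("What is FAISS?", "FAISS is a library for efficient similarity search."),
    ("Who maintains it?", "Meta AI Research."),
]

chat_history = "\n".join([f"Human: {h[0]}\nAI: {h[1]}" for h in history])
print(chat_history)
# Human: What is FAISS?
# AI: FAISS is a library for efficient similarity search.
# Human: Who maintains it?
# AI: Meta AI Research.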
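Only the "top_p": 0.8 entry of api_params is visible in these hunks, but since the dict is unpacked into client.chat_completion, its keys must be keyword arguments of InferenceClient.chat_completion (messages, model, max_tokens, temperature, top_p, ...). A hedged sketch of the call path; the model name, max_tokens value, and message payload are illustrative assumptions, not the app's actual settings:

from huggingface_hub import InferenceClient
from tenacity import retry, stop_after_attempt, wait_fixed

@retry(stop=stop_after_attempt(3), wait=wait_fixed(2))
def make_api_call(client, api_params):
    return client.chat_completion(**api_params)

client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")  # assumed model name
api_params = {
    "messages": [
        {"role": "system", "content": "You are a research assistant."},  # placeholder
        {"role": "user", "content": "Write a short note on FAISS."},     # placeholder
    ],
    "max_tokens": 1000,   # assumed; not visible in the diff
    "temperature": 0.2,   # the function's default
    "top_p": 0.8,         # the one value visible in the diff
}
print(make_api_call(client, api_params))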