Spaces:
Paused
Paused
Hemang Thakur
committed on
Commit
·
c8abe84
1
Parent(s):
0c9661a
fixed chat anthropic
Browse files- src/crawl/crawler.py +3 -2
- src/utils/api_key_manager.py +16 -36
src/crawl/crawler.py
CHANGED
@@ -744,8 +744,9 @@ Query:"""
|
|
744 |
print(f"✅ Successfully fetched content from {url}") if self.verbose else None
|
745 |
return content
|
746 |
else:
|
747 |
-
|
748 |
-
|
|
|
749 |
|
750 |
print(f"🚫 Failed to fetch content from {url} after {max_attempts} attempts.") if self.verbose else None
|
751 |
return None
|
|
|
744 |
print(f"✅ Successfully fetched content from {url}") if self.verbose else None
|
745 |
return content
|
746 |
else:
|
747 |
+
if max_attempts > 1:
|
748 |
+
print(f"🚫 Failed to fetch content from {url}. Retrying in {delay} seconds...") if self.verbose else None
|
749 |
+
await asyncio.sleep(delay)
|
750 |
|
751 |
print(f"🚫 Failed to fetch content from {url} after {max_attempts} attempts.") if self.verbose else None
|
752 |
return None
|
src/utils/api_key_manager.py
CHANGED
@@ -275,47 +275,27 @@ class APIKeyManager:
|
|
275 |
api_key = self.get_next_api_key(provider)
|
276 |
print(f"Using provider={provider}, model_name={model_name}, "
|
277 |
f"temperature={temperature}, top_p={top_p}, key={api_key}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
278 |
|
|
|
|
|
|
|
279 |
if provider == "openai":
|
280 |
-
self._llm = ChatOpenAI(
|
281 |
-
model=model_name,
|
282 |
-
temperature=temperature,
|
283 |
-
top_p=top_p,
|
284 |
-
max_tokens=max_tokens,
|
285 |
-
max_retries=0,
|
286 |
-
streaming=streaming,
|
287 |
-
api_key=api_key
|
288 |
-
)
|
289 |
elif provider == "google":
|
290 |
-
self._llm = ChatGoogleGenerativeAI(
|
291 |
-
model=model_name,
|
292 |
-
temperature=temperature,
|
293 |
-
top_p=top_p,
|
294 |
-
max_tokens=max_tokens,
|
295 |
-
max_retries=0,
|
296 |
-
streaming=streaming,
|
297 |
-
api_key=api_key
|
298 |
-
)
|
299 |
elif provider == "anthropic":
|
300 |
-
self._llm = ChatAnthropic(
|
301 |
-
model=model_name,
|
302 |
-
temperature=temperature,
|
303 |
-
top_p=top_p,
|
304 |
-
max_tokens=max_tokens,
|
305 |
-
max_retries=0,
|
306 |
-
streaming=streaming,
|
307 |
-
api_key=api_key
|
308 |
-
)
|
309 |
else:
|
310 |
-
self._llm = ChatXAI(
|
311 |
-
model=model_name,
|
312 |
-
temperature=temperature,
|
313 |
-
top_p=top_p,
|
314 |
-
max_tokens=max_tokens,
|
315 |
-
max_retries=0,
|
316 |
-
streaming=streaming,
|
317 |
-
api_key=api_key
|
318 |
-
)
|
319 |
|
320 |
self._current_provider = provider
|
321 |
|
|
|
275 |
api_key = self.get_next_api_key(provider)
|
276 |
print(f"Using provider={provider}, model_name={model_name}, "
|
277 |
f"temperature={temperature}, top_p={top_p}, key={api_key}")
|
278 |
+
|
279 |
+
kwargs = {
|
280 |
+
"model": model_name,
|
281 |
+
"temperature": temperature,
|
282 |
+
"top_p": top_p,
|
283 |
+
"max_retries": 0,
|
284 |
+
"streaming": streaming,
|
285 |
+
"api_key": api_key,
|
286 |
+
}
|
287 |
|
288 |
+
if max_tokens is not None:
|
289 |
+
kwargs["max_tokens"] = max_tokens
|
290 |
+
|
291 |
if provider == "openai":
|
292 |
+
self._llm = ChatOpenAI(**kwargs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
293 |
elif provider == "google":
|
294 |
+
self._llm = ChatGoogleGenerativeAI(**kwargs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
295 |
elif provider == "anthropic":
|
296 |
+
self._llm = ChatAnthropic(**kwargs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
297 |
else:
|
298 |
+
self._llm = ChatXAI(**kwargs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
299 |
|
300 |
self._current_provider = provider
|
301 |
|