Spaces:
Running
Running
Update kig_core/llm_interface.py
Browse files- kig_core/llm_interface.py +14 -5
kig_core/llm_interface.py
CHANGED
@@ -1,10 +1,12 @@
|
|
1 |
import os
|
|
|
2 |
import time
|
|
|
|
|
3 |
from langchain_google_genai import ChatGoogleGenerativeAI
|
4 |
from langchain_openai import ChatOpenAI
|
5 |
from langchain_core.language_models.chat_models import BaseChatModel
|
6 |
from .config import settings
|
7 |
-
import logging
|
8 |
|
9 |
logger = logging.getLogger(__name__)
|
10 |
|
@@ -55,15 +57,22 @@ def get_llm(model_name: str) -> BaseChatModel:
|
|
55 |
logger.error(f"Unsupported model provider for model name: {model_name}")
|
56 |
raise ValueError(f"Model '{model_name}' is not supported or configuration is missing.")
|
57 |
|
58 |
-
def invoke_llm(var,parameters):
|
59 |
try:
|
60 |
return var.invoke(parameters)
|
61 |
except Exception as e:
|
62 |
-
|
63 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
64 |
print("Waiting is finished")
|
65 |
return var.invoke(parameters)
|
66 |
-
|
67 |
# Example usage (could be called from other modules)
|
68 |
# main_llm = get_llm(settings.main_llm_model)
|
69 |
# eval_llm = get_llm(settings.eval_llm_model)
|
|
|
1 |
import os
|
2 |
+
import re
|
3 |
import time
|
4 |
+
import logging
|
5 |
+
|
6 |
from langchain_google_genai import ChatGoogleGenerativeAI
|
7 |
from langchain_openai import ChatOpenAI
|
8 |
from langchain_core.language_models.chat_models import BaseChatModel
|
9 |
from .config import settings
|
|
|
10 |
|
11 |
logger = logging.getLogger(__name__)
|
12 |
|
|
|
57 |
logger.error(f"Unsupported model provider for model name: {model_name}")
|
58 |
raise ValueError(f"Model '{model_name}' is not supported or configuration is missing.")
|
59 |
|
60 |
+
def invoke_llm(var, parameters, *, max_retries=1, fallback_delay=60):
    """Invoke a LangChain chat model, retrying on rate-limit style errors.

    On failure, the Google API error text is scanned for a
    ``retry_delay { seconds: N }`` hint; we sleep N+1 seconds (or
    ``fallback_delay`` seconds when no hint is present) before retrying.

    Args:
        var: Object exposing ``.invoke(parameters)`` (e.g. a BaseChatModel).
        parameters: Payload forwarded unchanged to ``var.invoke``.
        max_retries: Extra attempts after the first failure (default 1,
            matching the original single-retry behavior).
        fallback_delay: Seconds to wait when no retry hint is found
            (default 60, matching the original hard-coded value).

    Returns:
        Whatever ``var.invoke(parameters)`` returns.

    Raises:
        Exception: Re-raises the last error once all retries are exhausted
            (the original version let the second failure propagate too).
    """
    for attempt in range(max_retries + 1):
        try:
            return var.invoke(parameters)
        except Exception as e:
            # Out of retries: surface the real error instead of looping forever.
            if attempt == max_retries:
                raise
            # Try to extract retry_delay seconds from the error message string
            match = re.search(r'retry_delay\s*{\s*seconds:\s*(\d+)', str(e))
            if match:
                retry_seconds = int(match.group(1)) + 1  # Add 1 second buffer
            else:
                retry_seconds = fallback_delay  # no hint in the message

            print(f"Error during .invoke : {e} \nwaiting {retry_seconds} seconds")
            time.sleep(retry_seconds)
            print("Waiting is finished")
|
75 |
+
|
76 |
# Example usage (could be called from other modules)
|
77 |
# main_llm = get_llm(settings.main_llm_model)
|
78 |
# eval_llm = get_llm(settings.eval_llm_model)
|