Spaces: Runtime error

add application files

- config.py +12 -1
- model/chat.py +11 -4
- model/llm/llm.py +28 -20
config.py
CHANGED
@@ -1,4 +1,3 @@
-
 gpt_3_5 = "gpt-3.5-turbo-instruct"
 gpt_mini = "gpt-4o-mini"
 
@@ -14,6 +13,18 @@ GILAS_CONFIG = {
     "base_url": 'https://api.gilas.io/v1',
 }
 
+GILAS_API_KEYS = [
+    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjIwMzg5OTQ0NjgsImp0aSI6IjExNDg4MzAyMTE3NDA0MzY2ODc0NiIsImlhdCI6MTcyMzYzNDQ2OCwiaXNzIjoiaHR0cHM6Ly9naWxhcy5pbyIsIm5iZiI6MTcyMzYzNDQ2OCwic3ViIjoiMTE0ODgzMDIxMTc0MDQzNjY4NzQ2In0.8hbh59BmwBcAfoH9nEB98_5BIuxzwUUb8fpHSKF1S_Q",
+    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjIwNDI1MzI3NTYsImp0aSI6IjEwNjg5OTE1MjQwNTM4MzY3Nzc2NyIsImlhdCI6MTcyNzE3Mjc1NiwiaXNzIjoiaHR0cHM6Ly9naWxhcy5pbyIsIm5iZiI6MTcyNzE3Mjc1Niwic3ViIjoiMTA2ODk5MTUyNDA1MzgzNjc3NzY3In0.Jgfi7BWhpXFTYdHe73md5p932EP75wTD-CZQ6SfGkK8",
+    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjIwNDI1MzMzNzIsImp0aSI6IjEwNjg4MTE2MzAzOTkzMTg2MjY3NiIsImlhdCI6MTcyNzE3MzM3MiwiaXNzIjoiaHR0cHM6Ly9naWxhcy5pbyIsIm5iZiI6MTcyNzE3MzM3Miwic3ViIjoiMTA2ODgxMTYzMDM5OTMxODYyNjc2In0.PhVdoRUdaCfHa4va-EtWP5o7KISCSdMjT5mWtc9cefo",
+    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjIwNDI1MzM0MDIsImp0aSI6IjExNTY3MDAwOTQyMjcyNTE3NDE1NCIsImlhdCI6MTcyNzE3MzQwMiwiaXNzIjoiaHR0cHM6Ly9naWxhcy5pbyIsIm5iZiI6MTcyNzE3MzQwMiwic3ViIjoiMTE1NjcwMDA5NDIyNzI1MTc0MTU0In0.IRcnkiZJdKNPTE1nYXoeiVMfxj9xXHSvAxBLaBGC6yk",
+    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjIwNDI1MzM1MzEsImp0aSI6IjExMzk2NzY4OTcxNjg2NjYzNDk3MCIsImlhdCI6MTcyNzE3MzUzMSwiaXNzIjoiaHR0cHM6Ly9naWxhcy5pbyIsIm5iZiI6MTcyNzE3MzUzMSwic3ViIjoiMTEzOTY3Njg5NzE2ODY2NjM0OTcwIn0.kHZZDlVnZsbnoSac0wtM3ezrPCkIBYVQSdkfbFsT_xs",
+    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjIwNDI1MzM1ODksImp0aSI6IjEwNzM3MDcyODA4NDQxMTk0MTQwOSIsImlhdCI6MTcyNzE3MzU4OSwiaXNzIjoiaHR0cHM6Ly9naWxhcy5pbyIsIm5iZiI6MTcyNzE3MzU4OSwic3ViIjoiMTA3MzcwNzI4MDg0NDExOTQxNDA5In0.4qhnj6YhunOHoAMmosibf4CaopJqSlvwxvhB6671Suw",
+    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjIwNDI1MzQ5ODEsImp0aSI6IjEwNjE2NTI5NzI5MjAxODExMzgwMCIsImlhdCI6MTcyNzE3NDk4MSwiaXNzIjoiaHR0cHM6Ly9naWxhcy5pbyIsIm5iZiI6MTcyNzE3NDk4MSwic3ViIjoiMTA2MTY1Mjk3MjkyMDE4MTEzODAwIn0.9QvgxTlDugcDwSa880B0hefhWjVfEzjTDX2ywgNORrc",
+    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjIwNDI1MzUwNTIsImp0aSI6IjExMzA3MTQ4ODA5OTA0OTQzMDI0MSIsImlhdCI6MTcyNzE3NTA1MiwiaXNzIjoiaHR0cHM6Ly9naWxhcy5pbyIsIm5iZiI6MTcyNzE3NTA1Miwic3ViIjoiMTEzMDcxNDg4MDk5MDQ5NDMwMjQxIn0.Z8TNrz_LXCtFjE0BwBLCBqh03uTKZ6WWLptQA6zdy1Y",
+    "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjIwNDI1MzUxMjUsImp0aSI6IjExMTU3MzA2NjkwODIzNjk4MjM1OSIsImlhdCI6MTcyNzE3NTEyNSwiaXNzIjoiaHR0cHM6Ly9naWxhcy5pbyIsIm5iZiI6MTcyNzE3NTEyNSwic3ViIjoiMTExNTczMDY2OTA4MjM2OTgyMzU5In0.eQIqXoSbsD19AJrQxCVh7T6tcLvCJ7TH3c8Ajso9CJU",
+]
+
 OPENAI_CONFIG = {
     "model": gpt_mini,
 }
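Note that GILAS_API_KEYS hardcodes bearer tokens directly in config.py, so they are visible to anyone browsing the Space repository. A minimal sketch of loading the same key pool from an environment variable instead; the variable name GILAS_API_KEYS and the comma-separated format are assumptions, not part of this commit:

# Hypothetical alternative to hardcoding: read a comma-separated key pool
# from the environment (e.g. a Hugging Face Space secret). The variable
# name "GILAS_API_KEYS" is an assumption, not part of this commit.
import os

GILAS_API_KEYS = [
    key.strip()
    for key in os.environ.get("GILAS_API_KEYS", "").split(",")
    if key.strip()
]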
model/chat.py
CHANGED
@@ -17,8 +17,15 @@ class Chat:
 
         info_list = self.rag_handler.get_information(message)
         prompt = self.prompt_handler.get_prompt(message, info_list)
-
-        response = self.llm.get_LLM_response(prompt=prompt)
+        llm_response = self.llm.get_LLM_response(prompt=prompt)
 
-
-
+        final_response = f"**Response**:\n{llm_response}\n\n"
+        if info_list:
+            final_response += "The following legal cases and information were retrieved and considered:\n"
+            for i, info in enumerate(info_list):
+                case_text = info['text'].replace("[end]", "")
+                final_response += f"\n**Case {i+1}:** {info['title']}\n{case_text}\n"
+
+        self.response_history.append(final_response)
+
+        return final_response
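In the old version the LLM response was computed but never stored or returned; the rewrite builds a Markdown-formatted final_response that appends each retrieved case, saves it to response_history, and returns it. A standalone sketch of the new formatting step; the llm_response and info_list values are invented sample data, not app output:

# Standalone illustration of Chat's new formatting step. The values below
# are invented sample data, not output from the application.
llm_response = "Based on the retrieved cases, the claim is likely time-barred."
info_list = [
    {"title": "Case A v. B", "text": "The court held that ... [end]"},
]

final_response = f"**Response**:\n{llm_response}\n\n"
if info_list:
    final_response += "The following legal cases and information were retrieved and considered:\n"
    for i, info in enumerate(info_list):
        # Strip the "[end]" sentinel used by the retriever before display
        case_text = info["text"].replace("[end]", "")
        final_response += f"\n**Case {i+1}:** {info['title']}\n{case_text}\n"

print(final_response)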
model/llm/llm.py
CHANGED
@@ -11,15 +11,14 @@ from config import *
 
 
 class LLM_API_Call:
-
     def __init__(self, type) -> None:
         if type == "openai":
             self.llm = OpenAI_API_Call(api_key = LLM_CONFIG[""],
                                        model = LLM_CONFIG["model"])
         elif type == "gilas":
-            self.llm = Gilas_API_Call(
+            self.llm = Gilas_API_Call(api_keys = GILAS_API_KEYS,
                 model = GILAS_CONFIG["model"],
-                base_url=GILAS_CONFIG["base_url"])
+                base_url = GILAS_CONFIG["base_url"])
         else:
             self.llm = OpenAI(
                 **LLM_CONFIG
@@ -58,20 +57,23 @@ class OpenAI_API_Call:
 
 
 class Gilas_API_Call:
-    def __init__(self, api_key, base_url, model="gpt-4o-mini"):
-        self.api_key = api_key
+    def __init__(self, api_keys, base_url, model="gpt-4o-mini"):
+        self.api_keys = api_keys
         self.base_url = base_url
         self.model = model
         self.headers = {
-            "Authorization": f"Bearer {self.api_key}",
             "Content-Type": "application/json"
         }
         self.conversation = []
+        self.retry_wait_time = 30
+
 
     def add_message(self, role, content):
         self.conversation.append({"role": role, "content": content})
 
-    def get_response(self):
+    def get_response(self, api_key):
+        self.headers["Authorization"] = f"Bearer {api_key}"
+
         data = {
             "model": self.model,
             "messages": self.conversation
@@ -83,9 +85,6 @@ class Gilas_API_Call:
             json=data
         )
 
-        # print(f"Response status code: {response.status_code}")
-        # print(f"Response content: {response.text}")
-
         if response.status_code == 200:
             try:
                 return response.json()['choices'][0]['message']['content']
@@ -94,15 +93,24 @@ class Gilas_API_Call:
         else:
             raise Exception(f"Gilas API call failed: {response.status_code} - {response.text}")
 
-    def invoke(self, user_input):
+    def invoke(self, user_input, max_retries=3):
         self.add_message("user", user_input)
 
-
-
-
-
-
-
-
-
-
+        retries = 0
+        while retries < max_retries:
+            for i, api_key in enumerate(self.api_keys):
+                try:
+                    response = self.get_response(api_key)
+                    self.add_message("assistant", response)
+                    return response
+                except (JSONDecodeError, Exception) as e:
+                    print(f"Error encountered with API key {api_key}: {e}. Trying next key...")
+                    # Sleep before trying next key
+                    if i == len(self.api_keys) - 1:
+                        print(f"All keys failed. Retrying oldest key after {self.retry_wait_time} seconds...")
+                        time.sleep(self.retry_wait_time)
+                        self.retry_wait_time += 30  # Increase wait time for next retry
+
+            retries += 1
+
+        raise Exception(f"Failed to get a valid response after {max_retries} retries.")
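The new invoke rotates through the key pool with linear backoff: each pass tries every key in order, and when the last key of a pass fails it sleeps retry_wait_time seconds (growing by 30 per pass) before retrying the whole pool, giving up after max_retries passes. Two details worth noting: except (JSONDecodeError, Exception) behaves identically to except Exception, since Exception already subsumes JSONDecodeError, and the code assumes time (and JSONDecodeError) are imported elsewhere in the module. A self-contained sketch of the same rotation-plus-backoff pattern; call_with_key_rotation and fetch are illustrative names, not part of the commit:

# Sketch of key rotation with linear backoff, mirroring Gilas_API_Call.invoke.
# `fetch` stands in for get_response: it takes one API key and either
# returns a response string or raises an exception.
import time

def call_with_key_rotation(api_keys, fetch, max_retries=3, base_wait=30):
    wait = base_wait
    for attempt in range(max_retries):
        for key in api_keys:
            try:
                return fetch(key)
            except Exception as exc:  # Exception already covers JSONDecodeError
                print(f"Key {key[:8]}... failed: {exc!r}; trying next key")
        print(f"All keys failed on pass {attempt + 1}; sleeping {wait}s")
        time.sleep(wait)
        wait += base_wait  # linear backoff, mirroring retry_wait_time += 30
    raise RuntimeError(f"No key succeeded after {max_retries} passes")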