Update helpers/ai_client.py
helpers/ai_client.py (CHANGED: +37, -31)
This change swaps the hand-rolled requests client for the OpenAI SDK configured for OpenRouter. In the old version the module imported requests, __init__ read the API key from the environment with os.getenv, and chat() took an explicit api_key: Optional[str] = None parameter after user_id. The request was built and posted manually, and a failed call was detected by status code and re-raised:

if response.status_code != 200:
    raise Exception(f"Error from LLM API: {response.status_code} - {response.text}")
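For context, here is a minimal sketch of the requests-based pattern being removed. Only the status-code check above is taken from the old code; the endpoint path, payload fields, and helper name are assumptions for illustration:

# Hypothetical sketch of the replaced requests-based call.
# Endpoint path, payload shape, and function name are assumed, not from the original file.
import requests

def chat_via_requests(base_url: str, api_key: str, prompt: str, model_id: str) -> str:
    response = requests.post(
        f"{base_url}/chat/completions",
        headers={"Authorization": f"Bearer {api_key}"},
        json={"model": model_id, "messages": [{"role": "user", "content": prompt}]},
        timeout=60,
    )
    # Same failure handling as the removed code: any non-200 response becomes an exception
    if response.status_code != 200:
        raise Exception(f"Error from LLM API: {response.status_code} - {response.text}")
    return response.json()["choices"][0]["message"]["content"]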
The updated helpers/ai_client.py:

# helpers/ai_client.py
import os
from openai import OpenAI
from typing import Optional
import logging

# Set up logging
logger = logging.getLogger(__name__)


class AIClient:
    def __init__(self):
        # Load environment variables
        self.base_url = os.getenv("LLM_API_URL", "https://openrouter.ai/api/v1")
        self.api_key = os.getenv("OPENROUTER_API_KEY")
        self.site_url = os.getenv("SITE_URL", "")  # Your site URL
        self.site_name = os.getenv("SITE_NAME", "")  # Your site name

        # Initialize OpenAI client
        self.client = OpenAI(
            base_url=self.base_url,
            api_key=self.api_key
        )

    def chat(
        self,
        prompt: str,
        system_message: str = "",
        model_id: str = "openai/gpt-4o-mini",
        conversation_id: str = "",
        user_id: str = "string"
    ) -> str:
        """
        Sends a prompt to the OpenRouter API and returns the response as text.

        Args:
            prompt (str): The user's input prompt.
            system_message (str): Optional system message for the LLM.
            model_id (str): The model to use for the completion.
            conversation_id (str): Unique ID for the conversation.
            user_id (str): Unique ID for the user.
            api_key (str): API key for authentication.

        Returns:
            str: The text response from the LLM API.

        Raises:
            Exception: If the API request fails.
        """
        try:
            messages = []

            # Add system message if provided
            if system_message:
                messages.append({"role": "system", "content": system_message})

            # Add user message
            messages.append({"role": "user", "content": prompt})

            # Create completion
            completion = self.client.chat.completions.create(
                extra_headers={
                    "HTTP-Referer": self.site_url,  # Optional
                    "X-Title": self.site_name,  # Optional
                },
                model=model_id,
                messages=messages
            )

            # Return the response text
            return completion.choices[0].message.content

        except Exception as e:
            logger.error(f"Error in chat completion: {str(e)}")
            raise Exception(f"Error from OpenRouter API: {str(e)}")
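A minimal usage sketch, assuming OPENROUTER_API_KEY is exported in the environment and that the module is importable as helpers.ai_client (the package layout is an assumption based on the file path):

# Usage sketch: OPENROUTER_API_KEY must be set; import path assumed from the file header.
from helpers.ai_client import AIClient

client = AIClient()
try:
    reply = client.chat(
        prompt="Give a one-line summary of what OpenRouter does.",
        system_message="You are a concise assistant.",
        model_id="openai/gpt-4o-mini",
    )
    print(reply)
except Exception as exc:
    # chat() wraps any API failure in a plain Exception
    print(f"Request failed: {exc}")

SITE_URL and SITE_NAME are optional; when they are unset the client falls back to empty strings for the HTTP-Referer and X-Title headers.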