Update app.py
app.py CHANGED
@@ -105,45 +105,55 @@ Reference json:
 }
 """
 
-def chat_groq(user_message, model='llama-3.3-70b-versatile', system_prompt="You are a helpful assistant."):
-    client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
-
-    response = client.chat.completions.create(
-        model=model,
-        messages=[
-            {
-                "role": "system",
-                "content": system_prompt
-            },
-            {
-                "role": "user",
-                "content": user_message
-            }
-        ],
-        stream=False,
-    )
-
-    return response.choices[0].message.content
-
-def ask_ollama(user_message, model='llama-3.3-70b-versatile', system_prompt=search_prompt):
-    client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
-
-    response = client.chat.completions.create(
-        model=model,
-        messages=[
-            {
-                "role": "system",
-                "content": system_prompt
-            },
-            {
-                "role": "user",
-                "content": user_message
-            }
-        ],
-        stream=False,
-    )
-
-    ai_reply = response.choices[0].message.content
+# def chat_groq(user_message, model='llama-3.3-70b-versatile', system_prompt="You are a helpful assistant."):
+#     client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
+
+#     response = client.chat.completions.create(
+#         model=model,
+#         messages=[
+#             {
+#                 "role": "system",
+#                 "content": system_prompt
+#             },
+#             {
+#                 "role": "user",
+#                 "content": user_message
+#             }
+#         ],
+#         stream=False,
+#     )
+
+#     return response.choices[0].message.content
+
+def chat_groq(user_message, model='llama-3.3-70b-versatile', system_prompt="You are a helpful assistant."):
+    payload = {
+        "models": [model],
+        "messages": [
+            {
+                "role": "system",
+                "content": system_prompt
+            },
+            {
+                "role": "user",
+                "content": user_message
+            }
+        ]
+    }
+
+    response = requests.post("https://organizedprogrammers-bettergroqinterface.hf.space/chat", json=payload)
+
+    if response.status_code == 200:
+        return response.json()
+    else:
+        raise Exception(f"API request failed with status {response.status_code}: {response.text}")
+
+    return response['content'][0]['message']['content']
+
+def ask_llm(user_message, model='llama-3.3-70b-versatile', system_prompt="You are a helpful assistant."):
+    return chat_groq(user_message, model=model, system_prompt=system_prompt)
+
+def ask_ollama(user_message, model='llama-3.3-70b-versatile', system_prompt=search_prompt):
+    ai_reply = chat_groq(user_message, model=model, system_prompt=system_prompt)
     print(f"AI REPLY json:\n{ai_reply}")
 
     # Process the response to ensure we return valid JSON
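A note on the new chat_groq: as committed, the success branch returns response.json(), which makes the final return response['content'][0]['message']['content'] unreachable (that line would also fail on its own, since a requests.Response object is not subscriptable). The payload key "models": [model] may likewise be a typo for "model": model, depending on what the space's /chat endpoint expects. Below is a minimal sketch of what the function appears to intend; the reply shape {"content": [{"message": {"content": ...}}]} is an assumption lifted from the unreachable return statement, not a documented schema.

import requests

CHAT_URL = "https://organizedprogrammers-bettergroqinterface.hf.space/chat"

def chat_groq(user_message, model='llama-3.3-70b-versatile',
              system_prompt="You are a helpful assistant."):
    # Same payload shape as the commit; whether the endpoint wants
    # "models" (a list) or "model" (a string) is unverified.
    payload = {
        "models": [model],
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_message},
        ],
    }

    response = requests.post(CHAT_URL, json=payload, timeout=60)
    if response.status_code != 200:
        raise Exception(f"API request failed with status {response.status_code}: {response.text}")

    data = response.json()
    # Hypothetical reply schema, taken from the commit's unreachable
    # return statement; confirm against the endpoint before relying on it.
    return data['content'][0]['message']['content']

Returning the extracted string rather than the whole JSON body would also keep ask_ollama consistent: its ai_reply is printed and then post-processed as the model's JSON reply, not as the HTTP envelope.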