Sanchit Verma committed
Commit · 1b460ab
1 Parent(s): 37ececc

Add environment configuration, update README, and implement core functionality
- Create .env file for API keys and model configurations
- Update .gitignore to exclude .env file
- Revise README for clarity and add local run instructions
- Implement OpenAI and Ollama query functions in utils.py
- Establish response generation logic based on selected model
- .env +4 -0
- .gitignore +1 -0
- README.md +14 -16
- app.py +1 -22
- pyproject.toml +6 -1
- utils.py +44 -0
.env
CHANGED
```diff
@@ -0,0 +1,4 @@
+OPENAI_API_KEY=sk-xxxxx
+OPENAI_MODEL=gpt-4o
+USE_OLLAMA=false
+OLLAMA_MODEL=llama3
```
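utils.py (below) imports these settings from a `config` module that is not part of this commit. A minimal sketch of what that loader could look like, assuming python-dotenv-style loading; the filename config.py is inferred from the import in utils.py, and the defaults are assumptions:

```python
# config.py (hypothetical; not in this commit): loads the values from .env above
import os

from dotenv import load_dotenv

load_dotenv()  # read .env from the project root into the process environment

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
OPENAI_MODEL = os.getenv("OPENAI_MODEL", "gpt-4o")
USE_OLLAMA = os.getenv("USE_OLLAMA", "false").lower() == "true"  # env values are strings
OLLAMA_MODEL = os.getenv("OLLAMA_MODEL", "llama3")
```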
.gitignore
CHANGED
```diff
@@ -19,6 +19,7 @@ dist/
 downloads/
 eggs/
 .eggs/
+.env
 lib/
 lib64/
 parts/
```
README.md
CHANGED
````diff
@@ -1,22 +1,20 @@
 # 🤖 LLMates – Chat with Custom AI Personas
 
-LLMates is a …
-It …
+LLMates is a minimal, modular chatbot app where you can switch between assistant personas powered by LLMs.
+It supports OpenAI (e.g. GPT-4o), or free alternatives like **Ollama** (LLaMA3, Mistral) or Hugging Face.
 
-## …
-- …
-- …
-- …
-- …
+## 💡 Personas
+- Python Tutor
+- Regex Helper
+- Motivational Coach
+- Startup Advisor
 
-## …
+## ⚙️ Stack
 - Gradio UI
-- OpenAI …
-- Python …
+- OpenAI / Ollama / HF model backend
+- Modular Python
 
-## …
-…
-…
-…
----
+## 🧪 Run Locally
+```bash
+pip install -r requirements.txt
+python app.py
````
app.py
CHANGED
```diff
@@ -1,23 +1,2 @@
-# …
+# This is the file for the gradio app
 
-"""
-llmates.py
-This module contains the main entry point for the llmates program.
-It initializes the application and displays a welcome message.
-"""
-
-
-def main():
-    """
-    The main entry point for the llmates program.
-    This function initializes the llmates application and displays a welcome message.
-    Returns:
-        None
-    """
-
-    print("Hello from llmates!")
-
-
-if __name__ == "__main__":
-    X = "Hello from the llama!"
-    main()
```
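The new app.py is only a placeholder comment; the Gradio UI itself is not in this commit. A minimal sketch of how such an app might wire up generate_response from utils.py; the persona names come from the README, while the system prompts and layout are assumptions:

```python
# Hypothetical app.py sketch (the actual UI is not part of this commit)
import gradio as gr

from utils import generate_response

# Persona names from the README; these system prompts are placeholders
PERSONAS = {
    "Python Tutor": "You are a patient Python tutor.",
    "Regex Helper": "You help users write and debug regular expressions.",
    "Motivational Coach": "You are an upbeat motivational coach.",
    "Startup Advisor": "You give pragmatic, experience-based startup advice.",
}

def respond(user_input, history, persona_name):
    # generate_response returns (history, history): one copy refreshes the
    # Chatbot display, the other is stored back into the session state
    return generate_response(PERSONAS[persona_name], user_input, history or [])

with gr.Blocks() as demo:
    persona = gr.Dropdown(list(PERSONAS), value="Python Tutor", label="Persona")
    chatbot = gr.Chatbot()
    state = gr.State([])
    box = gr.Textbox(placeholder="Ask something...")
    box.submit(respond, inputs=[box, state, persona], outputs=[chatbot, state])

if __name__ == "__main__":
    demo.launch()
```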
pyproject.toml
CHANGED
```diff
@@ -4,4 +4,9 @@ version = "0.1.0"
 description = "Add your description here"
 readme = "README.md"
 requires-python = ">=3.9"
-dependencies = []
+dependencies = [
+    "dotenv>=0.9.9",
+    "gradio>=4.44.1",
+    "openai>=1.82.0",
+    "requests>=2.32.3",
+]
```
utils.py
CHANGED
```diff
@@ -0,0 +1,44 @@
+from config import OPENAI_API_KEY, OPENAI_MODEL, USE_OLLAMA, OLLAMA_MODEL
+import requests
+from openai import OpenAI
+
+# OpenAI v1-style client (the openai>=1.x pin has no openai.ChatCompletion)
+client = OpenAI(api_key=OPENAI_API_KEY)
+
+def query_openai(messages):
+    try:
+        response = client.chat.completions.create(
+            model=OPENAI_MODEL,
+            messages=messages
+        )
+        return response.choices[0].message.content
+    except Exception as e:
+        return f"⚠️ OpenAI Error: {e}"
+
+def query_ollama(prompt):
+    try:
+        res = requests.post(
+            "http://localhost:11434/api/generate",
+            json={"model": OLLAMA_MODEL, "prompt": prompt, "stream": False}
+        )
+        return res.json()["response"]
+    except Exception as e:
+        return f"⚠️ Ollama Error: {e}"
+
+def generate_response(persona, user_input, history):
+    if USE_OLLAMA:
+        full_prompt = f"{persona}\n\n"
+        for u, b in history:
+            full_prompt += f"User: {u}\nBot: {b}\n"
+        full_prompt += f"User: {user_input}\nBot:"
+        reply = query_ollama(full_prompt)
+    else:
+        messages = [{"role": "system", "content": persona}]
+        for u, b in history:
+            messages.append({"role": "user", "content": u})
+            messages.append({"role": "assistant", "content": b})
+        messages.append({"role": "user", "content": user_input})
+        reply = query_openai(messages)
+
+    history.append((user_input, reply))
+    return history, history
```