Spaces: Build error
Upload folder using huggingface_hub
- .github/workflows/update_space.yml +28 -0
- .gitignore +23 -0
- README.md +2 -8
- __pycache__/api_client.cpython-311.pyc +0 -0
- __pycache__/chat_state.cpython-311.pyc +0 -0
- __pycache__/config.cpython-311.pyc +0 -0
- __pycache__/estimatetokens.cpython-311.pyc +0 -0
- __pycache__/ollama_api.cpython-311.pyc +0 -0
- __pycache__/prompt_library.cpython-311.pyc +0 -0
- __pycache__/ui.cpython-311.pyc +0 -0
- __pycache__/utils.cpython-311.pyc +0 -0
- app.py +189 -0
- chat_history.json +1 -0
- chat_state.py +10 -0
- config.py +4 -0
- constraints.txt +84 -0
- requirements.txt +5 -0
.github/workflows/update_space.yml
ADDED
@@ -0,0 +1,28 @@
+name: Run Python script
+
+on:
+  push:
+    branches:
+      - main
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    steps:
+    - name: Checkout
+      uses: actions/checkout@v2
+
+    - name: Set up Python
+      uses: actions/setup-python@v2
+      with:
+        python-version: '3.9'
+
+    - name: Install Gradio
+      run: python -m pip install gradio
+
+    - name: Log in to Hugging Face
+      run: python -c 'import huggingface_hub; huggingface_hub.login(token="${{ secrets.hf_token }}")'
+
+    - name: Deploy to Spaces
+      run: gradio deploy
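The workflow authenticates with huggingface_hub.login using the hf_token repository secret, then hands deployment to the gradio CLI. A minimal local sketch of the same two steps, assuming a write token is available in an HF_TOKEN environment variable:

import os
import subprocess
import huggingface_hub

# Log in the same way the workflow step does, reading the token locally
huggingface_hub.login(token=os.environ["HF_TOKEN"])
# `gradio deploy` is a CLI command; from Python it can be invoked via subprocess
subprocess.run(["gradio", "deploy"], check=True)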
.gitignore
ADDED
@@ -0,0 +1,23 @@
+### AL ###
+#Template for AL projects for Dynamics 365 Business Central
+#launch.json folder
+.vscode/
+#Cache folder
+.alcache/
+#Symbols folder
+.alpackages/
+#Snapshots folder
+.snapshots/
+#Testing Output folder
+.output/
+#Extension App-file
+*.app
+#Rapid Application Development File
+rad.json
+#Translation Base-file
+*.g.xlf
+#License-file
+*.flf
+#Test results file
+TestResults.xml
+.venv
README.md
CHANGED
@@ -1,12 +1,6 @@
 ---
-title:
-
-colorFrom: green
-colorTo: gray
+title: Ollama_test
+app_file: app.py
 sdk: gradio
 sdk_version: 4.44.0
-app_file: app.py
-pinned: false
 ---
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
__pycache__/api_client.cpython-311.pyc
ADDED
Binary file (4.44 kB).

__pycache__/chat_state.cpython-311.pyc
ADDED
Binary file (834 Bytes).

__pycache__/config.cpython-311.pyc
ADDED
Binary file (297 Bytes).

__pycache__/estimatetokens.cpython-311.pyc
ADDED
Binary file (559 Bytes).

__pycache__/ollama_api.cpython-311.pyc
ADDED
Binary file (5.07 kB).

__pycache__/prompt_library.cpython-311.pyc
ADDED
Binary file (640 Bytes).

__pycache__/ui.cpython-311.pyc
ADDED
Binary file (7.22 kB).

__pycache__/utils.cpython-311.pyc
ADDED
Binary file (2.83 kB).
app.py
ADDED
@@ -0,0 +1,189 @@
+import gradio as gr
+import httpx
+import json
+import asyncio
+import os
+from chat_state import chat_state
+from config import OLLAMA_URL, DEFAULT_TEMPERATURE, DEFAULT_SYSTEM_MESSAGE
+
+theme = gr.themes.Soft(
+    primary_hue="yellow",
+    neutral_hue="neutral",
+    text_size="md",
+    spacing_size="md",
+    radius_size="md",
+    font=[gr.themes.GoogleFont('Montserrat'), gr.themes.GoogleFont('ui-sans-serif'), 'system-ui', 'sans-serif'],
+)
+
+
+async def fetch_available_models():
+    async with httpx.AsyncClient() as client:
+        try:
+            response = await client.get(f"{OLLAMA_URL}/api/tags")
+            response.raise_for_status()
+            data = response.json()
+            return [model["name"] for model in data.get("models", [])]
+        except httpx.HTTPError as e:  # covers connection errors as well as bad statuses
+            print(f"Error fetching models: {e}")
+            return []
+
+async def get_model_info(model_name):
+    async with httpx.AsyncClient() as client:
+        try:
+            response = await client.post(f"{OLLAMA_URL}/api/show", json={"name": model_name})
+            response.raise_for_status()
+            return response.json()
+        except httpx.HTTPError as e:
+            print(f"Error fetching model info: {e}")
+            return {}
+
+async def call_ollama_api(prompt, history):
+    messages = [{"role": "system", "content": chat_state.system_message}]
+    for user_msg, assistant_msg in history:
+        if user_msg:
+            messages.append({"role": "user", "content": user_msg})
+        if assistant_msg:
+            messages.append({"role": "assistant", "content": assistant_msg})
+    messages.append({"role": "user", "content": prompt})
+
+    payload = {
+        "model": chat_state.model,
+        "messages": messages,
+        "stream": True,
+        "temperature": chat_state.temperature
+    }
+
+    async with httpx.AsyncClient() as client:
+        try:
+            async with client.stream("POST", f"{OLLAMA_URL}/api/chat", json=payload, timeout=30.0) as response:
+                response.raise_for_status()
+                full_response = ""
+                async for line in response.aiter_lines():
+                    if line:
+                        json_line = json.loads(line)
+                        message_content = json_line.get('message', {}).get('content', '')
+                        if message_content:
+                            full_response += message_content
+                            yield full_response
+                        if json_line.get('done'):
+                            break
+        except httpx.TimeoutException:  # httpx raises its own timeout type, not asyncio.TimeoutError
+            yield "The request timed out. Please try again."
+        except httpx.HTTPError as e:
+            yield f"An error occurred: {e}"
+
+async def user(user_message, history):
+    return "", history + [[user_message, None]]
+
+async def bot(history):
+    user_message = history[-1][0]
+    bot_message_generator = call_ollama_api(user_message, history[:-1])
+    async for message_content in bot_message_generator:
+        history[-1][1] = message_content
+        yield history
+
+def clear_chat():
+    return None
+
+def save_chat_history(history, filename="chat_history.json"):
+    with open(filename, "w") as f:
+        json.dump(history, f)
+    return f"Chat history saved to {filename}"
+
+def load_chat_history(filename="chat_history.json"):
+    try:
+        with open(filename, "r") as f:
+            return json.load(f)
+    except FileNotFoundError:
+        return None
+
+async def change_model(model_name):
+    chat_state.model = model_name
+    model_info = await get_model_info(model_name)
+    info_text = f"Model: {model_name}\n"
+    info_text += f"Parameter Size: {model_info.get('details', {}).get('parameter_size', 'Unknown')}\n"
+    info_text += f"Quantization: {model_info.get('details', {}).get('quantization_level', 'Unknown')}\n"
+    info_text += f"Format: {model_info.get('details', {}).get('format', 'Unknown')}"
+    return f"Model changed to {chat_state.model}", info_text
+
+def update_temperature(new_temp):
+    chat_state.temperature = float(new_temp)
+    return f"Temperature set to {chat_state.temperature}"
+
+def update_system_message(new_message):
+    chat_state.system_message = new_message
+    return f"System message updated: {chat_state.system_message}"
+
+async def initialize_interface():
+    chat_state.available_models = await fetch_available_models()
+
+    with gr.Blocks(theme=theme) as demo:
+        gr.Markdown("# 🤖 Enhanced Ollama Chatbot Interface")
+
+        with gr.Row():
+            with gr.Column(scale=7):
+                chatbot = gr.Chatbot(height=600, elem_id="chatbot")
+                with gr.Row():
+                    msg = gr.Textbox(
+                        label="Message",
+                        placeholder="Type your message here...",
+                        scale=4,
+                        elem_id="user-input"
+                    )
+                    send = gr.Button("Send", scale=1, elem_id="send-btn")
+
+            with gr.Column(scale=3):
+                with gr.Accordion("Model Settings", open=True):
+                    model_dropdown = gr.Dropdown(
+                        choices=chat_state.available_models,
+                        label="Select Model",
+                        value=chat_state.available_models[0] if chat_state.available_models else None,
+                        elem_id="model-dropdown"
+                    )
+                    model_info = gr.Textbox(label="Model Information", interactive=False, lines=4)
+                    temp_slider = gr.Slider(
+                        minimum=0, maximum=1, value=DEFAULT_TEMPERATURE, step=0.1,
+                        label="Temperature",
+                        elem_id="temp-slider"
+                    )
+
+                with gr.Accordion("System Message", open=False):
+                    system_message_input = gr.Textbox(
+                        label="System Message",
+                        value=DEFAULT_SYSTEM_MESSAGE,
+                        lines=3,
+                        elem_id="system-message"
+                    )
+                    update_system_button = gr.Button("Update System Message", elem_id="update-system-btn")
+
+                with gr.Accordion("Chat Management", open=False):
+                    with gr.Row():
+                        clear = gr.Button("Clear Chat", elem_id="clear-btn")
+                        save_button = gr.Button("Save Chat", elem_id="save-btn")
+                        load_button = gr.Button("Load Chat", elem_id="load-btn")
+
+                status_box = gr.Textbox(label="Status", interactive=False, elem_id="status-box")
+
+        # Event handlers
+        send_event = msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+            bot, chatbot, chatbot
+        )
+        send.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
+            bot, chatbot, chatbot
+        )
+        clear.click(clear_chat, outputs=[chatbot])
+        model_dropdown.change(change_model, inputs=[model_dropdown], outputs=[status_box, model_info])
+        temp_slider.change(update_temperature, inputs=[temp_slider], outputs=[status_box])
+        update_system_button.click(update_system_message, inputs=[system_message_input], outputs=[status_box])
+        save_button.click(save_chat_history, inputs=[chatbot], outputs=[status_box])
+        load_button.click(load_chat_history, outputs=[chatbot])
+
+        # Initialize the first model
+        if chat_state.available_models:
+            chat_state.model = chat_state.available_models[0]
+
+    return demo
+
+if __name__ == "__main__":
+    demo = asyncio.run(initialize_interface())
+    demo.launch(share=True)
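call_ollama_api relies on Ollama's streaming chat endpoint, which returns one JSON object per line (NDJSON) until a final object with "done": true. A stripped-down sketch of that protocol, assuming a local Ollama server; the model name "llama3" is an illustrative placeholder:

import asyncio
import json
import httpx

async def demo():
    payload = {
        "model": "llama3",  # placeholder; use any model pulled into Ollama
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": True,
    }
    async with httpx.AsyncClient() as client:
        async with client.stream("POST", "http://localhost:11434/api/chat",
                                 json=payload, timeout=30.0) as response:
            response.raise_for_status()
            # Each line is a JSON object carrying a partial assistant message
            async for line in response.aiter_lines():
                if line:
                    chunk = json.loads(line)
                    print(chunk.get("message", {}).get("content", ""), end="")
                    if chunk.get("done"):
                        break

asyncio.run(demo())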
chat_history.json
ADDED
@@ -0,0 +1 @@
+[["F", "I'm sorry, but I cannot assist you with that. Could you please provide more context or clarify your question?"]]
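The saved history is a JSON list of [user, assistant] pairs, the same structure gr.Chatbot holds, so save_chat_history and load_chat_history round-trip it directly. A minimal sketch:

import json

history = [["Hi", "Hello! How can I help?"]]
with open("chat_history.json", "w") as f:
    json.dump(history, f)
with open("chat_history.json") as f:
    assert json.load(f) == history  # pairs survive the round trip unchanged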
chat_state.py
ADDED
@@ -0,0 +1,10 @@
+from config import DEFAULT_TEMPERATURE, DEFAULT_SYSTEM_MESSAGE
+
+class ChatState:
+    def __init__(self):
+        self.model = None
+        self.temperature = DEFAULT_TEMPERATURE
+        self.system_message = DEFAULT_SYSTEM_MESSAGE
+        self.available_models = []
+
+chat_state = ChatState()
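chat_state.py creates a single module-level ChatState instance; every `from chat_state import chat_state` receives that same object, so a setting changed by one event handler is immediately visible to the API layer. A small sketch, assuming chat_state.py and config.py are importable:

from chat_state import chat_state

chat_state.temperature = 0.2              # as the slider handler does
chat_state.system_message = "Answer briefly."
print(chat_state.model)                   # None until a model is selected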
config.py
ADDED
@@ -0,0 +1,4 @@
+# Configuration and constants
+OLLAMA_URL = "http://localhost:11434"
+DEFAULT_TEMPERATURE = 0.7
+DEFAULT_SYSTEM_MESSAGE = "You are a helpful AI assistant."
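OLLAMA_URL is hard-coded to localhost, which only works where an Ollama server runs next to the app. A hypothetical variant (not part of this commit) that lets the endpoint be overridden through an environment variable:

import os

# Falls back to the committed default when OLLAMA_URL is unset
OLLAMA_URL = os.environ.get("OLLAMA_URL", "http://localhost:11434")
DEFAULT_TEMPERATURE = 0.7
DEFAULT_SYSTEM_MESSAGE = "You are a helpful AI assistant."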
constraints.txt
ADDED
@@ -0,0 +1,84 @@
+# requirements.txt
+
+aiofiles==23.2.1
+annotated-types==0.7.0
+anyio==4.6.0
+asyncio==3.4.3
+certifi==2024.8.30
+charset-normalizer==3.3.2
+click==8.1.7
+colorama==0.4.6
+contourpy==1.3.0
+cycler==0.12.1
+fastapi==0.115.0
+ffmpy==0.4.0
+filelock==3.16.1
+fonttools==4.53.1
+fsspec==2024.9.0
+gradio==4.44.0
+gradio_client==1.3.0
+grpcio==1.66.1
+grpcio-tools==1.66.1
+h11==0.14.0
+h2==4.1.0
+hpack==4.0.0
+httpcore==1.0.5
+httpx==0.27.2
+huggingface-hub==0.25.0
+hyperframe==6.0.1
+idna==3.10
+importlib_resources==6.4.5
+Jinja2==3.1.4
+joblib==1.4.2
+kiwisolver==1.4.7
+markdown-it-py==3.0.0
+MarkupSafe==2.1.5
+matplotlib==3.9.2
+mdurl==0.1.2
+mpmath==1.3.0
+networkx==3.3
+numpy==2.1.1
+orjson==3.10.7
+packaging==24.1
+pandas==2.2.3
+pillow==10.4.0
+portalocker==2.10.1
+protobuf==5.28.2
+pydantic==2.9.2
+pydantic_core==2.23.4
+pydub==0.25.1
+Pygments==2.18.0
+pyparsing==3.1.4
+python-dateutil==2.9.0.post0
+python-multipart==0.0.10
+pytz==2024.2
+pywin32==306
+PyYAML==6.0.2
+qdrant-client==1.11.2
+regex==2024.9.11
+requests==2.32.3
+rich==13.8.1
+ruff==0.6.7
+safetensors==0.4.5
+scikit-learn==1.5.2
+scipy==1.14.1
+semantic-version==2.10.0
+sentence-transformers==3.1.1
+shellingham==1.5.4
+six==1.16.0
+sniffio==1.3.1
+starlette==0.38.6
+sympy==1.13.3
+threadpoolctl==3.5.0
+tiktoken==0.7.0
+tokenizers==0.19.1
+tomlkit==0.12.0
+torch==2.4.1
+tqdm==4.66.5
+transformers==4.44.2
+typer==0.12.5
+typing_extensions==4.12.2
+tzdata==2024.1
+urllib3==2.2.3
+uvicorn==0.30.6
+websockets==12.0
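A constraints file pins versions without installing anything by itself; pip applies it alongside the requirements with `pip install -r requirements.txt -c constraints.txt`, so only the packages named in requirements.txt are installed, at the pinned versions.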
requirements.txt
ADDED
@@ -0,0 +1,5 @@
+gradio>=3.23.0
+httpx>=0.24.0
+# Note: asyncio, json, and os are standard-library modules;
+# they ship with the Python interpreter and do not need to
+# be installed with pip.