import spaces
import subprocess
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent.chat_history import BasicChatHistory
from llama_cpp_agent.chat_history.messages import Roles
import gradio as gr
from huggingface_hub import hf_hub_download
import os
import cv2

huggingface_token = os.environ.get('HF_TOKEN')
# Download the GGUF model files offered in the model dropdown
hf_hub_download(
    repo_id="bartowski/Meta-Llama-3.1-8B-Instruct-GGUF",
    filename="Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf",
    local_dir="./models",
    token=huggingface_token
)
hf_hub_download(
    repo_id="bartowski/Mistral-Nemo-Instruct-2407-GGUF",
    filename="Mistral-Nemo-Instruct-2407-Q5_K_M.gguf",
    local_dir="./models",
    token=huggingface_token
)
hf_hub_download(
    repo_id="bartowski/gemma-2-2b-it-GGUF",
    filename="gemma-2-2b-it-Q6_K_L.gguf",
    local_dir="./models",
    token=huggingface_token
)
hf_hub_download(
    repo_id="bartowski/openchat-3.6-8b-20240522-GGUF",
    filename="openchat-3.6-8b-20240522-Q6_K.gguf",
    local_dir="./models",
    token=huggingface_token
)
hf_hub_download(
    repo_id="bartowski/Llama-3-Groq-8B-Tool-Use-GGUF",
    filename="Llama-3-Groq-8B-Tool-Use-Q6_K.gguf",
    local_dir="./models",
    token=huggingface_token
)
hf_hub_download(
    repo_id="bartowski/MiniCPM-V-2_6-GGUF",
    filename="MiniCPM-V-2_6-Q6_K.gguf",
    local_dir="./models",
    token=huggingface_token
)
hf_hub_download(
    repo_id="CaioXapelaum/Llama-3.1-Storm-8B-Q5_K_M-GGUF",
    filename="llama-3.1-storm-8b-q5_k_m.gguf",
    local_dir="./models",
    token=huggingface_token
)
hf_hub_download(
    repo_id="CaioXapelaum/Orca-2-7b-Patent-Instruct-Llama-2-Q5_K_M-GGUF",
    filename="orca-2-7b-patent-instruct-llama-2-q5_k_m.gguf",
    local_dir="./models",
    token=huggingface_token
)
hf_hub_download(
    repo_id="HuggingFaceTB/smollm-135M-instruct-v0.2-Q8_0-GGUF",
    filename="smollm-135m-instruct-add-basics-q8_0.gguf",
    local_dir="./models",
    token=huggingface_token
)
hf_hub_download(
    repo_id="bartowski/Llama-3.2-1B-Instruct-GGUF",
    filename="Llama-3.2-1B-Instruct-Q8_0.gguf",
    local_dir="./models",
    token=huggingface_token
)
hf_hub_download(
    repo_id="bartowski/Mistral-Small-Instruct-2409-GGUF",
    filename="Mistral-Small-Instruct-2409-Q5_K_M.gguf",
    local_dir="./models",
    token=huggingface_token
)
hf_hub_download(
    repo_id="bartowski/Llama-3.1-SuperNova-Lite-GGUF",
    filename="Llama-3.1-SuperNova-Lite-Q5_K_M.gguf",
    local_dir="./models",
    token=huggingface_token
)
hf_hub_download(
    repo_id="TheBloke/MixtralOrochi8x7B-GGUF",
    filename="mixtralorochi8x7b.Q4_K_M.gguf",
    local_dir="./models",
    token=huggingface_token
)
hf_hub_download(
    repo_id="bartowski/Phi-3.5-mini-instruct-GGUF",
    filename="Phi-3.5-mini-instruct-Q6_K.gguf",
    local_dir="./models",
    token=huggingface_token
)
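
# The repeated hf_hub_download calls above could also be driven by a small table
# of (repo_id, filename) pairs. A minimal, unused sketch; the helper name and the
# example pair below are illustrative and not part of the original app:
def download_models(model_specs, local_dir="./models", token=huggingface_token):
    """Download each (repo_id, filename) pair from model_specs into local_dir."""
    for repo_id, filename in model_specs:
        hf_hub_download(repo_id=repo_id, filename=filename, local_dir=local_dir, token=token)
# Example: download_models([("bartowski/gemma-2-2b-it-GGUF", "gemma-2-2b-it-Q6_K_L.gguf")])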
# Cache the loaded model and its filename so the model is only reloaded when the
# user picks a different one
llm = None
llm_model = None

# Limit OpenCV to a single thread (likely to avoid CPU contention with llama.cpp)
cv2.setNumThreads(1)
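
# Note (assumption): `spaces` above is the Hugging Face ZeroGPU helper package; on
# ZeroGPU hardware the GPU-bound inference function is typically decorated with
# @spaces.GPU, which does not appear in this snippet.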
def respond(
    message,
    history: list[tuple[str, str]],
    model,
    system_message,
    max_tokens,
    temperature,
    top_p,
    top_k,
    repeat_penalty,
):
    # The message formatter is fixed to the Gemma-2 template for every model
    chat_template = MessagesFormatterType.GEMMA_2

    global llm
    global llm_model
    # Load model only if it's not already loaded or if a new model is selected
    if llm is None or llm_model != model:
        try:
            llm = Llama(
                model_path=f"models/{model}",
                flash_attn=True,
                n_gpu_layers=81,  # Adjust based on available GPU resources
                n_batch=1024,
                n_ctx=8192,
            )
            llm_model = model
        except Exception as e:
            # respond is a generator, so the error must be yielded to reach the UI
            yield f"Error loading model: {str(e)}"
            return
    provider = LlamaCppPythonProvider(llm)

    agent = LlamaCppAgent(
        provider,
        system_prompt=f"{system_message}",
        predefined_messages_formatter_type=chat_template,
        debug_output=True
    )

    settings = provider.get_provider_default_settings()
    settings.temperature = temperature
    settings.top_k = top_k
    settings.top_p = top_p
    settings.max_tokens = max_tokens
    settings.repeat_penalty = repeat_penalty
    settings.stream = True
    messages = BasicChatHistory()
    # Replay prior user/assistant turns into the agent's chat history
    for msg in history:
        user = {'role': Roles.user, 'content': msg[0]}
        assistant = {'role': Roles.assistant, 'content': msg[1]}
        messages.add_message(user)
        messages.add_message(assistant)
    # Stream the response
    try:
        stream = agent.get_chat_response(
            message,
            llm_sampling_settings=settings,
            chat_history=messages,
            returns_streaming_generator=True,
            print_output=False
        )

        outputs = ""
        for output in stream:
            outputs += output
            yield outputs
    except Exception as e:
        yield f"Error during response generation: {str(e)}"
demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Dropdown(
            [
                'Meta-Llama-3.1-8B-Instruct-Q5_K_M.gguf',
                'Mistral-Nemo-Instruct-2407-Q5_K_M.gguf',
                'gemma-2-2b-it-Q6_K_L.gguf',
                'openchat-3.6-8b-20240522-Q6_K.gguf',
                'Llama-3-Groq-8B-Tool-Use-Q6_K.gguf',
                'MiniCPM-V-2_6-Q6_K.gguf',
                'llama-3.1-storm-8b-q5_k_m.gguf',
                'orca-2-7b-patent-instruct-llama-2-q5_k_m.gguf',
                'smollm-135m-instruct-add-basics-q8_0.gguf',
                'Llama-3.2-1B-Instruct-Q8_0.gguf',
                'Mistral-Small-Instruct-2409-Q5_K_M.gguf',
                'Llama-3.1-SuperNova-Lite-Q5_K_M.gguf',
                'mixtralorochi8x7b.Q4_K_M.gguf',
                'Phi-3.5-mini-instruct-Q6_K.gguf'
            ],
            value="gemma-2-2b-it-Q6_K_L.gguf",
            label="Model"
        ),
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=4096, value=2048, step=1, label="Max tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p",
        ),
        gr.Slider(
            minimum=0,
            maximum=100,
            value=40,
            step=1,
            label="Top-k",
        ),
        gr.Slider(
            minimum=0.0,
            maximum=2.0,
            value=1.1,
            step=0.1,
            label="Repetition penalty",
        ),
    ],
    retry_btn="Retry",
    undo_btn="Undo",
    clear_btn="Clear",
    submit_btn="Send",
    title="Chat with lots of Models and LLMs using llama.cpp",
    chatbot=gr.Chatbot(
        scale=1,
        likeable=False,
        show_copy_button=True
    )
)
if __name__ == "__main__":
    demo.launch()
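
# Optional tweak, not part of the original app: if streamed replies stall under
# concurrent users, Gradio's request queue can be enabled before launching, e.g.
#     demo.queue().launch()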