import gradio as gr
from huggingface_hub import InferenceClient
import os
import cohere

""" | |
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference | |
""" | |
client = InferenceClient("meta-llama/Llama-3.2-3B-Instruct")
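# Note: meta-llama/Llama-3.2-3B-Instruct is a gated model on the Hub, so the
# Inference API may require an access token. A minimal sketch, assuming the
# token is exposed via an HF_TOKEN environment variable (an assumption, not
# part of the original Space):
# client = InferenceClient("meta-llama/Llama-3.2-3B-Instruct", token=os.getenv("HF_TOKEN"))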
# Optional Cohere backend; cohere.Client may raise at startup if
# COHERE_API_KEY is not set in the Space secrets.
COHERE_API_KEY = os.getenv("COHERE_API_KEY")
client_cohere = cohere.Client(COHERE_API_KEY)
COHERE_MODEL = "command-r-plus"

def respond(
    message,
    history: list[tuple[str, str]],
):
    # Toggle between the Cohere API and the Hugging Face InferenceClient.
    use_cohere_api = False
    system_message = "You are a friendly Chatbot."

    # Rebuild the full conversation: system prompt, prior turns, new message.
    messages = [{"role": "system", "content": system_message}]
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})
    messages.append({"role": "user", "content": message})

    response = ""
    if use_cohere_api:
        # The Cohere branch sends only the latest user message; prior turns
        # and the system prompt are not forwarded here.
        cohere_response = client_cohere.chat(
            message=message,
            model=COHERE_MODEL,
            max_tokens=512,
        )
        response = cohere_response.text
        yield response
    else:
        # Stream tokens from the InferenceClient, yielding the growing reply
        # so the Gradio UI updates incrementally. `chunk` avoids shadowing
        # the `message` argument.
        for chunk in client.chat_completion(
            messages,
            max_tokens=512,
            stream=True,
        ):
            token = chunk.choices[0].delta.content
            if token:  # the final stream chunk may carry no content
                response += token
            yield response
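
# A minimal local smoke test (a sketch, not part of the original Space):
# `respond` is a plain generator, so it can be exercised without the UI.
# for partial in respond("Hello!", []):
#     print(partial)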
""" | |
For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface | |
""" | |
demo = gr.ChatInterface(
    respond,
)
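# A hedged customization sketch: `title`, `description`, and `examples` are
# standard gr.ChatInterface keyword arguments, but these values are
# illustrative placeholders, not from the original Space.
# demo = gr.ChatInterface(
#     respond,
#     title="Llama 3.2 Chatbot",
#     description="A friendly chatbot backed by meta-llama/Llama-3.2-3B-Instruct.",
#     examples=["Hello!", "Tell me a joke."],
# )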

if __name__ == "__main__":
    demo.launch()