# NOTE: removed a non-Python header (Hugging Face Spaces page artifacts:
# blame hashes and a line-number gutter) that had been pasted above the code.
import gradio as gr
import cohere
import os
import re
import uuid
from functools import partial
from urllib.error import HTTPError
# Cohere client configured from the environment; COHERE_API_KEY must be set
# before launch or co.chat_stream() calls below will fail to authenticate.
cohere_api_key = os.getenv("COHERE_API_KEY")
co = cohere.Client(cohere_api_key)
# Module-level defaults; the Blocks UI below replaces these with a per-session
# gr.State and a fresh conversation id at build time.
history = []
chat = []
# Conversation id passed to co.chat_stream so Cohere keeps server-side context.
cid = str(uuid.uuid4())
def trigger_example(example):
    """Stream a response for a clicked example prompt.

    Drives ``generate_response`` (a generator) to completion, yielding each
    ``(chat, history)`` update so the UI streams exactly like typed input.

    The previous version tuple-unpacked the generator object itself
    (``chat, updated_history = generate_response(example)``), which assigns
    yielded tuples to the wrong names — or raises ValueError unless the
    stream yields exactly twice — instead of consuming the stream.
    """
    chat, updated_history = [], []
    for chat, updated_history in generate_response(example):
        yield chat, updated_history
def generate_response(user_message, history=None):
    """Stream a Command-R reply for ``user_message`` via the Cohere chat API.

    Appends the user turn to ``history``, streams tokens from
    ``co.chat_stream``, and yields an updated ``(chat, history)`` pair after
    every chunk so the Gradio chatbot renders incrementally.

    Args:
        user_message: The new user prompt.
        history: Flat list alternating user/bot turns. A fresh list is
            created when ``None`` (avoids a shared mutable default).

    Yields:
        ``(chat, history)`` where ``chat`` is a list of ``(user, bot)``
        string pairs suitable for ``gr.Chatbot``.
    """
    if history is None:
        history = []
    history.append(user_message)
    stream = co.chat_stream(message=user_message, conversation_id=cid, model='command-r', connectors=[], temperature=0.3)
    output = ""
    chat = []  # initialized so an empty stream no longer hits UnboundLocalError
    for idx, response in enumerate(stream):
        # Cohere stream events expose event_type, not an HTTP status code.
        # The old branch compared against HTTPStatus.OK (never imported —
        # NameError) and its error path was an unterminated multi-line
        # f-string (SyntaxError); both are removed.
        if response.event_type == "text-generation":
            output += response.text
        if idx == 0:
            # First event opens the bot turn; the leading space keeps the
            # later .strip() from ever seeing a missing element.
            history.append(" " + output)
        else:
            history[-1] = output
        # Pair up (user, bot) turns for the chatbot widget.
        chat = [
            (history[i].strip(), history[i + 1].strip())
            for i in range(0, len(history) - 1, 2)
        ]
        yield chat, history
    return chat, history
def clear_chat():
    """Reset the conversation: mint a fresh Cohere conversation id and clear the UI.

    Returns:
        ``([], [])`` — empty values for the chatbot display and the history state.
    """
    global cid  # the old version assigned a *local* cid, silently leaving the
    # server-side conversation attached to the previous id
    cid = str(uuid.uuid4())
    return [], []
# Prompt suggestions for the gr.Examples widget — deliberately multilingual
# (English, Portuguese, Spanish, French) to showcase Command-R's multilingual
# generation. Edit strings here only; the widget wiring lives in the UI block.
examples = [
"What are some good questions to get to know a stranger?",
"Create a list of unusual excuses people might use to get out of a work meeting",
"Write a python code to reverse a string",
"Explain the relativity theory in French",
"Como sair de um helicóptero que caiu na água?",
"Formally introduce the transformer architecture with notation.",
"¿Cómo le explicarías el aprendizaje automático a un extraterrestre?",
"Summarize recent news about the North American tech job market"
]
# Page title markup — currently unused (the gr.HTML(title) call in the UI
# block is commented out in favor of the logo/description row).
title = """<h1 align="center">Cohere for AI Command R</h1>"""
# CSS overrides: borderless logo image and a taller, smaller-font chat area.
custom_css = """
#logo-img {
border: none !important;
}
#chat-message {
font-size: 14px;
min-height: 300px;
}
"""
# Assemble the Gradio UI. Indentation for the `with` contexts is restored
# here — the pasted source had it stripped, which is a file-level SyntaxError.
# All component arguments and runtime strings are unchanged.
with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
    # gr.HTML(title)  # title banner intentionally disabled; logo row used instead
    with gr.Row():
        with gr.Column(scale=1):
            gr.Image("logo2.png", elem_id="logo-img", show_label=False, show_share_button=False, show_download_button=False)
        with gr.Column(scale=3):
            gr.Markdown("""C4AI Command-R is a research release of a 35 billion parameter highly performant generative model. C4AI Command-R is a large language model with open weights optimized for a variety of use cases including reasoning, summarization, and question answering. Command-R has the capability for multilingual generation evaluated in 10 languages and highly performant RAG and tool use capabilities.
<br/><br/>
**Model**: [c4ai-command-r-v01](https://huggingface.co/CohereForAI/c4ai-command-r-v01)
<br/>
**Developed by**: [Cohere](https://cohere.com/) and [Cohere for AI](https://cohere.com/research)
<br/>
**License**: CC-BY-NC, requires also adhering to [C4AI's Acceptable Use Policy](https://docs.cohere.com/docs/c4ai-acceptable-use-policy)
"""
            )
    with gr.Column():
        with gr.Row():
            chatbot = gr.Chatbot(show_label=False)
        with gr.Row():
            user_message = gr.Textbox(lines=1, placeholder="Ask anything ...", label="Input", show_label=False)
        with gr.Row():
            submit_button = gr.Button("Submit")
            clear_button = gr.Button("Clear chat")
    # Per-session chat history (supersedes the module-level `history` list).
    history = gr.State([])
    # NOTE(review): this rebinds the module-level conversation id once at app
    # build time, not per browser session — confirm whether per-session ids
    # are intended (clear_chat is the only other place it is refreshed).
    cid = str(uuid.uuid4())
    # Both pressing Enter and clicking Submit stream generate_response output.
    user_message.submit(fn=generate_response, inputs=[user_message, history], outputs=[chatbot, history], concurrency_limit=32)
    submit_button.click(fn=generate_response, inputs=[user_message, history], outputs=[chatbot, history], concurrency_limit=32)
    clear_button.click(fn=clear_chat, inputs=None, outputs=[chatbot, history], concurrency_limit=32)
    with gr.Row():
        gr.Examples(
            examples=examples,
            inputs=[user_message],
            cache_examples=False,  # with caching off, fn is not invoked on click
            fn=trigger_example,
            outputs=[chatbot],
        )
if __name__ == "__main__":
    # Restored the indentation the pasted source lost and dropped a stray " |"
    # artifact plus a dead commented-out launch call. api_open=False hides the
    # programmatic API; max_threads=32 matches the handlers' concurrency_limit.
    demo.queue(api_open=False).launch(max_threads=32)