import gradio as gr
import os
import spaces
from threading import Thread
from typing import Iterator
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
# Set an environment variable
HF_TOKEN = os.environ.get("HF_TOKEN", None)
# Read the authentication and sharing settings from environment variables
#auth_users = os.environ.get("GRADIO_AUTH_USERS")
#auth_passwords = os.environ.get("GRADIO_AUTH_PASSWORDS")
# Split the comma-separated user and password strings into lists
#auth_users = [user.strip() for user in auth_users.split(",")]
#auth_passwords = [password.strip() for password in auth_passwords.split(",")]
# Build the authentication dictionary
#auth_credentials = dict(zip(auth_users, auth_passwords))
DESCRIPTION = '''
<div>
<h1 style="text-align: center;">CreativeWorks Mistral 7B Chat</h1>
<p>This Space demonstrates <a href="https://huggingface.co/CreativeWorksAi/CreativeWorks_Mistral_7b_Chat_V1"><b>CreativeWorks_Mistral_7b_Chat_V1</b></a>, an instruction-tuned model built on <a href="https://huggingface.co/mistralai/Mistral-7B-v0.3"><b>Mistral-7B-v0.3</b></a> and customized for Brazilian Portuguese. Feel free to play with it, or duplicate the Space to run it privately!</p>
<p>🔎 For more details about the model and how to use it with <code>transformers</code>, take a look at <a href="https://huggingface.co/CreativeWorksAi/CreativeWorks_Mistral_7b_Chat_V1">our model card</a>.</p>
<p>🦕 Looking for an even more powerful model? Check out the <a href="https://huggingface.co/chat/"><b>Hugging Chat</b></a> integration for larger models.</p>
</div>
'''
LICENSE = """
<p/>
---
CreativeWoks AI: Intelligence System for Advanced Dialogue and Organized Responses Assistance
"""
PLACEHOLDER = """
<div style="position: relative; text-align: center;">
<h1 style="font-size: 2.5em; margin-top: 20px;">CreativeWorks Ai</h1>
<img src="https://utfs.io/f/4c8a3309-2ac3-453b-8441-04e5c5a3ed0f-361e80.svg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55; position: absolute; top: 50%; left: 50%; transform: translate(-50%, -50%); z-index: 0;">
<div style="background-color: rgba(255, 255, 255, 0.8); /* Ajuste a opacidade do fundo do texto aqui */
font-size: 1.2em; text-align: center; max-width: 800px; margin: auto; position: relative; z-index: 1; padding: 20px;">
<p>Este espaço demonstra o modelo customizado para o português brasileiro <a href="https://huggingface.co/mistralai/Mistral-7B-v0.3"><b>Mistral-7B-v0.3</b></a>. O Mistral-7B-v0.3 Large Language Model (LLM) é uma versão do Mistral-7B-v0.2 com vocabulário expandido. A CreativeWorks modificou e afinou o modelo para que seja mais rápido e alcance desempenho comparável aos principais modelos de código aberto existentes 10 vezes maiores, incluindo diversas melhorias e otimização para raciocínio lógico, com foco em RAG (Recuperação Aumentada por Geração).</p>
<p>🔎 Para mais detalhes sobre o modelo e como utilizá-lo com <code>transformers</code>, dê uma olhada <a href="https://huggingface.co/CreativeWorksAi/CreativeWorks_Mistral_7b_Chat_V1">em nosso model card.</a>.</p>
<p>🦕 Procurando um modelo ainda mais poderoso? Confira a integração do <a href="https://huggingface.co/chat/"><b>Hugging Chat</b></a> para modelos maiores.</p>
</div>
</div>
"""
css = """
h1 {
text-align: center;
display: block;
}
#duplicate-button {
margin: auto;
color: white;
background: #1565c0;
border-radius: 100vh;
}
"""
# Load the tokenizer and model (the token is needed if the repo is gated or private)
tokenizer = AutoTokenizer.from_pretrained("CreativeWorksAi/CreativeWorks_Mistral_7b_Chat_V1", token=HF_TOKEN)
model = AutoModelForCausalLM.from_pretrained("CreativeWorksAi/CreativeWorks_Mistral_7b_Chat_V1", token=HF_TOKEN, device_map="auto")
#model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", device_map="auto") # to("cuda:0")
terminators = [
tokenizer.eos_token_id,
tokenizer.convert_tokens_to_ids("</s>")
]
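# Note: for Mistral-style tokenizers "</s>" is usually the EOS token itself, so
# both entries above may resolve to the same id; that is harmless and kept as-is.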
@spaces.GPU(duration=120)
def CreativeWorks_Mistral_7b_Chat_V1(message: str,
                                     history: list,
                                     temperature: float,
                                     max_new_tokens: int
                                     ) -> Iterator[str]:
    """
    Stream a response from the CreativeWorks Mistral model.
    Args:
        message (str): The input message.
        history (list): The conversation history used by ChatInterface, as (user, assistant) pairs.
        temperature (float): The sampling temperature for generating the response.
        max_new_tokens (int): The maximum number of new tokens to generate.
    Yields:
        str: The response accumulated so far.
    """
    conversation = []
    # This checkpoint's chat template appears to use ShareGPT-style keys
    # ("from"/"value") rather than the more common "role"/"content".
    for user, assistant in history:
        conversation.extend([{"from": "human", "value": user}, {"from": "assistant", "value": assistant}])
    conversation.append({"from": "human", "value": message})
    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
generate_kwargs = dict(
input_ids=input_ids,
streamer=streamer,
max_new_tokens=max_new_tokens,
do_sample=True,
temperature=temperature,
eos_token_id=terminators,
pad_token_id=tokenizer.eos_token_id
)
    # Enforce greedy decoding (do_sample=False) when temperature is 0; sampling
    # with a zero temperature would otherwise crash.
    if temperature == 0:
        generate_kwargs['do_sample'] = False
t = Thread(target=model.generate, kwargs=generate_kwargs)
t.start()
outputs = []
for text in streamer:
# Remove the unwanted prefix if present
text = text.replace("<|im_start|>assistant", " ")
outputs.append(text)
yield "".join(outputs)
# Gradio block
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')
with gr.Blocks(fill_height=True, css=css) as demo:
gr.Markdown(DESCRIPTION)
#gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
gr.ChatInterface(
fn=CreativeWorks_Mistral_7b_Chat_V1,
chatbot=chatbot,
fill_height=True,
additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
additional_inputs=[
gr.Slider(minimum=0,
maximum=1,
step=0.1,
value=0.95,
label="Temperature",
render=False),
gr.Slider(minimum=256,
maximum=8192,
step=1,
value=512,
label="Max new tokens",
render=False ),
],
examples=[
['How to setup a human base on Mars? Give short answer.'],
['Explain theory of relativity to me like I’m 8 years old.'],
['What is 9,000 * 9,000?'],
['Write a pun-filled happy birthday message to my friend Alex.'],
['Justify why a penguin might make a good king of the jungle.']
],
cache_examples=False,
)
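    # The two sliders above are passed positionally to the handler as
    # `temperature` and `max_new_tokens`, after `message` and `history`.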
gr.Markdown(LICENSE)
if __name__ == "__main__":
    # Placeholder test credentials; for real deployments, pass the
    # environment-driven auth_credentials dict built above instead.
    demo.launch(auth=("teste", "teste@teste"), share=True)