# https://www.gradio.app/guides/using-hugging-face-integrations

import gradio as gr
from transformers import pipeline, Conversation

# model = "mistralai/Mistral-7B-Instruct-v0.1"  # larger alternative
model = "TinyLlama/TinyLlama-1.1B-Chat-v0.3"

title = "Shisa 7B"
description = "Test out Shisa 7B in either English or Japanese."
placeholder = "Type Here / ここに入力してください"

examples = [
    "Hello, how are you?",
    "こんにちは、元気ですか?",  # Hello, how are you?
    "おっす、元気?",  # Hey, how's it going?
    "こんにちは、いかがお過ごしですか?",  # Hello, how have you been?
]

# Docs: https://github.com/huggingface/transformers/blob/main/src/transformers/pipelines/conversational.py
conversation = Conversation()
chatbot = pipeline('conversational', model)

'''
# Usage example from the transformers docs:
conversation = Conversation("Going to the movies tonight - any suggestions?")
conversation.add_message({"role": "assistant", "content": "The Big Lebowski."})
conversation.add_message({"role": "user", "content": "Is it good?"})
conversation.messages[:-1]
'''

def chat(message, history):
    conversation.add_message({"role": "user", "content": message})
    # Reuse the Conversation object the pipeline returns so a local shadow
    # response doesn't get created.
    response_conversation = chatbot(conversation)
    print(response_conversation)  # debug
    print(response_conversation.messages)  # debug
    print(response_conversation.messages[-1]["content"])  # debug
    conversation.add_message(response_conversation.messages[-1])
    response = conversation.messages[-1]["content"]
    # gr.ChatInterface expects the reply string back, not a (response, history) tuple
    return response
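
# Per-session alternative (a minimal sketch, not wired up here): the module-level
# `conversation` above is shared by every visitor, while gr.ChatInterface already
# passes each session's history as (user, assistant) pairs, so the state could be
# rebuilt per call instead. Assumes the same `chatbot` pipeline defined above.
'''
def chat_stateless(message, history):
    convo = Conversation()
    for user_msg, assistant_msg in history:
        convo.add_message({"role": "user", "content": user_msg})
        convo.add_message({"role": "assistant", "content": assistant_msg})
    convo.add_message({"role": "user", "content": message})
    convo = chatbot(convo)
    return convo.messages[-1]["content"]
'''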

gr.ChatInterface(
    chat,
    chatbot=gr.Chatbot(height=400),
    textbox=gr.Textbox(placeholder=placeholder, container=False, scale=7),
    title=title,
    description=description,
    theme="soft",
    examples=examples,
    cache_examples=False,
    undo_btn="Delete Previous",
    clear_btn="Clear",
).launch()

'''
# Legacy alternative: load a hosted model demo directly (gr.Interface.load is
# deprecated in favor of gr.load in newer Gradio releases).
gr.Interface.load(
    "EleutherAI/gpt-j-6B",
    inputs=gr.Textbox(lines=5, label="Input Text"),
    title=title,
    description=description,
).launch()

# gr.Interface.from_pipeline doesn't support the conversational pipeline
pipe = pipeline('conversational', model)
gr.Interface.from_pipeline(pipe).launch()
'''

# For async/concurrent request handling, launch with a queue instead:
# ).queue().launch()
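
# gr.Interface.from_pipeline does wrap several other transformers tasks, so a
# plain text-generation demo is one possible fallback. A minimal sketch, assuming
# single-turn generation (no chat template or multi-turn state) is acceptable:
'''
text_gen = pipeline("text-generation", model=model)
gr.Interface.from_pipeline(text_gen).launch()
'''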