# Install dependencies quietly, using the same interpreter that runs this script.
import sys
import subprocess

subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "gradio", "transformers", "python-dotenv", "torch"])
import gradio as gr
from transformers import GPTNeoForCausalLM, AutoTokenizer

# Load the GPT-Neo 2.7B tokenizer and model (weights are downloaded on first use).
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")
model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")
def generate_text(prompt):
    # Tokenize the prompt and sample a continuation of up to 1024 total tokens.
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # GPT-Neo has no pad token, so reuse EOS to silence the generate() warning.
    output = model.generate(input_ids, max_length=1024, temperature=0.5, do_sample=True,
                            pad_token_id=tokenizer.eos_token_id)
    return tokenizer.decode(output[0], skip_special_tokens=True)
def chatbot(message, history=None):
    # Avoid a shared mutable default: Gradio passes the stored state, or None on the first call.
    history = history or []
    output = generate_text(message)
    history.append((message, output))
    return history, history
# Wire the chat function to a text box plus session state, rendered as a chatbot widget.
gr.Interface(
    fn=chatbot,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
).launch(debug=True)