# Hugging Face Space app (Space status at scrape time: "Runtime error")
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load settings (including the Hugging Face auth token) from config.txt.
# Each non-empty line is expected to look like:  key = value
config = {}
with open("config.txt", "r", encoding="utf-8") as f:
    for line in f:
        line = line.strip()
        if not line or line.startswith("#"):
            continue  # skip blank lines and comments
        # maxsplit=1 so values that themselves contain " = " are kept whole
        key, value = line.split(" = ", 1)
        config[key] = value

repo_id = config["repo_id"]
repo_type = config["repo_type"]
token = config["token"]
# Load the chatbot model and tokenizer from the Hugging Face Hub,
# authenticating with the token read from config.txt.
#
# NOTE(review): the original code called HfFolder(repo_id, repo_type,
# token=token), but HfFolder was never imported (NameError at import time —
# the likely cause of the Space's "Runtime error"), and from_pretrained()
# has no "repo_path" argument. It also loaded
# "pp3232133/distilgpt2-wikitext2" first, only to immediately overwrite
# tokenizer/model with the load below — that dead first load is removed.
model_name = "pp3232133/pp3232133-distilgpt2-wikitext2"
tokenizer = AutoTokenizer.from_pretrained(model_name, token=token)
model = AutoModelForCausalLM.from_pretrained(model_name, token=token)
def chatbot_interface(input_text):
    """Generate a chatbot reply for *input_text*.

    Encodes the prompt, lets the model continue it (up to 100 tokens
    total including the prompt), and returns the decoded continuation
    with special tokens stripped.

    Args:
        input_text: The user's message as a plain string.

    Returns:
        The model's generated text as a plain string.
    """
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    # generate() returns a batch; [0] takes the single generated sequence
    chatbot_output = model.generate(input_ids, max_length=100)[0]
    response = tokenizer.decode(chatbot_output, skip_special_tokens=True)
    return response
# Gradio UI for the chatbot: one text box in, one text box out.
# NOTE(review): the original passed theme="compact" — a Gradio 2-era string
# theme that current Gradio no longer accepts; dropped here. Pass a
# gr.themes.* object instead if a custom look is needed.
iface = gr.Interface(
    fn=chatbot_interface,
    inputs="text",
    outputs="text",
    title="Chatbot",
    description="Custom chatbot based on your Hugging Face model. Start typing to chat with the bot.",
)

# Start the web server (blocks until the app is stopped).
iface.launch()