import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
from huggingface_hub import login

# Load credentials from config.txt (one "key = value" pair per line)
config = {}
with open("config.txt", "r") as f:
    for line in f:
        line = line.strip()
        if not line:
            continue
        key, value = line.split(" = ", 1)
        config[key] = value

repo_id = config["repo_id"]
repo_type = config["repo_type"]
token = config["token"]
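
# Expected config.txt layout, matching the keys and " = " separator parsed
# above (illustrative placeholder values, not real credentials):
#   repo_id = pp3232133/pp3232133-distilgpt2-wikitext2
#   repo_type = model
#   token = hf_xxxxxxxxxxxxxxxxxxxx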

# Authenticate with the Hugging Face Hub so the model can be downloaded
login(token=token)

# Load the custom chatbot model and tokenizer from the Hugging Face Hub
model_name = "pp3232133/pp3232133-distilgpt2-wikitext2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Map a user prompt to the model's generated reply for the Gradio interface
def chatbot_interface(input_text):
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    # GPT-2-style models have no pad token; reuse EOS to avoid a generate() warning
    chatbot_output = model.generate(input_ids, max_length=100, pad_token_id=tokenizer.eos_token_id)[0]
    response = tokenizer.decode(chatbot_output, skip_special_tokens=True)
    return response
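
# Quick sanity check without the UI (hypothetical prompt):
#   >>> chatbot_interface("Hello, chatbot!")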

# Gradio interface for the chatbot
iface = gr.Interface(
    fn=chatbot_interface, 
    inputs="text", 
    outputs="text",
    title="Chatbot",
    description="Custom chatbot based on your Hugging Face model. Start typing to chat with the bot.",
    theme="compact"
)

# Launch the interface
iface.launch()
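
# Note: launch() serves the app locally by default; launch(share=True) also
# creates a temporary public Gradio link, which can help when testing remotely.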