import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the token/model configuration from the config file.
def _load_config(path="config.txt"):
    """Parse simple ``key = value`` lines from *path* into a dict.

    Lines without ``=`` are ignored. Splitting happens on the FIRST ``=``
    only, and keys/values are whitespace-stripped, so both ``key=value``
    and ``key = value`` are accepted and values may themselves contain
    ``=`` (e.g. base64 tokens). The original ``split(" = ")`` raised
    ValueError on both of those cases.

    Raises FileNotFoundError if *path* does not exist (same as before).
    """
    config = {}
    with open(path, "r") as f:
        for line in f:
            if "=" in line:
                key, value = line.strip().split("=", 1)
                config[key.strip()] = value.strip()
    return config


config = _load_config()

model_name = config.get("repo_id", "")
token = config.get("token", "")

# Load the custom chatbot model from Hugging Face and serve it with Gradio.
# Only proceeds when the configured repo_id matches the expected model.
if model_name == "pp3232133/pp3232133-distilgpt2-wikitext2":
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    def chatbot_interface(input_text):
        """Generate a chatbot reply (max 100 tokens) for *input_text*."""
        encoded = tokenizer.encode(input_text, return_tensors="pt")
        generated = model.generate(encoded, max_length=100)
        return tokenizer.decode(generated[0], skip_special_tokens=True)

    # Text-in / text-out Gradio UI wired to the generation function above.
    iface = gr.Interface(
        fn=chatbot_interface,
        inputs="text",
        outputs="text",
        title="Chatbot",
        description="Custom chatbot based on your Hugging Face model. Start typing to chat with the bot.",
        theme="compact",
    )

    # Start the web interface (blocks until the server is stopped).
    iface.launch()

else:
    # Configured repo_id does not name the expected model.
    print("Nie można znaleźć nazwy modelu w pliku konfiguracyjnym.")