import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer
model = AutoModelForCausalLM.from_pretrained("distilgpt2")
tokenizer = AutoTokenizer.from_pretrained("distilgpt2")

# Function that takes a prompt and generates a text continuation
def generate_text(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    gen_tokens = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        do_sample=True,          # sample instead of greedy decoding
        temperature=0.9,         # higher temperature -> more varied output
        max_length=100,          # total length in tokens, including the prompt
        pad_token_id=tokenizer.eos_token_id,
    )
    gen_text = tokenizer.decode(gen_tokens[0], skip_special_tokens=True)
    return gen_text

# Create a Gradio interface with a text input and a text output
iface = gr.Interface(fn=generate_text, inputs="text", outputs="text")

# Launch the application
iface.launch()
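
Once the app is running, the same endpoint can also be queried programmatically with the gradio_client package. A minimal sketch, assuming the app is reachable at Gradio's default local URL (http://127.0.0.1:7860) and uses the auto-generated /predict endpoint that gr.Interface exposes; the prompt string is just an illustration:

from gradio_client import Client

# Assumption: the app above is running locally on the default port
client = Client("http://127.0.0.1:7860")

# gr.Interface registers a single endpoint named "/predict"
result = client.predict("Once upon a time", api_name="/predict")
print(result)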