# --- NOTE(review): the lines below are Hugging Face Spaces page residue ---
# --- (scraping artifact captured with the file, not code) -----------------
# Spaces: Sleeping / Sleeping
# File size: 640 Bytes
# commits: e4f9fa8 1a76661 09d32b7 5010a63 09d32b7 599bc02 eaca639 1c50f5e eaca639 1c50f5e
# line-number gutter: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
"""Streamlit app: feed a user-entered proverb to a seq2seq model and show the output.

Fixes applied:
- ``import modelrun.py`` is invalid Python (it tries to import a submodule
  named ``py`` inside a package ``modelrun``); import the module properly.
- ``tokenizer`` and ``model`` were used but never defined here — they are
  pulled in from ``modelrun`` explicitly.
  NOTE(review): assumes modelrun.py defines ``model`` and ``tokenizer`` at
  module level — TODO confirm against that file.
- Guard against the empty prompt: on the first render ``st.text_input``
  returns "", and the original ran tokenization/generation on it anyway.
- Inference wrapped in ``torch.no_grad()`` — no gradients needed at serve time.
"""
import streamlit as st
import pandas as pd
import torch
from datasets import Dataset, DatasetDict
from transformers import AutoTokenizer, MT5ForConditionalGeneration
from transformers import T5Tokenizer
from transformers import Trainer, TrainingArguments

from modelrun import model, tokenizer  # NOTE(review): was `import modelrun.py` (invalid)

prompt = st.text_input("Enter your proverb: ")

# Only run the model once the user has actually typed something;
# st.text_input yields "" on the initial render.
if prompt:
    # Tokenize the input prompt
    input_ids = tokenizer.encode(prompt, return_tensors='pt')
    # Generate the output (inference only — no gradient tracking needed)
    with torch.no_grad():
        output_ids = model.generate(input_ids, max_length=256)
    # Decode the output to text
    output_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    st.write(output_text)
# (end of scraped page residue)