# text-generation/app.py
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
from transformers import T5Tokenizer, T5ForConditionalGeneration
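
# Pipe 5: question generation. The app takes a passage of text and asks the
# selected model to generate a question about it.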
st.subheader('Pipe5: Text-To-Text Generation -> Question Generation', divider='orange')
if st.toggle(label='Show Pipe5'):
    models = [
        'google/flan-t5-base',
        'meta-llama/Meta-Llama-3-8B',
        'meta-llama/Meta-Llama-3-8B-Instruct'
    ]
    model_name = st.selectbox(
        label='Select Model',
        options=models,
        placeholder='google/flan-t5-base',
    )
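    # flan-t5-base is a seq2seq model, so it uses the dedicated T5 classes;
    # the Llama checkpoints are causal LMs loaded through the Auto classes.
    # Note: the Meta-Llama-3 models are gated on the Hugging Face Hub and
    # require an access token (and far more memory than flan-t5-base).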
    if model_name == models[0]:
        tokenizer = T5Tokenizer.from_pretrained(model_name)
        model = T5ForConditionalGeneration.from_pretrained(model_name)
    else:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(model_name)
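    # Prefix the user's text with an instruction, tokenize it, and generate.
    # generate() uses the model's short default max_length here, so long
    # questions may be cut off; pass max_new_tokens to raise the limit.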
    input_text = st.text_area(label='Enter the text from which a question is to be generated:', value='Bruce Wayne is the Batman.')
    input_text = 'Generate a question from this: ' + input_text
    input_ids = tokenizer(input_text, return_tensors='pt').input_ids
    outputs = model.generate(input_ids)
    # Decode the generated ids, dropping special tokens (pad/eos).
    output_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    if st.checkbox(label='Show tokenized output'):
        st.write(outputs)
    st.write("Output is:")
    st.write(output_text)
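    # Free-form mode: run the selected model on raw input, without the
    # question-generation prefix.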
    if st.toggle(label='Access model unrestricted'):
        input_text = st.text_area('Enter text')
        input_ids = tokenizer(input_text, return_tensors='pt').input_ids
        outputs = model.generate(input_ids)
        st.write(tokenizer.decode(outputs[0], skip_special_tokens=True))
        st.write(outputs)