import streamlit as st
import time
from transformers import pipeline
import torch
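
# A small Streamlit demo: generate a completion from facebook/opt-1.3b and
# reveal it character by character with a typewriter effect.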
# Alternative loader kept for reference: load OPT-1.3B in fp16 on GPU.
#from transformers import AutoModelForCausalLM, AutoTokenizer
#@st.cache(allow_output_mutation=True)
#def define_model():
#    model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b", torch_dtype=torch.float16).cuda()
#    tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b", use_fast=False)
#    return model, tokenizer

# Cache the pipeline so the 1.3B model is loaded only once per session,
# not on every Streamlit rerun.
@st.cache(allow_output_mutation=True)
def load_generator():
    return pipeline('text-generation', model="facebook/opt-1.3b")

generator = load_generator()

def generate_text(prompt):
    return generator(prompt, max_length=60)

# Alternative path: run OPT directly with model.generate() (requires CUDA).
#@st.cache(allow_output_mutation=True)
#def opt_model(prompt, model, tokenizer, num_sequences=1, max_length=50):
#    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda()
#    generated_ids = model.generate(input_ids, num_return_sequences=num_sequences, max_length=max_length)
#    answer = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
#    return answer
#model, tokenizer = define_model()

prompt = st.text_area('Your prompt here',
                      '''Hello, I am conscious and''')
#answer = opt_model(prompt, model, tokenizer,)
answer = generate_text(prompt)
lst = answer[0]['generated_text']
t = st.empty()
# Typewriter effect: re-render the markdown with one more character each tick.
for i in range(1, len(lst) + 1):
    t.markdown("### %s..." % lst[:i])
    time.sleep(0.04)
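
# To try it locally (assuming this file is the Space's app.py entry point):
#   streamlit run app.py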