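# Streamlit demo: generate text with FLAN-T5 and inspect the per-token
# scores behind the sampled output.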
import torch
import streamlit as st
from transformers import AutoTokenizer, T5ForConditionalGeneration, GenerationConfig
st.title('How do LLMs choose their words?')
col1, col2 = st.columns(2)
with col1:
    model_checkpoint = st.selectbox(
        "Model:",
        ("google/flan-t5-base", "google/flan-t5-xl")
    )
with col2:
    # transformers rejects temperature=0.0 when sampling, so keep the lower bound positive
    temperature = st.number_input('Temperature:', min_value=0.01, max_value=1.0, value=0.5, format='%f')
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
model = T5ForConditionalGeneration.from_pretrained(
    model_checkpoint,
    load_in_8bit=False,
    device_map="auto"
)
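# device_map="auto" requires the `accelerate` package; flipping load_in_8bit
# to True would additionally require `bitsandbytes`.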
instruction = st.text_area('Write an instruction:')
max_tokens = st.number_input('Max output length:', min_value=1, max_value=64, format='%i')
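# Note: with no explicit value=, st.number_input defaults to min_value, so the
# output length starts at a single token until the user raises it.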
prompts = [
    f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction: {instruction}
### Response:"""
]
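# The prompt mirrors the Alpaca-style instruction template, minus its
# optional "### Input:" section.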
inputs = tokenizer(
    prompts[0],
    return_tensors="pt",
)
input_ids = inputs["input_ids"]  # .to("cuda")
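# Sampling setup: temperature rescales the logits, top_k keeps only the 100
# most likely tokens, and top_p (nucleus sampling) further trims that set to
# the smallest group whose probabilities sum to 0.995; repetition_penalty
# down-weights tokens that already appear in the sequence.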
generation_config = GenerationConfig(
    do_sample=True,
    temperature=temperature,
    top_p=0.995,  # default 0.75
    top_k=100,  # default 80
    repetition_penalty=1.5,
    max_new_tokens=max_tokens,
)
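# do_sample=True makes decoding stochastic: rerunning the app with the same
# prompt can produce a different completion each time.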
if instruction:
    with torch.no_grad():
        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=torch.ones_like(input_ids),
            generation_config=generation_config,
            return_dict_in_generate=True,
            output_scores=True
        )
    output_text = tokenizer.decode(
        outputs['sequences'][0],  # .cuda()
        skip_special_tokens=False
    ).strip()
    st.write(output_text)
    st.write(model.compute_transition_scores(outputs.sequences, outputs.scores, normalize_logits=False))
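    # A minimal sketch (an addition, not in the original app): with
    # normalize_logits=True the transition scores are log-probabilities, so
    # exponentiating them shows how likely each sampled token was. For T5 the
    # decoder output begins with a <pad> start token, so sequences[0][1:]
    # aligns with the per-step scores.
    log_probs = model.compute_transition_scores(
        outputs.sequences, outputs.scores, normalize_logits=True
    )
    for token_id, log_p in zip(outputs.sequences[0][1:], log_probs[0]):
        st.write(f"{tokenizer.decode(token_id)!r}: p = {log_p.exp().item():.3f}")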