Modified the structure
- app.py +4 -2
- requirements.txt +3 -2
app.py
CHANGED
@@ -1,12 +1,14 @@
 import streamlit as st
-from transformers import pipeline
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
 # Replace 'your-username/model-name' with the correct path to your model on the Hugging Face Hub
 MODEL_NAME = "exo-is/t5-small-60M-esg-keyword"
 
 @st.cache_resource
 def load_model():
-
+    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=False)
+    model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
+    return pipeline('text-generation', model=model, tokenizer=tokenizer)
 
 st.title('Gerador de Texto')
 
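One caveat on the new load_model() (an editor's observation, not part of this commit): the checkpoint name suggests a T5 model, and T5 is an encoder-decoder architecture, so AutoModelForCausalLM and the 'text-generation' task would normally fail to load it. A minimal sketch of the seq2seq variant, assuming the checkpoint really is T5-based:

import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

MODEL_NAME = "exo-is/t5-small-60M-esg-keyword"

@st.cache_resource  # load the model once and reuse it across Streamlit reruns
def load_model():
    # T5 checkpoints are encoder-decoder models: they load via
    # AutoModelForSeq2SeqLM and run under the 'text2text-generation' task.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, use_fast=False)
    model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
    return pipeline('text2text-generation', model=model, tokenizer=tokenizer)

With that variant, generator = load_model() followed by generator('some input text') returns a list of dicts with a 'generated_text' key.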
requirements.txt
CHANGED
@@ -1,3 +1,4 @@
 streamlit==1.25.0
-transformers==4.
-torch==2.0.1
+transformers==4.33.2
+torch==2.0.1
+tokenizers==0.13.3
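A note on the pins (again an observation, not part of the commit): tokenizers==0.13.3 only affects the fast Rust-backed tokenizers, while load_model() passes use_fast=False; slow T5-style tokenizers depend on sentencepiece instead. If the checkpoint is T5-based, the file would likely also need that dependency, sketched here with an assumed version:

streamlit==1.25.0
transformers==4.33.2
torch==2.0.1
tokenizers==0.13.3
sentencepiece==0.1.99  # assumed pin, not from the commit; any recent release should work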