import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
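# Hugging Face access token used to authenticate model downloads from the Hub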
access_token = "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"
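# Fine-tuned sequence-classification model and its matching tokenizer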
model = AutoModelForSequenceClassification.from_pretrained("EkhiAzur/RoBERTA_3", token=access_token)
tokenizer = AutoTokenizer.from_pretrained(
    "EkhiAzur/RoBERTA_3",
    token=access_token,
    use_fast=True,
    add_prefix_space=True,
)
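# Classification pipeline; inputs are padded/truncated to 512 tokens and processed one at a time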
classifier = pipeline("text-classification", tokenizer=tokenizer, model=model, max_length=512,
                      padding=True, truncation=True, batch_size=1)
def prozesatu(Testua, request: gr.Request):
    # Log the client's Accept-Language header for debugging.
    print(request.headers["Accept-Language"])
    # Classify the text and return both class probabilities.
    prediction = prozesatu.classifier(Testua)[0]
    if prediction["label"] == "GAI":
        return {"Gai": prediction["score"], "Ez gai": 1 - prediction["score"]}
    else:
        return {"Gai": 1 - prediction["score"], "Ez gai": prediction["score"]}
    # return 'C1:{}. Probabilitatea:{:.2f}'.format(prediction["label"], round(prediction["score"], 2))

# Attach the pipeline to the handler function so it is available at request time.
prozesatu.classifier = classifier
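# Gradio interface: a single text box in, predicted label with class scores out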
demo = gr.Interface(
    fn=prozesatu,
    inputs=gr.Textbox(label="Testua", placeholder="Idatzi hemen testua..."),
    outputs="label",
    interpretation="default",
    examples=[["Gaur egungo teknologiak bikainak dira..."]],
).launch()
# gr.Interface(fn=prozesatu, inputs="text", outputs="text").launch()