Switch PAWS to XNLI
app.py
CHANGED
@@ -34,20 +34,11 @@ PROMPT_LIST = [
 
 PAWS_X_PROMPT_LIST = [
     "Te amo.</s>Te adoro.",
-    "Te
-    "
+    "Te amo.</s>Te detesto.",
+    "Te amo.</s>Voy a caminar al campo."
 ]
 
 
-@st.cache(show_spinner=False, persist=True)
-def load_model(masked_text, model_url):
-    model = AutoModelForMaskedLM.from_pretrained(model_url)
-    tokenizer = AutoTokenizer.from_pretrained(model_url)
-    nlp = pipeline("fill-mask", model=model, tokenizer=tokenizer)
-    result = nlp(masked_text)
-    return result
-
-
 @st.cache(show_spinner=False, persist=True)
 def load_model(masked_text, model_url):
     model = AutoModelForMaskedLM.from_pretrained(model_url)
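The block removed above was a verbatim duplicate of the load_model helper that is kept right below it. For reference, a minimal standalone sketch of that retained fill-mask helper, with the @st.cache decorator dropped so it runs outside Streamlit; the usage line and model id are placeholders, not taken from this diff.

from transformers import AutoModelForMaskedLM, AutoTokenizer, pipeline

def load_model(masked_text, model_url):
    # Same body as the kept helper in the hunk above, minus the Streamlit cache.
    model = AutoModelForMaskedLM.from_pretrained(model_url)
    tokenizer = AutoTokenizer.from_pretrained(model_url)
    nlp = pipeline("fill-mask", model=model, tokenizer=tokenizer)
    # fill-mask returns a list of candidates, each with 'sequence', 'score', 'token_str'.
    return nlp(masked_text)

# Usage (placeholder model id; RoBERTa-style <mask> token assumed):
# load_model("Me gusta <mask> al campo.", "some-org/some-spanish-roberta")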
@@ -64,8 +55,10 @@ def load_model_pair_classification(text, model_url_pair_classification):
     nlp = pipeline("text-classification", model=model, tokenizer=tokenizer)
     result = nlp(f"{text}</s>")
     if result[0]["label"] == "LABEL_0":
-        return f"
-
+        return f"Entailment: {result[0]['score']:02f}"
+    if result[0]["label"] == "LABEL_1":
+        return f"Neutral: {result[0]['score']:02f}"
+    return f"Contradiction: {result[0]['score']:02f}"
 
 
 # Page
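The new return statements map the raw pipeline labels to XNLI classes: LABEL_0 as entailment, LABEL_1 as neutral, and anything else (LABEL_2) as contradiction. A minimal sketch of that path outside the app follows; the AutoModelForSequenceClassification class and the rounded score format are assumptions on my part, since the diff only shows the pipeline call and its ':02f' spec keeps the full six-decimal score.

from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

# Label mapping as implied by the added return statements above.
XNLI_LABELS = {"LABEL_0": "Entailment", "LABEL_1": "Neutral", "LABEL_2": "Contradiction"}

def classify_pair(text, model_url):
    model = AutoModelForSequenceClassification.from_pretrained(model_url)
    tokenizer = AutoTokenizer.from_pretrained(model_url)
    nlp = pipeline("text-classification", model=model, tokenizer=tokenizer)
    top = nlp(f"{text}</s>")[0]  # input is "premise</s>hypothesis", trailing </s> as in the app
    return f"{XNLI_LABELS.get(top['label'], top['label'])}: {top['score']:.2f}"

# Usage with the model id introduced in the last hunk of this commit:
# classify_pair("Te amo.</s>Te adoro.", "bertin-project/bertin-base-xnli-es")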
@@ -141,11 +134,11 @@ if st.button("Fill the mask"):
 st.markdown(
     """
     ### Fine-tuning to PAWS-X for paraphrase identification
-    Here you can play with the RoBERTa Base Gaussian Seq Len 512 model fine-tuned to
+    Here you can play with the RoBERTa Base Gaussian Seq Len 512 model fine-tuned to XNLI.
     """
 )
 
-pawsx_model_url = "bertin-project/bertin-base-
+pawsx_model_url = "bertin-project/bertin-base-xnli-es"
 paraphrase_prompt = st.selectbox("Paraphrase Prompt", ["Random", "Custom"])
 if paraphrase_prompt == "Custom":
     paraphrase_prompt_box = "Enter two sentences separated by </s> here..."
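For a quick end-to-end check of the new prompts against the new model id, the sketch below feeds one of the updated PAWS_X_PROMPT_LIST pairs straight into a text-classification pipeline; random.choice stands in for the app's "Random" selectbox branch, and everything other than the names copied from the diff is an assumption.

import random
from transformers import pipeline

PAWS_X_PROMPT_LIST = [
    "Te amo.</s>Te adoro.",
    "Te amo.</s>Te detesto.",
    "Te amo.</s>Voy a caminar al campo."
]

pawsx_model_url = "bertin-project/bertin-base-xnli-es"
nlp = pipeline("text-classification", model=pawsx_model_url)  # tokenizer loaded from the same id
text = random.choice(PAWS_X_PROMPT_LIST)  # stands in for the "Random" option in the app
print(text, nlp(f"{text}</s>"))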