Upload app.py
app.py ADDED
@@ -0,0 +1,71 @@
import torch
import streamlit as st
from transformers import RobertaTokenizer, RobertaForSequenceClassification
import re
import string


# Encode a single sentence into padded input IDs and an attention mask.
def tokenize_sentences(sentence):
    encoded_dict = tokenizer.encode_plus(
        sentence,
        add_special_tokens=True,
        max_length=128,
        padding='max_length',
        truncation=True,
        return_attention_mask=True,
        return_tensors='pt'
    )
    return torch.cat([encoded_dict['input_ids']], dim=0), torch.cat([encoded_dict['attention_mask']], dim=0)


# Lowercase, trim, and strip punctuation from the user's query.
def preprocess_query(query):
    query = str(query).lower()
    query = query.strip()
    query = query.translate(str.maketrans("", "", string.punctuation))
    return query


# Run the classifier and return {label: confidence %} for every aspect
# whose sigmoid score reaches the threshold.
def predict_aspects(sentence, threshold):
    input_ids, attention_mask = tokenize_sentences(sentence)
    with torch.no_grad():
        outputs = aspects_model(input_ids, attention_mask=attention_mask)
        logits = outputs.logits
    predicted_aspects = torch.sigmoid(logits).squeeze().tolist()
    results = dict()
    for label, prediction in zip(LABEL_COLUMNS_ASPECTS, predicted_aspects):
        if prediction < threshold:
            continue
        percentage = round(float(prediction) * 100, 2)
        results[label] = percentage
    return results


# Load tokenizer and model
BERT_MODEL_NAME_FOR_ASPECTS_CLASSIFICATION = 'roberta-large'
tokenizer = RobertaTokenizer.from_pretrained(BERT_MODEL_NAME_FOR_ASPECTS_CLASSIFICATION, do_lower_case=True)

LABEL_COLUMNS_ASPECTS = ['FOOD-CUISINE', 'FOOD-DEALS', 'FOOD-DIET_OPTION', 'FOOD-EXPERIENCE', 'FOOD-FLAVOR', 'FOOD-GENERAL', 'FOOD-INGREDIENT', 'FOOD-KITCHEN', 'FOOD-MEAL', 'FOOD-MENU', 'FOOD-PORTION', 'FOOD-PRESENTATION', 'FOOD-PRICE', 'FOOD-QUALITY', 'FOOD-RECOMMENDATION', 'FOOD-TASTE', 'GENERAL-GENERAL', 'RESTAURANT-ATMOSPHERE', 'RESTAURANT-BUILDING', 'RESTAURANT-DECORATION', 'RESTAURANT-EXPERIENCE', 'RESTAURANT-FEATURES', 'RESTAURANT-GENERAL', 'RESTAURANT-HYGIENE', 'RESTAURANT-KITCHEN', 'RESTAURANT-LOCATION', 'RESTAURANT-OPTIONS', 'RESTAURANT-RECOMMENDATION', 'RESTAURANT-SEATING_PLAN', 'RESTAURANT-VIEW', 'SERVICE-BEHAVIOUR', 'SERVICE-EXPERIENCE', 'SERVICE-GENERAL', 'SERVICE-WAIT_TIME']

aspects_model = RobertaForSequenceClassification.from_pretrained(BERT_MODEL_NAME_FOR_ASPECTS_CLASSIFICATION, num_labels=len(LABEL_COLUMNS_ASPECTS))
aspects_model.load_state_dict(torch.load('./Aspects_Extraction_Model_updated.pth', map_location=torch.device('cpu')), strict=False)
aspects_model.eval()

# Streamlit App
st.title("Implicit and Explicit Aspect Extraction")

sentence = st.text_input("Enter a sentence:")
threshold = st.slider("Threshold", min_value=0.0, max_value=1.0, step=0.01, value=0.5)

if sentence:
    processed_sentence = preprocess_query(sentence)
    results = predict_aspects(processed_sentence, threshold)
    if len(results) > 0:
        st.write("Predicted Aspects:")
        table_data = [["Category", "Aspect", "Probability"]]
        for aspect, percentage in results.items():
            aspect_parts = aspect.split("-")
            table_data.append(aspect_parts + [f"{percentage}%"])
        st.table(table_data)
    else:
        st.write("No aspects above the threshold.")
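For a quick check of the prediction pipeline outside the Streamlit UI, the functions above can be imported and called directly. This is a minimal sketch, not part of the commit: the file name `sanity_check.py`, the sample sentence, and the 0.5 threshold are illustrative, and it assumes `./Aspects_Extraction_Model_updated.pth` is present in the working directory. Importing `app` executes its module-level Streamlit calls, which outside `streamlit run` only log warnings and return default values.

# sanity_check.py -- hypothetical helper, not part of this commit
# Importing app loads the tokenizer, the roberta-large classifier, and the
# fine-tuned checkpoint, so ./Aspects_Extraction_Model_updated.pth must exist.
from app import preprocess_query, predict_aspects

sample = "The waiter was friendly but the pasta was bland."
cleaned = preprocess_query(sample)        # lowercased, trimmed, punctuation removed
scores = predict_aspects(cleaned, 0.5)    # {label: confidence %} for sigmoid scores >= 0.5

for label, pct in sorted(scores.items(), key=lambda kv: -kv[1]):
    print(f"{label}: {pct}%")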