Commit b74a24a (parent: 51ec97a): Update app.py

app.py CHANGED
@@ -1,5 +1,3 @@
-# -*- coding: utf-8 -*-
-
 """
 @Author : Jiangjie Chen
 @Time : 2021/12/13 17:17
@@ -41,5 +39,106 @@ config['fc_dir'] = os.path.join(model_dir, 'fact_checking/roberta-large/')
 config['mrc_dir'] = os.path.join(model_dir, 'mrc_seq2seq/bart-base/')
 config['er_dir'] = os.path.join(model_dir, 'evidence_retrieval/')
 
+
 from src.loren import Loren
 
+
+loren = Loren(config, verbose=False)
+try:
+    js = loren.check('Donald Trump won the 2020 U.S. presidential election.')
+except Exception as e:
+    raise ValueError(e)
+
+
+def highlight_phrase(text, phrase):
+    text = loren.fc_client.tokenizer.clean_up_tokenization(text)
+    return text.replace('<mask>', f'<i><b>{phrase}</b></i>')
+
+
+def highlight_entity(text, entity):
+    return text.replace(entity, f'<i><b>{entity}</b></i>')
+
+
+def gradio_formatter(js, output_type):
+    zebra_css = '''
+        tr:nth-child(even) {
+            background: #f1f1f1;
+        }
+        thead{
+            background: #f1f1f1;
+        }'''
+    if output_type == 'e':
+        data = {'Evidence': [highlight_entity(x, e) for x, e in zip(js['evidence'], js['entities'])]}
+    elif output_type == 'z':
+        p_sup, p_ref, p_nei = [], [], []
+        for x in js['phrase_veracity']:
+            max_idx = torch.argmax(torch.tensor(x)).tolist()
+            x = ['%.4f' % xx for xx in x]
+            x[max_idx] = f'<i><b>{x[max_idx]}</b></i>'
+            p_sup.append(x[2])
+            p_ref.append(x[0])
+            p_nei.append(x[1])
+
+        data = {
+            'Claim Phrase': js['claim_phrases'],
+            'Local Premise': [highlight_phrase(q, x[0]) for q, x in zip(js['cloze_qs'], js['evidential'])],
+            'p_SUP': p_sup,
+            'p_REF': p_ref,
+            'p_NEI': p_nei,
+        }
+    else:
+        raise NotImplementedError
+    data = pd.DataFrame(data)
+    pt = PrettyTable(field_names=list(data.columns),
+                     align='l', border=True, hrules=1, vrules=1)
+    for v in data.values:
+        pt.add_row(v)
+    html = pt.get_html_string(attributes={
+        'style': 'border-width: 2px; bordercolor: black'
+    }, format=True)
+    html = f'<head> <style type="text/css"> {zebra_css} </style> </head>\n' + html
+    html = html.replace('&lt;', '<').replace('&gt;', '>')
+    return html
+
+
+def run(claim):
+    try:
+        js = loren.check(claim)
+    except Exception as error_msg:
+        exc = traceback.format_exc()
+        msg = f'[Error]: {error_msg}.\n[Traceback]: {exc}'
+        loren.logger.error(claim)
+        loren.logger.error(msg)
+        return 'Oops, something went wrong.', '', ''
+    label = js['claim_veracity']
+    loren.logger.warning(label + str(js))
+    ev_html = gradio_formatter(js, 'e')
+    z_html = gradio_formatter(js, 'z')
+    return label, z_html, ev_html
+
+
+iface = gr.Interface(
+    fn=run,
+    inputs="text",
+    outputs=[
+        'text',
+        'html',
+        'html',
+    ],
+    examples=['Donald Trump won the U.S. 2020 presidential election.',
+              'The first inauguration of Bill Clinton was in the United States.',
+              'The Cry of the Owl is based on a book by an American.',
+              'Smriti Mandhana is an Indian woman.'],
+    title="LOREN",
+    layout='horizontal',
+    description="LOREN is an interpretable Fact Verification model using Wikipedia as its knowledge source. "
+                "This is a demo system for the AAAI 2022 paper: \"LOREN: Logic-Regularized Reasoning for Interpretable Fact Verification\" (https://arxiv.org/abs/2012.13577). "
+                "See the paper for more details. You can add a *FLAG* on the bottom to record interesting or bad cases! "
+                "(Note that the demo system directly retrieves evidence from an up-to-date Wikipedia, which is different from the evidence used in the paper.)",
+    flagging_dir='results/flagged/',
+    allow_flagging=True,
+    flagging_options=['Interesting!', 'Error: Claim Phrase Parsing', 'Error: Local Premise',
+                      'Error: Require Commonsense', 'Error: Evidence Retrieval'],
+    enable_queue=True
+)
+iface.launch()
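
For reference, the table-building step added above can be previewed without loading the LOREN models. The snippet below is a minimal, illustrative sketch only: the key names ('claim_phrases', 'cloze_qs', 'evidential', 'phrase_veracity') and the [REF, NEI, SUP] probability order are inferred from the indexing in gradio_formatter, the mock values are made up, and nothing here is a documented Loren API.

# Illustrative sketch: mock the fields gradio_formatter reads from loren.check()
# (key names and probability order are assumptions inferred from the diff above).
import pandas as pd
import torch
from prettytable import PrettyTable

mock_js = {
    'claim_phrases': ['Donald Trump', 'the 2020 U.S. presidential election'],
    'cloze_qs': ['<mask> won the 2020 U.S. presidential election.',
                 'Donald Trump won <mask>.'],
    'evidential': [['Joe Biden'], ['the 2016 U.S. presidential election']],
    'phrase_veracity': [[0.91, 0.05, 0.04], [0.88, 0.07, 0.05]],  # assumed [REF, NEI, SUP]
}

rows = []
for phrase, cloze, ev, probs in zip(mock_js['claim_phrases'], mock_js['cloze_qs'],
                                    mock_js['evidential'], mock_js['phrase_veracity']):
    # Bold the winning probability, as the 'z' branch of gradio_formatter does.
    max_idx = torch.argmax(torch.tensor(probs)).tolist()
    cells = ['%.4f' % p for p in probs]
    cells[max_idx] = f'<i><b>{cells[max_idx]}</b></i>'
    rows.append({
        'Claim Phrase': phrase,
        'Local Premise': cloze.replace('<mask>', f'<i><b>{ev[0]}</b></i>'),
        'p_SUP': cells[2], 'p_REF': cells[0], 'p_NEI': cells[1],
    })

data = pd.DataFrame(rows)
pt = PrettyTable(field_names=list(data.columns), align='l')
for v in data.values:
    pt.add_row(v)
# PrettyTable HTML-escapes cell contents, so the <i><b> markup is unescaped
# afterwards, mirroring the replace('&lt;', ...) step in app.py.
html = pt.get_html_string(format=True).replace('&lt;', '<').replace('&gt;', '>')
print(html[:300])

In the Space itself these fields come from loren.check(claim); run() builds the evidence table ('e') and the phrase-veracity table ('z') the same way and returns them to the two 'html' outputs of the gr.Interface.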