jiangjiechen committed · commit 8c0f1a8
Parent(s): a9a78de

update layout

app.py CHANGED
@@ -20,7 +20,7 @@ config = {
     "logic_lambda": 0.5,
     "prior": "random",
     "mask_rate": 0.0,
-    "cand_k":
+    "cand_k": 1,
     "max_seq1_length": 256,
     "max_seq2_length": 128,
     "max_num_questions": 8,
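Note: the only change in this hunk pins "cand_k" to 1. Going by LOREN's question-answering stage, this key plausibly caps how many candidate answers are kept per probing question; that reading is an assumption, not something the diff states. A hypothetical illustration:

# Hypothetical sketch; the semantics of "cand_k" are assumed, not confirmed by the diff.
candidates = ['ANSWER1', 'ANSWER2', 'ANSWER3']   # made-up MRC candidates
kept = candidates[:config['cand_k']]             # with "cand_k": 1 -> ['ANSWER1']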
@@ -35,9 +35,6 @@ os.system('rm -r LOREN/results/')
 os.system('rm -r LOREN/models/')
 os.system('mv LOREN/* ./')
 
-# os.makedirs('data/', exist_ok=True)
-# os.system('wget -O data/fever.db https://s3-eu-west-1.amazonaws.com/fever.public/wiki_index/fever.db')
-
 model_dir = snapshot_download('Jiangjie/loren')
 config['fc_dir'] = os.path.join(model_dir, 'fact_checking/roberta-large/')
 config['mrc_dir'] = os.path.join(model_dir, 'mrc_seq2seq/bart-base/')
@@ -49,15 +46,6 @@ from src.loren import Loren
 
 loren = Loren(config)
 try:
-    # js = {
-    #     'id': 0,
-    #     'evidence': ['EVIDENCE1', 'EVIDENCE2'],
-    #     'question': ['QUESTION1', 'QUESTION2'],
-    #     'claim_phrase': ['CLAIMPHRASE1', 'CLAIMPHRASE2'],
-    #     'local_premise': [['E1 ' * 100, 'E1' * 100, 'E1' * 10], ['E2', 'E2', 'E2']],
-    #     'phrase_veracity': [[0.1, 0.5, 0.4], [0.1, 0.7, 0.2]],
-    #     'claim_veracity': 'SUPPORT'
-    # }
     js = loren.check('Donald Trump won the 2020 U.S. presidential election.')
 except Exception as e:
     raise ValueError(e)
@@ -65,31 +53,38 @@ except Exception as e:
 
 def gradio_formatter(js, output_type):
     if output_type == 'e':
-        data = {'Evidence': js['evidence']}
+        data = {'Evidence': [f'* {x}' for x in js['evidence']]}
     elif output_type == 'z':
         data = {
-            'Claim Phrase': js['claim_phrases'],
-            'Local Premise': js['local_premises'],
-            'p_SUP': [
-            'p_REF': [
-            'p_NEI': [
+            'Claim Phrase': [f'* {x}' for x in js['claim_phrases']],
+            'Local Premise': [f'* {x}' for x in js['local_premises']],
+            'p_SUP': ['%.4f' % x[2] for x in js['phrase_veracity']],
+            'p_REF': ['%.4f' % x[0] for x in js['phrase_veracity']],
+            'p_NEI': ['%.4f' % x[1] for x in js['phrase_veracity']],
         }
     else:
         raise NotImplementedError
     data = pd.DataFrame(data)
-    pt = PrettyTable(field_names=list(data.columns)
+    pt = PrettyTable(field_names=list(data.columns),
+                     align='l', border=True, hrules=1, vrules=1)
     for v in data.values:
         pt.add_row(v)
-
     html = pt.get_html_string(attributes={
-        'style': 'border-width:
-        'align': 'left',
-        'border': '1'
+        'style': 'border-width: 2px; bordercolor: black'
     }, format=True)
     return html
 
 
 def run(claim):
+    # js = {
+    #     'id': 0,
+    #     'evidence': ['EVIDENCE1', 'EVIDENCE2'],
+    #     'question': ['QUESTION1', 'QUESTION2'],
+    #     'claim_phrases': ['CLAIMPHRASE1', 'CLAIMPHRASE2'],
+    #     'local_premises': [['E1 ' * 100, 'E1 ' * 100, 'E1 ' * 10], ['E2', 'E2', 'E2']],
+    #     'phrase_veracity': [[0.1, 0.5, 0.4], [0.1, 0.7, 0.2]],
+    #     'claim_veracity': 'SUPPORT'
+    # }
     js = loren.check(claim)
     ev_html = gradio_formatter(js, 'e')
     z_html = gradio_formatter(js, 'z')
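For reference, a minimal self-contained sketch of the updated formatter, fed with the placeholder payload from the commented-out block in run() (the values are the diff's own dummies, not real model output; assumes pandas and prettytable are installed):

import pandas as pd
from prettytable import PrettyTable

# Placeholder payload, copied from the commented-out block in run().
js = {
    'claim_phrases': ['CLAIMPHRASE1', 'CLAIMPHRASE2'],
    'local_premises': [['E1', 'E1', 'E1'], ['E2', 'E2', 'E2']],
    'phrase_veracity': [[0.1, 0.5, 0.4], [0.1, 0.7, 0.2]],
}
data = pd.DataFrame({
    'Claim Phrase': [f'* {x}' for x in js['claim_phrases']],
    'Local Premise': [f'* {x}' for x in js['local_premises']],
    # Index order assumed from the diff: 2 = SUP, 0 = REF, 1 = NEI.
    'p_SUP': ['%.4f' % x[2] for x in js['phrase_veracity']],
    'p_REF': ['%.4f' % x[0] for x in js['phrase_veracity']],
    'p_NEI': ['%.4f' % x[1] for x in js['phrase_veracity']],
})
pt = PrettyTable(field_names=list(data.columns),
                 align='l', border=True, hrules=1, vrules=1)
for v in data.values:
    pt.add_row(v)
print(pt.get_html_string(attributes={'style': 'border-width: 2px'}, format=True))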
@@ -110,7 +105,7 @@ iface = gr.Interface(
         'The Cry of the Owl is based on a book by an American.',
         'Smriti Mandhana is an Indian woman.'],
     title="LOREN",
-    layout='
+    layout='vertical',
     description="LOREN is an interpretable Fact Verification model against Wikipedia. "
                 "This is a demo system for \"LOREN: Logic-Regularized Reasoning for Interpretable Fact Verification\". "
                 "See the paper for technical details. You can add FLAG on the bottom to record interesting or bad cases! \n"
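The layout change itself: layout='vertical' stacks the input and output panels vertically. A minimal sketch of where the argument sits, assuming the Gradio 2.x-era Interface API this Space appears to use (the lambda is a hypothetical stand-in; the real app wires run with HTML outputs):

import gradio as gr

iface = gr.Interface(fn=lambda claim: claim,   # stand-in for run()
                     inputs='text', outputs='text',
                     title='LOREN', layout='vertical')
iface.launch()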