|
{ |
|
"paper_id": "2020", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T11:01:52.727870Z" |
|
}, |
|
"title": "Predicting Clinical Trial Results by Implicit Evidence Integration", |
|
"authors": [ |
|
{ |
|
"first": "Qiao", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tsinghua University", |
|
"location": {} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Chuanqi", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Alibaba Group", |
|
"institution": "", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Mosha", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Alibaba Group", |
|
"institution": "", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Xiaozhong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Indiana University", |
|
"location": { |
|
"settlement": "Bloomington" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Songfang", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "Alibaba Group", |
|
"institution": "", |
|
"location": {} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Clinical trials provide essential guidance for practicing Evidence-Based Medicine, though often accompanying with unendurable costs and risks. To optimize the design of clinical trials, we introduce a novel Clinical Trial Result Prediction (CTRP) task. In the CTRP framework, a model takes a PICO-formatted clinical trial proposal with its background as input and predicts the result, i.e. how the Intervention group compares with the Comparison group in terms of the measured Outcome in the studied Population. While structured clinical evidence is prohibitively expensive for manual collection, we exploit large-scale unstructured sentences from medical literature that implicitly contain PICOs and results as evidence. Specifically, we pre-train a model to predict the disentangled results from such implicit evidence and fine-tune the model with limited data on the downstream datasets. Experiments on the benchmark Evidence Integration dataset show that the proposed model outperforms the baselines by large margins, e.g., with a 10.7% relative gain over BioBERT in macro-F1. Moreover, the performance improvement is also validated on another dataset composed of clinical trials related to COVID-19.", |
|
"pdf_parse": { |
|
"paper_id": "2020", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Clinical trials provide essential guidance for practicing Evidence-Based Medicine, though often accompanying with unendurable costs and risks. To optimize the design of clinical trials, we introduce a novel Clinical Trial Result Prediction (CTRP) task. In the CTRP framework, a model takes a PICO-formatted clinical trial proposal with its background as input and predicts the result, i.e. how the Intervention group compares with the Comparison group in terms of the measured Outcome in the studied Population. While structured clinical evidence is prohibitively expensive for manual collection, we exploit large-scale unstructured sentences from medical literature that implicitly contain PICOs and results as evidence. Specifically, we pre-train a model to predict the disentangled results from such implicit evidence and fine-tune the model with limited data on the downstream datasets. Experiments on the benchmark Evidence Integration dataset show that the proposed model outperforms the baselines by large margins, e.g., with a 10.7% relative gain over BioBERT in macro-F1. Moreover, the performance improvement is also validated on another dataset composed of clinical trials related to COVID-19.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Shall COVID-19 patients be treated with hydroxychloroquine? In the era of Evidence-Based Medicine (EBM, Sackett 1997) , medical practice should be guided by well-designed and wellconducted clinical research, such as randomized controlled trials. However, conducting clinical trials is expensive and time-consuming. Furthermore, inappropriately designed studies can be devastating in a pandemic: a high-profile Remdesivir clinical trial fails to achieve statistically significant conclusions (Wang et al., 2020b) , partially because it does not attain the predetermined sample size when \"competing with\" other inappropriately designed trials that are unlikely to succeed or not so urgent to test (e.g.: physical exercises and dietary treatments). Therefore, it is crucial to carefully design and evaluate clinical trials before conducting them.", |
|
"cite_spans": [ |
|
{ |
|
"start": 98, |
|
"end": 117, |
|
"text": "(EBM, Sackett 1997)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 491, |
|
"end": 511, |
|
"text": "(Wang et al., 2020b)", |
|
"ref_id": "BIBREF32" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Proposing new clinical trials requires support from previous evidence in medical literature or practice. For example, the World Health Organization (WHO) has launched a global megatrial, Solidarity (WHO, 2020), to prioritize clinical resources by recommending only four most promising therapies 1 . The rationale for this suggestion comes from the integration of evidence that they might be effective against coronaviruses or other related organisms in laboratory or clinical studies (Peymani et al., 2016; Sheahan et al., 2017; Morra et al., 2018) . However, manual integration of evidence is far from satisfying, as one study reports that about 86.2% of clinical trials fail (Wong et al., 2019) and even some of the Solidarity therapies do not get expected results (Mehra et al., 2020) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 484, |
|
"end": 506, |
|
"text": "(Peymani et al., 2016;", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 507, |
|
"end": 528, |
|
"text": "Sheahan et al., 2017;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 529, |
|
"end": 548, |
|
"text": "Morra et al., 2018)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 677, |
|
"end": 696, |
|
"text": "(Wong et al., 2019)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 767, |
|
"end": 787, |
|
"text": "(Mehra et al., 2020)", |
|
"ref_id": "BIBREF18" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To assist clinical trial designing, we introduce a novel task: Clinical Trial Result Prediction (CTRP), i.e. predicting the results of clinical trials without actually doing them ( \u00a73). Figure 1 shows the architecture of the CTRP task. We define the input to be a clinical trial proposal 2 , which contains free-texts of a Population (e.g.: \"COVID-19 patients with severe symptoms\"), an Intervention (e.g.: \"Active remdesivir (i.v.)\"), a Comparator (e.g.: \"Placebos matched remdesivir\") and an Outcome (e.g.:\"Time to clinical improvement\"), i.e. a PICO-formatted query (Huang et al., 2006) , and the background of the proposed trial. The output is the trial Result, denoting how (higher, lower, or no difference) I compares to C in terms of O for P. One particular challenge of this task is that evidence is entangled with other free-texts in the literature. Prior works have explored explicit methods for evidence integration through a pipeline of retrieval, extraction and inference on structured {P,I,C,O,R} evidence (Wallace et al., 2016; Singh et al., 2017; Jin and Szolovits, 2018; Lee and Sun, 2018; Nye et al., 2018; Lehman et al., 2019; DeYoung et al., 2020; Zhang et al., 2020) . However, they are limited in scale since getting domain-specific supervision for all clinical evidence is prohibitively expensive.", |
|
"cite_spans": [ |
|
{ |
|
"start": 569, |
|
"end": 589, |
|
"text": "(Huang et al., 2006)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 1020, |
|
"end": 1042, |
|
"text": "(Wallace et al., 2016;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 1043, |
|
"end": 1062, |
|
"text": "Singh et al., 2017;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 1063, |
|
"end": 1087, |
|
"text": "Jin and Szolovits, 2018;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 1088, |
|
"end": 1106, |
|
"text": "Lee and Sun, 2018;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 1107, |
|
"end": 1124, |
|
"text": "Nye et al., 2018;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 1125, |
|
"end": 1145, |
|
"text": "Lehman et al., 2019;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 1146, |
|
"end": 1167, |
|
"text": "DeYoung et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1168, |
|
"end": 1187, |
|
"text": "Zhang et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 186, |
|
"end": 194, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this work, we propose to implicitly learn from such evidence by pre-training, instead of relying on explicit evidence with purely supervised learning. There are more than 30 million articles in PubMed 3 , which stores almost all available medical evidence and thus is an ideal source for learning. We collect 12 million sentences from PubMed abstracts and PubMed Central 4 (PMC) articles with comparative semantics, which is commonly used to express clinical evidence ( \u00a74.1). P, I, C, O, and R are entangled with other free-texts in such sentences, which we denote as implicit evidence. Unlike previous efforts that seek to disentangle all of PICO and R, we only disentangle R out of the implicit evidence using simple heuristics ( \u00a74.2). For better learning the ordering function of I/C conditioned on P and O, we also use adversarial examples generated by reversing both the entangled PICO and the R in the pre-training ( \u00a74.3). Then, we pre-train a transformer encoder (Vaswani et al., 2017) to predict the disentangled R from the implicit evidence, which still contains PICO ( \u00a75.1). The model is named EBM-Net to reflect its utility for Evidence-Based 3 https://pubmed.ncbi.nlm.nih.gov/ 4 https://www.ncbi.nlm.nih.gov/pmc/ Medicine. Finally, we fine-tune the pre-trained EBM-Net on downstream datasets of the CTRP task ( \u00a75.2), which are typically small in scale ( \u00a76).", |
|
"cite_spans": [ |
|
{ |
|
"start": 976, |
|
"end": 998, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To evaluate model performance, we introduce a benchmark dataset, Evidence Integration ( \u00a76.1), by re-purposing the evidence inference dataset (Lehman et al., 2019; DeYoung et al., 2020) . Experiments show that our pre-trained EBM-Net outperforms the baselines ( \u00a76.2) by large margins ( \u00a76.3). Clustering analyses indicate that EBM-Net can effectively learn quantitative comparison results ( \u00a76.4). In addition, the EBM-Net model is further validated on a dataset composed of COVID-19 related clinical trials ( \u00a76.5).", |
|
"cite_spans": [ |
|
{ |
|
"start": 142, |
|
"end": 163, |
|
"text": "(Lehman et al., 2019;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 164, |
|
"end": 185, |
|
"text": "DeYoung et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our contribution is two-fold. First, we propose a novel and meaningful task, CTRP, to predict clinical trial results before conducting them. Second, unlike previous efforts that depend on structured data to understand the totality of clinical evidence, we heuristically collect unstructured textual data, i.e. implicit evidence, and utilize large-scale pretraining to tackle the proposed CTRP task. The datasets and codes are publicly available at https: //github.com/Alibaba-NLP/EBM-Net.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Predicting Clinical Trial Results: Most relevant works typically use only specific types or sources of information for prediction (e.g.: chemical structures (Gayvert et al., 2016) , drug dosages or routes (Holford et al., 2000 (Holford et al., , 2010 ). Gayvert et al. (2016) predicts clinical trial results based on chemical properties of the candidate drugs. Clinical trial simulation (Holford et al., 2000 (Holford et al., , 2010 applies pharmacological models to predict the results of a specific intervention with different procedural factors, such as doses and sampling intervals. Some use closely related report information, e.g.: interim analyses (Broglio et al., 2014) or phase II data for just phase II trials (De Ridder, 2005) . Our task is (1) more generalizable, since all potential PICO elements can be represented by free-texts and thus modeled in our work; and (2) aimed at evaluating new clinical trial proposals.", |
|
"cite_spans": [ |
|
{ |
|
"start": 157, |
|
"end": 179, |
|
"text": "(Gayvert et al., 2016)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 205, |
|
"end": 226, |
|
"text": "(Holford et al., 2000", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 227, |
|
"end": 250, |
|
"text": "(Holford et al., , 2010", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 387, |
|
"end": 408, |
|
"text": "(Holford et al., 2000", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 409, |
|
"end": 432, |
|
"text": "(Holford et al., , 2010", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 655, |
|
"end": 677, |
|
"text": "(Broglio et al., 2014)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 720, |
|
"end": 737, |
|
"text": "(De Ridder, 2005)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Explicit Evidence Integration: It depends on the existence of structured evidence, i.e.: {P, I, C, O, R} (Wallace, 2019) . Consequently, collecting such explicit evidence is vital for further analyses, and is also the objective for most relevant works: Some seek to find relevant papers through retrieval (Lee and Sun, 2018) ; many works are aimed at extracting PICO elements from published literature (Wallace et al., 2016; Singh et al., 2017; Jin and Szolovits, 2018; Nye et al., 2018; Zhang et al., 2020) ; the evidence inference task extracts R for a given ICO query using the corresponding clinical trial report (Lehman et al., 2019; DeYoung et al., 2020) . However, since getting expert annotations is expensive, these works are typically limited in scale, with only thousands of labeled instances. Few works have been done to utilize the automatically collected structured data for analyses. In this paper, we adopt an end-to-end approach, where we use large-scale pre-training to implicitly learn from free-text clinical evidence.", |
|
"cite_spans": [ |
|
{ |
|
"start": 105, |
|
"end": 120, |
|
"text": "(Wallace, 2019)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 305, |
|
"end": 324, |
|
"text": "(Lee and Sun, 2018)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 402, |
|
"end": 424, |
|
"text": "(Wallace et al., 2016;", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 425, |
|
"end": 444, |
|
"text": "Singh et al., 2017;", |
|
"ref_id": "BIBREF26" |
|
}, |
|
{ |
|
"start": 445, |
|
"end": 469, |
|
"text": "Jin and Szolovits, 2018;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 470, |
|
"end": 487, |
|
"text": "Nye et al., 2018;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 488, |
|
"end": 507, |
|
"text": "Zhang et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 617, |
|
"end": 638, |
|
"text": "(Lehman et al., 2019;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 639, |
|
"end": 660, |
|
"text": "DeYoung et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Works", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "The CTRP task is motivated to evaluate clinical trial proposals by predicting their results before actually conducting them, as discussed in \u00a71. Therefore, we formulate the task to take as input exactly the information required for proposing a new clinical trial: free-texts of a background description and a PICO query to be investigated. Formally, we denote the strings of the input background as B and PICO elements as P, I, C, and O, respectively. The task output is defined as one of the three possible comparison results: higher (\u2191), no difference (\u2192), or lower (\u2193) measurement O in intervention group I than in comparison group C for population P. We denote the result as R, and:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The CTRP Task", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "R(B, P, I, C, O) = \uf8f1 \uf8f4 \uf8f2 \uf8f4 \uf8f3 \u2191 O(I) > O(C) | P \u2193 O(I) < O(C) | P \u2192 O(I) \u223c O(C) | P", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The CTRP Task", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Main metrics include accuracy and 3-way macroaveraged F1. We also use 2-way (\u2191, \u2193) macroaveraged F1 to evaluate human expectations ( \u00a76.2).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The CTRP Task", |
|
"sec_num": "3" |
|
}, |
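
{

"text": "As a minimal illustration of these metrics, the following sketch uses sklearn.metrics (the package the paper reports using for all evaluation metrics) and assumes a hypothetical label encoding 0 = \u2193, 1 = \u2192, 2 = \u2191:\n\nfrom sklearn.metrics import accuracy_score, f1_score\n\ndef ctrp_metrics(y_true, y_pred):\n    # Hypothetical encoding: 0 = lower, 1 = no difference, 2 = higher\n    acc = accuracy_score(y_true, y_pred)\n    f1_3way = f1_score(y_true, y_pred, average=\"macro\")\n    # 2-way macro-F1 averages F1 over the two significant classes only\n    f1_2way = f1_score(y_true, y_pred, average=None, labels=[0, 2]).mean()\n    return acc, f1_3way, f1_2way",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "The CTRP Task",

"sec_num": "3"

},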
|
{ |
|
"text": "In this section, we introduce the Implicit Evidence Integration, which is used to collect pre-training data for comparative language modeling ( \u00a75.1).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implicit Evidence Integration", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Instead of collecting explicit evidence with structured {B, P, I, C, O \u2212 R} information, we utilize a simple observation to collect evidence implicitly: clinical evidence is naturally expressed by comparisons, e.g.: \"Blood oxygen is higher in the intervention group than in the placebo group\". Free-texts of P, I, C, O and R are entangled with other functional words that connect these elements in such comparative sentences, where R is a free-text version of the structured result R (e.g.: R = \"higher ... than\" translates into R = \u2191). We call these sentences entangled implicit evidence and denote them as E ent = {PICOR}. Then, we disentangle R out of the E ent by heuristics, getting R and the left E dis = {PICO}. We also include adversarial instances generated from the original ones. Several examples are shown in Table 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 821, |
|
"end": 828, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Implicit Evidence Integration", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Details of implicit evidence collection, disentanglement, and adversarial data generation are introduced in \u00a74.1, \u00a74.2 and \u00a74.3, respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Implicit Evidence Integration", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "We collect implicit evidence from PubMed abstracts and PMC articles 5 , where most of the clinical evidence is published. PubMed contains more than 30 million abstracts, and PMC has over 6 million full-length articles. Each abstract is chunked into a background/method section and a result/conclusion section: For the unstructured abstracts, sentences before the first found implicit evidence are included in the background/method section. For the semi-structured abstracts where each section is labeled with a section name, the chunking is done by mapping the section name to either background/method or result/conclusion. Sentences in abstract result/conclusion sections and main texts that express comparative semantics (Kennedy, 2004) are collected as implicit evidence. They are identified by a pattern detection heuristic, similar to the keyword method described in Jindal and Liu (2006) : For expressions of superiority (\u2191) and inferiority (\u2193), we detect morpheme patterns of [more/less/-er .. \"Our results also showed that serum TSH levels were slightly higher in the chloroquine group than in the placebo group.\"", |
|
"cite_spans": [ |
|
{ |
|
"start": 723, |
|
"end": 738, |
|
"text": "(Kennedy, 2004)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 872, |
|
"end": 893, |
|
"text": "Jindal and Liu (2006)", |
|
"ref_id": "BIBREF10" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Collection of Implicit Evidence", |
|
"sec_num": "4.1" |
|
}, |
|
{

"text": "We also collect the corresponding B for the collected implicit evidence. These sentences are denoted as E_ent, which contain entangled PICO-R. [Table 1 example: the evidence sentence \"Our results also showed that serum TSH levels were slightly higher in the chloroquine group than in the placebo group.\" is disentangled into \"Our results also showed that serum TSH levels were [MASK] in the chloroquine group [MASK] in the placebo group.\" with R = \"slightly higher ... than\" and label [HIGHER], i.e. \u2191.]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Collection of Implicit Evidence",

"sec_num": "4.1"

},
|
{ |
|
"text": "We have collected 11.8 million such sentences. Among them, 2.4 million (20.2%), 3.5 million (29.9%) and 5.9 million (49.9%) express inferiority, equality and superiority respectively.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Collection of Implicit Evidence", |
|
"sec_num": "4.1" |
|
}, |
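
{

"text": "A minimal sketch of the comparative-pattern detection heuristic described above; the keyword sets and equality patterns here are small illustrative samples, not the paper's full adjective/adverb lists:\n\nimport re\n\n# Illustrative keyword sets; the paper's full lists are larger\nHIGHER = {\"higher\", \"greater\", \"more\", \"larger\", \"longer\", \"better\"}\nLOWER = {\"lower\", \"less\", \"fewer\", \"smaller\", \"shorter\", \"worse\"}\nEQUAL = [r\"\\bno (significant|statistical) difference\\b\", r\"\\bas \\w+ as\\b\"]\n\ndef detect_implicit_evidence(sentence):\n    s = sentence.lower()\n    for pat in EQUAL:  # equality (\u2192)\n        if re.search(pat, s):\n            return \"\u2192\"\n    # [more/less/-er ... than] morpheme pattern\n    m = re.search(r\"\\b(more|less|fewer|\\w+er)\\b.*?\\bthan\\b\", s)\n    if m:\n        if m.group(1) in HIGHER:\n            return \"\u2191\"  # superiority\n        if m.group(1) in LOWER:\n            return \"\u2193\"  # inferiority\n    return None  # not a piece of implicit evidence",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Collection of Implicit Evidence",

"sec_num": "4.1"

},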
|
{ |
|
"text": "To disentangle the free-text result R from implicit evidence E ent , we mask out the detected morphemes that express comparative semantics (e.g.: \"higher than\") as well as other functional tokens that might be exploited by the model to predict the result (e.g.: p values). This generates the masked out result R and the left part E dis ({PICO}) from E ent ({PICOR}), i.e.: R + E dis = E ent . R is mostly a phrase with a central comparative adjective/adverb (e.g.: \"significantly smaller than\") and can be directly mapped to R (\u2193 for the same example).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Disentanglement of Implicit Evidence", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Nevertheless, R contains richer information than the sole change direction because of the central adjective/adverb. To utilize such information, we map free-texts of R to a finer-grained result label r \u2208 C instead of the 3-way direction, where ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Disentanglement of Implicit Evidence", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "C = {[POORER], [LONGER], [SLOWER], ...}", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Disentanglement of Implicit Evidence", |
|
"sec_num": "4.2" |
|
}, |
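
{

"text": "A minimal sketch of this mapping; the label vocabulary entries and the hedge-word list are illustrative assumptions rather than the paper's exact sets:\n\n# Map a masked-out free-text result to a finer-grained label r and to R\nTHREE_WAY = {\"[HIGHER]\": \"\u2191\", \"[LONGER]\": \"\u2191\", \"[LOWER]\": \"\u2193\",\n             \"[SMALLER]\": \"\u2193\", \"[POORER]\": \"\u2193\", \"[SLOWER]\": \"\u2193\"}\nHEDGES = {\"significantly\", \"slightly\", \"markedly\", \"than\"}  # illustrative\n\ndef finer_grained_label(masked_result):\n    # Keep the central comparative adj/adv, dropping hedges and \"than\"\n    words = [w for w in masked_result.lower().split() if w not in HEDGES]\n    return \"[\" + words[-1].upper() + \"]\"\n\nr = finer_grained_label(\"significantly smaller than\")  # \"[SMALLER]\"\nR = THREE_WAY[r]                                        # \"\u2193\"",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Disentanglement of Implicit Evidence",

"sec_num": "4.2"

},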
|
{ |
|
"text": "We generate adversarial examples from the original ones using a simple rule of ordering: if the result r holds for the comparison I/C conditioned on P and O, the reversed result Rev(r) must hold for the reversed comparison C/I on the same condition. This is similar to generate adversarial examples for natural language inference task by logic rules (Minervini and Riedel, 2018; Wang et al., 2019) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 350, |
|
"end": 378, |
|
"text": "(Minervini and Riedel, 2018;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 379, |
|
"end": 397, |
|
"text": "Wang et al., 2019)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adversarial Data Generation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "However, Since E dis = {PICO} is only partially disentangled and P, I, C, O are still in their freetext forms, we cannot explicitly reverse I/C and generate such examples. As an alternative, we reverse the entire sentence order while keeping the word order between any two masked phrases in E dis , getting E rev . For example, if: ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adversarial Data Generation", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "E dis = \"[", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adversarial Data Generation", |
|
"sec_num": "4.3" |
|
}, |
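
{

"text": "A minimal sketch of this reversal, assuming E_dis keeps \"[MASK]\" placeholders at the masked result positions and Rev is a lookup over opposing label pairs (the pairs shown are illustrative):\n\nREV = {\"[HIGHER]\": \"[LOWER]\", \"[LOWER]\": \"[HIGHER]\",\n       \"[LONGER]\": \"[SHORTER]\", \"[SHORTER]\": \"[LONGER]\"}\n\ndef reverse_evidence(e_dis):\n    # Reverse the order of the segments around the masks while keeping\n    # the word order inside each segment unchanged\n    segments = e_dis.split(\"[MASK]\")\n    return \"[MASK]\".join(reversed(segments))\n\ne_rev = reverse_evidence(\"TSH levels were [MASK] in group A [MASK] in group B\")\n# -> \" in group B[MASK] in group A [MASK]TSH levels were \"\nrev_r = REV[\"[HIGHER]\"]  # \"[LOWER]\"",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Adversarial Data Generation",

"sec_num": "4.3"

},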
|
{ |
|
"text": "We introduce the EBM-Net model in this section. Similar to BERT (Devlin et al., 2019) , EBM-Net is essentially a transformer encoder (Vaswani et al., 2017) , and follows the pre-training -fine-tuning approach: We pre-train EBM-Net by Comparative Language Modeling (CLM, \u00a75.1) that is designed to learn the conditional ordering function of I/C. The pre-trained EBM-Net is fine-tuned to solve the CTRP task on downstream datasets ( \u00a75.2).", |
|
"cite_spans": [ |
|
{ |
|
"start": 64, |
|
"end": 85, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 133, |
|
"end": 155, |
|
"text": "(Vaswani et al., 2017)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "EBM-Net", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "We show the CLM architecture in Figure 2 . CLM is adapted from the masked language modeling used ...", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 32, |
|
"end": 40, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF3" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Comparative Language Modeling", |
|
"sec_num": "5.1" |
|
}, |
|
{

"text": "[Figure 2: CLM pre-training of EBM-Net. From all available clinical evidence, an original instance feeds the background B and the disentangled evidence E_dis into EBM-Net to predict the CLM label r (e.g. [HIGHER]); the adversarial instance feeds B and the reversed evidence E_rev to predict Rev(r). Example input: \"Vehicle-treated animals [MASK] levels of viral antigen staining in lung sections of GS-5734-treated animals.\"]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Comparative Language Modeling",

"sec_num": null

},
|
{ |
|
"text": "adv. inst. [CLS] hidden state of the EBM-Net is used to predict the CLM label r with a linear layer followed by a softmax output unit:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reversed", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "r = SoftMax(W 1 h [CLS] + b 1 ) \u2208 [0, 1] |C|", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reversed", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We minimize the cross-entropy between the estimatedr and the empirical r distribution.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reversed", |
|
"sec_num": null |
|
}, |
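
{

"text": "A minimal PyTorch sketch of the CLM prediction head and loss described above (the class name and default sizes are ours; num_labels stands for |C|):\n\nimport torch.nn as nn\n\nclass CLMHead(nn.Module):\n    # Linear layer (W_1, b_1) over the [CLS] hidden state; the softmax is\n    # folded into the cross-entropy loss below\n    def __init__(self, hidden_size=768, num_labels=100):  # num_labels = |C|\n        super().__init__()\n        self.linear = nn.Linear(hidden_size, num_labels)\n\n    def forward(self, h_cls):\n        return self.linear(h_cls)  # logits for the CLM label r\n\nclm_loss = nn.CrossEntropyLoss()  # estimated vs. empirical r distributions",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Comparative Language Modeling",

"sec_num": "5.1"

},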
|
{ |
|
"text": "At input-level, the adversarial examples only differ from their original examples in word orders between E dis and E rev . However, their labels are totally reversed from r to Rev(r). By regularizing the model to learn such conditional ordering function, CLM prevents the pre-trained model from learning unwanted and possibly biased co-occurrences between evident elements and their results. , C] on the Evidence Integration dataset ( \u00a76.1). The sequence of PICO elements in E exp can be tuned empirically. EBM-Net learns from scratch another linear layer that maps from the predicted CLM label probabilitiesr to 3-way result label R logits. The final predictions are made by a softmax output unit:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Reversed", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "R = SoftMax(W 2r + b 2 ) \u2208 [0, 1] 3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CTRP Fine-tuning", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "Cross-entropy between the estimatedR and the empirical R distribution is minimized in fine-tuning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "CTRP Fine-tuning", |
|
"sec_num": "5.2" |
|
}, |
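
{

"text": "A minimal sketch of the fine-tuning head: a freshly initialized linear layer (W_2, b_2) maps the predicted CLM label probabilities to 3-way R logits (the class name is ours):\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass CTRPHead(nn.Module):\n    def __init__(self, num_clm_labels):\n        super().__init__()\n        # Learned from scratch in fine-tuning: |C| probabilities -> 3 logits\n        self.linear = nn.Linear(num_clm_labels, 3)\n\n    def forward(self, clm_logits):\n        r_hat = F.softmax(clm_logits, dim=-1)  # \\hat{r} in [0, 1]^{|C|}\n        return self.linear(r_hat)  # R logits; the CE loss applies the softmax",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "CTRP Fine-tuning",

"sec_num": "5.2"

},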
|
{ |
|
"text": "The transformer weights of EBM-Net (L=12, H=768, A=12, #Params=110M) are initialized with BioBERT (Lee et al., 2020) , a variant of BERT that is also pre-trained on PubMed abstracts and PMC articles. The maximum sequence lengths for B, E dis , E rev , E exp are 256, 128, 128, and 128, respectively. We use Adam optimizer (Kingma and Ba, 2014) to minimize the cross-entropy losses. EBM-Net is implemented using Huggingface's Transformers library (Wolf et al., 2019) in PyTorch (Paszke et al., 2019) . Pre-training on 12M implicit evidence takes about 1k Tesla P100 GPU hours.", |
|
"cite_spans": [ |
|
{ |
|
"start": 98, |
|
"end": 116, |
|
"text": "(Lee et al., 2020)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 446, |
|
"end": 465, |
|
"text": "(Wolf et al., 2019)", |
|
"ref_id": "BIBREF34" |
|
}, |
|
{ |
|
"start": 477, |
|
"end": 498, |
|
"text": "(Paszke et al., 2019)", |
|
"ref_id": "BIBREF22" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Configuration", |
|
"sec_num": "5.3" |
|
}, |
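
{

"text": "A minimal sketch of this initialization using Huggingface's Transformers; the BioBERT checkpoint id and the learning rate are our assumptions, as the paper only states that the weights are initialized from BioBERT and optimized with Adam:\n\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\n\n# Assumed BioBERT checkpoint; matches L=12, H=768, A=12, ~110M parameters\nNAME = \"dmis-lab/biobert-base-cased-v1.1\"\ntokenizer = AutoTokenizer.from_pretrained(NAME)\nencoder = AutoModel.from_pretrained(NAME)\n\noptimizer = torch.optim.Adam(encoder.parameters(), lr=2e-5)  # lr illustrative",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Configuration",

"sec_num": "5.3"

},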
|
{ |
|
"text": "The Evidence Integration dataset serves as a benchmark for our task. We collect this dataset by repurposing the evidence inference dataset (Lehman et al., 2019; DeYoung et al., 2020) , which is essentially a machine reading comprehension task for extracting the structured result (i.e.: R) of a given structured ICO query 6 from the corresponding clinical trial report article. Since clinical trial reports already contain free-text result descriptions (i.e.: R) of the given ICO, solving the original task does not require the integration of previous clinical evidence. To test such capability for our proposed CTRP task, we remove the result/conclusion part and only keep the background/method part in the input clinical trial report. 34.6% tokens of the original abstracts are removed on average and the remained are used as the clinical trial backgrounds.", |
|
"cite_spans": [ |
|
{ |
|
"start": 139, |
|
"end": 160, |
|
"text": "(Lehman et al., 2019;", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 161, |
|
"end": 182, |
|
"text": "DeYoung et al., 2020)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Evidence Integration Dataset", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "Specifically, input of the Evidence Integration dataset includes free texts of ICO elements I, C and O which are the same as the original evidence inference dataset, and their clinical trial backgrounds B. The output is the comparison result R. Following the original dataset split, there are 8,164 instances for training, 1,002 for validation, and 965 for test.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Evidence Integration Dataset", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "We also do experiments under the adversarial setting, where adversarial examples generated by reversing both the I/C order and the R label (similar to \u00a75.1) are added. This setting is used to test model robustness under adversarial attack.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "The Evidence Integration Dataset", |
|
"sec_num": "6.1" |
|
}, |
|
{ |
|
"text": "We compare to a variety of methods, ranging from trivial ones like Random and Majority to the stateof-the-art BioBERT model. Two major approaches in open-domain question answering (QA) are tested as well: the knowledge base (KB) approach (MeSH ontology) and the text/retrieval approach (Retrieval + Evidence Inference), since solving our task also requires reasoning over a large external corpus. Finally, we introduce some ablation settings and the evaluation of human expectations.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Methods", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Random: we report the expected performance of randomly predicting the result for each instance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Methods", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Majority: we report the performance of predicting the majority class (\u2192) for all test instances.n Bag-of-Words + Logistic Regression: we concatenate the TF-IDF weighted bag-of-word vectors of B, P, I, C and O as features and use logistic regression for learning.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Methods", |
|
"sec_num": "6.2" |
|
}, |
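
{

"text": "A minimal sklearn sketch of the Bag-of-Words + Logistic Regression baseline; fitting one TF-IDF vectorizer per input element and concatenating the vectors is our reading of the description:\n\nfrom scipy.sparse import hstack\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.linear_model import LogisticRegression\n\nELEMENTS = [\"B\", \"P\", \"I\", \"C\", \"O\"]\nvectorizers = {e: TfidfVectorizer() for e in ELEMENTS}\n\ndef featurize(instances, fit=False):\n    # instances: list of dicts with free-text fields \"B\", \"P\", \"I\", \"C\", \"O\"\n    parts = []\n    for e in ELEMENTS:\n        texts = [x[e] for x in instances]\n        v = vectorizers[e]\n        parts.append(v.fit_transform(texts) if fit else v.transform(texts))\n    return hstack(parts)\n\nclf = LogisticRegression(max_iter=1000)\n# clf.fit(featurize(train_instances, fit=True), train_labels)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Compared Methods",

"sec_num": "6.2"

},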
|
{ |
|
"text": "MeSH Ontology: Since no external KB is available for our task, we use the training set as an internal alternative: we map the I, C and O of the test instances to terms in the Medical Subject Headings (MeSH) 7 ontology by string matching. MeSH is a controlled and hierarchically-organized vocabulary for describing biomedical topics. Then, we find their nearest labeled instances in the training set, where the distance is defined by:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Methods", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "d(i, j) = e\u2208{I,C,O} min TreeDist(m e i , m e j )", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Methods", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "m e i and m e j are MeSH terms identified in ICO element e of instance i and j, respectively. TreeDist is defined as the number of edges between two nodes on the MeSH tree. The majority label of the nearest training instances is used as the prediction.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Methods", |
|
"sec_num": "6.2" |
|
}, |
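
{

"text": "A minimal sketch of this nearest-neighbor lookup, assuming MeSH tree numbers such as \"C01.925.782\" where each dot-separated segment is one tree level; mesh_terms is a hypothetical helper returning the tree numbers matched in element e of an instance:\n\ndef tree_dist(m1, m2):\n    # Number of edges between two nodes on the MeSH tree\n    a, b = m1.split(\".\"), m2.split(\".\")\n    common = 0\n    for x, y in zip(a, b):\n        if x != y:\n            break\n        common += 1\n    return (len(a) - common) + (len(b) - common)\n\ndef distance(i, j, mesh_terms):\n    total = 0\n    for e in (\"I\", \"C\", \"O\"):\n        pairs = [(a, b) for a in mesh_terms(i, e) for b in mesh_terms(j, e)]\n        total += min((tree_dist(a, b) for a, b in pairs), default=0)\n    return total",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Compared Methods",

"sec_num": "6.2"

},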
|
{ |
|
"text": "Retrieval + Evidence Inference: State-of-theart method on the evidence inference dataset (DeYoung et al., 2020) is a pipeline based on SciBERT (Beltagy et al., 2019) : (1) find the exact evidence sentences in the clinical trial report for the given ICO query, using a scoring function derived from a fine-tuned SciBERT; and (2) predict the result R based on the found evidence sentences and the given ICO query by fine-tuning another SciBERT.", |
|
"cite_spans": [ |
|
{ |
|
"start": 143, |
|
"end": 165, |
|
"text": "(Beltagy et al., 2019)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Methods", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Our task needs an additional retrieval step to find relevant documents that might contain useful results of similar trials, as the input trial background does not contain the result information for the given ICO query. Documents are retrieved from the entire PubMed and PMC using a TF-IDF matching between their indexed MeSH terms and the MeSH terms identified in the ICO queries. We then apply the pipeline described above on the retrieved documents. This baseline is similar to but more domainspecific than BERTserini .", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Methods", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "BioBERT: For this setting, we feed BioBERT with similar input to EBM-Net as is described in \u00a75 and fine-tune it to predict the R using its special [CLS] hidden state.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Methods", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Ablations: We conduct two sets of ablation experiments with EBM-Net: (1) Pre-training level, where we exclude the adversarial examples in pretraining, to analyze the utility of CLM against traditional LM. (2) Input level, where we exclude different input elements (B, I, C, O) to study their relative importance.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Compared Methods", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "Human Expectations: We define the expected result (R e ) of a clinical trial (e.g.: R e = \u2193 for O = \"mortality rate\") as the Human Expectation (HE), which is the underlying motivation for conducting the corresponding trial. Generally, R e \u2208 {\u2191, \u2193} since significant results are expected. To make fair comparisons, we use the 2-way macro-average F1: F1 (2-way) = (F1(\u2191) + F1(\u2193))/2 as a main metric for evaluations of HE. HE performance is an overestimation of human performance: main biases are due to the shift of input trial distribution from the targeted proposal stage to the actual report stage, which contains fewer trials with unexpected results. Table 2 shows the main results on the Evidence Integration dataset, where accuracy and F1 (3-way) are used to compare model performance and F1 (2-way) is used for evaluating human expectations. Results show that EBM-Net outperforms other baselines by large margins in both standard and adversarial settings. While being the strongest baseline, BioBERT is 10.7% relatively lower in macro-F1 (54.33% v.s. 60.15%) and 9.6% relatively lower in accuracy (55.96% v.s. 61.35%) than EBM-Net. The open-domain QA baselines perform even worse: for the MeSH Ontology method, the internal KB of only 8k entries is far from complete; for the Retrieval + Evidence Inference method, the PICO queries are so specific that no exactly relevant evidence can be found in other trials and retrieving only a few trials has limited utilities.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 653, |
|
"end": 660, |
|
"text": "Table 2", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Compared Methods", |
|
"sec_num": "6.2" |
|
}, |
|
{ |
|
"text": "We use |\u2206|, the absolute value of relative accuracy decrease to measure model robustness under adversarial attacks. The higher the |\u2206|, the more vulnerable a model is. BioBERT has about twice as much (5.1% v.s. 2.7%) |\u2206| in the adversarial setting as EBM-Net does. It suggests that EBM-Net is more robust to adversarial attacks, which is a vital property for healthcare applications. EBM-Net without adversarial pre-training is less robust than EBM-Net as well (3.0% v.s. 2.7%), but not as vulnerable as BioBERT, indicating that robustness can be learned by pre-training with original implicit evidence to some extent and further consolidated by the adversarial evidence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Main Results", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "Unsurprisingly, EBM-Net with full input consistently outperforms all input-level ablations. Among them, O is the most important input element as the performance decreases dramatically on its ablation. This is expected as O is the standard of comparisons. B is the second most important element, since B contains methodological details of how the clinical trials will be conducted, which is also vital for result prediction. The performance does not decrease as much without I or C, since there is redundant information of them in B.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Main Results", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "On the one hand, the accuracy of EBM-Net surpasses that of HE, mainly because the latter is practically a 2-way classifier. On the other hand, HE outperforms EBM-Net in terms of 2-way F1, but is still unsatisfying (68.86%). This suggests that the proposed CTRP task is hard and there is still room for further improvements.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Main Results", |
|
"sec_num": "6.3" |
|
}, |
|
{ |
|
"text": "We study how different numbers of pre-training and fine-tuning instances influence the EBM-Net performance, in comparison to the BioBERT. shows the results: (Left) The final performance of EBM-Net improves log-linearly as the pre-training dataset size increases, suggesting that there can be further improvements if more data is collected for pre-training but the marginal utility might be small. EBM-Net surpasses BioBERT when pre-trained by about 50k to 100k instances of implicit evidence, which are 5 to 10 times as many as the fine-tuning instances. (Right) EBM-Net is more robust in a few-shot learning setting: using only 10% of the training data, EBM-Net outperforms BioBERT fine-tuned with 100% of the training data. From zero-shot 8 to using all the training data, EBM-Net improves only by 26.6% relative F1 (from 47.52% to 60.15%) while BioBERT improves largely by 60.0% relative F1 (from 32.77% to 54.33%).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussions", |
|
"sec_num": "6.4" |
|
}, |
|
{ |
|
"text": "We use t-SNE (Maaten and Hinton, 2008) to visualize the test instance representations derived from EBM-Net [CLS] hidden state in Figure 4 . It shows that EBM-Net effectively learns the relationships between comparative results: the points cluster into three results (\u2191, \u2193, \u2192). While there is a clear boundary between the \u2193 cluster (dashed-blue circle) and the \u2191 cluster (dashed-red circle), the boundaries between the \u2192 cluster (dashed-black circle) and the other two are relatively vague. It suggests that the learnt manifold follows a quantitatively continuous \"\u2193 -\u2192 -\u2191\" pattern.", |
|
"cite_spans": [ |
|
{ |
|
"start": 13, |
|
"end": 38, |
|
"text": "(Maaten and Hinton, 2008)", |
|
"ref_id": "BIBREF16" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 129, |
|
"end": 137, |
|
"text": "Figure 4", |
|
"ref_id": "FIGREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Discussions", |
|
"sec_num": "6.4" |
|
}, |
|
{ |
|
"text": "Out of the 373 mistakes EBM-Net makes on the test set, significantly less (11.8%, p<0.001 by permutation test) predictions are opposite to the ground-truth (e.g.: predicting \u2191 when the label is \u2193), also suggesting that EBM-Net effectively learn the relationship between comparison results. In addition, we notice that there is a considerable proportion of instances whose results are not predictable without their exact reports. For example, some I and C differ only quantitatively, e.g.: \"4% lidocaine\" and \"2% lidocaine\", and modeling such differences is beyond the scope of our task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Discussions", |
|
"sec_num": "6.4" |
|
}, |
|
{ |
|
"text": "For analyzing COVID-19 related clinical trials, we further pre-train EBM-Net on the CORD-19 dataset (Wang et al., 2020a) 9 , also using the comparative language modeling ( \u00a75.1). It leads to a COVID-19 specific EBM-Net that is used in this section.", |
|
"cite_spans": [ |
|
{ |
|
"start": 100, |
|
"end": 122, |
|
"text": "(Wang et al., 2020a) 9", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Validation on COVID-19 Clinical Trials", |
|
"sec_num": "6.5" |
|
}, |
|
{ |
|
"text": "We use leave-one-out validation to evaluate EBM-Net on the 22 completed clinical trials in COVID-evidence 10 , which is an expert-curated database of available evidence on interventions for COVID-19. Again, EBM-Net outperforms BioBERT by a large margin (59.1% v.s. 50.0% accuracy). Expectedly, their 3-way F1 results (45.5% v.s. 36.1%) are close to those in the zero-shot learning setting since not many trials have finished. Accuracy and 2-way F1 performance of HE are 54.5% and 68.9%, and are close to those in Table 2 . These further confirm the performance improvement of EBM-Net and the difficulty of the CTRP task.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 513, |
|
"end": 520, |
|
"text": "Table 2", |
|
"ref_id": "TABREF5" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Validation on COVID-19 Clinical Trials", |
|
"sec_num": "6.5" |
|
}, |
|
{ |
|
"text": "In this paper, we introduce a novel task, CTRP, to predict clinical trial results without actually doing them. Instead of using structured evidence that is prohibitively expensive to annotate, we heuristically collect 12M unstructured sentences as implicit evidence, and use large-scale CLM pretraining to learn the conditional ordering function required for solving the CTRP task. Our EBM-Net model outperforms other strong baselines on the Evidence Integration dataset and is also validated on COVID-19 clinical trials.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusions", |
|
"sec_num": "7" |
|
}, |
|
{

"text": "Tengteng Zhang, Yiqin Yu, Jing Mei, Zefang Tang, Xiang Zhang, and Shaochun Li. 2020. Unlocking the power of deep PICO extraction: Step-wise medical NER identification. arXiv preprint arXiv:2005.06601.",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "References",

"sec_num": null

}
|
{ |
|
"text": "We show several mapping examples from section names to background/method or result/conclusion in Table 4 . The implicit evidence collection algorithm is shown in Algorithm 1, which uses the evidence detection algorithm described in Algorithm 2. Dataset statistics are shown in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 97, |
|
"end": 104, |
|
"text": "Table 4", |
|
"ref_id": "TABREF8" |
|
}, |
|
{ |
|
"start": 277, |
|
"end": 284, |
|
"text": "Table 3", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "A Collection of Implicit Evidence", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Disentanglement of implicit evidence seeks to mask out the R in the implicit evidence and map it to r, which is a finer-grained label of comparison results. We show the distribution of the collected r in Figure 5 . When disentangling the free-text result R, we also mask out the following words:", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 204, |
|
"end": 212, |
|
"text": "Figure 5", |
|
"ref_id": "FIGREF8" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "B Disentanglement of Implicit Evidence", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Numbers (e.g.: p-values);", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Disentanglement of Implicit Evidence", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 tokens within parentheses (e.g.: interpretations of results);", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Disentanglement of Implicit Evidence", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\u2022 Adverb before the central comparative adv/adj (e.g.: \"significantly\").", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Disentanglement of Implicit Evidence", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "which can be exploited by the model to predict the r during pre-training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "B Disentanglement of Implicit Evidence", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In Table 5 , we show several originally collected instances E dis , r, together with their adversarially reversed instances E rev , Rev(r).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 5", |
|
"ref_id": "TABREF10" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "C Comparative Language Modeling", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "In Table 6 , we show several examples of Evidence Integration instances and their corresponding ones in evidence inference. The Evidence Integration dataset statistics are also shown in Table 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 3, |
|
"end": 10, |
|
"text": "Table 6", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 186, |
|
"end": 193, |
|
"text": "Table 3", |
|
"ref_id": "TABREF7" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "D The Evidence Integration Dataset", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "We show the searched hyper-parameters and their chosen values of different experiments in Table 7 . We manually tune the hyper-parameters where the best combination is chosen based on macro-F1 metric in the validation set. The number of hyperparameter search trials is about 100. Training time of EBM-Net is about 3 min/epoch of standard Evidence Integration and 6 min/epoch of adversarial Evidence Integration on 2 Tesla P100 GPUs at the optimal hyper-parameter setting, and the Inference time is about 100 instances/s on 1 Tesla P100 GPU. All evaluation metrics are calculated by the sklearn.metrics package in Python.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 90, |
|
"end": 98, |
|
"text": "Table 7", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "E Settings", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Validation results of EBM-Net in comparison to BioBERT are shown in Table 8 . Results on 22 completed clinical trials in COVID-evidence dataset are shown in ", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 75, |
|
"text": "Table 8", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "F Results", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\"BACKGROUND\" background/method \"METHODS\" background/method \"OBJECTIVES\" background/method \"INTRODUCTION\" background/method \"DESIGN\" background/method \"RESULTS\" result/conclusion \"CONCLUSIONS\" result/conclusion \"FINDINGS\" result/conclusion \"DISCUSSIONS\" result/conclusion \"SIGNIFICANCE\" result/conclusion \"Background: Self-management programs for patients with heart failure can reduce hospitalizations and mortality. However, no programs have analyzed their usefulness for patients with low literacy. We compared the efficacy of a heart failure self-management program designed for patients with low literacy versus usual care. Methods: We performed a 12-month randomized controlled trial. From November 2001 to April 2003, we enrolled participants aged 30-80, who had heart failure and took furosemide. Intervention patients received education on self-care emphasizing daily weight measurement, diuretic dose self-adjustment, and symptom recognition and response. Picture-based educational materials, a digital scale, and scheduled telephone follow-up were provided to reinforce adherence. Control patients received a generic heart failure brochure and usual care. Primary outcomes were combined hospitalization or death, and heart failure-related quality of life. Results: 123 patients (64 control, 59 intervention) participated; 41% had inadequate literacy. Patients in the intervention group had a lower rate of hospitalization or death (crude incidence rate ratio (IRR) = 0.69; CI 0.4, 1.2; adjusted IRR = 0.53; CI 0.32, 0.89). This difference was larger for patients with low literacy (IRR = 0.39; CI 0.16, 0.91) than for higher literacy (IRR = 0.56; CI 0.3, 1.04), but the interaction was not statistically significant. At 12 months, more patients in the intervention group reported monitoring weights daily (79% vs. 29%, p \u00a1 0.0001). After adjusting for baseline demographic and treatment differences, we found no difference in heart failure-related quality of life at 12 months (difference = -2; CI -5, +9). Conclusion: A primary care-based heart failure self-management program designed for patients with low literacy reduces the risk of hospitalizations or death.\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Section Name Section Type", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\"Follow-up and thorough education on self-care\"", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Section Name Section Type", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "\"Standard information about self-care\" \"Knowledge about heart failure\" \u2191 \"Background: Afghanistan's national guidelines recommend chloroquine for the treatment of Plasmodium vivax infection, the parasite responsible for the majority of its malaria burden. Chloroquine resistance in P. vivax is emerging in Asia. Therapeutic responses across Afghanistan have not been evaluated in detail. Methods: Between July 2007 and February 2009, an open-label, randomized controlled trial of chloroquine and dihydroartemisinin-piperaquine in patients aged three months and over with slide-confirmed P. vivax mono-infections was conducted. Consistent with current national guidelines, primaquine was not administered. Subjects were followed up daily during the acute phase of illness (days 0-3) and weekly until day 56. The primary endpoint was the overall cumulative parasitological failure rate at day 56 after the start of treatment, with the hypothesis being that dihydroartemisinin-piperaquine was non-inferior compared to chloroquine (\u03b4 = 5% difference in proportion of failures). Results: Of 2,182 individuals with positive blood films for P. vivax, 536 were enrolled in the trial. The day 28 cure rate was 100% in both treatment groups. Parasite clearance was more rapid with dihydroartemisinin-piperaquine than chloroquine. At day 56, there were more recurrent infections in the chloroquine arm (8.9%, 95% CI 6.0-13.1%) than the dihydroartemisinin-piperaquine arm (2.8%, 95% CI 1.4-5.8%), a difference in cumulative recurrence rate of 6.1% (2-sided 90%CI +2.6 to +9.7%). The log-rank test comparing the survival curves confirmed the superiority of dihydroartemisinin-piperaquine over chloroquine (p = 0.003). Multivariate analysis showed that a lower initial haemoglobin concentration was also independently associated with recurrence. Both regimens were well tolerated and no serious adverse events were reported. Conclusions: Chloroquine remains an efficacious treatment for the treatment of vivax malaria in Afghanistan. In a setting where radical therapy cannot be administered, dihydroartemisinin-piperaquine provides additional benefit in terms of post-treatment prophylaxis, reducing the incidence of recurrence from 4-8 weeks after treatment.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Section Name Section Type", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Trial Registration: The trial was registered at ClinicalTrials.gov under identifier NCT00682578.\" \"Dihydroartemisinin -piperaquine\" \"Chloroquine\" \"Parasite clearance at day 2\" \u2192 Table 6 : Several examples of dataset instances. Strickethroughed texts are in the original evidence inference dataset but not in the Evidence Integration dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 178, |
|
"end": 185, |
|
"text": "Table 6", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Section Name Section Type", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Remdesivir, lopinavir/ritonavir, interferon beta-1a and chloroquine/hydroxychloroquine.2 The proposals need to be registered and approved before the clinical trials are conducted.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Articles in downstream experiment datasets are excluded.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "P is not included in the original dataset as the background of the trial report contains it.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "https://www.nlm.nih.gov/mesh", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Zero-shot performance of BioBERT is defined as the expected results from random predictions.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "The 05/12/2020 version. 10 covid-evidence.org (visited on 05/18/2020).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "We would like to thank the anonymous reviewers of EMNLP 2020 for their constructive comments. We are also grateful for Ning Ding, Yuxuan Lai, Yijia Liu, Yao Fu, Kun Liu and Rui Wang for helpful discussions at Alibaba DAMO Academy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgement", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Input: An abstract A which is a sequence of words (from PubMed). Output: A list of collected implicit evidence E and its background B from the input article. E \u2190 [] B \u2190 \"\" if A is structured thenwhere S i is the i-th sentence and L i is its section type labelwhere S i is the i-th sentence BG \u2190 True # BG controls whether a sentence is included in the background", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Algorithm 1 Implicit Evidence Collection", |
|
"sec_num": null |
|
}, |
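{

"text": "Editor's illustrative sketch (not part of the original paper): a minimal Python rendering of Algorithm 1. The helpers split_sentences (which yields (sentence, section-type) pairs, using the headings of structured abstracts or a section classifier otherwise) and evidence_detector (Algorithm 2) are hypothetical names.\ndef collect_implicit_evidence(abstract):\n    evidence, background = [], []\n    bg = True  # BG controls whether a sentence is included in the background\n    for sentence, section_type in split_sentences(abstract):\n        if section_type in ('background', 'method') and bg:\n            background.append(sentence)\n        elif section_type in ('result', 'conclusion'):\n            bg = False  # stop extending the background once results begin\n            if evidence_detector(sentence):  # Algorithm 2\n                evidence.append(sentence)\n    return evidence, ' '.join(background)",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Algorithm 1 Implicit Evidence Collection",

"sec_num": null

},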
|
{ |
|
"text": "Input: A sentence S which is a sequence of words; Sets of adj/adv that suggest superiority (H), inferiority (L) and equality (E) Output: False if the sentence is not a piece of implicit evidence; Central comparison word c and the change direction R if the sentence is a piece of implicit evidence. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Algorithm 2 EvidenceDetector", |
|
"sec_num": null |
|
} |
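,

{

"text": "Editor's illustrative sketch (not part of the original paper): a minimal Python rendering of EvidenceDetector (Algorithm 2). The word sets H, L and E are small assumed subsets of the paper's manually curated vocabulary, and the returned labels mirror the bracketed comparative tokens used in the paper.\nH = {'higher', 'greater', 'better', 'longer', 'faster'}  # superiority\nL = {'lower', 'smaller', 'worse', 'shorter', 'slower'}  # inferiority\nE = {'similar', 'comparable'}  # equality\n\ndef evidence_detector(sentence):\n    tokens = sentence.lower().split()\n    for c in tokens:  # candidate central comparison word\n        if c in H and 'than' in tokens:\n            return c, '[HIGHER]'  # [c ... than ...] pattern, superiority\n        if c in L and 'than' in tokens:\n            return c, '[LOWER]'  # [c ... than ...] pattern, inferiority\n        if c in E and 'to' in tokens:\n            return c, '[NODIFF]'  # [similar ... to ...] pattern, equality\n    if 'no' in tokens and 'difference' in tokens:\n        return 'difference', '[NODIFF]'  # [no difference ... between ... and ...]\n    return False",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Algorithm 2 EvidenceDetector",

"sec_num": null

}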
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Scibert: A pretrained language model for scientific text", |
|
"authors": [ |
|
{ |
|
"first": "Iz", |
|
"middle": [], |
|
"last": "Beltagy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Lo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arman", |
|
"middle": [], |
|
"last": "Cohan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3606--3611", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iz Beltagy, Kyle Lo, and Arman Cohan. 2019. Scibert: A pretrained language model for scientific text. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Lan- guage Processing (EMNLP-IJCNLP), pages 3606- 3611.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Predicting clinical trial results based on announcements of interim analyses", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kristine R Broglio", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "David", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Donald A", |
|
"middle": [], |
|
"last": "Stivers", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Berry", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Trials", |
|
"volume": "15", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kristine R Broglio, David N Stivers, and Donald A Berry. 2014. Predicting clinical trial results based on announcements of interim analyses. Trials, 15(1):73.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Predicting the outcome of phase iii trials using phase ii data: a case study of clinical trial simulation in late stage drug development", |
|
"authors": [ |
|
{ |
|
"first": "Filip", |
|
"middle": [], |
|
"last": "De Ridder", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Basic & clinical pharmacology & toxicology", |
|
"volume": "96", |
|
"issue": "3", |
|
"pages": "235--241", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Filip De Ridder. 2005. Predicting the outcome of phase iii trials using phase ii data: a case study of clinical trial simulation in late stage drug develop- ment. Basic & clinical pharmacology & toxicology, 96(3):235-241.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "A data-driven approach to predicting successes and failures of clinical trials", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Kaitlyn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Gayvert", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Neel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olivier", |
|
"middle": [], |
|
"last": "Madhukar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Elemento", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Cell chemical biology", |
|
"volume": "23", |
|
"issue": "10", |
|
"pages": "1294--1301", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kaitlyn M Gayvert, Neel S Madhukar, and Olivier Ele- mento. 2016. A data-driven approach to predicting successes and failures of clinical trials. Cell chemi- cal biology, 23(10):1294-1301.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Clinical trial simulation: a review", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [], |
|
"last": "Holford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "B", |
|
"middle": [ |
|
"A" |
|
], |
|
"last": "Ma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ploeger", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Clinical Pharmacology & Therapeutics", |
|
"volume": "88", |
|
"issue": "2", |
|
"pages": "166--182", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "N Holford, SC Ma, and BA Ploeger. 2010. Clinical trial simulation: a review. Clinical Pharmacology & Therapeutics, 88(2):166-182.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Simulation of clinical trials", |
|
"authors": [ |
|
{ |
|
"first": "N", |
|
"middle": [ |
|
"H G" |
|
], |
|
"last": "Holford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "H", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Kimko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"P R" |
|
], |
|
"last": "Monteleone", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "C", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Peck", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2000, |
|
"venue": "Annual Review of Pharmacology and Toxicology", |
|
"volume": "40", |
|
"issue": "1", |
|
"pages": "209--234", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1146/annurev.pharmtox.40.1.209" |
|
], |
|
"PMID": [ |
|
"10836134" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "N. H. G. Holford, H. C. Kimko, J. P. R. Monteleone, and C. C. Peck. 2000. Simulation of clinical tri- als. Annual Review of Pharmacology and Toxicol- ogy, 40(1):209-234. PMID: 10836134.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Evaluation of pico as a knowledge representation for clinical questions", |
|
"authors": [ |
|
{ |
|
"first": "Xiaoli", |
|
"middle": [], |
|
"last": "Huang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dina", |
|
"middle": [], |
|
"last": "Demner-Fushman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "AMIA annual symposium proceedings", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaoli Huang, Jimmy Lin, and Dina Demner-Fushman. 2006. Evaluation of pico as a knowledge represen- tation for clinical questions. In AMIA annual sym- posium proceedings, volume 2006, page 359. Amer- ican Medical Informatics Association.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "PICO element detection in medical text via long short-term memory neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Szolovits", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the BioNLP 2018 workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "67--75", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W18-2308" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Di Jin and Peter Szolovits. 2018. PICO element detec- tion in medical text via long short-term memory neu- ral networks. In Proceedings of the BioNLP 2018 workshop, pages 67-75, Melbourne, Australia. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Mining comparative sentences and relations", |
|
"authors": [ |
|
{ |
|
"first": "Nitin", |
|
"middle": [], |
|
"last": "Jindal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2006, |
|
"venue": "Proceedings of the 21st National Conference on Artificial Intelligence", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1331--1336", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nitin Jindal and Bing Liu. 2006. Mining comparative sentences and relations. In Proceedings of the 21st National Conference on Artificial Intelligence -Vol- ume 2, AAAI'06, page 1331-1336. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Comparatives, semantics of. Concise Encyclopedia of Philosophy of Language and Linguistics", |
|
"authors": [ |
|
{ |
|
"first": "Christopher", |
|
"middle": [], |
|
"last": "Kennedy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2004, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "68--71", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Christopher Kennedy. 2004. Comparatives, semantics of. Concise Encyclopedia of Philosophy of Lan- guage and Linguistics, pages 68-71.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Adam: A method for stochastic optimization", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Diederik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Kingma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ba", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1412.6980" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Seed-driven document ranking for systematic reviews in evidencebased medicine", |
|
"authors": [ |
|
{ |
|
"first": "Grace", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aixin", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "The 41st International ACM SI-GIR Conference on Research & Development in Information Retrieval, SIGIR '18", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "455--464", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3209978.3209994" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Grace E. Lee and Aixin Sun. 2018. Seed-driven doc- ument ranking for systematic reviews in evidence- based medicine. In The 41st International ACM SI- GIR Conference on Research & Development in In- formation Retrieval, SIGIR '18, page 455-464, New York, NY, USA. Association for Computing Machin- ery.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Biobert: a pre-trained biomedical language representation model for biomedical text mining", |
|
"authors": [ |
|
{ |
|
"first": "Jinhyuk", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wonjin", |
|
"middle": [], |
|
"last": "Yoon", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sungdong", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Donghyeon", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunkyu", |
|
"middle": [], |
|
"last": "Kim", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chan", |
|
"middle": [], |
|
"last": "Ho So", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaewoo", |
|
"middle": [], |
|
"last": "Kang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Bioinformatics", |
|
"volume": "36", |
|
"issue": "4", |
|
"pages": "1234--1240", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan Ho So, and Jaewoo Kang. 2020. Biobert: a pre-trained biomed- ical language representation model for biomedical text mining. Bioinformatics, 36(4):1234-1240.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Inferring which medical treatments work from reports of clinical trials", |
|
"authors": [ |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Lehman", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jay", |
|
"middle": [], |
|
"last": "Deyoung", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Regina", |
|
"middle": [], |
|
"last": "Barzilay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byron", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Wallace", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3705--3717", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1371" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Eric Lehman, Jay DeYoung, Regina Barzilay, and By- ron C. Wallace. 2019. Inferring which medical treat- ments work from reports of clinical trials. In Pro- ceedings of the 2019 Conference of the North Amer- ican Chapter of the Association for Computational Linguistics: Human Language Technologies, Vol- ume 1 (Long and Short Papers), pages 3705-3717, Minneapolis, Minnesota. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Visualizing data using t-sne", |
|
"authors": [ |
|
{ |
|
"first": "Laurens", |
|
"middle": [], |
|
"last": "Van Der Maaten", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geoffrey", |
|
"middle": [], |
|
"last": "Hinton", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "Journal of machine learning research", |
|
"volume": "9", |
|
"issue": "", |
|
"pages": "2579--2605", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Laurens van der Maaten and Geoffrey Hinton. 2008. Visualizing data using t-sne. Journal of machine learning research, 9(Nov):2579-2605.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Automating biomedical evidence synthesis: RobotReviewer", |
|
"authors": [ |
|
{ |
|
"first": "Iain", |
|
"middle": [], |
|
"last": "Marshall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jo\u00ebl", |
|
"middle": [], |
|
"last": "Kuiper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Banner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byron", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Wallace", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of ACL 2017, System Demonstrations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7--12", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Iain Marshall, Jo\u00ebl Kuiper, Edward Banner, and By- ron C. Wallace. 2017. Automating biomedical ev- idence synthesis: RobotReviewer. In Proceedings of ACL 2017, System Demonstrations, pages 7-12, Vancouver, Canada. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Hydroxychloroquine or chloroquine with or without a macrolide for treatment of covid-19: a multinational registry analysis. The Lancet", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mandeep R Mehra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S", |
|
"middle": [], |
|
"last": "Sapan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Frank", |
|
"middle": [], |
|
"last": "Desai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amit N", |
|
"middle": [], |
|
"last": "Ruschitzka", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Patel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mandeep R Mehra, Sapan S Desai, Frank Ruschitzka, and Amit N Patel. 2020. Hydroxychloroquine or chloroquine with or without a macrolide for treat- ment of covid-19: a multinational registry analysis. The Lancet.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Adversarially regularising neural nli models to integrate logical background knowledge", |
|
"authors": [ |
|
{ |
|
"first": "Pasquale", |
|
"middle": [], |
|
"last": "Minervini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sebastian", |
|
"middle": [], |
|
"last": "Riedel", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 22nd Conference on Computational Natural Language Learning", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "65--74", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pasquale Minervini and Sebastian Riedel. 2018. Adver- sarially regularising neural nli models to integrate logical background knowledge. In Proceedings of the 22nd Conference on Computational Natural Lan- guage Learning, pages 65-74.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Clinical outcomes of current medical approaches for middle east respiratory syndrome: A systematic review and meta-analysis", |
|
"authors": [], |
|
"year": 2018, |
|
"venue": "Reviews in medical virology", |
|
"volume": "28", |
|
"issue": "3", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mostafa Ebraheem Morra, Le Van Thanh, Mo- hamed Gomaa Kamel, Ahmed Abdelmotaleb Ghazy, Ahmed MA Altibi, Lu Minh Dat, Tran Ngoc Xuan Thy, Nguyen Lam Vuong, Mostafa Reda Mostafa, Sarah Ibrahim Ahmed, et al. 2018. Clinical out- comes of current medical approaches for middle east respiratory syndrome: A systematic review and meta-analysis. Reviews in medical virology, 28(3):e1977.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "A corpus with multi-level annotations of patients, interventions and outcomes to support language processing for medical literature", |
|
"authors": [ |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Nye", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junyi", |
|
"middle": [ |
|
"Jessy" |
|
], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Roma", |
|
"middle": [], |
|
"last": "Patel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yinfei", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iain", |
|
"middle": [], |
|
"last": "Marshall", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
|
"volume": "1", |
|
"issue": "", |
|
"pages": "197--207", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P18-1019" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Benjamin Nye, Junyi Jessy Li, Roma Patel, Yinfei Yang, Iain Marshall, Ani Nenkova, and Byron Wal- lace. 2018. A corpus with multi-level annotations of patients, interventions and outcomes to support language processing for medical literature. In Pro- ceedings of the 56th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 1: Long Papers), pages 197-207, Melbourne, Australia. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Pytorch: An imperative style, high-performance deep learning library", |
|
"authors": [ |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Paszke", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Gross", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francisco", |
|
"middle": [], |
|
"last": "Massa", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Lerer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Bradbury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gregory", |
|
"middle": [], |
|
"last": "Chanan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Killeen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zeming", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Natalia", |
|
"middle": [], |
|
"last": "Gimelshein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luca", |
|
"middle": [], |
|
"last": "Antiga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alban", |
|
"middle": [], |
|
"last": "Desmaison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andreas", |
|
"middle": [], |
|
"last": "Kopf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zachary", |
|
"middle": [], |
|
"last": "Devito", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Raison", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alykhan", |
|
"middle": [], |
|
"last": "Tejani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sasank", |
|
"middle": [], |
|
"last": "Chilamkurthy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benoit", |
|
"middle": [], |
|
"last": "Steiner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lu", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junjie", |
|
"middle": [], |
|
"last": "Bai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Soumith", |
|
"middle": [], |
|
"last": "Chintala", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "32", |
|
"issue": "", |
|
"pages": "8024--8035", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Te- jani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 2019. Pytorch: An imperative style, high-performance deep learn- ing library. In Advances in Neural Information Pro- cessing Systems 32, pages 8024-8035. Curran Asso- ciates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Effect of chloroquine on some clinical and biochemical parameters in nonresponse chronic hepatitis c virus infection patients: pilot clinical trial", |
|
"authors": [ |
|
{ |
|
"first": "P", |
|
"middle": [], |
|
"last": "Peymani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ghavami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Yeganeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Tabrizi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sabour", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Geramizadeh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mr Fattahi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Ahmadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Lankarani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Acta bio-medica: Atenei Parmensis", |
|
"volume": "87", |
|
"issue": "1", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "P Peymani, S Ghavami, B Yeganeh, R Tabrizi, S Sabour, B Geramizadeh, MR Fattahi, SM Ahmadi, and KB Lankarani. 2016. Effect of chloroquine on some clinical and biochemical parameters in non- response chronic hepatitis c virus infection patients: pilot clinical trial. Acta bio-medica: Atenei Parmen- sis, 87(1):46.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "Evidence-based medicine", |
|
"authors": [ |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "David", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Sackett", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 1997, |
|
"venue": "Seminars in perinatology", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "3--5", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "David L Sackett. 1997. Evidence-based medicine. In Seminars in perinatology, pages 3-5. Elsevier.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Broad-spectrum antiviral gs-5734 inhibits both epidemic and zoonotic coronaviruses", |
|
"authors": [ |
|
{ |
|
"first": "Amy", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Timothy P Sheahan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rachel", |
|
"middle": [ |
|
"L" |
|
], |
|
"last": "Sims", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Graham", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "D", |
|
"middle": [], |
|
"last": "Vineet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lisa", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Menachery", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [ |
|
"B" |
|
], |
|
"last": "Gralinski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sarah", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Case", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Krzysztof", |
|
"middle": [], |
|
"last": "Leist", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Pyrc", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Y", |
|
"middle": [], |
|
"last": "Joy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iva", |
|
"middle": [], |
|
"last": "Feng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Trantcheva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Science translational medicine", |
|
"volume": "", |
|
"issue": "396", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Timothy P Sheahan, Amy C Sims, Rachel L Graham, Vineet D Menachery, Lisa E Gralinski, James B Case, Sarah R Leist, Krzysztof Pyrc, Joy Y Feng, Iva Trantcheva, et al. 2017. Broad-spectrum antiviral gs-5734 inhibits both epidemic and zoonotic coro- naviruses. Science translational medicine, 9(396).", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "A neural candidate-selector architecture for automatic structured clinical text annotation", |
|
"authors": [ |
|
{ |
|
"first": "Gaurav", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iain", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Marshall", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Thomas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Shawe-Taylor", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Byron", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Wallace", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 2017 ACM on Conference on Information and Knowledge Management, CIKM '17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1519--1528", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3132847.3132989" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Gaurav Singh, Iain J. Marshall, James Thomas, John Shawe-Taylor, and Byron C. Wallace. 2017. A neural candidate-selector architecture for automatic structured clinical text annotation. In Proceed- ings of the 2017 ACM on Conference on Informa- tion and Knowledge Management, CIKM '17, page 1519-1528, New York, NY, USA. Association for Computing Machinery.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Attention is all you need", |
|
"authors": [ |
|
{ |
|
"first": "Ashish", |
|
"middle": [], |
|
"last": "Vaswani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noam", |
|
"middle": [], |
|
"last": "Shazeer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Niki", |
|
"middle": [], |
|
"last": "Parmar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jakob", |
|
"middle": [], |
|
"last": "Uszkoreit", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llion", |
|
"middle": [], |
|
"last": "Jones", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aidan", |
|
"middle": [ |
|
"N" |
|
], |
|
"last": "Gomez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Illia", |
|
"middle": [], |
|
"last": "Kaiser", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Polosukhin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "30", |
|
"issue": "", |
|
"pages": "5998--6008", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141 ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In I. Guyon, U. V. Luxburg, S. Bengio, H. Wallach, R. Fergus, S. Vishwanathan, and R. Gar- nett, editors, Advances in Neural Information Pro- cessing Systems 30, pages 5998-6008. Curran Asso- ciates, Inc.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "What does the evidence say? models to help make sense of the biomedical literature", |
|
"authors": [ |
|
{ |
|
"first": "Byron", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Wallace", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, IJCAI-19", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6416--6420", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.24963/ijcai.2019/899" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Byron C. Wallace. 2019. What does the evidence say? models to help make sense of the biomedi- cal literature. In Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intel- ligence, IJCAI-19, pages 6416-6420. International Joint Conferences on Artificial Intelligence Organi- zation.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Extracting pico sentences from clinical trial reports using supervised distant supervision", |
|
"authors": [ |
|
{ |
|
"first": "Byron", |
|
"middle": [ |
|
"C" |
|
], |
|
"last": "Wallace", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jo\u00ebl", |
|
"middle": [], |
|
"last": "Kuiper", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aakash", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "(", |
|
"middle": [], |
|
"last": "Mingxi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": ")", |
|
"middle": [], |
|
"last": "Brian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Iain", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Marshall", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Journal of Machine Learning Research", |
|
"volume": "17", |
|
"issue": "132", |
|
"pages": "1--25", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Byron C. Wallace, Jo\u00ebl Kuiper, Aakash Sharma, Mingxi (Brian) Zhu, and Iain J. Marshall. 2016. Ex- tracting pico sentences from clinical trial reports us- ing supervised distant supervision. Journal of Ma- chine Learning Research, 17(132):1-25.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "What if we simply swap the two text fragments? a straightforward yet effective way to test the robustness of methods to confounding signals in nature language inference tasks", |
|
"authors": [ |
|
{ |
|
"first": "Haohan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Da", |
|
"middle": [], |
|
"last": "Sun", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Xing", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
|
"volume": "33", |
|
"issue": "", |
|
"pages": "7136--7143", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haohan Wang, Da Sun, and Eric P Xing. 2019. What if we simply swap the two text fragments? a straight- forward yet effective way to test the robustness of methods to confounding signals in nature language inference tasks. In Proceedings of the AAAI Con- ference on Artificial Intelligence, volume 33, pages 7136-7143.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Cord-19: The covid-19 open research dataset", |
|
"authors": [ |
|
{ |
|
"first": "Lucy", |
|
"middle": [ |
|
"Lu" |
|
], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kyle", |
|
"middle": [], |
|
"last": "Lo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yoganand", |
|
"middle": [], |
|
"last": "Chandrasekhar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Russell", |
|
"middle": [], |
|
"last": "Reas", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiangjiang", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Darrin", |
|
"middle": [], |
|
"last": "Eide", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kathryn", |
|
"middle": [], |
|
"last": "Funk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rodney", |
|
"middle": [], |
|
"last": "Kinney", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ziyang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "William", |
|
"middle": [], |
|
"last": "Merrill", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2004.10706" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lucy Lu Wang, Kyle Lo, Yoganand Chandrasekhar, Russell Reas, Jiangjiang Yang, Darrin Eide, Kathryn Funk, Rodney Kinney, Ziyang Liu, William Merrill, et al. 2020a. Cord-19: The covid-19 open research dataset. arXiv preprint arXiv:2004.10706.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Remdesivir in adults with severe covid-19: a randomised, double-blind, placebo-controlled, multicentre trial", |
|
"authors": [ |
|
{ |
|
"first": "Yeming", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dingyu", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guanhua", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ronghui", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianping", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shouzhi", |
|
"middle": [], |
|
"last": "Fu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ling", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenshun", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiaofa", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yeming Wang, Dingyu Zhang, Guanhua Du, Ronghui Du, Jianping Zhao, Yang Jin, Shouzhi Fu, Ling Gao, Zhenshun Cheng, Qiaofa Lu, et al. 2020b. Remde- sivir in adults with severe covid-19: a randomised, double-blind, placebo-controlled, multicentre trial. The Lancet.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Solidarity clinical trial for covid-19 treatments", |
|
"authors": [], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "WHO. 2020. Solidarity clinical trial for covid-19 treat- ments.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Huggingface's transformers: State-of-the-art natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Wolf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lysandre", |
|
"middle": [], |
|
"last": "Debut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Victor", |
|
"middle": [], |
|
"last": "Sanh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julien", |
|
"middle": [], |
|
"last": "Chaumond", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Clement", |
|
"middle": [], |
|
"last": "Delangue", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anthony", |
|
"middle": [], |
|
"last": "Moi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pierric", |
|
"middle": [], |
|
"last": "Cistac", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tim", |
|
"middle": [], |
|
"last": "Rault", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "R'emi", |
|
"middle": [], |
|
"last": "Louf", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Morgan", |
|
"middle": [], |
|
"last": "Funtowicz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jamie", |
|
"middle": [], |
|
"last": "Brew", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "ArXiv", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R'emi Louf, Morgan Funtow- icz, and Jamie Brew. 2019. Huggingface's trans- formers: State-of-the-art natural language process- ing. ArXiv, abs/1910.03771.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "Estimation of clinical trial success rates and related parameters", |
|
"authors": [ |
|
{ |
|
"first": "Chi", |
|
"middle": [ |
|
"Heem" |
|
], |
|
"last": "Wong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kien", |
|
"middle": [ |
|
"Wei" |
|
], |
|
"last": "Siah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [ |
|
"W" |
|
], |
|
"last": "Lo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Biostatistics", |
|
"volume": "20", |
|
"issue": "2", |
|
"pages": "273--286", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chi Heem Wong, Kien Wei Siah, and Andrew W Lo. 2019. Estimation of clinical trial success rates and related parameters. Biostatistics, 20(2):273-286.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "End-to-end open-domain question answering with BERTserini", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuqing", |
|
"middle": [], |
|
"last": "Xie", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aileen", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xingyu", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luchen", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kun", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jimmy", |
|
"middle": [], |
|
"last": "Lin", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "72--77", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-4013" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Yang, Yuqing Xie, Aileen Lin, Xingyu Li, Luchen Tan, Kun Xiong, Ming Li, and Jimmy Lin. 2019. End-to-end open-domain question answering with BERTserini. In Proceedings of the 2019 Confer- ence of the North American Chapter of the Asso- ciation for Computational Linguistics (Demonstra- tions), pages 72-77, Minneapolis, Minnesota. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "trial proposal (B and PICO), predict its R, i.e. how I compares to C in terms of O and P.All available clinical evidenceIntegration Architecture of the proposed Clinical Trial Result Prediction (CTRP) task." |
|
}, |
|
"FIGREF1": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": ". than ...]. For expression of equality (\u2192), we detect morpheme patterns of [similar ... to ...] and [no difference ... between ... and ...]. The background/method section serves as the Eent E dis R r R" |
|
}, |
|
"FIGREF2": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Levels of viral antigen staining in lung sections of GS-5734-treated animals were] 0 [MASK] [vehicle-treated animals] 1 .\" and r = [LOWER], then the reversed evidence is: E rev = \"[Vehicle-treated animals] 1 [MASK] [levels of viral antigen staining in lung sections of GS-5734-treated animals were] 0 .\" and Rev(r) = [HIGHER]. This implicitly reverses the ordering direction of I and C without changing the P and O." |
|
}, |
|
"FIGREF3": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Architecture of CLM pre-training forEBM-Net. (ori.: original; adv.: adversarial; inst.: instance) in BERT(Devlin et al., 2019), but differentiates from it in that: (1) EBM-Net masks out phrases R that suggest comparative results and predicts a specific set of comparative labels C;(2) EBM-Net is also pre-trained on adversarial examples generated by comparison rules from the original examples.During pre-training, EBM-Net takes as input the concatenation of background B and the corre-sponding partially disentangled implicit evidence E, i.e.: Input = [[CLS], B, [SEP], E, [SEP]], where [CLS] and [SEP] are the special classification and separation tokens used in the original BERT and E \u2208 {E dis , E rev }. B and E are associated with two different segment types. The special [MASK] tokens are only used as placeholders for the masked out R." |
|
}, |
|
"FIGREF4": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "During fine-tuning, EBM-Net takes as input the [[CLS], B, [SEP], E exp , [SEP]], where E exp denotes the explicit evidence in the downstream datasets of the proposed CTRP task. For example, E exp = [I, [SEP], O, [SEP]" |
|
}, |
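"EDITOR_SKETCH_INPUT": {

"uris": null,

"type_str": "figure",

"num": null,

"text": "Editor's illustrative sketch (not part of the original paper): assembling the EBM-Net input sequences described in FIGREF3 and FIGREF4, assuming token lists from a BERT-style tokenizer; the function name build_input is hypothetical.\ndef build_input(background, evidence_parts):\n    # Pre-training: evidence_parts = [E]; fine-tuning: evidence_parts = [I, C, O].\n    tokens, segments = ['[CLS]'], [0]\n    tokens += background + ['[SEP]']\n    segments += [0] * (len(background) + 1)  # B gets one segment type\n    for part in evidence_parts:\n        tokens += part + ['[SEP]']\n        segments += [1] * (len(part) + 1)  # the evidence gets the other\n    return tokens, segments"

},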
|
"FIGREF5": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Figure 3" |
|
}, |
|
"FIGREF6": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "Left: EBM-Net 3-way macro-F1 v.s. pretraining sizes compared to BioBERT; Right: EBM-Net and BioBERT 3-way macro-F1 v.s. fine-tuning sizes." |
|
}, |
|
"FIGREF7": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "T-SNE visualizations of EBM-Net representations of Evidence Integration test set instances. Red colored , blue colored , and green colored refer to the corresponding R equaling \u2191, \u2193 and \u2192, respectively." |
|
}, |
|
"FIGREF8": { |
|
"uris": null, |
|
"type_str": "figure", |
|
"num": null, |
|
"text": "The frequency (in log scale) of different comparative language modeling labels. Red, green and blue colors denote the corresponding label expressing superiority, equality and inferiority, respectively." |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"text": "Several examples of implicit evidence. Red, violet and blue denote superiority, equality and inferiority.", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF2": { |
|
"type_str": "table", |
|
"text": "is a manually-curated vocabulary for such labels and |C| = 34. Each element can be mapped to its antonym in C by a reversing function Rev: e.g.: Rev([SMALLER]) = [GREATER] and Rev([NODIFF]) = [NODIFF]. This enables us to generate adversarial examples used below.", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null |
|
}, |
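"EDITOR_SKETCH_REV": {

"type_str": "table",

"text": "Editor's illustrative sketch (not part of the original paper): the reversing function Rev over comparative labels, shown for a handful of the 34 labels in C; the full mapping is manually curated in the paper.\nREV = {'[HIGHER]': '[LOWER]', '[LOWER]': '[HIGHER]', '[GREATER]': '[SMALLER]', '[SMALLER]': '[GREATER]', '[MORE]': '[LESS]', '[LESS]': '[MORE]', '[NODIFF]': '[NODIFF]', '[SIMILAR]': '[SIMILAR]'}\n\ndef rev(label):\n    # Map a label to its antonym; equality labels are self-inverse, so rev(rev(x)) == x.\n    return REV[label]",

"content": "<table/>",

"num": null,

"html": null

},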
|
"TABREF5": { |
|
"type_str": "table", |
|
"text": "Main results on the benchmark Evidence Integration dataset. |\u2206| denotes the absolute value of relative accuracy decrease from the standard to the adversarial setting. All numbers are percentages. (w/o: without)", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF6": { |
|
"type_str": "table", |
|
"text": "", |
|
"content": "<table><tr><td>Statistic</td><td colspan=\"2\">Pre-training (original) Standard Evidence Integration (training)</td></tr><tr><td>Avg. Length of B</td><td>129.0</td><td>182.8</td></tr><tr><td>Avg. Length of E</td><td>16.8</td><td>16.7*</td></tr><tr><td>Avg. Length of I</td><td>NA</td><td>5.7</td></tr><tr><td>Avg. Length of C</td><td>NA</td><td>3.7</td></tr><tr><td>Avg. Length of O</td><td>NA</td><td>5.3</td></tr><tr><td>% of \u2191</td><td>49.9</td><td>31.4</td></tr><tr><td>% of \u2192</td><td>29.9</td><td>44.3</td></tr><tr><td>% of \u2193</td><td>20.2</td><td>24.3</td></tr></table>", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF7": { |
|
"type_str": "table", |
|
"text": "This table shows the statistics of the pre-training and the downstream Evidence Integration dataset. *Concatenation of I, C, O and two [SEP] tokens.", |
|
"content": "<table/>", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF8": { |
|
"type_str": "table", |
|
"text": "Several examples of section name to background/method or result/conclusion mapping.", |
|
"content": "<table><tr><td>1</td><td>[SIMILAR]</td><td>[MORE] [FEWER]</td><td>[HIGHER] [LOWER]</td><td>[MORE] [LESS]</td><td>[NODIFF]</td><td>[GREATER] [SMALLER]</td><td>[BETTER] [WORSE]</td><td>[LARGER] [SMALLER]</td><td>[LONGER] [SHORTER]</td><td>[STRONGER] [WEAKER]</td><td>[FASTER] [SLOWER]</td><td>[LATER] [EARLIER]</td><td>[FURTHER] [CLOSER]</td><td>[OLDER] [YOUNGER]</td><td>[RICHER] [POORER]</td><td>[WIDER] [NARROWER]</td><td>[BROADER] [NARROWER]</td><td>[BIGGER] [SMALLER]</td><td>[DEEPER] [SHALLOWER]</td><td>[COMMONER] [RARER]</td></tr></table>", |
|
"num": null, |
|
"html": null |
|
}, |
|
"TABREF10": { |
|
"type_str": "table", |
|
"text": "Several examples of comparative language modeling instances.", |
|
"content": "<table><tr><td>B</td></tr></table>", |
|
"num": null, |
|
"html": null |
|
} |
|
} |
|
} |
|
} |