{
"paper_id": "2022",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T12:27:44.129620Z"
},
"title": "Learning to Ask Like a Physician",
"authors": [
{
"first": "Eric",
"middle": [],
"last": "Lehman",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Vladislav",
"middle": [],
"last": "Lialin",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of Massachusetts Lowell",
"location": {}
},
"email": ""
},
{
"first": "Katelyn",
"middle": [
"Y"
],
"last": "Legaspi",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of the Philippines",
"location": {}
},
"email": ""
},
{
"first": "Janelle",
"middle": [
"R"
],
"last": "Sy",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "UERM Memorial Medical Center",
"location": {}
},
"email": ""
},
{
"first": "Patricia",
"middle": [
"Therese",
"S"
],
"last": "Pile",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "UERM Memorial Medical Center",
"location": {}
},
"email": ""
},
{
"first": "Nicole",
"middle": [
"Rose",
"I"
],
"last": "Alberto",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of the Philippines",
"location": {}
},
"email": ""
},
{
"first": "Richard",
"middle": [
"Raymund",
"R"
],
"last": "Ragasa",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of the Philippines",
"location": {}
},
"email": ""
},
{
"first": "Corinna",
"middle": [
"Victoria",
"M"
],
"last": "Puyat",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of the Philippines",
"location": {}
},
"email": ""
},
{
"first": "Isabelle",
"middle": [
"Rose",
"I"
],
"last": "Alberto",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Pia",
"middle": [
"Gabrielle",
"I"
],
"last": "Alfonso",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of the Philippines",
"location": {}
},
"email": ""
},
{
"first": "Marianne",
"middle": [],
"last": "Tali\u00f1o",
"suffix": "",
"affiliation": {
"laboratory": "ASMPH",
"institution": "",
"location": {}
},
"email": ""
},
{
"first": "Dana",
"middle": [],
"last": "Moukheiber",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Byron",
"middle": [
"C"
],
"last": "Wallace",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Northeastern University",
"location": {}
},
"email": ""
},
{
"first": "Anna",
"middle": [],
"last": "Rumshisky",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of Massachusetts Lowell",
"location": {}
},
"email": ""
},
{
"first": "Jennifer",
"middle": [
"J"
],
"last": "Liang",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "IBM Research",
"location": {}
},
"email": ""
},
{
"first": "Preethi",
"middle": [],
"last": "Raghavan",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Leo",
"middle": [
"Anthony"
],
"last": "Celi",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Peter",
"middle": [],
"last": "Szolovits",
"suffix": "",
"affiliation": {},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "Existing question answering (QA) datasets derived from electronic health records (EHR) are artificially generated and consequently fail to capture realistic physician information needs. We present Discharge Summary Clinical Questions (DiSCQ), a newly curated question dataset composed of 2,000+ questions paired with the snippets of text (triggers) that prompted each question. The questions are generated by medical experts from 100+ MIMIC-III discharge summaries. We analyze this dataset to characterize the types of information sought by medical experts. We also train baseline models for trigger detection and question generation (QG), paired with unsupervised answer retrieval over EHRs. Our baseline model is able to generate high quality questions in over 62% of cases when prompted with human selected triggers. We release this dataset (and all code to reproduce baseline model results) to facilitate further research into realistic clinical QA and QG. 1",
"pdf_parse": {
"paper_id": "2022",
"_pdf_hash": "",
"abstract": [
{
"text": "Existing question answering (QA) datasets derived from electronic health records (EHR) are artificially generated and consequently fail to capture realistic physician information needs. We present Discharge Summary Clinical Questions (DiSCQ), a newly curated question dataset composed of 2,000+ questions paired with the snippets of text (triggers) that prompted each question. The questions are generated by medical experts from 100+ MIMIC-III discharge summaries. We analyze this dataset to characterize the types of information sought by medical experts. We also train baseline models for trigger detection and question generation (QG), paired with unsupervised answer retrieval over EHRs. Our baseline model is able to generate high quality questions in over 62% of cases when prompted with human selected triggers. We release this dataset (and all code to reproduce baseline model results) to facilitate further research into realistic clinical QA and QG. 1",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Physicians often query electronic health records (EHR) to make fully informed decisions about patient care (Demner-Fushman et al., 2009) . However, D'Alessandro et al. (2004) found that it takes an average of 8.3 minutes to answer a single question, even when physicians are trained to retrieve information from an EHR platform. Natural language technologies such as automatic question answering (QA) may partially address this problem.",
"cite_spans": [
{
"start": 107,
"end": 136,
"text": "(Demner-Fushman et al., 2009)",
"ref_id": "BIBREF14"
},
{
"start": 139,
"end": 174,
"text": "However, D'Alessandro et al. (2004)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "There have been several dataset collection efforts that aim to facilitate the training and evaluation of clinical QA models (Pampari et al., 2018; Yue et al., 2021; Raghavan et al., 2021; Kell et al., 2021) . However, template-based (Pampari et al., 2018; Raghavan et al., 2021) and other kinds of automated generation (Yue et al., 2021) are by nature brittle and have limited evidence of producing questions that medical professionals ask.",
"cite_spans": [
{
"start": 124,
"end": 146,
"text": "(Pampari et al., 2018;",
"ref_id": "BIBREF43"
},
{
"start": 147,
"end": 164,
"text": "Yue et al., 2021;",
"ref_id": "BIBREF66"
},
{
"start": 165,
"end": 187,
"text": "Raghavan et al., 2021;",
"ref_id": null
},
{
"start": 188,
"end": 206,
"text": "Kell et al., 2021)",
"ref_id": "BIBREF26"
},
{
"start": 233,
"end": 255,
"text": "(Pampari et al., 2018;",
"ref_id": "BIBREF43"
},
{
"start": 256,
"end": 278,
"text": "Raghavan et al., 2021)",
"ref_id": null
},
{
"start": 319,
"end": 337,
"text": "(Yue et al., 2021)",
"ref_id": "BIBREF66"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Datasets such as emrQA (Pampari et al., 2018) and emrKBQA (Raghavan et al., 2021) attempt to simulate physician queries by defining templates derived from actual questions posed by physicians and then performing slot-filling with clinical entities. This method yields questions that are structurally realistic, but not consistently medically relevant. Yue et al. (2020) found that sampling just 5% of the emrQA questions was sufficient for training a model. They further note that 96% of the questions in a subsection of emrQA contain key phrases that overlap with those in the selected answer.",
"cite_spans": [
{
"start": 23,
"end": 45,
"text": "(Pampari et al., 2018)",
"ref_id": "BIBREF43"
},
{
"start": 58,
"end": 81,
"text": "(Raghavan et al., 2021)",
"ref_id": null
},
{
"start": 352,
"end": 369,
"text": "Yue et al. (2020)",
"ref_id": "BIBREF65"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "In follow-up work, Yue et al. (2021) provide a new dataset of 975 questions generated using a diverse question generation model with a human-inthe-loop and 312 questions generated by medical experts from scratch, with the caveat that they must be answerable on the given discharge summary. However, a random sample of 100 questions from the former reveals that 96% of the 975 questions were slot-filled templates directly from emrQA. A separate random sample of 100 questions from the latter set reveals that 54% of the questions also use the same slot-filled templates from emrQA. Similarly, we find that 85% of the machine-generated questions and 75% of the human-generated questions contain the exact same key phrases as in the selected answer. Although Yue et al. (2020) does not discuss how they prompt physician questions, our analysis strongly suggests that even in the case of questions \"written\" by physicians, answer spans are likely identified in advance; this significantly constrains the set of questions a medical professional can ask.",
"cite_spans": [
{
"start": 19,
"end": 36,
"text": "Yue et al. (2021)",
"ref_id": "BIBREF66"
},
{
"start": 757,
"end": 774,
"text": "Yue et al. (2020)",
"ref_id": "BIBREF65"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "To address this paucity of natural, clinically relevant questions, we collect queries that might plausibly be asked by healthcare providers during patient handoff (i.e., transitions of care). We use patient discharge summaries from the Medical Information Mart for Intensive Care III (MIMIC-III) English dataset (Johnson et al., 2016) to mimic the handoff process. We expect this process to produce more natural questions than prior work. We work with 10 medical experts of varying skill levels. We ask them to review a given discharge summary as the receiving physician in a patient handoff and record any questions they have as well as the piece of text within the discharge summary (trigger) that prompted the question. A sample of questions and corresponding triggers can be seen in Figure 1 .",
"cite_spans": [
{
"start": 312,
"end": 334,
"text": "(Johnson et al., 2016)",
"ref_id": "BIBREF24"
}
],
"ref_spans": [
{
"start": 787,
"end": 795,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We train question trigger detection and question generation (QG) models on DiSCQ, paired with unsupervised answer retrieval over the EHR. Finally, we propose a new set of guidelines for human evaluation of clinical questions and evaluate the performance of our pipeline using these guidelines. Concretely, our contributions are summarized as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\u2022 We work with 10 medical experts to compile DiSCQ, a new dataset of 2000+ questions and 1000+ triggers from over 100+ discharge summaries, providing an important new resource for research in clinical NLP.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\u2022 We demonstrate the dataset's utility by training baseline models for trigger detection and question generation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "\u2022 We develop novel guidelines for human evaluation of clinical questions. Our experiments show that widely used automated QG metrics do not correlate with human-evaluated question quality.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "2 Related Work",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Clinical information retrieval, and in particular clinical question answering, is a challenging research task with direct potential applications in clinical practice. Several dataset collection efforts gather consumer health questions and pair them with answers from sources like WebMD and PubMED (Yu et al., 2007; Cao et al., 2011; Abacha and Zweigenbaum, 2015; Abacha et al., 2017; Zahid et al., 2018; Savery et al., 2020; Zhu et al., 2020; Abacha et al., 2019) . Likewise, Suster and Daelemans (2018) automatically generate 100,000+ information retrieval queries from over 11,000+ BMJ Case Reports. While these resources are helpful in testing a model's understanding and information retrieval ability on biomedical texts, these datasets consist of broad medical questions asked by the general population. Doctors will not only ask more specific and targeted questions, but also query the EHR to make fully informed decisions about patient care. The number of publicly available QA datasets derived from EHR systems is quite limited due to the labor intensiveness and high skill requirement needed to create such a dataset. As mentioned previously, to help alleviate this dearth of clinical questions, Pampari et al. (2018) introduced emrQA, a QA dataset constructed from templatized physician queries slot-filled with n2c2 annotations. 2 Fan (2019) extended emrQA by explicitly focusing on \"why\" questions. Soni et al. (2019) introduced a novel approach for constructing clinical questions that can be slot-filled into logical-forms. Yue et al. (2021) applied an emrQA-trained question generation model paired with a human-in-the-loop to collect 1287 questions conditioned on and answerable from the given context.",
"cite_spans": [
{
"start": 297,
"end": 314,
"text": "(Yu et al., 2007;",
"ref_id": "BIBREF64"
},
{
"start": 315,
"end": 332,
"text": "Cao et al., 2011;",
"ref_id": "BIBREF10"
},
{
"start": 333,
"end": 362,
"text": "Abacha and Zweigenbaum, 2015;",
"ref_id": "BIBREF2"
},
{
"start": 363,
"end": 383,
"text": "Abacha et al., 2017;",
"ref_id": "BIBREF0"
},
{
"start": 384,
"end": 403,
"text": "Zahid et al., 2018;",
"ref_id": "BIBREF67"
},
{
"start": 404,
"end": 424,
"text": "Savery et al., 2020;",
"ref_id": "BIBREF51"
},
{
"start": 425,
"end": 442,
"text": "Zhu et al., 2020;",
"ref_id": "BIBREF69"
},
{
"start": 443,
"end": 463,
"text": "Abacha et al., 2019)",
"ref_id": "BIBREF1"
},
{
"start": 476,
"end": 503,
"text": "Suster and Daelemans (2018)",
"ref_id": "BIBREF58"
},
{
"start": 1205,
"end": 1226,
"text": "Pampari et al. (2018)",
"ref_id": "BIBREF43"
},
{
"start": 1411,
"end": 1429,
"text": "Soni et al. (2019)",
"ref_id": "BIBREF56"
},
{
"start": 1538,
"end": 1555,
"text": "Yue et al. (2021)",
"ref_id": "BIBREF66"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Clinical Question Datasets",
"sec_num": "2.1"
},
{
"text": "In contrast, in our data collection process we do not restrict the medical expert to ask only questions answerable from a particular part of the discharge summary. This leads to more diverse and natural questions. Additionally, in DiSCQ each question is associated with a span of text that triggered the question.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Clinical Question Datasets",
"sec_num": "2.1"
},
{
"text": "Question Generation (QG) is a challenging task that requires a combination of reading comprehen-",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Question Generation",
"sec_num": "2.2"
},
{
"text": "[Figure 2 panels: (1) Identification of triggers; (2) Generation of questions given the discharge summary and trigger (e.g., trigger: \"stent was placed and patient was treated with antibiotics\", generated question: \"Were antibiotics given?\"); (3) Unsupervised retrieval of answer from the unstructured EHR.]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discharge Summary",
"sec_num": null
},
{
"text": "Unstructured EHR Figure 2 : Schematic of the pipeline process used to generate and answer questions.",
"cite_spans": [],
"ref_spans": [
{
"start": 17,
"end": 25,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Discharge Summary",
"sec_num": null
},
{
"text": "sion and text generation. Successful QG models may aid in education (Heilman and Smith, 2010; Du et al., 2017) , creating dialogue systems or chatbots (Shang et al., 2015; Mostafazadeh et al., 2016; Shum et al., 2018) , building datasets or improving question answering models through data augmentation Dong et al., 2019; Puri et al., 2020; Yue et al., 2021) .",
"cite_spans": [
{
"start": 68,
"end": 93,
"text": "(Heilman and Smith, 2010;",
"ref_id": "BIBREF22"
},
{
"start": 94,
"end": 110,
"text": "Du et al., 2017)",
"ref_id": "BIBREF18"
},
{
"start": 151,
"end": 171,
"text": "(Shang et al., 2015;",
"ref_id": "BIBREF54"
},
{
"start": 172,
"end": 198,
"text": "Mostafazadeh et al., 2016;",
"ref_id": "BIBREF37"
},
{
"start": 199,
"end": 217,
"text": "Shum et al., 2018)",
"ref_id": "BIBREF55"
},
{
"start": 303,
"end": 321,
"text": "Dong et al., 2019;",
"ref_id": "BIBREF17"
},
{
"start": 322,
"end": 340,
"text": "Puri et al., 2020;",
"ref_id": "BIBREF46"
},
{
"start": 341,
"end": 358,
"text": "Yue et al., 2021)",
"ref_id": "BIBREF66"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Discharge Summary",
"sec_num": null
},
{
"text": "Most QG approaches can be broken down into either rule-based or neural methods. Rule-based approaches often involve slot filling templatized questions (Heilman and Smith, 2010; Mazidi and Nielsen, 2014; Labutov et al., 2015; Chali and Hasan, 2015; Pampari et al., 2018) . While often effective at generating numerous questions, these methods are very rigid, as virtually any domain change requires a new set of rules. This problem is particularly important in medical QG, as different types of practices may focus on varying aspects of a patient and therefore ask different questions.",
"cite_spans": [
{
"start": 151,
"end": 176,
"text": "(Heilman and Smith, 2010;",
"ref_id": "BIBREF22"
},
{
"start": 177,
"end": 202,
"text": "Mazidi and Nielsen, 2014;",
"ref_id": "BIBREF36"
},
{
"start": 203,
"end": 224,
"text": "Labutov et al., 2015;",
"ref_id": "BIBREF28"
},
{
"start": 225,
"end": 247,
"text": "Chali and Hasan, 2015;",
"ref_id": "BIBREF12"
},
{
"start": 248,
"end": 269,
"text": "Pampari et al., 2018)",
"ref_id": "BIBREF43"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Discharge Summary",
"sec_num": null
},
{
"text": "Compared to rule-based methods, sequence-tosequence models (Serban et al., 2016; Du et al., 2017) and more recently transformer-based models (Dong et al., 2019; Qi et al., 2020; Lelkes et al., 2021; Murakhovs'ka et al., 2021; Luo et al., 2021) allow for generation of more diverse questions and can potentially mitigate the problem of domain generalization via large-scale pre-training (Brown et al., 2020) or domain adaptation techniques. We choose to train both BART (Lewis et al., 2020) and T0 (Sanh et al., 2021) models for the task of question generation due to their high performance and ability to generalize to new tasks.",
"cite_spans": [
{
"start": 59,
"end": 80,
"text": "(Serban et al., 2016;",
"ref_id": "BIBREF53"
},
{
"start": 81,
"end": 97,
"text": "Du et al., 2017)",
"ref_id": "BIBREF18"
},
{
"start": 141,
"end": 160,
"text": "(Dong et al., 2019;",
"ref_id": "BIBREF17"
},
{
"start": 161,
"end": 177,
"text": "Qi et al., 2020;",
"ref_id": "BIBREF47"
},
{
"start": 178,
"end": 198,
"text": "Lelkes et al., 2021;",
"ref_id": "BIBREF30"
},
{
"start": 199,
"end": 225,
"text": "Murakhovs'ka et al., 2021;",
"ref_id": "BIBREF39"
},
{
"start": 226,
"end": 243,
"text": "Luo et al., 2021)",
"ref_id": "BIBREF35"
},
{
"start": 386,
"end": 406,
"text": "(Brown et al., 2020)",
"ref_id": "BIBREF25"
},
{
"start": 469,
"end": 489,
"text": "(Lewis et al., 2020)",
"ref_id": "BIBREF31"
},
{
"start": 497,
"end": 516,
"text": "(Sanh et al., 2021)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Discharge Summary",
"sec_num": null
},
{
"text": "We work with 10 medical experts of varying skill levels, ranging from senior medical students to practicing MDs, to construct a dataset of 2029 questions over 100+ discharge summaries from MIMIC-III (Johnson et al., 2016).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "DiSCQ Dataset",
"sec_num": "3"
},
{
"text": "The goal of our question collection is to gather questions that may be asked by healthcare providers during patient handoff (i.e., transitions of care). We use the patient discharge summary to simulate the handoff process, 3 where the discharge summary is the communication from the previous physician regarding the patient's care, treatment and current status. Annotators are asked to review the discharge summary as the receiving physician and ask any questions they may have as the physician taking over the care of this patient.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dataset Collection",
"sec_num": "3.1"
},
{
"text": "Annotators are instructed to read the discharge summary line-by-line and record (1) any questions that may be important with respect to the patient's future care, and, (2) the text within the note that triggered the question. This may mean that questions asked early on may be answered later in the discharge summary. Annotators are permitted to go back and ask questions if they feel the need to do so. To capture the annotators' natural thought processes, we purposely provide only minimal guidance to annotators on how to select a trigger or what type of questions to ask. We only ask that annotators use the minimum span of text when specifying a trigger. 4 We also encourage all questions to be asked in whatever format they feel appropriate. This leads to many informal queries, in which questions are incomplete or grammatically incorrect (Figure 1 ). Further, we encourage all types of questions to be asked, regardless of whether they could be answered based on the EHR. We also allow the annotators to ask an arbitrary number of questions. This allows for annotators to skip discharge summaries entirely should they not have any questions.",
"cite_spans": [
{
"start": 660,
"end": 661,
"text": "4",
"ref_id": null
}
],
"ref_spans": [
{
"start": 846,
"end": 855,
"text": "(Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Dataset Collection",
"sec_num": "3.1"
},
{
"text": "The trigger/question pairs are generated over entire discharge summaries. We instruct annotators to select the minimum span that they used as the trigger to their question; this leads to triggers of length 5.0 \u00b1 14.1 tokens. We additionally find that there are 1.86 \u00b1 1.56 questions per trigger. As mentioned previously, we encourage our medical experts to ask questions however they feel most comfortable. This led to a wide variety in how questions were asked, with some entirely self-contained (46%), others requiring the trigger for understanding (46%), and some requiring the entire sentence containing the trigger to comprehend (8%). 5 We also observe that 59% of the bi-grams in our questions are unique (i.e., over half of all bi-grams that appear in one question are not seen in any other question), demonstrating the diversity of how our questions are asked (Table 1) . We additionally examine where in the discharge summary annotators tend to select triggers from. We find that a majority of triggers are selected from the Hospital Course (13%) and History of Present Illness (39%) sections. This is unsurprising, as these are the narrative sections of the note where the patient's history prior to admission and their medical care during hospitalization are described. Further, we find that a majority of triggers selected are either a Problem or Sign/Symptom (Figure 3 ). This aligns with our intuition, as clinicians are often trained to organize patient information from a problem-oriented perspective. Moreover, developing a differential diagnosis usually begins with gathering details of the patient's clinical presentation.",
"cite_spans": [
{
"start": 640,
"end": 641,
"text": "5",
"ref_id": null
}
],
"ref_spans": [
{
"start": 868,
"end": 877,
"text": "(Table 1)",
"ref_id": "TABREF4"
},
{
"start": 1372,
"end": 1381,
"text": "(Figure 3",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Dataset Statistics",
"sec_num": "3.2"
},
{
"text": "In Figure 4 , we examine the types of information needs exhibited by our questions. We find that 83% and 80% of the questions cate- 5 Based on a sample of 100 questions. gorized as Sign/Symptom and Problem, respectively, stem from the same category of trigger. Sign/Symptom questions generated from Sign/Symptom triggers are usually asking about associated symptoms (e.g., Trigger: dysuria; Question: Any perineal rash or irritation?) or additional details about the trigger (e.g., onset, timing). Similarly, Problem questions generated from Problem triggers are usually asking about associated comorbid conditions or additional details of a diagnosis (e.g., date of diagnosis, severity). We interestingly find that 62% of the Treatment questions and 56% of the Test Results questions are derived from triggers of type Problem. This can be attributed to diagnostic tests being used to monitor disease progression and treatment questions asking about how a problem is managed. As a soundness check, we randomly sample 100 questions from our dataset and find that only 22% of them directly map to emrQA templates. Of the 22 that match, 17 of them map directly to |problem|? and |test|?. Additionally, we sample 100 questions to determine where a physician would hypothetically search the EHR should they choose to find the answers to these questions. 6 We find that one of the authors, a physician, would search external resources 3% of the time, the structured data 20% of the time and both the structured and unstructured data 21% of the time. The remaining 56% of questions would be answered solely from unstructured EHR data. This differs significantly from both emrQA and CliniQG4QA, in which all questions are answerable using unstructured EHR data.",
"cite_spans": [
{
"start": 132,
"end": 133,
"text": "5",
"ref_id": null
},
{
"start": 1349,
"end": 1350,
"text": "6",
"ref_id": null
}
],
"ref_spans": [
{
"start": 3,
"end": 11,
"text": "Figure 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Dataset Statistics",
"sec_num": "3.2"
},
{
"text": "As mentioned previously, we provide only minimal guidance on how to select a trigger or what type of question to ask, in order to capture the annotators' natural thought processes. The task is purposely presented in an open-ended fashion to encourage natural questions. This may lead to situations in which two annotators examining the same discharge summary focus on entirely different aspects of the patient. Such a scenario is likely to be common, as if most experts agree that a piece of information is important, then it would likely already be in the discharge summary. We can attempt to measure this variation between medical experts by calculating trigger level agreement in documents annotated by two different annotators (roughly 50% of discharge summaries in DiSCQ). We find a Cohen Kappa of 0.08. 7 This lower agreement can be expected, as different spans can express the same information due to information redundancy in clinical notes. Furthermore, clinical reasoning is not a linear process; therefore, different triggers can lead to the same question. For example, an expression of elevated blood pressure (\"blood pressure of 148 to 162/45 to 54\") and a diagnosis of hypertension (\"Hypertension\") led two annotators to both ask about the patient's normal blood pressure range. We do not measure agreement of questions asked, as this is an inherently subjective task and questions are asked because of differences between medical experts.",
"cite_spans": [
{
"start": 809,
"end": 810,
"text": "7",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Dataset Statistics",
"sec_num": "3.2"
},
{
"text": "We consider the task of generating questions that are relevant to a patient's care, given a discharge summary and a trigger. Afterwards, we attempt to find answers to these generated questions (Figure 2) . We also examine model performance for when the trigger is not provided and must instead be predicted. The task of generating questions without triggers can be viewed similarly to answer-agnostic question generation. We take a similar approach to (Subramanian et al., 2018) , in which we implement a pipeline system that first selects key phrases from the passage and then generates questions about the selected key phrases.",
"cite_spans": [
{
"start": 452,
"end": 478,
"text": "(Subramanian et al., 2018)",
"ref_id": "BIBREF57"
}
],
"ref_spans": [
{
"start": 193,
"end": 203,
"text": "(Figure 2)",
"ref_id": null
}
],
"eq_spans": [],
"section": "Task Setup",
"sec_num": "4"
},
{
"text": "While a majority of past works attempt to ensure that the generated question is answerable (Nema et al., 2019; Pan et al., 2020; Wang et al., 2020a; Huang et al., 2021) , we do not impose this constraint. In fact, we argue that the ability to generate unanswerable questions is necessary for realworld applications, as a question answering system should be able to identify such questions. These questions can be used as hard-negatives to train and calibrate QA systems.",
"cite_spans": [
{
"start": 91,
"end": 110,
"text": "(Nema et al., 2019;",
"ref_id": "BIBREF40"
},
{
"start": 111,
"end": 128,
"text": "Pan et al., 2020;",
"ref_id": "BIBREF44"
},
{
"start": 129,
"end": 148,
"text": "Wang et al., 2020a;",
"ref_id": "BIBREF61"
},
{
"start": 149,
"end": 168,
"text": "Huang et al., 2021)",
"ref_id": "BIBREF23"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Task Setup",
"sec_num": "4"
},
{
"text": "Pre-trained transformers have become ubiquitous in many natural language processing tasks (Devlin et al., 2019; Raffel et al., 2020; Sanh et al., 2021) , including natural language generation (Lewis et al., 2020; Bao et al., 2020) . Additionally, large-scale transformers have demonstrated the importance of parameter count for both upstream (Kaplan et al., 2020) and downstream tasks, especially in lowresource settings (Brown et al., 2020; Sanh et al., 2021) . As these results were mainly shown in nonclinical general domains, we find it important to evaluate both medium-sized and large models.",
"cite_spans": [
{
"start": 90,
"end": 111,
"text": "(Devlin et al., 2019;",
"ref_id": "BIBREF16"
},
{
"start": 112,
"end": 132,
"text": "Raffel et al., 2020;",
"ref_id": "BIBREF48"
},
{
"start": 133,
"end": 151,
"text": "Sanh et al., 2021)",
"ref_id": null
},
{
"start": 192,
"end": 212,
"text": "(Lewis et al., 2020;",
"ref_id": "BIBREF31"
},
{
"start": 213,
"end": 230,
"text": "Bao et al., 2020)",
"ref_id": "BIBREF6"
},
{
"start": 342,
"end": 363,
"text": "(Kaplan et al., 2020)",
"ref_id": "BIBREF25"
},
{
"start": 421,
"end": 441,
"text": "(Brown et al., 2020;",
"ref_id": "BIBREF25"
},
{
"start": 442,
"end": 460,
"text": "Sanh et al., 2021)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Models",
"sec_num": "5"
},
{
"text": "We formulate trigger detection as a tagging problem, for which we fine-tune ClinicalBERT (Alsentzer et al., 2019) . For question generation, we experiment with both BART (406M parameters) (Lewis et al., 2020) and T0 (11B parameters) (Sanh et al., 2021) . Question generation is formulated as a conditional generation problem and modelled via a sequence-to-sequence approach. During evaluation, we use greedy sampling to produce generated text.",
"cite_spans": [
{
"start": 89,
"end": 113,
"text": "(Alsentzer et al., 2019)",
"ref_id": "BIBREF3"
},
{
"start": 188,
"end": 208,
"text": "(Lewis et al., 2020)",
"ref_id": "BIBREF31"
},
{
"start": 233,
"end": 252,
"text": "(Sanh et al., 2021)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Models",
"sec_num": "5"
},
{
"text": "Reducing context size Due to memory constraints and the limited sequence length of pretrained models, we only select the part of the discharge summary containing the trigger. This is done in two possible ways: (1) extracting the sentence 8 with the trigger or multiple sentences if a trigger spans across sentence boundaries or (2) extracting a chunk of size 512 containing the trigger in it. To check if this context is actually used by the models we also fine-tune BART without extra discharge summary context (trigger text only).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Models",
"sec_num": "5"
},
{
"text": "Handling multiple questions 41% of the DiSCQ examples have multiple questions per trigger. Sometimes the questions depend on each other:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Models",
"sec_num": "5"
},
{
"text": "\u2022 What meds was used? dosage? and route of administration?",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Models",
"sec_num": "5"
},
{
"text": "\u2022 Any culture done? What were the findings?",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Models",
"sec_num": "5"
},
{
"text": "For this reason, we train and evaluate models in two different setups: split questions (by the ?-symbol) and combined questions. While the split-questions format might be more comparable to pre-existing work, the combined-questions setting likely models more realistic behavior of medical professionals.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Models",
"sec_num": "5"
},
{
"text": "Prompting Schick and Sch\u00fctze (2021) demonstrate that adding natural language instructions to the model input can significantly improve model quality. The area of prompting has recently gained widespread popularity ) and has had particular success in low-supervision scenarios (Schick and Sch\u00fctze, 2021) . T0 (Sanh et al., 2021) is a fine-tuned T5 (Raffel et al., 2020) model trained on 64 datasets and prompts from the Public Pool of Prompts (Bach et al., 2022) . Given a trigger and some context from the discharge summary, we finetune T0++ and BART with the following prompt: \"{context}After reading the above EMR, what question do you have about \"{trigger}\"? Question:\".",
"cite_spans": [
{
"start": 276,
"end": 302,
"text": "(Schick and Sch\u00fctze, 2021)",
"ref_id": "BIBREF52"
},
{
"start": 308,
"end": 327,
"text": "(Sanh et al., 2021)",
"ref_id": null
},
{
"start": 347,
"end": 368,
"text": "(Raffel et al., 2020)",
"ref_id": "BIBREF48"
},
{
"start": 434,
"end": 461,
"text": "Prompts (Bach et al., 2022)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Models",
"sec_num": "5"
},
{
"text": "We split 2029 questions into train (70%), validation (10%) and test (20%) sets 9 and fine-tune the models as described in Section 5. To evaluate trigger detection, we use token-level precision, recall and F1 score. For automated evaluation of question generation we use ROUGE-L (Lin, 2004) , METEOR (Banerjee and Lavie, 2005) and BERTScore metrics. To monitor the diversity of generated questions, we measure the fraction of unique questions on the evaluation set. As the question generation task has high variability of plausible generations, the utility of automatic metrics is debatable due to poor correlation with human evaluation (Callison-Burch et al., 2006; Novikova et al., 2017; Elliott and Keller, 2014; Bhandari et al., 2020) . For this reason, we additionally perform human evaluation (Section 7). 9 We use a document level split.",
"cite_spans": [
{
"start": 278,
"end": 289,
"text": "(Lin, 2004)",
"ref_id": "BIBREF33"
},
{
"start": 299,
"end": 325,
"text": "(Banerjee and Lavie, 2005)",
"ref_id": "BIBREF5"
},
{
"start": 636,
"end": 665,
"text": "(Callison-Burch et al., 2006;",
"ref_id": "BIBREF9"
},
{
"start": 666,
"end": 688,
"text": "Novikova et al., 2017;",
"ref_id": "BIBREF42"
},
{
"start": 689,
"end": 714,
"text": "Elliott and Keller, 2014;",
"ref_id": "BIBREF20"
},
{
"start": 715,
"end": 737,
"text": "Bhandari et al., 2020)",
"ref_id": "BIBREF7"
},
{
"start": 811,
"end": 812,
"text": "9",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Results",
"sec_num": "6"
},
{
"text": "As mentioned in Section 3, we collect triggers for each question asked. We train a simple Clinical-BERT model to predict whether or not each tokenpiece is a trigger. To ground these results, we additionally use ScispaCy Large (Neumann et al., 2019) to tag and classify all clinical entities as triggers. Results are shown in Table 2 . We see that our model exhibits poor performance likely due to the fact that there is low agreement between annotators about which spans to highlight when asking questions.",
"cite_spans": [
{
"start": 226,
"end": 248,
"text": "(Neumann et al., 2019)",
"ref_id": "BIBREF41"
}
],
"ref_spans": [
{
"start": 325,
"end": 332,
"text": "Table 2",
"ref_id": "TABREF6"
}
],
"eq_spans": [],
"section": "Trigger detection",
"sec_num": "6.1"
},
{
"text": "Automated metrics for question generation experiments are available in Table 4 . While generation diversity changes significantly between different models, ranging from 30% of unique questions to 79%, METEOR, ROUGE-L and BERTScore show very similar and low performance across the board.",
"cite_spans": [],
"ref_spans": [
{
"start": 71,
"end": 78,
"text": "Table 4",
"ref_id": "TABREF9"
}
],
"eq_spans": [],
"section": "Question generation",
"sec_num": "6.2"
},
{
"text": "However, upon observation, many of the generated questions seem reasonable (Table 3) , suggesting that these metrics might not fit the task. We hypothesize that this is caused by two reasons: (1) the short length of our questions and (2) a high number of potentially reasonable questions that could be generated. As we observe during the data collection process, different annotators seem to ask different questions despite citing the same trigger. For these reasons, human evaluation (Section 7) might be a more appropriate approach for testing the quality of these models.",
"cite_spans": [],
"ref_spans": [
{
"start": 75,
"end": 84,
"text": "(Table 3)",
"ref_id": "TABREF8"
}
],
"eq_spans": [],
"section": "Question generation",
"sec_num": "6.2"
},
{
"text": "In addition to identifying triggers and generating questions, we attempt to find answers to these questions. We only consider the unstructured portion of the EHR data. We train a ClinicalBERT model on emrQA augmented with unanswerable questions via negative sampling (Liang et al., 2022) . Due to the question's frequent dependency on the trigger, given a trigger and a question, we prompt the model with the following text: \"With respect to {trigger}, {question}?\". We first query the remainder of the discharge summary that the question was generated from. If we are unable to find an answer with probability above some threshold 10 , we query the model on prior patient notes. We then select the highest probability span and expand it to a sentence level prediction. We always return a prediction even in cases where all sentences are equally unlikely to be the answer.",
"cite_spans": [
{
"start": 267,
"end": 287,
"text": "(Liang et al., 2022)",
"ref_id": "BIBREF32"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Answer Selection",
"sec_num": "6.3"
},
{
"text": "Human evaluation is still the most reliable way to compare generative models for diverse tasks like question generation. Common categories for question generation to consider are grammar, difficulty, answerability and fluency (Nema et al., 2019; Tuan et al., 2019; Wang et al., 2020b; Huang et al., 2021) . However, not all of these categories are relevant to clinical question generation. We evaluate questions generated using our pipeline, as well as gold standard questions on the following four categories (binary scale):",
"cite_spans": [
{
"start": 226,
"end": 245,
"text": "(Nema et al., 2019;",
"ref_id": "BIBREF40"
},
{
"start": 246,
"end": 264,
"text": "Tuan et al., 2019;",
"ref_id": "BIBREF60"
},
{
"start": 265,
"end": 284,
"text": "Wang et al., 2020b;",
"ref_id": "BIBREF62"
},
{
"start": 285,
"end": 304,
"text": "Huang et al., 2021)",
"ref_id": "BIBREF23"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Human Evaluation",
"sec_num": "7"
},
{
"text": "Understandability Can an individual familiar with medical/clinical language understand the information needs expressed, even if the question is not a complete sentence or contains grammar/spelling errors?",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Human Evaluation",
"sec_num": "7"
},
{
"text": "Nontriviality Is the question unanswerable with respect to the sentence it was triggered/generated from? A question that would be considered trivial would be \"Did the patient have a fever?\" if the context presented was \"The patient had a fever\".",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Human Evaluation",
"sec_num": "7"
},
{
"text": "Relevancy to trigger Is the trigger or the sentence containing the trigger related to the question?",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Human Evaluation",
"sec_num": "7"
},
{
"text": "Clinical meaningfulness Will the answer to this question be helpful for further treatment of this patient or understanding the patient's current condition? Or alternatively, is it reasonable that a medical professional would ask this question given the provided context?",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Human Evaluation",
"sec_num": "7"
},
{
"text": "Annotations were divided evenly between medical experts. Each question is scored independently by two different annotators. However, due to time constraints, there are no discussions between annotators about their decisions. We also ensure that annotators did not receive discharge summaries that they had seen previously. Lastly, it is important to note that annotations were assigned blindly. Annotators were informed that they would be scoring both human and machine generated questions, but were not informed about (1) where the question was generated from (i.e., human or machine) and (2) the proportion of human:machine generated questions.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Human Evaluation",
"sec_num": "7"
},
{
"text": "We score questions using the tree presented in Figure 5 . In cases in which the question is both understandable and nontrivial, we additionally ask medical experts to determine whether or not the proposed answer fully answers, partially answers or is irrelevant to the question. Results can be seen in Table 5 and Table 6 .",
"cite_spans": [],
"ref_spans": [
{
"start": 47,
"end": 55,
"text": "Figure 5",
"ref_id": "FIGREF2"
},
{
"start": 302,
"end": 321,
"text": "Table 5 and Table 6",
"ref_id": null
}
],
"eq_spans": [],
"section": "Human Evaluation",
"sec_num": "7"
},
{
"text": "We evaluate performance of both the best BART and T0 model with respect to ROUGE-L score. We select 400 questions generated from each model, half of which are generated with gold triggers and the other half with predicted triggers, as described in Section 6.1. Two medical experts score each question. Due to the subjective nature of the task, we find moderate agreement between annotators with respect to scoring questions (\u03ba = 0.46) and scoring answer sufficiency (\u03ba = 0.47). We use the \"Satisfies All\" column (i.e., satisfies all four human evaluation categories) to calculate agreement between questions.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "8"
},
{
"text": "Results show that the T0 model prompted with gold triggers successfully generates a high-quality question 62.5% of the time (Table 5) . This model significantly outperforms BART when given goldstandard triggers. However, the performance significantly drops when the triggers are no longer provided. We find that T0 produces a large number of trivial questions when given a predicted trigger. More testing and investigation is needed to further understand this large drop in performance, as we do not observe this same behavior with BART.",
"cite_spans": [],
"ref_spans": [
{
"start": 124,
"end": 133,
"text": "(Table 5)",
"ref_id": null
}
],
"eq_spans": [],
"section": "Discussion",
"sec_num": "8"
},
{
"text": "As human evaluation demonstrates, despite low automatic metric scores, both BART and T0 achieve reasonable success in generating coherent, relevant and clinically interesting questions. To evaluate if the automated metrics can capture the quality of generated questions, we calculate the Spearman's Rank Correlation Coefficient between human evaluation and automatic metrics. We find extremely low and statistically insignificant correlation for ROUGE-L (-0.09), METEOR (-0.04) and BERTScore (-0.04). This is unsurprising, as these automatic metrics are not designed to capture the categories we examine during human evaluation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "8"
},
{
"text": "We also score the answers selected by our Clin-icalBERT model trained on emrQA (Section 6.3). Interestingly, we find that of the answers the model successfully recovers, 44% are extracted from the remainder of the discharge summary used to gen-erate the question. The remaining 56% come from nursing notes, Radiology/ECG reports and previous discharge summaries. However, for a majority of the questions, we are unable to recover a sufficient answer (Table 6 ). We sample 50 gold standard questions whose suggested answers were marked as invalid, in order to determine if this was due to the model's poor performance. We find that 36% of the questions do in fact have answers in the EHR, thus demonstrating the need for improved clinical QA resources and models.",
"cite_spans": [],
"ref_spans": [
{
"start": 450,
"end": 458,
"text": "(Table 6",
"ref_id": null
}
],
"eq_spans": [],
"section": "Discussion",
"sec_num": "8"
},
{
"text": "We present Discharge Summary Clinical Questions (DiSCQ), a new human-generated clinical question dataset composed of 2000+ questions paired with the snippets of text that prompted each question. We train baseline models for trigger detection and question generation. We find that despite poor performance on automatic metrics, we are Table 5 : We present results of human evaluation on generated questions. Gold refers to questions generated by medical experts. We do not annotate whether or not a question is nontrivial, relevant and clinically meaningful if it is not understandable, thus lowering the number of questions that satisfy these categories.",
"cite_spans": [],
"ref_spans": [
{
"start": 334,
"end": 341,
"text": "Table 5",
"ref_id": null
}
],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "9"
},
{
"text": "Gold -15.0% 7.50% BART Gold 13.75% 7.75% T0 Gold 11.5% 6.00% BART Predicted 14.5% 6.25% T0 Predicted 9.75% 3.25% Table 6 : Percent of the time that the answer retrieved by our model partially answers and fully answers the question.",
"cite_spans": [],
"ref_spans": [
{
"start": 113,
"end": 120,
"text": "Table 6",
"ref_id": null
}
],
"eq_spans": [],
"section": "Model Triggers Partially Fully",
"sec_num": null
},
{
"text": "able to produce reasonable questions in a majority of cases when given triggers selected by medical experts. However, we find that performance significantly drops when given machine predicted triggers. Further, we find that baseline models trained on emrQA are insufficient for recovering answers to both human and machine generated questions. Our results demonstrate that existing machine learning systems, including large-scale neural networks, struggle with the tasks we propose. We encourage the community to improve on our baseline models. We release this dataset and our code to facilitate further research into realistic clinical question answering and generation here.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Model Triggers Partially Fully",
"sec_num": null
},
{
"text": "https://www.i2b2.org/NLP/DataSets/",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "We discard any records pertaining to neonatal or deceased patients.4 Instructions given to annotators will be available here.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "We use the same sample of 100 questions as before. 7 This is calculated on a per-token level.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "Sentence splitting is performed using ScispaCy's en_core_sci_md.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "This threshold was chosen manually by examining question-answer pairs on a validation set.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "This work was supported and sponsored by the MIT-IBM Watson AI Lab. The authors would like to thank Sierra Tseng for feedback on a draft of this manuscript, as well as Melina Young and Maggie Liu for their help in designing some of the figures.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgements",
"sec_num": "10"
},
{
"text": "To run BART and T0, we make use of the Huggingface implementations (Wolf et al., 2019) . We additionally calculate automated metrics for question generation using Huggingface. For calculating Cohen Kappa, precision, recall, and F1 score, we use sklearn (Pedregosa et al., 2011) .",
"cite_spans": [
{
"start": 67,
"end": 86,
"text": "(Wolf et al., 2019)",
"ref_id": "BIBREF63"
},
{
"start": 253,
"end": 277,
"text": "(Pedregosa et al., 2011)",
"ref_id": "BIBREF45"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "A.1 Model and Metric Implementation",
"sec_num": null
},
{
"text": "We use a majority of the default settings provided by the Huggingface library (Wolf et al., 2019) . However, we do experiment with varying learning rates (2e-5, 2e-4, 3e-4, 4e-4), warm up steps (100, 200), and weight-decay (0, 1e-6, 1e-3, 1e-1). For the best BART model, we find that using a learning rate of 2e-4, warm up steps of 200, and weight decay of 1e-6 led to the best model. For the T0 model, we find that using a learning rate of 3e-4, running for 100 warmup steps and using a weight-decay of 0.1 led to the best performance. We run for 50 epochs on the BART model and 30 epochs on the T0 model. We use the best epoch with respect to evaluation loss. In our dev set evaluation, we use a beam search width of 5. We use a gradient accumulation step of 32 and 16 for our BART model and T0 model, respectively,",
"cite_spans": [
{
"start": 78,
"end": 97,
"text": "(Wolf et al., 2019)",
"ref_id": "BIBREF63"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "A.2 Model Hyperparameters",
"sec_num": null
},
{
"text": "For the BART models, we run on 4 GeForce GTX TITAN X. Due to the limited size of these GPUs, we only use a batch size of 1 per GPU. The BART style models take roughly 8 hours to finish training.For the T0 models, we train using eight V100 GPUs. We set batch size to be 2 per GPU. These models take roughly 24 hours to train.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "A.3 GPUs and Run Time",
"sec_num": null
},
{
"text": "We will release our code and data under MIMIC-III access. Carlini et al. (2021) warns against training large-scale transformer models (particularly ones for generation) on sensitive data. Although MIMIC-III notes consist of deidentified data, we will not release our model weights to the general public. With respect to the trigger detection system, there is less risk in releasing the model weights, as BERT has not been pretrained with generation tasks (Lehman et al., 2021) . We caution all follow up work to take these privacy concerns into account.",
"cite_spans": [
{
"start": 58,
"end": 79,
"text": "Carlini et al. (2021)",
"ref_id": null
},
{
"start": 455,
"end": 476,
"text": "(Lehman et al., 2021)",
"ref_id": "BIBREF29"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "A.4 Risk of Patient Privacy",
"sec_num": null
}
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Overview of the medical question answering task at trec 2017 liveqa",
"authors": [
{
"first": "Asma",
"middle": [],
"last": "Ben Abacha",
"suffix": ""
},
{
"first": "Eugene",
"middle": [],
"last": "Agichtein",
"suffix": ""
},
{
"first": "Yuval",
"middle": [],
"last": "Pinter",
"suffix": ""
},
{
"first": "Dina",
"middle": [],
"last": "Demner-Fushman",
"suffix": ""
}
],
"year": 2017,
"venue": "TREC",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Asma Ben Abacha, Eugene Agichtein, Yuval Pinter, and Dina Demner-Fushman. 2017. Overview of the medical question answering task at trec 2017 liveqa. In TREC.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Bridging the gap between consumers' medication questions and trusted answers",
"authors": [
{
"first": "Asma",
"middle": [],
"last": "Ben Abacha",
"suffix": ""
},
{
"first": "Yassine",
"middle": [],
"last": "Mrabet",
"suffix": ""
},
{
"first": "Mark",
"middle": [
"E"
],
"last": "Sharp",
"suffix": ""
},
{
"first": "Travis",
"middle": [
"R"
],
"last": "Goodwin",
"suffix": ""
},
{
"first": "Sonya",
"middle": [
"E"
],
"last": "Shooshan",
"suffix": ""
},
{
"first": "Dina",
"middle": [],
"last": "Demner-Fushman",
"suffix": ""
}
],
"year": 2019,
"venue": "Studies in health technology and informatics",
"volume": "264",
"issue": "",
"pages": "25--29",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Asma Ben Abacha, Yassine Mrabet, Mark E. Sharp, Travis R. Goodwin, Sonya E. Shooshan, and Dina Demner-Fushman. 2019. Bridging the gap between consumers' medication questions and trusted an- swers. Studies in health technology and informatics, 264:25-29.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Means: A medical question-answering system combining nlp techniques and semantic web technologies",
"authors": [
{
"first": "Asma",
"middle": [],
"last": "Ben Abacha",
"suffix": ""
},
{
"first": "Pierre",
"middle": [],
"last": "Zweigenbaum",
"suffix": ""
}
],
"year": 2015,
"venue": "Inf. Process. Manag",
"volume": "51",
"issue": "",
"pages": "570--594",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Asma Ben Abacha and Pierre Zweigenbaum. 2015. Means: A medical question-answering system com- bining nlp techniques and semantic web technologies. Inf. Process. Manag., 51:570-594.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Publicly available clinical BERT embeddings",
"authors": [
{
"first": "Emily",
"middle": [],
"last": "Alsentzer",
"suffix": ""
},
{
"first": "John",
"middle": [],
"last": "Murphy",
"suffix": ""
},
{
"first": "William",
"middle": [],
"last": "Boag",
"suffix": ""
},
{
"first": "Wei-Hung",
"middle": [],
"last": "Weng",
"suffix": ""
},
{
"first": "Di",
"middle": [],
"last": "Jindi",
"suffix": ""
},
{
"first": "Tristan",
"middle": [],
"last": "Naumann",
"suffix": ""
},
{
"first": "Matthew",
"middle": [],
"last": "Mcdermott",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 2nd Clinical Natural Language Processing Workshop",
"volume": "",
"issue": "",
"pages": "72--78",
"other_ids": {
"DOI": [
"10.18653/v1/W19-1909"
]
},
"num": null,
"urls": [],
"raw_text": "Emily Alsentzer, John Murphy, William Boag, Wei- Hung Weng, Di Jindi, Tristan Naumann, and Matthew McDermott. 2019. Publicly available clin- ical BERT embeddings. In Proceedings of the 2nd Clinical Natural Language Processing Workshop, pages 72-78, Minneapolis, Minnesota, USA. Associ- ation for Computational Linguistics.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Dragomir Radev, Mike Tian-Jian Jiang, and Alexander M. Rush. 2022. Promptsource: An integrated development environment and repository for natural language prompts",
"authors": [
{
"first": "H",
"middle": [],
"last": "Stephen",
"suffix": ""
},
{
"first": "Victor",
"middle": [],
"last": "Bach",
"suffix": ""
},
{
"first": "Zheng-Xin",
"middle": [],
"last": "Sanh",
"suffix": ""
},
{
"first": "Albert",
"middle": [],
"last": "Yong",
"suffix": ""
},
{
"first": "Colin",
"middle": [],
"last": "Webson",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Raffel",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Nihal",
"suffix": ""
},
{
"first": "Abheesht",
"middle": [],
"last": "Nayak",
"suffix": ""
},
{
"first": "Taewoon",
"middle": [],
"last": "Sharma",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Kim",
"suffix": ""
},
{
"first": "Thibault",
"middle": [],
"last": "Bari",
"suffix": ""
},
{
"first": "Zaid",
"middle": [],
"last": "Fevry",
"suffix": ""
},
{
"first": "Manan",
"middle": [],
"last": "Alyafeai",
"suffix": ""
},
{
"first": "Andrea",
"middle": [],
"last": "Dey",
"suffix": ""
},
{
"first": "Zhiqing",
"middle": [],
"last": "Santilli",
"suffix": ""
},
{
"first": "Srulik",
"middle": [],
"last": "Sun",
"suffix": ""
},
{
"first": "Canwen",
"middle": [],
"last": "Ben-David",
"suffix": ""
},
{
"first": "Gunjan",
"middle": [],
"last": "Xu",
"suffix": ""
},
{
"first": "Han",
"middle": [],
"last": "Chhablani",
"suffix": ""
},
{
"first": "Jason",
"middle": [
"Alan"
],
"last": "Wang",
"suffix": ""
},
{
"first": "Maged",
"middle": [
"S"
],
"last": "Fries",
"suffix": ""
},
{
"first": "Shanya",
"middle": [],
"last": "Al-Shaibani",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Sharma",
"suffix": ""
}
],
"year": null,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Stephen H. Bach, Victor Sanh, Zheng-Xin Yong, Al- bert Webson, Colin Raffel, Nihal V. Nayak, Ab- heesht Sharma, Taewoon Kim, M Saiful Bari, Thibault Fevry, Zaid Alyafeai, Manan Dey, An- drea Santilli, Zhiqing Sun, Srulik Ben-David, Can- wen Xu, Gunjan Chhablani, Han Wang, Jason Alan Fries, Maged S. Al-shaibani, Shanya Sharma, Ur- mish Thakker, Khalid Almubarak, Xiangru Tang, Dragomir Radev, Mike Tian-Jian Jiang, and Alexan- der M. Rush. 2022. Promptsource: An integrated development environment and repository for natural language prompts.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "METEOR: An automatic metric for MT evaluation with improved correlation with human judgments",
"authors": [
{
"first": "Satanjeev",
"middle": [],
"last": "Banerjee",
"suffix": ""
},
{
"first": "Alon",
"middle": [],
"last": "Lavie",
"suffix": ""
}
],
"year": 2005,
"venue": "Proceedings of the ACL Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization",
"volume": "",
"issue": "",
"pages": "65--72",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Satanjeev Banerjee and Alon Lavie. 2005. METEOR: An automatic metric for MT evaluation with im- proved correlation with human judgments. In Pro- ceedings of the ACL Workshop on Intrinsic and Ex- trinsic Evaluation Measures for Machine Transla- tion and/or Summarization, pages 65-72, Ann Arbor, Michigan. Association for Computational Linguis- tics.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Unilmv2: Pseudo-masked language models for unified language model pre-training",
"authors": [
{
"first": "Hangbo",
"middle": [],
"last": "Bao",
"suffix": ""
},
{
"first": "Li",
"middle": [],
"last": "Dong",
"suffix": ""
},
{
"first": "Furu",
"middle": [],
"last": "Wei",
"suffix": ""
},
{
"first": "Wenhui",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Nan",
"middle": [],
"last": "Yang",
"suffix": ""
},
{
"first": "Xiaodong",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Yu",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Songhao",
"middle": [],
"last": "Piao",
"suffix": ""
},
{
"first": "Jianfeng",
"middle": [],
"last": "Gao",
"suffix": ""
},
{
"first": "Ming",
"middle": [],
"last": "Zhou",
"suffix": ""
},
{
"first": "Hsiao-Wuen",
"middle": [],
"last": "Hon",
"suffix": ""
}
],
"year": 2020,
"venue": "ICML",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hangbo Bao, Li Dong, Furu Wei, Wenhui Wang, Nan Yang, Xiaodong Liu, Yu Wang, Songhao Piao, Jian- feng Gao, Ming Zhou, and Hsiao-Wuen Hon. 2020. Unilmv2: Pseudo-masked language models for uni- fied language model pre-training. In ICML.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Reevaluating evaluation in text summarization",
"authors": [
{
"first": "Manik",
"middle": [],
"last": "Bhandari",
"suffix": ""
},
{
"first": "Pranav",
"middle": [],
"last": "Narayan Gour",
"suffix": ""
},
{
"first": "Atabak",
"middle": [],
"last": "Ashfaq",
"suffix": ""
},
{
"first": "Graham",
"middle": [],
"last": "Peng Fei Liu",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Neubig",
"suffix": ""
}
],
"year": 2020,
"venue": "ArXiv",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Manik Bhandari, Pranav Narayan Gour, Atabak Ash- faq, Peng fei Liu, and Graham Neubig. 2020. Re- evaluating evaluation in text summarization. ArXiv, abs/2010.07100.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners",
"authors": [
{
"first": "Tom",
"middle": [],
"last": "Brown",
"suffix": ""
},
{
"first": "Benjamin",
"middle": [],
"last": "Mann",
"suffix": ""
},
{
"first": "Nick",
"middle": [],
"last": "Ryder",
"suffix": ""
},
{
"first": "Melanie",
"middle": [],
"last": "Subbiah",
"suffix": ""
},
{
"first": "Jared",
"middle": [
"D"
],
"last": "Kaplan",
"suffix": ""
},
{
"first": "Prafulla",
"middle": [],
"last": "Dhariwal",
"suffix": ""
},
{
"first": "Arvind",
"middle": [],
"last": "Neelakantan",
"suffix": ""
},
{
"first": "Pranav",
"middle": [],
"last": "Shyam",
"suffix": ""
},
{
"first": "Girish",
"middle": [],
"last": "Sastry",
"suffix": ""
},
{
"first": "Amanda",
"middle": [],
"last": "Askell",
"suffix": ""
},
{
"first": "Sandhini",
"middle": [],
"last": "Agarwal",
"suffix": ""
},
{
"first": "Ariel",
"middle": [],
"last": "Herbert-Voss",
"suffix": ""
},
{
"first": "Gretchen",
"middle": [],
"last": "Krueger",
"suffix": ""
},
{
"first": "Tom",
"middle": [],
"last": "Henighan",
"suffix": ""
},
{
"first": "Rewon",
"middle": [],
"last": "Child",
"suffix": ""
},
{
"first": "Aditya",
"middle": [],
"last": "Ramesh",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Ziegler",
"suffix": ""
},
{
"first": "Jeffrey",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Clemens",
"middle": [],
"last": "Winter",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Hesse",
"suffix": ""
},
{
"first": "Mark",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Eric",
"middle": [],
"last": "Sigler",
"suffix": ""
},
{
"first": "Mateusz",
"middle": [],
"last": "Litwin",
"suffix": ""
}
],
"year": null,
"venue": "Advances in Neural Information Processing Systems",
"volume": "33",
"issue": "",
"pages": "1877--1901",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Ma- teusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners. In Ad- vances in Neural Information Processing Systems, volume 33, pages 1877-1901. Curran Associates, Inc.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Re-evaluating the role of Bleu in machine translation research",
"authors": [
{
"first": "Chris",
"middle": [],
"last": "Callison",
"suffix": ""
},
{
"first": "-",
"middle": [],
"last": "Burch",
"suffix": ""
},
{
"first": "Miles",
"middle": [],
"last": "Osborne",
"suffix": ""
},
{
"first": "Philipp",
"middle": [],
"last": "Koehn",
"suffix": ""
}
],
"year": 2006,
"venue": "11th Conference of the European Chapter of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "249--256",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chris Callison-Burch, Miles Osborne, and Philipp Koehn. 2006. Re-evaluating the role of Bleu in ma- chine translation research. In 11th Conference of the European Chapter of the Association for Com- putational Linguistics, pages 249-256, Trento, Italy. Association for Computational Linguistics.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Askhermes: An online question answering system for complex clinical questions",
"authors": [
{
"first": "Yonggang",
"middle": [],
"last": "Cao",
"suffix": ""
},
{
"first": "F",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Pippa",
"middle": [
"M"
],
"last": "Simpson",
"suffix": ""
},
{
"first": "Lamont",
"middle": [
"D"
],
"last": "Antieau",
"suffix": ""
},
{
"first": "Andrew",
"middle": [
"S"
],
"last": "Bennett",
"suffix": ""
},
{
"first": "James",
"middle": [
"J"
],
"last": "Cimino",
"suffix": ""
},
{
"first": "John",
"middle": [
"W"
],
"last": "Ely",
"suffix": ""
},
{
"first": "Hong",
"middle": [],
"last": "Yu",
"suffix": ""
}
],
"year": 2011,
"venue": "Journal of biomedical informatics",
"volume": "44",
"issue": "",
"pages": "277--88",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yonggang Cao, F. Liu, Pippa M Simpson, Lamont D. Antieau, Andrew S. Bennett, James J. Cimino, John W. Ely, and Hong Yu. 2011. Askhermes: An online question answering system for complex clini- cal questions. Journal of biomedical informatics, 44 2:277-88.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Dawn Xiaodong Song, \u00dalfar Erlingsson, Alina Oprea, and Colin Raffel. 2021. Extracting training data from large language models",
"authors": [
{
"first": "Nicholas",
"middle": [],
"last": "Carlini",
"suffix": ""
},
{
"first": "Florian",
"middle": [],
"last": "Tram\u00e8r",
"suffix": ""
},
{
"first": "Eric",
"middle": [],
"last": "Wallace",
"suffix": ""
},
{
"first": "Matthew",
"middle": [],
"last": "Jagielski",
"suffix": ""
},
{
"first": "Ariel",
"middle": [],
"last": "Herbert-Voss",
"suffix": ""
},
{
"first": "Katherine",
"middle": [],
"last": "Lee",
"suffix": ""
},
{
"first": "Adam",
"middle": [],
"last": "Roberts",
"suffix": ""
},
{
"first": "Tom",
"middle": [
"B"
],
"last": "Brown",
"suffix": ""
}
],
"year": null,
"venue": "USENIX Security Symposium",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Nicholas Carlini, Florian Tram\u00e8r, Eric Wallace, Matthew Jagielski, Ariel Herbert-Voss, Katherine Lee, Adam Roberts, Tom B. Brown, Dawn Xiaodong Song, \u00dalfar Erlingsson, Alina Oprea, and Colin Raf- fel. 2021. Extracting training data from large lan- guage models. In USENIX Security Symposium.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Towards topicto-question generation",
"authors": [
{
"first": "Yllias",
"middle": [],
"last": "Chali",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Sadid",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Hasan",
"suffix": ""
}
],
"year": 2015,
"venue": "Computational Linguistics",
"volume": "41",
"issue": "1",
"pages": "1--20",
"other_ids": {
"DOI": [
"10.1162/COLI_a_00206"
]
},
"num": null,
"urls": [],
"raw_text": "Yllias Chali and Sadid A. Hasan. 2015. Towards topic- to-question generation. Computational Linguistics, 41(1):1-20.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "An evaluation of informationseeking behaviors of general pediatricians",
"authors": [
{
"first": "Clarence",
"middle": [],
"last": "Donna D'alessandro",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Kreiter",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Peterson",
"suffix": ""
}
],
"year": 2004,
"venue": "Pediatrics",
"volume": "113",
"issue": "",
"pages": "64--73",
"other_ids": {
"DOI": [
"10.1542/peds.113.1.64"
]
},
"num": null,
"urls": [],
"raw_text": "Donna D'Alessandro, Clarence Kreiter, and Michael Peterson. 2004. An evaluation of information- seeking behaviors of general pediatricians. Pedi- atrics, 113:64-9.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "What can natural language processing do for clinical decision support",
"authors": [
{
"first": "Dina",
"middle": [],
"last": "Demner-Fushman",
"suffix": ""
},
{
"first": "Wendy",
"middle": [],
"last": "Chapman",
"suffix": ""
},
{
"first": "Clement",
"middle": [],
"last": "Mcdonald",
"suffix": ""
}
],
"year": 2009,
"venue": "Journal of biomedical informatics",
"volume": "42",
"issue": "",
"pages": "760--72",
"other_ids": {
"DOI": [
"10.1016/j.jbi.2009.08.007"
]
},
"num": null,
"urls": [],
"raw_text": "Dina Demner-Fushman, Wendy Chapman, and Clement Mcdonald. 2009. What can natural language pro- cessing do for clinical decision support? Journal of biomedical informatics, 42:760-72.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "Consumer health information and question answering: helping consumers find answers to their health-related information needs",
"authors": [
{
"first": "Dina",
"middle": [],
"last": "Demner-Fushman",
"suffix": ""
},
{
"first": "Yassine",
"middle": [],
"last": "Mrabet",
"suffix": ""
},
{
"first": "Asma",
"middle": [],
"last": "Ben Abacha",
"suffix": ""
}
],
"year": 2020,
"venue": "Journal of the American Medical Informatics Association : JAMIA",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dina Demner-Fushman, Yassine Mrabet, and Asma Ben Abacha. 2020. Consumer health information and question answering: helping consumers find answers to their health-related information needs. Journal of the American Medical Informatics Association : JAMIA.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding",
"authors": [
{
"first": "Jacob",
"middle": [],
"last": "Devlin",
"suffix": ""
},
{
"first": "Ming-Wei",
"middle": [],
"last": "Chang",
"suffix": ""
},
{
"first": "Kenton",
"middle": [],
"last": "Lee",
"suffix": ""
},
{
"first": "Kristina",
"middle": [],
"last": "Toutanova",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
"volume": "1",
"issue": "",
"pages": "4171--4186",
"other_ids": {
"DOI": [
"10.18653/v1/N19-1423"
]
},
"num": null,
"urls": [],
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Unified language model pre-training for natural language understanding and generation",
"authors": [
{
"first": "Li",
"middle": [],
"last": "Dong",
"suffix": ""
},
{
"first": "Nan",
"middle": [],
"last": "Yang",
"suffix": ""
},
{
"first": "Wenhui",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Furu",
"middle": [],
"last": "Wei",
"suffix": ""
},
{
"first": "Xiaodong",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Yu",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Jianfeng",
"middle": [],
"last": "Gao",
"suffix": ""
},
{
"first": "Ming",
"middle": [],
"last": "Zhou",
"suffix": ""
},
{
"first": "Hsiao-Wuen",
"middle": [],
"last": "Hon",
"suffix": ""
}
],
"year": 2019,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Li Dong, Nan Yang, Wenhui Wang, Furu Wei, Xi- aodong Liu, Yu Wang, Jianfeng Gao, Ming Zhou, and Hsiao-Wuen Hon. 2019. Unified language model pre-training for natural language understanding and generation.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Learning to ask: Neural question generation for reading comprehension",
"authors": [
{
"first": "Xinya",
"middle": [],
"last": "Du",
"suffix": ""
},
{
"first": "Junru",
"middle": [],
"last": "Shao",
"suffix": ""
},
{
"first": "Claire",
"middle": [],
"last": "Cardie",
"suffix": ""
}
],
"year": 2017,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Xinya Du, Junru Shao, and Claire Cardie. 2017. Learn- ing to ask: Neural question generation for reading comprehension. CoRR, abs/1705.00106.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Question generation for question answering",
"authors": [
{
"first": "Nan",
"middle": [],
"last": "Duan",
"suffix": ""
},
{
"first": "Duyu",
"middle": [],
"last": "Tang",
"suffix": ""
},
{
"first": "Peng",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Ming",
"middle": [],
"last": "Zhou",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "866--874",
"other_ids": {
"DOI": [
"10.18653/v1/D17-1090"
]
},
"num": null,
"urls": [],
"raw_text": "Nan Duan, Duyu Tang, Peng Chen, and Ming Zhou. 2017. Question generation for question answering. In Proceedings of the 2017 Conference on Empiri- cal Methods in Natural Language Processing, pages 866-874, Copenhagen, Denmark. Association for Computational Linguistics.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "Comparing automatic evaluation measures for image description",
"authors": [
{
"first": "Desmond",
"middle": [],
"last": "Elliott",
"suffix": ""
},
{
"first": "Frank",
"middle": [],
"last": "Keller",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics",
"volume": "2",
"issue": "",
"pages": "452--457",
"other_ids": {
"DOI": [
"10.3115/v1/P14-2074"
]
},
"num": null,
"urls": [],
"raw_text": "Desmond Elliott and Frank Keller. 2014. Comparing automatic evaluation measures for image description. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 452-457, Baltimore, Maryland. Association for Computational Linguistics.",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "Annotating and characterizing clinical sentences with explicit why-QA cues",
"authors": [
{
"first": "Jungwei",
"middle": [],
"last": "Fan",
"suffix": ""
}
],
"year": 2019,
"venue": "Proceedings of the 2nd Clinical Natural Language Processing Workshop",
"volume": "",
"issue": "",
"pages": "101--106",
"other_ids": {
"DOI": [
"10.18653/v1/W19-1913"
]
},
"num": null,
"urls": [],
"raw_text": "Jungwei Fan. 2019. Annotating and characterizing clini- cal sentences with explicit why-QA cues. In Proceed- ings of the 2nd Clinical Natural Language Processing Workshop, pages 101-106, Minneapolis, Minnesota, USA. Association for Computational Linguistics.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "Good question! statistical ranking for question generation",
"authors": [
{
"first": "Michael",
"middle": [],
"last": "Heilman",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Noah",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Smith",
"suffix": ""
}
],
"year": 2010,
"venue": "Human Language Technologies: The 2010 Annual Conference of the North American Chapter of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "609--617",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Michael Heilman and Noah A. Smith. 2010. Good question! statistical ranking for question generation. In Human Language Technologies: The 2010 An- nual Conference of the North American Chapter of the Association for Computational Linguistics, pages 609-617, Los Angeles, California. Association for Computational Linguistics.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "Entity guided question generation with contextual structure and sequence information capturing",
"authors": [
{
"first": "Qingbao",
"middle": [],
"last": "Huang",
"suffix": ""
},
{
"first": "Mingyi",
"middle": [],
"last": "Fu",
"suffix": ""
},
{
"first": "Linzhang",
"middle": [],
"last": "Mo",
"suffix": ""
},
{
"first": "Yi",
"middle": [],
"last": "Cai",
"suffix": ""
},
{
"first": "Jingyun",
"middle": [],
"last": "Xu",
"suffix": ""
},
{
"first": "Pijian",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Qing",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Ho-Fung",
"middle": [],
"last": "Leung",
"suffix": ""
}
],
"year": 2021,
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence",
"volume": "35",
"issue": "",
"pages": "13064--13072",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Qingbao Huang, Mingyi Fu, Linzhang Mo, Yi Cai, Jingyun Xu, Pijian Li, Qing Li, and Ho-fung Leung. 2021. Entity guided question generation with con- textual structure and sequence information capturing. Proceedings of the AAAI Conference on Artificial Intelligence, 35(14):13064-13072.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "Mimiciii, a freely accessible critical care database",
"authors": [
{
"first": "E",
"middle": [
"W"
],
"last": "Alistair",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Johnson",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Tom",
"suffix": ""
},
{
"first": "Lu",
"middle": [],
"last": "Pollard",
"suffix": ""
},
{
"first": "H Lehman",
"middle": [],
"last": "Shen",
"suffix": ""
},
{
"first": "Mengling",
"middle": [],
"last": "Li-Wei",
"suffix": ""
},
{
"first": "Mohammad",
"middle": [],
"last": "Feng",
"suffix": ""
},
{
"first": "Benjamin",
"middle": [],
"last": "Ghassemi",
"suffix": ""
},
{
"first": "Peter",
"middle": [],
"last": "Moody",
"suffix": ""
},
{
"first": "Leo",
"middle": [
"Anthony"
],
"last": "Szolovits",
"suffix": ""
},
{
"first": "Roger G",
"middle": [],
"last": "Celi",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Mark",
"suffix": ""
}
],
"year": 2016,
"venue": "Scientific data",
"volume": "3",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Alistair EW Johnson, Tom J Pollard, Lu Shen, H Lehman Li-wei, Mengling Feng, Moham- mad Ghassemi, Benjamin Moody, Peter Szolovits, Leo Anthony Celi, and Roger G Mark. 2016. Mimic- iii, a freely accessible critical care database. Scien- tific data, 3:160035.",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "Scaling laws for neural language models",
"authors": [
{
"first": "Jared",
"middle": [],
"last": "Kaplan",
"suffix": ""
},
{
"first": "Sam",
"middle": [],
"last": "Mccandlish",
"suffix": ""
},
{
"first": "T",
"middle": [
"J"
],
"last": "Henighan",
"suffix": ""
},
{
"first": "Tom",
"middle": [
"B"
],
"last": "Brown",
"suffix": ""
},
{
"first": "Benjamin",
"middle": [],
"last": "Chess",
"suffix": ""
},
{
"first": "Rewon",
"middle": [],
"last": "Child",
"suffix": ""
},
{
"first": "Scott",
"middle": [],
"last": "Gray",
"suffix": ""
},
{
"first": "Alec",
"middle": [],
"last": "Radford",
"suffix": ""
},
{
"first": "Jeff",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Dario",
"middle": [],
"last": "Amodei",
"suffix": ""
}
],
"year": 2020,
"venue": "ArXiv",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jared Kaplan, Sam McCandlish, T. J. Henighan, Tom B. Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeff Wu, and Dario Amodei. 2020. Scaling laws for neural language models. ArXiv, abs/2001.08361.",
"links": null
},
"BIBREF26": {
"ref_id": "b26",
"title": "What would it take to get biomedical QA systems into practice?",
"authors": [
{
"first": "Gregory",
"middle": [],
"last": "Kell",
"suffix": ""
},
{
"first": "Iain",
"middle": [],
"last": "Marshall",
"suffix": ""
},
{
"first": "Byron",
"middle": [],
"last": "Wallace",
"suffix": ""
},
{
"first": "Andre",
"middle": [],
"last": "Jaun",
"suffix": ""
}
],
"year": 2021,
"venue": "Proceedings of the 3rd",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"DOI": [
"10.18653/v1/2021.mrqa-1.3"
]
},
"num": null,
"urls": [],
"raw_text": "Gregory Kell, Iain Marshall, Byron Wallace, and Andre Jaun. 2021. What would it take to get biomedical QA systems into practice? In Proceedings of the 3rd",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "Workshop on Machine Reading for Question Answering",
"authors": [],
"year": null,
"venue": "",
"volume": "",
"issue": "",
"pages": "28--41",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Workshop on Machine Reading for Question Answer- ing, pages 28-41, Punta Cana, Dominican Republic. Association for Computational Linguistics.",
"links": null
},
"BIBREF28": {
"ref_id": "b28",
"title": "Deep questions without deep understanding",
"authors": [
{
"first": "Igor",
"middle": [],
"last": "Labutov",
"suffix": ""
},
{
"first": "Sumit",
"middle": [],
"last": "Basu",
"suffix": ""
},
{
"first": "Lucy",
"middle": [],
"last": "Vanderwende",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing",
"volume": "1",
"issue": "",
"pages": "889--898",
"other_ids": {
"DOI": [
"10.3115/v1/P15-1086"
]
},
"num": null,
"urls": [],
"raw_text": "Igor Labutov, Sumit Basu, and Lucy Vanderwende. 2015. Deep questions without deep understanding. In Proceedings of the 53rd Annual Meeting of the As- sociation for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 889-898, Beijing, China. Association for Computational Lin- guistics.",
"links": null
},
"BIBREF29": {
"ref_id": "b29",
"title": "Does bert pretrained on clinical notes reveal sensitive data? ArXiv",
"authors": [
{
"first": "Eric",
"middle": [
"P"
],
"last": "Lehman",
"suffix": ""
},
{
"first": "Sarthak",
"middle": [],
"last": "Jain",
"suffix": ""
},
{
"first": "Karl",
"middle": [],
"last": "Pichotta",
"suffix": ""
},
{
"first": "Yoav",
"middle": [],
"last": "Goldberg",
"suffix": ""
},
{
"first": "Byron",
"middle": [
"C"
],
"last": "Wallace",
"suffix": ""
}
],
"year": 2021,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Eric P. Lehman, Sarthak Jain, Karl Pichotta, Yoav Gold- berg, and Byron C. Wallace. 2021. Does bert pre- trained on clinical notes reveal sensitive data? ArXiv, abs/2104.07762.",
"links": null
},
"BIBREF30": {
"ref_id": "b30",
"title": "Quiz-style question generation for news stories",
"authors": [
{
"first": "Adam",
"middle": [
"D"
],
"last": "Lelkes",
"suffix": ""
},
{
"first": "Q",
"middle": [],
"last": "Vinh",
"suffix": ""
},
{
"first": "Cong",
"middle": [],
"last": "Tran",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Yu",
"suffix": ""
}
],
"year": 2021,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Adam D. Lelkes, Vinh Q. Tran, and Cong Yu. 2021. Quiz-style question generation for news stories.",
"links": null
},
"BIBREF31": {
"ref_id": "b31",
"title": "BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension",
"authors": [
{
"first": "Mike",
"middle": [],
"last": "Lewis",
"suffix": ""
},
{
"first": "Yinhan",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Naman",
"middle": [],
"last": "Goyal",
"suffix": ""
},
{
"first": "Marjan",
"middle": [],
"last": "Ghazvininejad",
"suffix": ""
},
{
"first": "Abdelrahman",
"middle": [],
"last": "Mohamed",
"suffix": ""
},
{
"first": "Omer",
"middle": [],
"last": "Levy",
"suffix": ""
},
{
"first": "Veselin",
"middle": [],
"last": "Stoyanov",
"suffix": ""
},
{
"first": "Luke",
"middle": [],
"last": "Zettlemoyer",
"suffix": ""
}
],
"year": 2020,
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "7871--7880",
"other_ids": {
"DOI": [
"10.18653/v1/2020.acl-main.703"
]
},
"num": null,
"urls": [],
"raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and com- prehension. In Proceedings of the 58th Annual Meet- ing of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computa- tional Linguistics.",
"links": null
},
"BIBREF32": {
"ref_id": "b32",
"title": "Towards generalizable methods for automating risk score calculation",
"authors": [
{
"first": "Jennifer",
"middle": [
"J"
],
"last": "Liang",
"suffix": ""
},
{
"first": "Eric",
"middle": [],
"last": "Lehman",
"suffix": ""
},
{
"first": "Ananya",
"middle": [
"S"
],
"last": "Iyengar",
"suffix": ""
},
{
"first": "Diwakar",
"middle": [],
"last": "Mahajan",
"suffix": ""
},
{
"first": "Preethi",
"middle": [],
"last": "Raghavan",
"suffix": ""
},
{
"first": "Cindy",
"middle": [
"Y"
],
"last": "Chang",
"suffix": ""
},
{
"first": "Peter",
"middle": [],
"last": "Szolovits",
"suffix": ""
}
],
"year": 2022,
"venue": "Proceedings of the 21st SIGBioMed Workshop on Biomedical Language Processing",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jennifer J. Liang, Eric Lehman, Ananya S. Iyengar, Di- wakar Mahajan, Preethi Raghavan, Cindy Y. Chang, and Peter Szolovits. 2022. Towards generalizable methods for automating risk score calculation. In Proceedings of the 21st SIGBioMed Workshop on Biomedical Language Processing. Association for Computational Linguistics.",
"links": null
},
"BIBREF33": {
"ref_id": "b33",
"title": "ROUGE: A package for automatic evaluation of summaries",
"authors": [
{
"first": "Chin-Yew",
"middle": [],
"last": "Lin",
"suffix": ""
}
],
"year": 2004,
"venue": "Text Summarization Branches Out",
"volume": "",
"issue": "",
"pages": "74--81",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chin-Yew Lin. 2004. ROUGE: A package for auto- matic evaluation of summaries. In Text Summariza- tion Branches Out, pages 74-81, Barcelona, Spain. Association for Computational Linguistics.",
"links": null
},
"BIBREF34": {
"ref_id": "b34",
"title": "Pretrain, prompt, and predict: A systematic survey of prompting methods in natural language processing",
"authors": [
{
"first": "Pengfei",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Weizhe",
"middle": [],
"last": "Yuan",
"suffix": ""
},
{
"first": "Jinlan",
"middle": [],
"last": "Fu",
"suffix": ""
},
{
"first": "Zhengbao",
"middle": [],
"last": "Jiang",
"suffix": ""
},
{
"first": "Hiroaki",
"middle": [],
"last": "Hayashi",
"suffix": ""
},
{
"first": "Graham",
"middle": [],
"last": "Neubig",
"suffix": ""
}
],
"year": 2021,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Pengfei Liu, Weizhe Yuan, Jinlan Fu, Zhengbao Jiang, Hiroaki Hayashi, and Graham Neubig. 2021. Pre- train, prompt, and predict: A systematic survey of prompting methods in natural language processing.",
"links": null
},
"BIBREF35": {
"ref_id": "b35",
"title": "Cooperative learning of zero-shot machine reading comprehension",
"authors": [
{
"first": "Hongyin",
"middle": [],
"last": "Luo",
"suffix": ""
},
{
"first": "Seunghak",
"middle": [],
"last": "Yu",
"suffix": ""
},
{
"first": "Shang-Wen",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "James",
"middle": [
"R"
],
"last": "Glass",
"suffix": ""
}
],
"year": 2021,
"venue": "ArXiv",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hongyin Luo, Seunghak Yu, Shang-Wen Li, and James R. Glass. 2021. Cooperative learning of zero-shot machine reading comprehension. ArXiv, abs/2103.07449.",
"links": null
},
"BIBREF36": {
"ref_id": "b36",
"title": "Linguistic considerations in automatic question generation",
"authors": [
{
"first": "Karen",
"middle": [],
"last": "Mazidi",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Rodney",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Nielsen",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics",
"volume": "2",
"issue": "",
"pages": "321--326",
"other_ids": {
"DOI": [
"10.3115/v1/P14-2053"
]
},
"num": null,
"urls": [],
"raw_text": "Karen Mazidi and Rodney D. Nielsen. 2014. Linguistic considerations in automatic question generation. In Proceedings of the 52nd Annual Meeting of the As- sociation for Computational Linguistics (Volume 2: Short Papers), pages 321-326, Baltimore, Maryland. Association for Computational Linguistics.",
"links": null
},
"BIBREF37": {
"ref_id": "b37",
"title": "A corpus and cloze evaluation for deeper understanding of commonsense stories",
"authors": [
{
"first": "Nasrin",
"middle": [],
"last": "Mostafazadeh",
"suffix": ""
},
{
"first": "Nathanael",
"middle": [],
"last": "Chambers",
"suffix": ""
},
{
"first": "Xiaodong",
"middle": [],
"last": "He",
"suffix": ""
},
{
"first": "Devi",
"middle": [],
"last": "Parikh",
"suffix": ""
},
{
"first": "Dhruv",
"middle": [],
"last": "Batra",
"suffix": ""
},
{
"first": "Lucy",
"middle": [],
"last": "Vanderwende",
"suffix": ""
},
{
"first": "Pushmeet",
"middle": [],
"last": "Kohli",
"suffix": ""
},
{
"first": "James",
"middle": [],
"last": "Allen",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"DOI": [
"10.18653/v1/N16-1098"
]
},
"num": null,
"urls": [],
"raw_text": "Nasrin Mostafazadeh, Nathanael Chambers, Xiaodong He, Devi Parikh, Dhruv Batra, Lucy Vanderwende, Pushmeet Kohli, and James Allen. 2016. A corpus and cloze evaluation for deeper understanding of commonsense stories. In Proceedings of the 2016",
"links": null
},
"BIBREF38": {
"ref_id": "b38",
"title": "Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
"authors": [],
"year": null,
"venue": "",
"volume": "",
"issue": "",
"pages": "839--849",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 839-849, San Diego, California. Association for Computational Linguis- tics.",
"links": null
},
"BIBREF39": {
"ref_id": "b39",
"title": "Mixqg: Neural question generation with mixed answer types",
"authors": [
{
"first": "Lidiya",
"middle": [],
"last": "Murakhovs",
"suffix": ""
},
{
"first": "'",
"middle": [],
"last": "",
"suffix": ""
},
{
"first": "Chien Sheng",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Tong",
"middle": [],
"last": "Niu",
"suffix": ""
},
{
"first": "Wenhao",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Caiming",
"middle": [],
"last": "Xiong",
"suffix": ""
}
],
"year": 2021,
"venue": "ArXiv",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lidiya Murakhovs'ka, Chien Sheng Wu, Tong Niu, Wenhao Liu, and Caiming Xiong. 2021. Mixqg: Neural question generation with mixed answer types. ArXiv, abs/2110.08175.",
"links": null
},
"BIBREF40": {
"ref_id": "b40",
"title": "Let's ask again: Refine network for automatic question generation",
"authors": [
{
"first": "Preksha",
"middle": [],
"last": "Nema",
"suffix": ""
},
{
"first": "Akash",
"middle": [],
"last": "Kumar Mohankumar",
"suffix": ""
},
{
"first": "Mitesh",
"middle": [
"M"
],
"last": "Khapra",
"suffix": ""
},
{
"first": "Balaraman",
"middle": [],
"last": "Balaji Vasan Srinivasan",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Ravindran",
"suffix": ""
}
],
"year": 2019,
"venue": "ArXiv",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Preksha Nema, Akash Kumar Mohankumar, Mitesh M. Khapra, Balaji Vasan Srinivasan, and Balaraman Ravindran. 2019. Let's ask again: Refine net- work for automatic question generation. ArXiv, abs/1909.05355.",
"links": null
},
"BIBREF41": {
"ref_id": "b41",
"title": "Scispacy: Fast and robust models for biomedical natural language processing",
"authors": [
{
"first": "Mark",
"middle": [],
"last": "Neumann",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "King",
"suffix": ""
},
{
"first": "Iz",
"middle": [],
"last": "Beltagy",
"suffix": ""
},
{
"first": "Waleed",
"middle": [],
"last": "Ammar",
"suffix": ""
}
],
"year": 2019,
"venue": "ArXiv",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mark Neumann, Daniel King, Iz Beltagy, and Waleed Ammar. 2019. Scispacy: Fast and robust models for biomedical natural language processing. ArXiv, abs/1902.07669.",
"links": null
},
"BIBREF42": {
"ref_id": "b42",
"title": "Why we need new evaluation metrics for NLG",
"authors": [
{
"first": "Jekaterina",
"middle": [],
"last": "Novikova",
"suffix": ""
},
{
"first": "Ond\u0159ej",
"middle": [],
"last": "Du\u0161ek",
"suffix": ""
},
{
"first": "Amanda",
"middle": [
"Cercas"
],
"last": "Curry",
"suffix": ""
},
{
"first": "Verena",
"middle": [],
"last": "Rieser",
"suffix": ""
}
],
"year": 2017,
"venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "2241--2252",
"other_ids": {
"DOI": [
"10.18653/v1/D17-1238"
]
},
"num": null,
"urls": [],
"raw_text": "Jekaterina Novikova, Ond\u0159ej Du\u0161ek, Amanda Cer- cas Curry, and Verena Rieser. 2017. Why we need new evaluation metrics for NLG. In Proceedings of the 2017 Conference on Empirical Methods in Natu- ral Language Processing, pages 2241-2252, Copen- hagen, Denmark. Association for Computational Lin- guistics.",
"links": null
},
"BIBREF43": {
"ref_id": "b43",
"title": "emrqa: A large corpus for question answering on electronic medical records",
"authors": [
{
"first": "Anusri",
"middle": [],
"last": "Pampari",
"suffix": ""
},
{
"first": "Preethi",
"middle": [],
"last": "Raghavan",
"suffix": ""
},
{
"first": "Jennifer",
"middle": [],
"last": "Liang",
"suffix": ""
},
{
"first": "Jian",
"middle": [],
"last": "Peng",
"suffix": ""
}
],
"year": 2018,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Anusri Pampari, Preethi Raghavan, Jennifer Liang, and Jian Peng. 2018. emrqa: A large corpus for question answering on electronic medical records.",
"links": null
},
"BIBREF44": {
"ref_id": "b44",
"title": "Semantic graphs for generating deep questions",
"authors": [
{
"first": "Liangming",
"middle": [],
"last": "Pan",
"suffix": ""
},
{
"first": "Yuxi",
"middle": [],
"last": "Xie",
"suffix": ""
},
{
"first": "Yansong",
"middle": [],
"last": "Feng",
"suffix": ""
},
{
"first": "Tat-Seng",
"middle": [],
"last": "Chua",
"suffix": ""
},
{
"first": "Min-Yen",
"middle": [],
"last": "Kan",
"suffix": ""
}
],
"year": 2020,
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "1463--1475",
"other_ids": {
"DOI": [
"10.18653/v1/2020.acl-main.135"
]
},
"num": null,
"urls": [],
"raw_text": "Liangming Pan, Yuxi Xie, Yansong Feng, Tat-Seng Chua, and Min-Yen Kan. 2020. Semantic graphs for generating deep questions. In Proceedings of the 58th Annual Meeting of the Association for Compu- tational Linguistics, pages 1463-1475, Online. Asso- ciation for Computational Linguistics.",
"links": null
},
"BIBREF45": {
"ref_id": "b45",
"title": "Scikit-learn: Machine learning in Python",
"authors": [
{
"first": "F",
"middle": [],
"last": "Pedregosa",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Varoquaux",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Gramfort",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Michel",
"suffix": ""
},
{
"first": "B",
"middle": [],
"last": "Thirion",
"suffix": ""
},
{
"first": "O",
"middle": [],
"last": "Grisel",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Blondel",
"suffix": ""
},
{
"first": "P",
"middle": [],
"last": "Prettenhofer",
"suffix": ""
},
{
"first": "R",
"middle": [],
"last": "Weiss",
"suffix": ""
},
{
"first": "V",
"middle": [],
"last": "Dubourg",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Vanderplas",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Passos",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Cournapeau",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Brucher",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Perrot",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Duchesnay",
"suffix": ""
}
],
"year": 2011,
"venue": "Journal of Machine Learning Research",
"volume": "12",
"issue": "",
"pages": "2825--2830",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duch- esnay. 2011. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825-2830.",
"links": null
},
"BIBREF46": {
"ref_id": "b46",
"title": "Training question answering models from synthetic data",
"authors": [
{
"first": "Raul",
"middle": [],
"last": "Puri",
"suffix": ""
},
{
"first": "Ryan",
"middle": [],
"last": "Spring",
"suffix": ""
},
{
"first": "Mohammad",
"middle": [],
"last": "Shoeybi",
"suffix": ""
},
{
"first": "Mostofa",
"middle": [],
"last": "Patwary",
"suffix": ""
},
{
"first": "Bryan",
"middle": [],
"last": "Catanzaro",
"suffix": ""
}
],
"year": 2020,
"venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
"volume": "",
"issue": "",
"pages": "5811--5826",
"other_ids": {
"DOI": [
"10.18653/v1/2020.emnlp-main.468"
]
},
"num": null,
"urls": [],
"raw_text": "Raul Puri, Ryan Spring, Mohammad Shoeybi, Mostofa Patwary, and Bryan Catanzaro. 2020. Training question answering models from synthetic data. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 5811-5826, Online. Association for Computa- tional Linguistics.",
"links": null
},
"BIBREF47": {
"ref_id": "b47",
"title": "ProphetNet: Predicting future n-gram for sequence-to-SequencePre-training",
"authors": [
{
"first": "Weizhen",
"middle": [],
"last": "Qi",
"suffix": ""
},
{
"first": "Yu",
"middle": [],
"last": "Yan",
"suffix": ""
},
{
"first": "Yeyun",
"middle": [],
"last": "Gong",
"suffix": ""
},
{
"first": "Dayiheng",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Nan",
"middle": [],
"last": "Duan",
"suffix": ""
},
{
"first": "Jiusheng",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Ruofei",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Ming",
"middle": [],
"last": "Zhou",
"suffix": ""
}
],
"year": 2020,
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2020",
"volume": "",
"issue": "",
"pages": "2401--2410",
"other_ids": {
"DOI": [
"10.18653/v1/2020.findings-emnlp.217"
]
},
"num": null,
"urls": [],
"raw_text": "Weizhen Qi, Yu Yan, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, and Ming Zhou. 2020. ProphetNet: Predicting future n-gram for sequence-to-SequencePre-training. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 2401-2410, Online. Association for Computational Linguistics.",
"links": null
},
"BIBREF48": {
"ref_id": "b48",
"title": "Exploring the limits of transfer learning with a unified text-to-text transformer",
"authors": [
{
"first": "Colin",
"middle": [],
"last": "Raffel",
"suffix": ""
},
{
"first": "Noam",
"middle": [],
"last": "Shazeer",
"suffix": ""
},
{
"first": "Adam",
"middle": [],
"last": "Roberts",
"suffix": ""
},
{
"first": "Katherine",
"middle": [],
"last": "Lee",
"suffix": ""
},
{
"first": "Sharan",
"middle": [],
"last": "Narang",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Matena",
"suffix": ""
},
{
"first": "Yanqi",
"middle": [],
"last": "Zhou",
"suffix": ""
},
{
"first": "Wei",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Peter",
"middle": [
"J"
],
"last": "Liu",
"suffix": ""
}
],
"year": 2020,
"venue": "Journal of Machine Learning Research",
"volume": "21",
"issue": "140",
"pages": "1--67",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Kather- ine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research, 21(140):1-67.",
"links": null
},
"BIBREF49": {
"ref_id": "b49",
"title": "2021. emrK-BQA: A clinical knowledge-base question answering dataset",
"authors": [
{
"first": "Preethi",
"middle": [],
"last": "Raghavan",
"suffix": ""
},
{
"first": "Jennifer",
"middle": [
"J"
],
"last": "Liang",
"suffix": ""
},
{
"first": "Diwakar",
"middle": [],
"last": "Mahajan",
"suffix": ""
},
{
"first": "Rachita",
"middle": [],
"last": "Chandra",
"suffix": ""
},
{
"first": "Peter",
"middle": [],
"last": "Szolovits",
"suffix": ""
}
],
"year": null,
"venue": "Proceedings of the 20th Workshop on Biomedical Language Processing",
"volume": "",
"issue": "",
"pages": "64--73",
"other_ids": {
"DOI": [
"10.18653/v1/2021.bionlp-1.7"
]
},
"num": null,
"urls": [],
"raw_text": "Preethi Raghavan, Jennifer J Liang, Diwakar Mahajan, Rachita Chandra, and Peter Szolovits. 2021. emrK- BQA: A clinical knowledge-base question answering dataset. In Proceedings of the 20th Workshop on Biomedical Language Processing, pages 64-73, On- line. Association for Computational Linguistics.",
"links": null
},
"BIBREF51": {
"ref_id": "b51",
"title": "Question-driven summarization of answers to consumer health questions",
"authors": [
{
"first": "Max",
"middle": [
"E"
],
"last": "Savery",
"suffix": ""
},
{
"first": "Asma",
"middle": [],
"last": "Ben Abacha",
"suffix": ""
},
{
"first": "Soumya",
"middle": [],
"last": "Gayen",
"suffix": ""
},
{
"first": "Dina",
"middle": [],
"last": "Demner-Fushman",
"suffix": ""
}
],
"year": 2020,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Max E. Savery, Asma Ben Abacha, Soumya Gayen, and Dina Demner-Fushman. 2020. Question-driven sum- marization of answers to consumer health questions. Scientific Data, 7.",
"links": null
},
"BIBREF52": {
"ref_id": "b52",
"title": "It's not just size that matters: Small language models are also few-shot learners",
"authors": [
{
"first": "Timo",
"middle": [],
"last": "Schick",
"suffix": ""
},
{
"first": "Hinrich",
"middle": [],
"last": "Sch\u00fctze",
"suffix": ""
}
],
"year": 2021,
"venue": "ArXiv",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Timo Schick and Hinrich Sch\u00fctze. 2021. It's not just size that matters: Small language models are also few-shot learners. ArXiv, abs/2009.07118.",
"links": null
},
"BIBREF53": {
"ref_id": "b53",
"title": "Multiresolution recurrent neural networks: An application to dialogue response generation",
"authors": [
{
"first": "Iulian",
"middle": [],
"last": "Vlad Serban",
"suffix": ""
},
{
"first": "Tim",
"middle": [],
"last": "Klinger",
"suffix": ""
},
{
"first": "Gerald",
"middle": [],
"last": "Tesauro",
"suffix": ""
},
{
"first": "Kartik",
"middle": [],
"last": "Talamadupula",
"suffix": ""
},
{
"first": "Bowen",
"middle": [],
"last": "Zhou",
"suffix": ""
},
{
"first": "Yoshua",
"middle": [],
"last": "Bengio",
"suffix": ""
},
{
"first": "Aaron",
"middle": [
"C"
],
"last": "Courville",
"suffix": ""
}
],
"year": 2016,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Iulian Vlad Serban, Tim Klinger, Gerald Tesauro, Kartik Talamadupula, Bowen Zhou, Yoshua Bengio, and Aaron C. Courville. 2016. Multiresolution recurrent neural networks: An application to dialogue response generation. CoRR, abs/1606.00776.",
"links": null
},
"BIBREF54": {
"ref_id": "b54",
"title": "Neural responding machine for short-text conversation",
"authors": [
{
"first": "Lifeng",
"middle": [],
"last": "Shang",
"suffix": ""
},
{
"first": "Zhengdong",
"middle": [],
"last": "Lu",
"suffix": ""
},
{
"first": "Hang",
"middle": [],
"last": "Li",
"suffix": ""
}
],
"year": 2015,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lifeng Shang, Zhengdong Lu, and Hang Li. 2015. Neu- ral responding machine for short-text conversation. CoRR, abs/1503.02364.",
"links": null
},
"BIBREF55": {
"ref_id": "b55",
"title": "From eliza to xiaoice: Challenges and opportunities with social chatbots",
"authors": [
{
"first": "Heung-Yeung",
"middle": [],
"last": "Shum",
"suffix": ""
},
{
"first": "Xiaodong",
"middle": [],
"last": "He",
"suffix": ""
},
{
"first": "Di",
"middle": [],
"last": "Li",
"suffix": ""
}
],
"year": 2018,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Heung-Yeung Shum, Xiaodong He, and Di Li. 2018. From eliza to xiaoice: Challenges and opportunities with social chatbots.",
"links": null
},
"BIBREF56": {
"ref_id": "b56",
"title": "Using fhir to construct a corpus of clinical questions annotated with logical forms and answers. AMIA",
"authors": [
{
"first": "Sarvesh",
"middle": [],
"last": "Soni",
"suffix": ""
},
{
"first": "Meghana",
"middle": [],
"last": "Gudala",
"suffix": ""
},
{
"first": "Daisy",
"middle": [
"Zhe"
],
"last": "Wang",
"suffix": ""
},
{
"first": "Kirk",
"middle": [],
"last": "Roberts",
"suffix": ""
}
],
"year": 2019,
"venue": "Annual Symposium proceedings. AMIA Symposium",
"volume": "",
"issue": "",
"pages": "1207--1215",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sarvesh Soni, Meghana Gudala, Daisy Zhe Wang, and Kirk Roberts. 2019. Using fhir to construct a corpus of clinical questions annotated with logical forms and answers. AMIA ... Annual Symposium proceedings. AMIA Symposium, 2019:1207-1215.",
"links": null
},
"BIBREF57": {
"ref_id": "b57",
"title": "Neural models for key phrase extraction and question generation",
"authors": [
{
"first": "Sandeep",
"middle": [],
"last": "Subramanian",
"suffix": ""
},
{
"first": "Tong",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Xingdi",
"middle": [],
"last": "Yuan",
"suffix": ""
},
{
"first": "Saizheng",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Adam",
"middle": [],
"last": "Trischler",
"suffix": ""
},
{
"first": "Yoshua",
"middle": [],
"last": "Bengio",
"suffix": ""
}
],
"year": 2018,
"venue": "Proceedings of the Workshop on Machine Reading for Question Answering",
"volume": "",
"issue": "",
"pages": "78--88",
"other_ids": {
"DOI": [
"10.18653/v1/W18-2609"
]
},
"num": null,
"urls": [],
"raw_text": "Sandeep Subramanian, Tong Wang, Xingdi Yuan, Saizheng Zhang, Adam Trischler, and Yoshua Ben- gio. 2018. Neural models for key phrase extraction and question generation. In Proceedings of the Work- shop on Machine Reading for Question Answering, pages 78-88, Melbourne, Australia. Association for Computational Linguistics.",
"links": null
},
"BIBREF58": {
"ref_id": "b58",
"title": "Clicr: a dataset of clinical case reports for machine reading comprehension",
"authors": [
{
"first": "Simon",
"middle": [],
"last": "Suster",
"suffix": ""
},
{
"first": "Walter",
"middle": [],
"last": "Daelemans",
"suffix": ""
}
],
"year": 2018,
"venue": "NAACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Simon Suster and Walter Daelemans. 2018. Clicr: a dataset of clinical case reports for machine reading comprehension. In NAACL.",
"links": null
},
"BIBREF59": {
"ref_id": "b59",
"title": "Question answering and question generation as dual tasks",
"authors": [
{
"first": "Duyu",
"middle": [],
"last": "Tang",
"suffix": ""
},
{
"first": "Nan",
"middle": [],
"last": "Duan",
"suffix": ""
},
{
"first": "Tao",
"middle": [],
"last": "Qin",
"suffix": ""
},
{
"first": "Ming",
"middle": [],
"last": "Zhou",
"suffix": ""
}
],
"year": 2017,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Duyu Tang, Nan Duan, Tao Qin, and Ming Zhou. 2017. Question answering and question generation as dual tasks. CoRR, abs/1706.02027.",
"links": null
},
"BIBREF60": {
"ref_id": "b60",
"title": "Capturing greater context for question generation",
"authors": [
{
"first": "",
"middle": [],
"last": "Luu Anh Tuan",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Darsh",
"suffix": ""
},
{
"first": "Regina",
"middle": [],
"last": "Shah",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Barzilay",
"suffix": ""
}
],
"year": 2019,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Luu Anh Tuan, Darsh J Shah, and Regina Barzilay. 2019. Capturing greater context for question generation.",
"links": null
},
"BIBREF61": {
"ref_id": "b61",
"title": "Neural question generation with answer pivot",
"authors": [
{
"first": "Bingning",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Xiaochuan",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Ting",
"middle": [],
"last": "Tao",
"suffix": ""
},
{
"first": "Qi",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Jingfang",
"middle": [],
"last": "Xu",
"suffix": ""
}
],
"year": 2020,
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence",
"volume": "34",
"issue": "",
"pages": "9138--9145",
"other_ids": {
"DOI": [
"10.1609/aaai.v34i05.6449"
]
},
"num": null,
"urls": [],
"raw_text": "Bingning Wang, Xiaochuan Wang, Ting Tao, Qi Zhang, and Jingfang Xu. 2020a. Neural question generation with answer pivot. Proceedings of the AAAI Confer- ence on Artificial Intelligence, 34(05):9138-9145.",
"links": null
},
"BIBREF62": {
"ref_id": "b62",
"title": "Answer-driven deep question generation based on reinforcement learning",
"authors": [
{
"first": "Liuyin",
"middle": [],
"last": "Wang",
"suffix": ""
},
{
"first": "Zihan",
"middle": [],
"last": "Xu",
"suffix": ""
},
{
"first": "Zibo",
"middle": [],
"last": "Lin",
"suffix": ""
},
{
"first": "Haitao",
"middle": [],
"last": "Zheng",
"suffix": ""
},
{
"first": "Ying",
"middle": [],
"last": "Shen",
"suffix": ""
}
],
"year": 2020,
"venue": "Proceedings of the 28th International Conference on Computational Linguistics",
"volume": "",
"issue": "",
"pages": "5159--5170",
"other_ids": {
"DOI": [
"10.18653/v1/2020.coling-main.452"
]
},
"num": null,
"urls": [],
"raw_text": "Liuyin Wang, Zihan Xu, Zibo Lin, Haitao Zheng, and Ying Shen. 2020b. Answer-driven deep question gen- eration based on reinforcement learning. In Proceed- ings of the 28th International Conference on Com- putational Linguistics, pages 5159-5170, Barcelona, Spain (Online). International Committee on Compu- tational Linguistics.",
"links": null
},
"BIBREF63": {
"ref_id": "b63",
"title": "Huggingface's transformers: State-of-the-art natural language processing",
"authors": [
{
"first": "Thomas",
"middle": [],
"last": "Wolf",
"suffix": ""
},
{
"first": "Lysandre",
"middle": [],
"last": "Debut",
"suffix": ""
},
{
"first": "Victor",
"middle": [],
"last": "Sanh",
"suffix": ""
},
{
"first": "Julien",
"middle": [],
"last": "Chaumond",
"suffix": ""
},
{
"first": "Clement",
"middle": [],
"last": "Delangue",
"suffix": ""
},
{
"first": "Anthony",
"middle": [],
"last": "Moi",
"suffix": ""
},
{
"first": "Pierric",
"middle": [],
"last": "Cistac",
"suffix": ""
},
{
"first": "Tim",
"middle": [],
"last": "Rault",
"suffix": ""
},
{
"first": "R\u00e9mi",
"middle": [],
"last": "Louf",
"suffix": ""
},
{
"first": "Morgan",
"middle": [],
"last": "Funtowicz",
"suffix": ""
},
{
"first": "Jamie",
"middle": [],
"last": "Brew",
"suffix": ""
}
],
"year": 2019,
"venue": "ArXiv",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pier- ric Cistac, Tim Rault, R\u00e9mi Louf, Morgan Funtowicz, and Jamie Brew. 2019. Huggingface's transformers: State-of-the-art natural language processing. ArXiv, abs/1910.03771.",
"links": null
},
"BIBREF64": {
"ref_id": "b64",
"title": "Development, implementation, and a cognitive evaluation of a definitional question answering system for physicians",
"authors": [
{
"first": "Hong",
"middle": [],
"last": "Yu",
"suffix": ""
},
{
"first": "Minsuk",
"middle": [],
"last": "Lee",
"suffix": ""
},
{
"first": "David",
"middle": [
"R"
],
"last": "Kaufman",
"suffix": ""
},
{
"first": "John",
"middle": [
"W"
],
"last": "Ely",
"suffix": ""
},
{
"first": "Jerome",
"middle": [
"A"
],
"last": "Osheroff",
"suffix": ""
},
{
"first": "George",
"middle": [],
"last": "Hripcsak",
"suffix": ""
},
{
"first": "James",
"middle": [
"J"
],
"last": "Cimino",
"suffix": ""
}
],
"year": 2007,
"venue": "Journal of biomedical informatics",
"volume": "40",
"issue": "",
"pages": "236--51",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hong Yu, Minsuk Lee, David R. Kaufman, John W. Ely, Jerome A. Osheroff, George Hripcsak, and James J. Cimino. 2007. Development, implementation, and a cognitive evaluation of a definitional question an- swering system for physicians. Journal of biomedical informatics, 40 3:236-51.",
"links": null
},
"BIBREF65": {
"ref_id": "b65",
"title": "Clinical reading comprehension: A thorough analysis of the emrQA dataset",
"authors": [
{
"first": "Xiang",
"middle": [],
"last": "Yue",
"suffix": ""
},
{
"first": "Bernal",
"middle": [
"Jimenez"
],
"last": "Gutierrez",
"suffix": ""
},
{
"first": "Huan",
"middle": [],
"last": "Sun",
"suffix": ""
}
],
"year": 2020,
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "4474--4486",
"other_ids": {
"DOI": [
"10.18653/v1/2020.acl-main.410"
]
},
"num": null,
"urls": [],
"raw_text": "Xiang Yue, Bernal Jimenez Gutierrez, and Huan Sun. 2020. Clinical reading comprehension: A thorough analysis of the emrQA dataset. In Proceedings of the 58th Annual Meeting of the Association for Compu- tational Linguistics, pages 4474-4486, Online. Asso- ciation for Computational Linguistics.",
"links": null
},
"BIBREF66": {
"ref_id": "b66",
"title": "Cliniqg4qa: Generating diverse questions for domain adaptation of clinical question answering",
"authors": [
{
"first": "Xiang",
"middle": [],
"last": "Yue",
"suffix": ""
},
{
"first": "Xinliang",
"middle": [
"Frederick"
],
"last": "Zhang",
"suffix": ""
},
{
"first": "Ziyu",
"middle": [],
"last": "Yao",
"suffix": ""
},
{
"first": "Simon",
"middle": [],
"last": "Lin",
"suffix": ""
},
{
"first": "Huan",
"middle": [],
"last": "Sun",
"suffix": ""
}
],
"year": 2021,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Xiang Yue, Xinliang Frederick Zhang, Ziyu Yao, Simon Lin, and Huan Sun. 2021. Cliniqg4qa: Generating diverse questions for domain adaptation of clinical question answering.",
"links": null
},
"BIBREF67": {
"ref_id": "b67",
"title": "Cliniqa: A machine intelligence based clinical question answering system",
"authors": [
{
"first": "M",
"middle": [
"A H"
],
"last": "Zahid",
"suffix": ""
},
{
"first": "Ankush",
"middle": [],
"last": "Mittal",
"suffix": ""
},
{
"first": "Ramesh",
"middle": [
"Chandra"
],
"last": "Joshi",
"suffix": ""
},
{
"first": "Gowtham",
"middle": [],
"last": "Atluri",
"suffix": ""
}
],
"year": 2018,
"venue": "ArXiv",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "M. A. H. Zahid, Ankush Mittal, Ramesh Chandra Joshi, and Gowtham Atluri. 2018. Cliniqa: A machine intelligence based clinical question answering system. ArXiv, abs/1805.05927.",
"links": null
},
"BIBREF68": {
"ref_id": "b68",
"title": "Bertscore: Evaluating text generation with bert",
"authors": [
{
"first": "Tianyi",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Varsha",
"middle": [],
"last": "Kishore",
"suffix": ""
},
{
"first": "Felix",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Kilian",
"middle": [
"Q"
],
"last": "Weinberger",
"suffix": ""
},
{
"first": "Yoav",
"middle": [],
"last": "Artzi",
"suffix": ""
}
],
"year": 2020,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020. Bertscore: Evalu- ating text generation with bert.",
"links": null
},
"BIBREF69": {
"ref_id": "b69",
"title": "Question answering with long multiple-span answers",
"authors": [
{
"first": "Ming",
"middle": [],
"last": "Zhu",
"suffix": ""
},
{
"first": "Aman",
"middle": [],
"last": "Ahuja",
"suffix": ""
},
{
"first": "Da-Cheng",
"middle": [],
"last": "Juan",
"suffix": ""
},
{
"first": "Wei",
"middle": [],
"last": "Wei",
"suffix": ""
},
{
"first": "Chandan",
"middle": [
"K"
],
"last": "Reddy",
"suffix": ""
}
],
"year": 2020,
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2020",
"volume": "",
"issue": "",
"pages": "3840--3849",
"other_ids": {
"DOI": [
"10.18653/v1/2020.findings-emnlp.342"
]
},
"num": null,
"urls": [],
"raw_text": "Ming Zhu, Aman Ahuja, Da-Cheng Juan, Wei Wei, and Chandan K. Reddy. 2020. Question answering with long multiple-span answers. In Findings of the Asso- ciation for Computational Linguistics: EMNLP 2020, pages 3840-3849, Online. Association for Computa- tional Linguistics.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"num": null,
"uris": null,
"text": "Example of an annotated discharge summary section. The highlighted portion shows the \"trigger\" for the questions.",
"type_str": "figure"
},
"FIGREF1": {
"num": null,
"uris": null,
"text": "We randomly sample 100 gold triggers and have one of the authors, a physician, categorize the type of information that the trigger contains.",
"type_str": "figure"
},
"FIGREF2": {
"num": null,
"uris": null,
"text": "A breakdown of how questions are annotated.",
"type_str": "figure"
},
"TABREF0": {
"type_str": "table",
"content": "<table><tr><td>(1)</td><td>prostate cancer, benign prostatic</td></tr><tr><td/><td>hypertrophy</td></tr><tr><td/><td>Date of diagnosis? Any interventions done</td></tr><tr><td/><td>(RT, surgery)?</td></tr><tr><td>(2)</td><td>hypothyroidism</td></tr><tr><td/><td>Maintenance medications?</td></tr></table>",
"num": null,
"text": "methodsHis past medical history is signi cant for prostate cancer, benign prostatic hypertrophy, hypothyroidism, status post radiation for non Hodgkin's lymphoma, chronic painless hematuria, degenerative joint disease and history of a murmur.",
"html": null
},
"TABREF4": {
"type_str": "table",
"content": "<table/>",
"num": null,
"text": "",
"html": null
},
"TABREF6": {
"type_str": "table",
"content": "<table/>",
"num": null,
"text": "Trigger detection results on the test set.",
"html": null
},
"TABREF8": {
"type_str": "table",
"content": "<table><tr><td>Model Type</td><td colspan=\"6\">Context Split Qs Unique Question Ratio METEOR BERTScore ROUGE-L</td></tr><tr><td>BART</td><td>Trigger</td><td>N</td><td>0.301</td><td>3.6</td><td>0.856</td><td>10.2</td></tr><tr><td>BART</td><td>Trigger</td><td>Y</td><td>0.037</td><td>0.1</td><td>0.838</td><td>3.4</td></tr><tr><td>BART</td><td>Sentence</td><td>N</td><td>0.526</td><td>6.1</td><td>0.860</td><td>10.2</td></tr><tr><td>BART</td><td>Sentence</td><td>Y</td><td>0.468</td><td>7.8</td><td>0.858</td><td>12.0</td></tr><tr><td>BART</td><td>Chunk</td><td>N</td><td>0.741</td><td>7.9</td><td>0.861</td><td>11.9</td></tr><tr><td>BART</td><td>Chunk</td><td>Y</td><td>0.619</td><td>7.2</td><td>0.861</td><td>11.6</td></tr><tr><td>T0-11B T0-11B T0-11B T0-11B</td><td>Sentence Sentence Chunk Chunk</td><td>N Y N Y</td><td>0.779 0.410 0.398 0.400</td><td>3.9 8.4 3.7 6.7</td><td>0.861 0.884 0.860 0.879</td><td>11.9 12.2 12.4 10.9</td></tr></table>",
"num": null,
"text": "Example T0 model generations, cherry-picked. This model examines single sentences and is trained with combined questions. Trigger phrases are italicized.",
"html": null
},
"TABREF9": {
"type_str": "table",
"content": "<table/>",
"num": null,
"text": "Automated metrics for baseline models on the question generation task. Sentence and Chunk contexts include both the text surrounding the trigger and the trigger itself. Trigger context only includes trigger text. Split Qs means splitting multiple questions for a trigger into multiple examples (unique question ratio of these models should not be compared). Results given on dev set.",
"html": null
},
"TABREF10": {
"type_str": "table",
"content": "<table><tr><td>Gold</td><td>-</td><td>93.8%</td><td>86.0%</td><td>83.3%</td><td>82.3%</td><td>80.5%</td></tr><tr><td>BART</td><td>Gold</td><td>81.5%</td><td>59.8%</td><td>52.3%</td><td>54.8%</td><td>47.8%</td></tr><tr><td>T0</td><td>Gold</td><td>85.8%</td><td>72.3%</td><td>68.0%</td><td>66.5%</td><td>62.5%</td></tr><tr><td colspan=\"2\">BART Predicted</td><td>78.3%</td><td>57.3%</td><td>49.3%</td><td>49.8%</td><td>41.8%</td></tr><tr><td>T0</td><td>Predicted</td><td>76.8%</td><td>49.0%</td><td>45.0%</td><td>44.5%</td><td>41.0%</td></tr></table>",
"num": null,
"text": "ModelTriggers Understandable Nontrivial Relevant Clinically Meaningful Satisfies All",
"html": null
}
}
}
}