{ "paper_id": "2022", "header": { "generated_with": "S2ORC 1.0.0", "date_generated": "2023-01-19T16:31:33.021201Z" }, "title": "Docalog: Multi-document Dialogue System using Transformer-based Span Retrieval", "authors": [ { "first": "Sayed", "middle": [], "last": "Hesam Alavian", "suffix": "", "affiliation": { "laboratory": "", "institution": "Sharif University of Technology", "location": { "settlement": "Tehran", "country": "Iran" } }, "email": "" }, { "first": "Ali", "middle": [], "last": "Satvaty", "suffix": "", "affiliation": { "laboratory": "", "institution": "Sharif University of Technology", "location": { "settlement": "Tehran", "country": "Iran" } }, "email": "" }, { "first": "Sadra", "middle": [], "last": "Sabouri", "suffix": "", "affiliation": { "laboratory": "", "institution": "Sharif University of Technology", "location": { "settlement": "Tehran", "country": "Iran" } }, "email": "" }, { "first": "Ehsaneddin", "middle": [], "last": "Asgari", "suffix": "", "affiliation": { "laboratory": "", "institution": "Volkswagen AG", "location": { "settlement": "Munich", "country": "Germany" } }, "email": "asgari@berkeley.edu" }, { "first": "Hossein", "middle": [], "last": "Sameti", "suffix": "", "affiliation": { "laboratory": "", "institution": "Sharif University of Technology", "location": { "settlement": "Tehran", "country": "Iran" } }, "email": "sameti@sharif.edu" } ], "year": "", "venue": null, "identifiers": {}, "abstract": "Information-seeking dialogue systems, including knowledge identification and response generation, aim to respond to users with fluent, coherent, and informative answers based on users' needs. This paper discusses our proposed approach, Docalog, for the DialDoc-22 (Multi-Doc2Dial) shared task. Docalog identifies the most relevant knowledge in the associated document, in a multi-document setting. Docalog, is a three-stage pipeline consisting of (1) a document retriever model (DR. 
TEIT), (2) an answer span prediction model, and (3) an ultimate span picker deciding on the most likely answer span, out of all predicted spans. In the test phase of MultiDoc2Dial 2022, Docalog achieved f1scores of 36.07% and 28.44% and SacreBLEU scores of 23.70% and 20.52%, respectively on the MDD-SEEN and MDD-UNSEEN folds.", "pdf_parse": { "paper_id": "2022", "_pdf_hash": "", "abstract": [ { "text": "Information-seeking dialogue systems, including knowledge identification and response generation, aim to respond to users with fluent, coherent, and informative answers based on users' needs. This paper discusses our proposed approach, Docalog, for the DialDoc-22 (Multi-Doc2Dial) shared task. Docalog identifies the most relevant knowledge in the associated document, in a multi-document setting. Docalog, is a three-stage pipeline consisting of (1) a document retriever model (DR. TEIT), (2) an answer span prediction model, and (3) an ultimate span picker deciding on the most likely answer span, out of all predicted spans. In the test phase of MultiDoc2Dial 2022, Docalog achieved f1scores of 36.07% and 28.44% and SacreBLEU scores of 23.70% and 20.52%, respectively on the MDD-SEEN and MDD-UNSEEN folds.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Abstract", "sec_num": null } ], "body_text": [ { "text": "Introducing a machine-generated dialogue with a human level of intelligence has been consistently among dreams of artificial intelligence with a vast number of applications in different domains, ranging from entertainment (Baena-Perez et al., 2020) to healthcare systems (Montenegro et al., 2019; Bharti et al., 2020) . In such a system, the machine has to (i) understand the flow of conversation, (ii) raise informative questions, and (iii) answer problems in different domains of interest, and in some cases it has to act as an all-knowing agent (Dazeley et al., 2021) . Recent advances in NLP have made this dream closer to reality. 
In the last decade, the success of the neural language model in language understanding and generation has encouraged more and more contributions from both academia and industry in the area of conversational artificial intelligence (Fu et al., 2020) .", "cite_spans": [ { "start": 222, "end": 248, "text": "(Baena-Perez et al., 2020)", "ref_id": null }, { "start": 271, "end": 296, "text": "(Montenegro et al., 2019;", "ref_id": "BIBREF17" }, { "start": 297, "end": 317, "text": "Bharti et al., 2020)", "ref_id": "BIBREF1" }, { "start": 548, "end": 570, "text": "(Dazeley et al., 2021)", "ref_id": "BIBREF7" }, { "start": 867, "end": 884, "text": "(Fu et al., 2020)", "ref_id": "BIBREF11" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "The major efforts in conversational artificial intelligence can be categorized into three subareas (Zaib et al., 2021) :", "cite_spans": [ { "start": 99, "end": 118, "text": "(Zaib et al., 2021)", "ref_id": "BIBREF26" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "(i) chat-oriented systems, where the aim is to engage the users through a natural and fluent conversation (Nio et al., 2014) , the examples are Alexa 1 , Siri 2 , or Cortana 3 ; (ii) task-oriented systems, which are designed for a particular action, such as reserving a restaurant or planning an event by understanding the conversation (Yan et al., 2017) ; and (iii) QA dialog systems attempting to answer the user exploiting information deducted from a collection of seen documents or a knowledge base, for instance CoQA (Reddy et al., 2019) , QuAC (Choi et al., 2018) . 
Our work in this paper also falls in the third category.", "cite_spans": [ { "start": 106, "end": 124, "text": "(Nio et al., 2014)", "ref_id": "BIBREF18" }, { "start": 336, "end": 354, "text": "(Yan et al., 2017)", "ref_id": "BIBREF25" }, { "start": 522, "end": 542, "text": "(Reddy et al., 2019)", "ref_id": "BIBREF20" }, { "start": 550, "end": 569, "text": "(Choi et al., 2018)", "ref_id": "BIBREF4" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "In this system paper, we present our work on the DialDoc Shared Task 2022 centered on developing a QA dialogue system. A common approach to this problem comprises two subtasks of (i) knowledge identification (KI) to retrieve the knowledge from the documents and (ii) response generation (RG) to generate an answer based on the retrieved knowledge (Feng et al., 2020b; Kim et al., 2021) . The multi-document scenario, meaning that the related documents have to be retrieved before the answer generation, is the main distinction between the DialDoc Shared Tasks in 2021 and 2022. To tackle this problem, we propose a three-stage pipeline, called Docalog, consisting of (1) document retriever model (DR. TEIT), (2) an answer span prediction model, a state-of-the-art transformer-based model taking single documents (DR. TEIT results) as input and outputting the answer span for every input document, and (3) an ultimate span picker deciding on the most likely answer span, out of all predicted spans in the step (2). 
In Multidoc2dial 2022 challenge, during the test phase, DocAlog achieved an f1-score of 36.07% and a SacreBLEU of 23.70% on the MDD-SEEN, and an f1-score of 28.44% and a SacreBLEU of 20.52% on the MDD-UNSEEN.", "cite_spans": [ { "start": 287, "end": 291, "text": "(RG)", "ref_id": null }, { "start": 347, "end": 367, "text": "(Feng et al., 2020b;", "ref_id": "BIBREF10" }, { "start": 368, "end": 385, "text": "Kim et al., 2021)", "ref_id": "BIBREF14" } ], "ref_spans": [], "eq_spans": [], "section": "Introduction", "sec_num": "1" }, { "text": "The main focus of DialDoc shared tasks has been on developing task-oriented information-seeking dialogue systems, an important setting in the domain of conversational AI (Feng et al., 2021) . Some of the performing models in this domain have been CAiRE (Xu et al., 2021) , SCIRDT (Li et al., 2021) , and RWTH (Daheim et al., 2021) . The proposed approaches of CAiRE and SCIRDT utilize additional data for the augmentation of pre-trained language models in span detection, and RWTH (Daheim et al., 2021) model uses neural retrievers for obtaining the most relevant document passages.", "cite_spans": [ { "start": 170, "end": 189, "text": "(Feng et al., 2021)", "ref_id": "BIBREF9" }, { "start": 253, "end": 270, "text": "(Xu et al., 2021)", "ref_id": "BIBREF23" }, { "start": 280, "end": 297, "text": "(Li et al., 2021)", "ref_id": "BIBREF15" }, { "start": 309, "end": 330, "text": "(Daheim et al., 2021)", "ref_id": "BIBREF6" }, { "start": 481, "end": 502, "text": "(Daheim et al., 2021)", "ref_id": "BIBREF6" } ], "ref_spans": [], "eq_spans": [], "section": "Related Work", "sec_num": "2" }, { "text": "In a broader context, the major work in document-grounded dialogue modeling can be divided into the following categories: (i) QA in an unstructured content, e.g., CoQA (Reddy et al., 2019) , QuAC (Choi et al., 2018) , ShARC (Saeidi et al., 2018) , DoQA (Campos et al., 2020), and Doc2Dial (Feng et al., 2020b ) (ii) QA in a semi-structured content, 
such as tables or lists, e.g., SQA (Iyyer et al., 2017) , and HybridQA (Chen et al., 2020) and thirdly (iii) QA in a multimedia content (images and videos with associated textual descriptions), e.g., RecipeQA (Yagcioglu et al., 2018) , PsTuts-VQA (Colas et al., 2020) , and MIMOQA (Singh et al., 2021) .", "cite_spans": [ { "start": 168, "end": 188, "text": "(Reddy et al., 2019)", "ref_id": "BIBREF20" }, { "start": 196, "end": 215, "text": "(Choi et al., 2018)", "ref_id": "BIBREF4" }, { "start": 224, "end": 245, "text": "(Saeidi et al., 2018)", "ref_id": "BIBREF21" }, { "start": 289, "end": 308, "text": "(Feng et al., 2020b", "ref_id": "BIBREF10" }, { "start": 384, "end": 404, "text": "(Iyyer et al., 2017)", "ref_id": "BIBREF12" }, { "start": 420, "end": 439, "text": "(Chen et al., 2020)", "ref_id": "BIBREF3" }, { "start": 558, "end": 582, "text": "(Yagcioglu et al., 2018)", "ref_id": "BIBREF24" }, { "start": 596, "end": 616, "text": "(Colas et al., 2020)", "ref_id": "BIBREF5" }, { "start": 630, "end": 650, "text": "(Singh et al., 2021)", "ref_id": "BIBREF22" } ], "ref_spans": [], "eq_spans": [], "section": "Related Work", "sec_num": "2" }, { "text": "Training material used in this shared task is derived from the MultiDoc2Dial, a new dataset constructed based on Doc2Dial dataset V1.0.1 (Feng et al., 2020b) . It contains a collection of documents and conversations exchanged between the user(s) and an agent grounded in the associated documents.", "cite_spans": [ { "start": 137, "end": 157, "text": "(Feng et al., 2020b)", "ref_id": "BIBREF10" } ], "ref_spans": [], "eq_spans": [], "section": "MultiDoc2Dial Shared Task Dataset", "sec_num": "3.1" }, { "text": "The three-stage workflow of Docalog is depicted in Figure 1 . Firstly, DR. TEIT predicts the N best documents based on the user input (q t ), and a query history of the respective user (q 1:(t\u22121) ). 
Afterward, the span prediction model finds matching spans for a given query for each of the N best documents in the step before. Eventually, the ultimate span picker selects the most related span among predicted spans using a combination of the cosine similarity between the query and the span embeddings, as well as char-level TF-IDF-based cosine similarity between the query and the span vectors.", "cite_spans": [], "ref_spans": [ { "start": 51, "end": 59, "text": "Figure 1", "ref_id": "FIGREF0" } ], "eq_spans": [], "section": "Model", "sec_num": "3.2" }, { "text": "In our retrieval model to encode the texts, we use a pre-trained language-agnostic BERT sentence embedding (LaBSE) (Feng et al., 2020a) . One of our contributions here is to include the dialogue history in our document retriever model. We also found that the title tokens and their synonyms are extremely useful in document-changing dialogues, i.e., questions changing the context document during the conversation. Our document retriever model, Document Retriever with Title Embedding and IDF on Texts (DR. TEIT), uses two scoring measures and aggregates them through a hyper-parameter in a convex combination (Eq. 1).", "cite_spans": [ { "start": 115, "end": 135, "text": "(Feng et al., 2020a)", "ref_id": "BIBREF8" } ], "ref_spans": [], "eq_spans": [], "section": "Document Retriever", "sec_num": "3.2.1" }, { "text": "EQUATION", "cite_spans": [], "ref_spans": [], "eq_spans": [ { "start": 0, "end": 8, "text": "EQUATION", "ref_id": "EQREF", "raw_str": "\u03bbS T E + (1 \u2212 \u03bb)S T I ,", "eq_num": "(1)" } ], "section": "Document Retriever", "sec_num": "3.2.1" }, { "text": "where S T E is the title embedding based on the similarity between the sequence of query and the history (q 1..t ) and the document titles. 
S T I is a character n-gram (2 \u2264 n \u2264 8) similarity score calculated between the aggregation of the query and the history (q 1..t ) and the document texts using TF-IDF-based cosine similarity (Figure 1-c) .", "cite_spans": [], "ref_spans": [ { "start": 331, "end": 343, "text": "(Figure 1-c)", "ref_id": "FIGREF0" } ], "eq_spans": [], "section": "Document Retriever", "sec_num": "3.2.1" }, { "text": "Our span predictor is a RoBERTa language model (Zhuang et al., 2021) fine-tuned to predict the start and the end positions of the answer span, similar to CAiRE (Xu et al., 2021) , one of the best performing models in DialDoc-2021. To model the history of questions, we append the last two history turns to the current question, as also proposed in (Ohsugi et al., 2019) , and feed it to the model as part of the current question. Prior to training our model on the DialDoc 2022 dataset, to gain more global knowledge in question answering, the span predictor of Docalog undergoes a pre-training phase on several CQA datasets such as CoQA (Reddy et al., 2019) , QuAC (Choi et al., 2018 ), DoQA (Campos et al., 2020 , and Doc2Dial (Feng et al., 2020b) . Next, we fine-tune this model on the MultiDoc2Dial dataset using the grounding documents for each question. In this fine-tuning stage, we consider the task as a single-document question answering task. Therefore, at each training step, we only feed the model with the grounding document. 
The reason behind having a standalone span prediction model is to prevent the propagation of the retrieval error in the training phase.", "cite_spans": [ { "start": 47, "end": 68, "text": "(Zhuang et al., 2021)", "ref_id": "BIBREF27" }, { "start": 160, "end": 177, "text": "(Xu et al., 2021)", "ref_id": "BIBREF23" }, { "start": 348, "end": 369, "text": "(Ohsugi et al., 2019)", "ref_id": "BIBREF19" }, { "start": 638, "end": 658, "text": "(Reddy et al., 2019)", "ref_id": "BIBREF20" }, { "start": 666, "end": 684, "text": "(Choi et al., 2018", "ref_id": "BIBREF4" }, { "start": 685, "end": 713, "text": "), DoQA (Campos et al., 2020", "ref_id": null }, { "start": 729, "end": 749, "text": "(Feng et al., 2020b)", "ref_id": "BIBREF10" } ], "ref_spans": [], "eq_spans": [], "section": "Span Predictor", "sec_num": "3.2.2" }, { "text": "As discussed, the span detector provides the mostlikely spans for each of the N best documents by the retriever. Since the answer-span probabilities are not comparable across documents, we need to rank the top-N identified spans searching for the ultimate answer. 
Therefore, similar to our document retriever, we use a convex combination between the embedding-based and character-level-based co-sine similarities of the query and the detected spans through a hyper-parameter \u03b1 that can be tuned on a validation set:", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Ultimate Span Picker", "sec_num": "3.2.3" }, { "text": "EQUATION", "cite_spans": [], "ref_spans": [], "eq_spans": [ { "start": 0, "end": 8, "text": "EQUATION", "ref_id": "EQREF", "raw_str": "\u03b1S SE + (1 \u2212 \u03b1)S SI ,", "eq_num": "(2)" } ], "section": "Ultimate Span Picker", "sec_num": "3.2.3" }, { "text": "where S SE is the span embedding similarity and S SI is character-level TF-IDF similarity.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Ultimate Span Picker", "sec_num": "3.2.3" }, { "text": "To summarize the workflow of Docalog, (1) a document retriever model using both embedding and character-level information retrieves the N most relevant documents to the current question. Based on the validation data we choose the hyper-parameter N in a way that we ensure selecting the answer document. (2) Using a trained span detector model, for each N document we detect the answer spans. 3We use another document retriever model, this time to select the best-detected span, and the ultimate answer to the question is the post-processed version of this final span.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Ultimate Span Picker", "sec_num": "3.2.3" }, { "text": "For the span prediction, we use a large RoBERTa language model 4 (Liu et al., 2019) . During the training and the prediction phase, we feed the Table 1 : Docalog results on Multidoc2dial 2022 challenge. Docalog@k indicates our method when working on the best k documents retrieved by the document retriever for the span detection and providing the final answer. documents to the model with a stride size of 128 tokens. 
We pre-train our span-prediction model for 1 epoch on the CQA datasets and then fine-tuning was done on the MultiDoc2Dial dataset for 3 epochs. Our pre-training lasted around 13 hours and our fine-tuning step 15 hours, both of which were processed on a GeForce RTX 3070 GPU with 12GB memory.", "cite_spans": [ { "start": 65, "end": 83, "text": "(Liu et al., 2019)", "ref_id": "BIBREF16" } ], "ref_spans": [ { "start": 144, "end": 151, "text": "Table 1", "ref_id": null } ], "eq_spans": [], "section": "Experimental Settings", "sec_num": "3.2.4" }, { "text": "Availablity: Our implementation of Docalog is available at github 5 .", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Experimental Settings", "sec_num": "3.2.4" }, { "text": "Document Retriever: in our experiments, Dr. TEIT achieved a Precision@5 of 86% and a Mean Reciprocal Rank (MRR) of 0.72 indicating that on average, the hit is among the first two retrieved documents and it would be more than sufficient to take top-5 documents to the next step, i.e., span detection.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Results", "sec_num": "4" }, { "text": "Docalog Results: In our final model, we combine DR. TEIT, as the retriever with our span predictor model. The comprehensive report of Docalog is provided in Table 1 . We obtained the best F1 score of 36.07% with Docalog@1, suggesting that the ultimate span picker needs further improvements.", "cite_spans": [], "ref_spans": [ { "start": 157, "end": 164, "text": "Table 1", "ref_id": null } ], "eq_spans": [], "section": "Results", "sec_num": "4" }, { "text": "5 https://github.com/Sharif-SLPL-NLP/Docalog-2022", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Results", "sec_num": "4" }, { "text": "We proposed Docalog, a solution for the DialDoc-22 challenge. Docalog is a three-stage pipeline consisting of (1) a document retriever model (DR. 
TEIT), (2) an answer span prediction model, and (3) an ultimate span picker deciding on the most likely answer span, out of all predicted spans. Our experiments show that combining contextualized embedding information with character-level similarities between the answer and the question history can effectively help in the prediction of the ultimate answer. In the test phase of Multi-Doc2Dial 2022, Docalog achieved f1-scores of 36.07% and 28.44% and SacreBLEU scores of 23.70% and 20.52%, respectively on the MDD-SEEN and MDD-UNSEEN folds.", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "Conclusions", "sec_num": "5" }, { "text": "https://developer.amazon.com/en-US/alexa 2 https://www.apple.com/uk/siri/ 3 https://www.microsoft.com/en-us/cortana", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null }, { "text": "https://github.com/huggingface/transformers", "cite_spans": [], "ref_spans": [], "eq_spans": [], "section": "", "sec_num": null } ], "back_matter": [], "bib_entries": { "BIBREF0": { "ref_id": "b0", "title": "Juan Manuel Dodero, and Miguel Angel Bolivar. 2020. A framework to create conversational agents for the development of video games by end-users", "authors": [ { "first": "Rub\u00e9n", "middle": [], "last": "Baena-Perez", "suffix": "" }, { "first": "Iv\u00e1n", "middle": [], "last": "Ruiz-Rube", "suffix": "" } ], "year": null, "venue": "International Conference on Optimization and Learning", "volume": "", "issue": "", "pages": "216--226", "other_ids": {}, "num": null, "urls": [], "raw_text": "Rub\u00e9n Baena-Perez, Iv\u00e1n Ruiz-Rube, Juan Manuel Do- dero, and Miguel Angel Bolivar. 2020. A framework to create conversational agents for the development of video games by end-users. In International Confer- ence on Optimization and Learning, pages 216-226. 
Springer.", "links": null }, "BIBREF1": { "ref_id": "b1", "title": "Medbot: Conversational artificial intelligence powered chatbot for delivering tele-health after covid-19", "authors": [ { "first": "Urmil", "middle": [], "last": "Bharti", "suffix": "" }, { "first": "Deepali", "middle": [], "last": "Bajaj", "suffix": "" }, { "first": "Hunar", "middle": [], "last": "Batra", "suffix": "" }, { "first": "Shreya", "middle": [], "last": "Lalit", "suffix": "" }, { "first": "Shweta", "middle": [], "last": "Lalit", "suffix": "" }, { "first": "Aayushi", "middle": [], "last": "Gangwani", "suffix": "" } ], "year": 2020, "venue": "2020 5th international conference on communication and electronics systems (ICCES)", "volume": "", "issue": "", "pages": "870--875", "other_ids": {}, "num": null, "urls": [], "raw_text": "Urmil Bharti, Deepali Bajaj, Hunar Batra, Shreya Lalit, Shweta Lalit, and Aayushi Gangwani. 2020. Medbot: Conversational artificial intelligence powered chatbot for delivering tele-health after covid-19. In 2020 5th international conference on communication and electronics systems (ICCES), pages 870-875. 
IEEE.", "links": null }, "BIBREF2": { "ref_id": "b2", "title": "DoQA -accessing domain-specific FAQs via conversational QA", "authors": [ { "first": "Jon", "middle": [], "last": "Ander Campos", "suffix": "" }, { "first": "Arantxa", "middle": [], "last": "Otegi", "suffix": "" }, { "first": "Aitor", "middle": [], "last": "Soroa", "suffix": "" }, { "first": "Jan", "middle": [], "last": "Deriu", "suffix": "" }, { "first": "Mark", "middle": [], "last": "Cieliebak", "suffix": "" }, { "first": "Eneko", "middle": [], "last": "Agirre", "suffix": "" } ], "year": 2020, "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", "volume": "", "issue": "", "pages": "7302--7314", "other_ids": { "DOI": [ "10.18653/v1/2020.acl-main.652" ] }, "num": null, "urls": [], "raw_text": "Jon Ander Campos, Arantxa Otegi, Aitor Soroa, Jan De- riu, Mark Cieliebak, and Eneko Agirre. 2020. DoQA -accessing domain-specific FAQs via conversational QA. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7302-7314, Online. 
Association for Computational Linguistics.", "links": null }, "BIBREF3": { "ref_id": "b3", "title": "Hy-bridQA: A dataset of multi-hop question answering over tabular and textual data", "authors": [ { "first": "Wenhu", "middle": [], "last": "Chen", "suffix": "" }, { "first": "Hanwen", "middle": [], "last": "Zha", "suffix": "" }, { "first": "Zhiyu", "middle": [], "last": "Chen", "suffix": "" }, { "first": "Wenhan", "middle": [], "last": "Xiong", "suffix": "" }, { "first": "Hong", "middle": [], "last": "Wang", "suffix": "" }, { "first": "William", "middle": [ "Yang" ], "last": "Wang", "suffix": "" } ], "year": 2020, "venue": "Findings of the Association for Computational Linguistics: EMNLP 2020", "volume": "", "issue": "", "pages": "1026--1036", "other_ids": { "DOI": [ "10.18653/v1/2020.findings-emnlp.91" ] }, "num": null, "urls": [], "raw_text": "Wenhu Chen, Hanwen Zha, Zhiyu Chen, Wenhan Xiong, Hong Wang, and William Yang Wang. 2020. Hy- bridQA: A dataset of multi-hop question answering over tabular and textual data. In Findings of the Asso- ciation for Computational Linguistics: EMNLP 2020, pages 1026-1036, Online. 
Association for Computa- tional Linguistics.", "links": null }, "BIBREF4": { "ref_id": "b4", "title": "QuAC: Question answering in context", "authors": [ { "first": "Eunsol", "middle": [], "last": "Choi", "suffix": "" }, { "first": "He", "middle": [], "last": "He", "suffix": "" }, { "first": "Mohit", "middle": [], "last": "Iyyer", "suffix": "" }, { "first": "Mark", "middle": [], "last": "Yatskar", "suffix": "" }, { "first": "Wentau", "middle": [], "last": "Yih", "suffix": "" }, { "first": "Yejin", "middle": [], "last": "Choi", "suffix": "" }, { "first": "Percy", "middle": [], "last": "Liang", "suffix": "" }, { "first": "Luke", "middle": [], "last": "Zettlemoyer", "suffix": "" } ], "year": 2018, "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", "volume": "", "issue": "", "pages": "2174--2184", "other_ids": { "DOI": [ "10.18653/v1/D18-1241" ] }, "num": null, "urls": [], "raw_text": "Eunsol Choi, He He, Mohit Iyyer, Mark Yatskar, Wen- tau Yih, Yejin Choi, Percy Liang, and Luke Zettle- moyer. 2018. QuAC: Question answering in context. In Proceedings of the 2018 Conference on Empiri- cal Methods in Natural Language Processing, pages 2174-2184, Brussels, Belgium. 
Association for Com- putational Linguistics.", "links": null }, "BIBREF5": { "ref_id": "b5", "title": "TutorialVQA: Question answering dataset for tutorial videos", "authors": [ { "first": "Anthony", "middle": [], "last": "Colas", "suffix": "" }, { "first": "Seokhwan", "middle": [], "last": "Kim", "suffix": "" }, { "first": "Franck", "middle": [], "last": "Dernoncourt", "suffix": "" }, { "first": "Siddhesh", "middle": [], "last": "Gupte", "suffix": "" }, { "first": "Zhe", "middle": [], "last": "Wang", "suffix": "" }, { "first": "Doo Soon", "middle": [], "last": "Kim", "suffix": "" } ], "year": 2020, "venue": "Proceedings of the 12th Language Resources and Evaluation Conference", "volume": "", "issue": "", "pages": "5450--5455", "other_ids": {}, "num": null, "urls": [], "raw_text": "Anthony Colas, Seokhwan Kim, Franck Dernoncourt, Siddhesh Gupte, Zhe Wang, and Doo Soon Kim. 2020. TutorialVQA: Question answering dataset for tutorial videos. In Proceedings of the 12th Lan- guage Resources and Evaluation Conference, pages 5450-5455, Marseille, France. European Language Resources Association.", "links": null }, "BIBREF6": { "ref_id": "b6", "title": "Cascaded span extraction and response generation for document-grounded dialog", "authors": [ { "first": "Nico", "middle": [], "last": "Daheim", "suffix": "" }, { "first": "David", "middle": [], "last": "Thulke", "suffix": "" }, { "first": "Christian", "middle": [], "last": "Dugast", "suffix": "" }, { "first": "Hermann", "middle": [], "last": "Ney", "suffix": "" } ], "year": 2021, "venue": "Proceedings of the 1st Workshop on Documentgrounded Dialogue and Conversational Question Answering", "volume": "", "issue": "", "pages": "57--62", "other_ids": { "DOI": [ "10.18653/v1/2021.dialdoc-1.8" ] }, "num": null, "urls": [], "raw_text": "Nico Daheim, David Thulke, Christian Dugast, and Hermann Ney. 2021. Cascaded span extraction and response generation for document-grounded dialog. 
In Proceedings of the 1st Workshop on Document- grounded Dialogue and Conversational Question An- swering (DialDoc 2021), pages 57-62, Online. Asso- ciation for Computational Linguistics.", "links": null }, "BIBREF7": { "ref_id": "b7", "title": "Levels of explainable artificial intelligence for human-aligned conversational explanations", "authors": [ { "first": "Richard", "middle": [], "last": "Dazeley", "suffix": "" }, { "first": "Peter", "middle": [], "last": "Vamplew", "suffix": "" }, { "first": "Cameron", "middle": [], "last": "Foale", "suffix": "" }, { "first": "Charlotte", "middle": [], "last": "Young", "suffix": "" }, { "first": "Sunil", "middle": [], "last": "Aryal", "suffix": "" }, { "first": "Francisco", "middle": [], "last": "Cruz", "suffix": "" } ], "year": 2021, "venue": "Artificial Intelligence", "volume": "299", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Richard Dazeley, Peter Vamplew, Cameron Foale, Charlotte Young, Sunil Aryal, and Francisco Cruz. 2021. Levels of explainable artificial intelligence for human-aligned conversational explanations. Artifi- cial Intelligence, 299:103525.", "links": null }, "BIBREF8": { "ref_id": "b8", "title": "Languageagnostic bert sentence embedding", "authors": [ { "first": "Fangxiaoyu", "middle": [], "last": "Feng", "suffix": "" }, { "first": "Yinfei", "middle": [], "last": "Yang", "suffix": "" }, { "first": "Daniel", "middle": [], "last": "Cer", "suffix": "" }, { "first": "Naveen", "middle": [], "last": "Arivazhagan", "suffix": "" }, { "first": "Wei", "middle": [], "last": "Wang", "suffix": "" } ], "year": 2020, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:2007.01852" ] }, "num": null, "urls": [], "raw_text": "Fangxiaoyu Feng, Yinfei Yang, Daniel Cer, Naveen Arivazhagan, and Wei Wang. 2020a. Language- agnostic bert sentence embedding. 
arXiv preprint arXiv:2007.01852.", "links": null }, "BIBREF9": { "ref_id": "b9", "title": "MultiDoc2Dial: Modeling dialogues grounded in multiple documents", "authors": [ { "first": "Song", "middle": [], "last": "Feng", "suffix": "" }, { "first": "Sankalp", "middle": [], "last": "Siva", "suffix": "" }, { "first": "Hui", "middle": [], "last": "Patel", "suffix": "" }, { "first": "Sachindra", "middle": [], "last": "Wan", "suffix": "" }, { "first": "", "middle": [], "last": "Joshi", "suffix": "" } ], "year": 2021, "venue": "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing", "volume": "", "issue": "", "pages": "6162--6176", "other_ids": { "DOI": [ "10.18653/v1/2021.emnlp-main.498" ] }, "num": null, "urls": [], "raw_text": "Song Feng, Siva Sankalp Patel, Hui Wan, and Sachindra Joshi. 2021. MultiDoc2Dial: Modeling dialogues grounded in multiple documents. In Proceedings of the 2021 Conference on Empirical Methods in Natu- ral Language Processing, pages 6162-6176, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.", "links": null }, "BIBREF10": { "ref_id": "b10", "title": "doc2dial: A goal-oriented document-grounded dialogue dataset", "authors": [ { "first": "Song", "middle": [], "last": "Feng", "suffix": "" }, { "first": "Hui", "middle": [], "last": "Wan", "suffix": "" }, { "first": "Chulaka", "middle": [], "last": "Gunasekara", "suffix": "" }, { "first": "Siva", "middle": [], "last": "Patel", "suffix": "" }, { "first": "Sachindra", "middle": [], "last": "Joshi", "suffix": "" }, { "first": "Luis", "middle": [], "last": "Lastras", "suffix": "" } ], "year": 2020, "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", "volume": "", "issue": "", "pages": "8118--8128", "other_ids": { "DOI": [ "10.18653/v1/2020.emnlp-main.652" ] }, "num": null, "urls": [], "raw_text": "Song Feng, Hui Wan, Chulaka Gunasekara, Siva Patel, Sachindra Joshi, and Luis Lastras. 
2020b. doc2dial: A goal-oriented document-grounded dialogue dataset. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 8118-8128, Online. Association for Computa- tional Linguistics.", "links": null }, "BIBREF11": { "ref_id": "b11", "title": "Tutorial on conversational recommendation systems", "authors": [ { "first": "Zuohui", "middle": [], "last": "Fu", "suffix": "" }, { "first": "Yikun", "middle": [], "last": "Xian", "suffix": "" }, { "first": "Yongfeng", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Yi", "middle": [], "last": "Zhang", "suffix": "" } ], "year": 2020, "venue": "Fourteenth ACM Conference on Recommender Systems", "volume": "", "issue": "", "pages": "751--753", "other_ids": {}, "num": null, "urls": [], "raw_text": "Zuohui Fu, Yikun Xian, Yongfeng Zhang, and Yi Zhang. 2020. Tutorial on conversational recommendation systems. In Fourteenth ACM Conference on Recom- mender Systems, pages 751-753.", "links": null }, "BIBREF12": { "ref_id": "b12", "title": "Search-based neural structured learning for sequential question answering", "authors": [ { "first": "Mohit", "middle": [], "last": "Iyyer", "suffix": "" }, { "first": "Yih", "middle": [], "last": "Wen-Tau", "suffix": "" }, { "first": "Ming-Wei", "middle": [], "last": "Chang", "suffix": "" } ], "year": 2017, "venue": "Proceedings of the 55th", "volume": "", "issue": "", "pages": "", "other_ids": { "DOI": [ "10.18653/v1/P17-1167" ] }, "num": null, "urls": [], "raw_text": "Mohit Iyyer, Wen-tau Yih, and Ming-Wei Chang. 2017. Search-based neural structured learning for sequen- tial question answering. 
In Proceedings of the 55th", "links": null }, "BIBREF13": { "ref_id": "b13", "title": "Annual Meeting of the Association for Computational Linguistics", "authors": [], "year": null, "venue": "", "volume": "1", "issue": "", "pages": "1821--1831", "other_ids": {}, "num": null, "urls": [], "raw_text": "Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1821- 1831, Vancouver, Canada. Association for Computa- tional Linguistics.", "links": null }, "BIBREF14": { "ref_id": "b14", "title": "Document-grounded goal-oriented dialogue systems on pre-trained language model with diverse input representation", "authors": [ { "first": "Boeun", "middle": [], "last": "Kim", "suffix": "" }, { "first": "Dohaeng", "middle": [], "last": "Lee", "suffix": "" }, { "first": "Sihyung", "middle": [], "last": "Kim", "suffix": "" }, { "first": "Yejin", "middle": [], "last": "Lee", "suffix": "" }, { "first": "Jin-Xia", "middle": [], "last": "Huang", "suffix": "" }, { "first": "Oh-Woog", "middle": [], "last": "Kwon", "suffix": "" }, { "first": "Harksoo", "middle": [], "last": "Kim", "suffix": "" } ], "year": 2021, "venue": "Proceedings of the 1st Workshop on Document-grounded Dialogue and Conversational Question Answering", "volume": "", "issue": "", "pages": "98--102", "other_ids": { "DOI": [ "10.18653/v1/2021.dialdoc-1.12" ] }, "num": null, "urls": [], "raw_text": "Boeun Kim, Dohaeng Lee, Sihyung Kim, Yejin Lee, Jin-Xia Huang, Oh-Woog Kwon, and Harksoo Kim. 2021. Document-grounded goal-oriented dialogue systems on pre-trained language model with diverse input representation. In Proceedings of the 1st Work- shop on Document-grounded Dialogue and Conver- sational Question Answering (DialDoc 2021), pages 98-102, Online. 
Association for Computational Lin- guistics.", "links": null }, "BIBREF15": { "ref_id": "b15", "title": "Technical report on shared task in DialDoc21", "authors": [ { "first": "Jiapeng", "middle": [], "last": "Li", "suffix": "" }, { "first": "Mingda", "middle": [], "last": "Li", "suffix": "" }, { "first": "Longxuan", "middle": [], "last": "Ma", "suffix": "" }, { "first": "Wei-Nan", "middle": [], "last": "Zhang", "suffix": "" }, { "first": "Ting", "middle": [], "last": "Liu", "suffix": "" } ], "year": 2021, "venue": "Proceedings of the 1st Workshop on Document-grounded Dialogue and Conversational Question Answering", "volume": "", "issue": "", "pages": "52--56", "other_ids": { "DOI": [ "10.18653/v1/2021.dialdoc-1.7" ] }, "num": null, "urls": [], "raw_text": "Jiapeng Li, Mingda Li, Longxuan Ma, Wei-Nan Zhang, and Ting Liu. 2021. Technical report on shared task in DialDoc21. In Proceedings of the 1st Workshop on Document-grounded Dialogue and Conversational Question Answering (DialDoc 2021), pages 52-56, Online. 
Association for Computational Linguistics.", "links": null }, "BIBREF16": { "ref_id": "b16", "title": "Roberta: A robustly optimized bert pretraining approach", "authors": [ { "first": "Yinhan", "middle": [], "last": "Liu", "suffix": "" }, { "first": "Myle", "middle": [], "last": "Ott", "suffix": "" }, { "first": "Naman", "middle": [], "last": "Goyal", "suffix": "" }, { "first": "Jingfei", "middle": [], "last": "Du", "suffix": "" }, { "first": "Mandar", "middle": [], "last": "Joshi", "suffix": "" }, { "first": "Danqi", "middle": [], "last": "Chen", "suffix": "" }, { "first": "Omer", "middle": [], "last": "Levy", "suffix": "" }, { "first": "Mike", "middle": [], "last": "Lewis", "suffix": "" }, { "first": "Luke", "middle": [], "last": "Zettlemoyer", "suffix": "" }, { "first": "Veselin", "middle": [], "last": "Stoyanov", "suffix": "" } ], "year": 2019, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:1907.11692" ] }, "num": null, "urls": [], "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", "links": null }, "BIBREF17": { "ref_id": "b17", "title": "Survey of conversational agents in health", "authors": [ { "first": "Joao Luis Zeni", "middle": [], "last": "Montenegro", "suffix": "" }, { "first": "Cristiano", "middle": [], "last": "Andr\u00e9 Da", "suffix": "" }, { "first": "Costa", "middle": [], "last": "", "suffix": "" }, { "first": "Rodrigo", "middle": [], "last": "Da", "suffix": "" }, { "first": "Rosa", "middle": [], "last": "Righi", "suffix": "" } ], "year": 2019, "venue": "Expert Systems with Applications", "volume": "129", "issue": "", "pages": "56--67", "other_ids": {}, "num": null, "urls": [], "raw_text": "Joao Luis Zeni Montenegro, Cristiano Andr\u00e9 da Costa, and Rodrigo da Rosa Righi. 2019. 
Survey of con- versational agents in health. Expert Systems with Applications, 129:56-67.", "links": null }, "BIBREF18": { "ref_id": "b18", "title": "Utilizing humanto-human conversation examples for a multi domain chat-oriented dialog system", "authors": [ { "first": "Lasguido", "middle": [], "last": "Nio", "suffix": "" }, { "first": "Sakriani", "middle": [], "last": "Sakti", "suffix": "" }, { "first": "Graham", "middle": [], "last": "Neubig", "suffix": "" }, { "first": "Tomoki", "middle": [], "last": "Toda", "suffix": "" }, { "first": "Satoshi", "middle": [], "last": "Nakamura", "suffix": "" } ], "year": 2014, "venue": "IEICE TRANS-ACTIONS on Information and Systems", "volume": "97", "issue": "6", "pages": "1497--1505", "other_ids": {}, "num": null, "urls": [], "raw_text": "Lasguido Nio, Sakriani Sakti, Graham Neubig, Tomoki Toda, and Satoshi Nakamura. 2014. Utilizing human- to-human conversation examples for a multi do- main chat-oriented dialog system. IEICE TRANS- ACTIONS on Information and Systems, 97(6):1497- 1505.", "links": null }, "BIBREF19": { "ref_id": "b19", "title": "A simple but effective method to incorporate multi-turn context with BERT for conversational machine comprehension", "authors": [ { "first": "Yasuhito", "middle": [], "last": "Ohsugi", "suffix": "" }, { "first": "Itsumi", "middle": [], "last": "Saito", "suffix": "" }, { "first": "Kyosuke", "middle": [], "last": "Nishida", "suffix": "" }, { "first": "Hisako", "middle": [], "last": "Asano", "suffix": "" }, { "first": "Junji", "middle": [], "last": "Tomita", "suffix": "" } ], "year": 2019, "venue": "Proceedings of the First Workshop on NLP for Conversational AI", "volume": "", "issue": "", "pages": "11--17", "other_ids": { "DOI": [ "10.18653/v1/W19-4102" ] }, "num": null, "urls": [], "raw_text": "Yasuhito Ohsugi, Itsumi Saito, Kyosuke Nishida, Hisako Asano, and Junji Tomita. 2019. 
A simple but effective method to incorporate multi-turn context with BERT for conversational machine comprehen- sion. In Proceedings of the First Workshop on NLP for Conversational AI, pages 11-17, Florence, Italy. Association for Computational Linguistics.", "links": null }, "BIBREF20": { "ref_id": "b20", "title": "Coqa: A conversational question answering challenge", "authors": [ { "first": "Siva", "middle": [], "last": "Reddy", "suffix": "" }, { "first": "Danqi", "middle": [], "last": "Chen", "suffix": "" }, { "first": "Christopher D", "middle": [], "last": "Manning", "suffix": "" } ], "year": 2019, "venue": "Transactions of the Association for Computational Linguistics", "volume": "7", "issue": "", "pages": "249--266", "other_ids": { "DOI": [ "10.1162/tacl_a_00266" ] }, "num": null, "urls": [], "raw_text": "Siva Reddy, Danqi Chen, and Christopher D Manning. 2019. Coqa: A conversational question answering challenge. Transactions of the Association for Com- putational Linguistics, 7:249-266.", "links": null }, "BIBREF21": { "ref_id": "b21", "title": "Interpretation of natural language rules in conversational machine reading", "authors": [ { "first": "Marzieh", "middle": [], "last": "Saeidi", "suffix": "" }, { "first": "Max", "middle": [], "last": "Bartolo", "suffix": "" }, { "first": "Patrick", "middle": [], "last": "Lewis", "suffix": "" }, { "first": "Sameer", "middle": [], "last": "Singh", "suffix": "" }, { "first": "Tim", "middle": [], "last": "Rockt\u00e4schel", "suffix": "" }, { "first": "Mike", "middle": [], "last": "Sheldon", "suffix": "" }, { "first": "Guillaume", "middle": [], "last": "Bouchard", "suffix": "" }, { "first": "Sebastian", "middle": [], "last": "Riedel", "suffix": "" } ], "year": 2018, "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", "volume": "", "issue": "", "pages": "2087--2097", "other_ids": { "DOI": [ "10.18653/v1/D18-1233" ] }, "num": null, "urls": [], "raw_text": "Marzieh Saeidi, Max 
Bartolo, Patrick Lewis, Sameer Singh, Tim Rockt\u00e4schel, Mike Sheldon, Guillaume Bouchard, and Sebastian Riedel. 2018. Interpretation of natural language rules in conversational machine reading. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 2087-2097, Brussels, Belgium. Association for Computational Linguistics.", "links": null }, "BIBREF22": { "ref_id": "b22", "title": "MIMOQA: Multimodal input multimodal output question answering", "authors": [ { "first": "Hrituraj", "middle": [], "last": "Singh", "suffix": "" }, { "first": "Anshul", "middle": [], "last": "Nasery", "suffix": "" }, { "first": "Denil", "middle": [], "last": "Mehta", "suffix": "" }, { "first": "Aishwarya", "middle": [], "last": "Agarwal", "suffix": "" }, { "first": "Jatin", "middle": [], "last": "Lamba", "suffix": "" }, { "first": "Balaji Vasan", "middle": [], "last": "Srinivasan", "suffix": "" } ], "year": 2021, "venue": "Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", "volume": "", "issue": "", "pages": "5317--5332", "other_ids": { "DOI": [ "10.18653/v1/2021.naacl-main.418" ] }, "num": null, "urls": [], "raw_text": "Hrituraj Singh, Anshul Nasery, Denil Mehta, Aishwarya Agarwal, Jatin Lamba, and Balaji Vasan Srinivasan. 2021. MIMOQA: Multimodal input multimodal out- put question answering. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5317-5332, Online. 
Association for Computational Linguistics.", "links": null }, "BIBREF23": { "ref_id": "b23", "title": "CAiRE in DialDoc21: Data augmentation for information seeking dialogue system", "authors": [ { "first": "Yan", "middle": [], "last": "Xu", "suffix": "" }, { "first": "Etsuko", "middle": [], "last": "Ishii", "suffix": "" }, { "first": "Genta", "middle": [], "last": "Indra Winata", "suffix": "" }, { "first": "Zhaojiang", "middle": [], "last": "Lin", "suffix": "" }, { "first": "Andrea", "middle": [], "last": "Madotto", "suffix": "" }, { "first": "Zihan", "middle": [], "last": "Liu", "suffix": "" }, { "first": "Peng", "middle": [], "last": "Xu", "suffix": "" }, { "first": "Pascale", "middle": [], "last": "Fung", "suffix": "" } ], "year": 2021, "venue": "Proceedings of the 1st Workshop on Documentgrounded Dialogue and Conversational Question Answering", "volume": "", "issue": "", "pages": "46--51", "other_ids": { "DOI": [ "10.18653/v1/2021.dialdoc-1.6" ] }, "num": null, "urls": [], "raw_text": "Yan Xu, Etsuko Ishii, Genta Indra Winata, Zhaojiang Lin, Andrea Madotto, Zihan Liu, Peng Xu, and Pas- cale Fung. 2021. CAiRE in DialDoc21: Data aug- mentation for information seeking dialogue system. In Proceedings of the 1st Workshop on Document- grounded Dialogue and Conversational Question An- swering (DialDoc 2021), pages 46-51, Online. 
Asso- ciation for Computational Linguistics.", "links": null }, "BIBREF24": { "ref_id": "b24", "title": "RecipeQA: A challenge dataset for multimodal comprehension of cooking recipes", "authors": [ { "first": "Semih", "middle": [], "last": "Yagcioglu", "suffix": "" }, { "first": "Aykut", "middle": [], "last": "Erdem", "suffix": "" }, { "first": "Erkut", "middle": [], "last": "Erdem", "suffix": "" }, { "first": "Nazli", "middle": [], "last": "Ikizler-Cinbis", "suffix": "" } ], "year": 2018, "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", "volume": "", "issue": "", "pages": "1358--1368", "other_ids": { "DOI": [ "10.18653/v1/D18-1166" ] }, "num": null, "urls": [], "raw_text": "Semih Yagcioglu, Aykut Erdem, Erkut Erdem, and Na- zli Ikizler-Cinbis. 2018. RecipeQA: A challenge dataset for multimodal comprehension of cooking recipes. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 1358-1368, Brussels, Belgium. Association for Computational Linguistics.", "links": null }, "BIBREF25": { "ref_id": "b25", "title": "Building task-oriented dialogue systems for online shopping", "authors": [ { "first": "Zhao", "middle": [], "last": "Yan", "suffix": "" }, { "first": "Nan", "middle": [], "last": "Duan", "suffix": "" }, { "first": "Peng", "middle": [], "last": "Chen", "suffix": "" }, { "first": "Ming", "middle": [], "last": "Zhou", "suffix": "" }, { "first": "Jianshe", "middle": [], "last": "Zhou", "suffix": "" }, { "first": "Zhoujun", "middle": [], "last": "Li", "suffix": "" } ], "year": 2017, "venue": "Thirty-first AAAI conference on artificial intelligence", "volume": "", "issue": "", "pages": "", "other_ids": {}, "num": null, "urls": [], "raw_text": "Zhao Yan, Nan Duan, Peng Chen, Ming Zhou, Jianshe Zhou, and Zhoujun Li. 2017. Building task-oriented dialogue systems for online shopping. 
In Thirty-first AAAI conference on artificial intelligence.", "links": null }, "BIBREF26": { "ref_id": "b26", "title": "Conversational question answering: A survey", "authors": [ { "first": "Munazza", "middle": [], "last": "Zaib", "suffix": "" }, { "first": "Wei", "middle": [ "Emma" ], "last": "Zhang", "suffix": "" }, { "first": "Quan", "middle": [ "Z" ], "last": "Sheng", "suffix": "" }, { "first": "Adnan", "middle": [], "last": "Mahmood", "suffix": "" }, { "first": "Yang", "middle": [], "last": "Zhang", "suffix": "" } ], "year": 2021, "venue": "", "volume": "", "issue": "", "pages": "", "other_ids": { "arXiv": [ "arXiv:2106.00874" ] }, "num": null, "urls": [], "raw_text": "Munazza Zaib, Wei Emma Zhang, Quan Z Sheng, Ad- nan Mahmood, and Yang Zhang. 2021. Conversa- tional question answering: A survey. arXiv preprint arXiv:2106.00874.", "links": null }, "BIBREF27": { "ref_id": "b27", "title": "A robustly optimized BERT pre-training approach with post-training", "authors": [ { "first": "Liu", "middle": [], "last": "Zhuang", "suffix": "" }, { "first": "Lin", "middle": [], "last": "Wayne", "suffix": "" }, { "first": "Shi", "middle": [], "last": "Ya", "suffix": "" }, { "first": "Zhao", "middle": [], "last": "Jun", "suffix": "" } ], "year": 2021, "venue": "Proceedings of the 20th Chinese National Conference on Computational Linguistics", "volume": "", "issue": "", "pages": "1218--1227", "other_ids": {}, "num": null, "urls": [], "raw_text": "Liu Zhuang, Lin Wayne, Shi Ya, and Zhao Jun. 2021. A robustly optimized BERT pre-training approach with post-training. In Proceedings of the 20th Chinese National Conference on Computational Linguistics, pages 1218-1227, Huhhot, China. Chinese Informa- tion Processing Society of China.", "links": null } }, "ref_entries": { "FIGREF0": { "num": null, "uris": null, "text": "Docalog model architecture and the overview diagram: a) a standalone answer span prediction model. 
b) our three-stage model consists of (i) Dr. TEIT retriever model connected to the (ii) the span prediction model, and (iii) an aggregator which works as an ultimate span-picker deciding on the most likely span of the answer, out of all predicted spans. c) A detailed view of Dr. TEIT, the retriever architecture.", "type_str": "figure" } } } }