|
{ |
|
"paper_id": "S17-2006", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:28:54.239932Z" |
|
}, |
|
"title": "SemEval-2017 Task 8: RumourEval: Determining rumour veracity and support for rumours", |
|
"authors": [ |
|
{ |
|
"first": "Leon", |
|
"middle": [], |
|
"last": "Derczynski", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Kalina", |
|
"middle": [], |
|
"last": "Bontcheva", |
|
"suffix": "", |
|
"affiliation": {}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Liakata", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Warwick", |
|
"location": { |
|
"postCode": "CV4 7AL", |
|
"country": "UK" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Rob", |
|
"middle": [], |
|
"last": "Procter", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Warwick", |
|
"location": { |
|
"postCode": "CV4 7AL", |
|
"country": "UK" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{

"first": "Geraldine",

"middle": [],

"last": "Wong Sak Hoi",

"suffix": "",

"affiliation": {},

"email": ""

},
|
{ |
|
"first": "Arkaitz", |
|
"middle": [], |
|
"last": "Zubiaga", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "University of Warwick", |
|
"location": { |
|
"postCode": "CV4 7AL", |
|
"country": "UK" |
|
} |
|
}, |
|
"email": "" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Media is full of false claims. Even Oxford Dictionaries named \"post-truth\" as the word of 2016. This makes it more important than ever to build systems that can identify the veracity of a story, and the nature of the discourse around it. Ru-mourEval is a SemEval shared task that aims to identify and handle rumours and reactions to them, in text. We present an annotation scheme, a large dataset covering multiple topics-each having their own families of claims and replies-and use these to pose two concrete challenges as well as the results achieved by participants on these challenges.", |
|
"pdf_parse": { |
|
"paper_id": "S17-2006", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Media is full of false claims. Even Oxford Dictionaries named \"post-truth\" as the word of 2016. This makes it more important than ever to build systems that can identify the veracity of a story, and the nature of the discourse around it. Ru-mourEval is a SemEval shared task that aims to identify and handle rumours and reactions to them, in text. We present an annotation scheme, a large dataset covering multiple topics-each having their own families of claims and replies-and use these to pose two concrete challenges as well as the results achieved by participants on these challenges.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Rumours are rife on the web. False claims affect people's perceptions of events and their behaviour, sometimes in harmful ways. With the increasing reliance on the Web -social media, in particularas a source of information and news updates by individuals, news professionals, and automated systems, the potential disruptive impact of rumours is further accentuated.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction and Motivation", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The task of analysing and determining veracity of social media content has been of recent interest to the field of natural language processing. After initial work (Qazvinian et al., 2011) , increasingly advanced systems and annotation schemas have been developed to support the analysis of rumour and misinformation in text (Kumar and Geethakumari, 2014; Zhang et al., 2015; Shao et al., 2016; Zubiaga et al., 2016b) . Veracity judgment can be decomposed intuitively in terms of a comparison between assertions made in -and entailments from -a candidate text, and external world knowledge. Intermediate linguistic cues have also been shown to play a role. Critically, based on recent work the task appears deeply nuanced and very challenging, while having important applications in, for example, journalism and disaster mitigation (Hermida, 2012; Procter et al., 2013a; Veil et al., 2011) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 163, |
|
"end": 187, |
|
"text": "(Qazvinian et al., 2011)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 324, |
|
"end": 354, |
|
"text": "(Kumar and Geethakumari, 2014;", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 355, |
|
"end": 374, |
|
"text": "Zhang et al., 2015;", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 375, |
|
"end": 393, |
|
"text": "Shao et al., 2016;", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 394, |
|
"end": 416, |
|
"text": "Zubiaga et al., 2016b)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 831, |
|
"end": 846, |
|
"text": "(Hermida, 2012;", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 847, |
|
"end": 869, |
|
"text": "Procter et al., 2013a;", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 870, |
|
"end": 888, |
|
"text": "Veil et al., 2011)", |
|
"ref_id": "BIBREF23" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction and Motivation", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "We propose a shared task where participants analyse rumours in the form of claims made in user-generated content, and where users respond to one another within conversations attempting to resolve the veracity of the rumour. We define a rumour as a \"circulating story of questionable veracity, which is apparently credible but hard to verify, and produces sufficient scepticism and/or anxiety so as to motivate finding out the actual truth\" (Zubiaga et al., 2015b) . While breaking news unfold, gathering opinions and evidence from as many sources as possible as communities react becomes crucial to determine the veracity of rumours and consequently reduce the impact of the spread of misinformation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 440, |
|
"end": 463, |
|
"text": "(Zubiaga et al., 2015b)", |
|
"ref_id": "BIBREF28" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction and Motivation", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Within this scenario where one needs to listen to, and assess the testimony of, different sources to make a final decision with respect to a rumour's veracity, we ran a task in SemEval consisting of two subtasks: (a) stance classification towards rumours, and (b) veracity classification. Subtask A corresponds to the core problem in crowd response analysis when using discourse around claims to verify or disprove them. Subtask B corresponds to the AI-hard task of assessing directly whether or not a claim is false.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction and Motivation", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Related to the objective of predicting a rumour's veracity, Subtask A deals with the complementary objective of tracking how other sources orient to the accuracy of the rumourous story. A key step in the analysis of the surrounding discourse is to SDQC support classification. Example 1:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subtask A -SDQC Support/ Rumour stance classification", |
|
"sec_num": "1.1" |
|
}, |
|
{

"text": "SDQC support classification, Example 1:\nu1: We understand there are two gunmen and up to a dozen hostages inside the cafe under siege at Sydney.. ISIS flags remain on display #7News [support]\nu2: @u1 not ISIS flags [deny]\nu3: @u1 sorry - how do you know it's an ISIS flag? Can you actually confirm that? [query]\nu4: @u3 no she can't cos it's actually not [deny]\nu5: @u1 More on situation at Martin Place in Sydney, AU -LINK- [comment]\nu6: @u1 Have you actually confirmed its an ISIS flag or are you talking shit [query]\n\nSDQC support classification, Example 2:\nu1: These are not timid colours; soldiers back guarding Tomb of Unknown Soldier after today's shooting #StandforCanada -PICTURE- [support]\nu2: @u1 Apparently a hoax. Best to take Tweet down. [deny]\nu3: @u1 This photo was taken this morning, before the shooting. [deny]\nu4: @u1 I don't believe there are soldiers guarding this area right now. [deny]\nu5: @u4 wondered as well. I've reached out to someone who would know just to confirm that. Hopefully get response soon. [comment]\nu4: @u5 ok, thanks. [comment]",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Subtask A -SDQC Support/ Rumour stance classification",

"sec_num": "1.1"

},
|
{ |
|
"text": "Figure 1: Examples of tree-structured threads discussing the veracity of a rumour, where the label associated with each tweet is the target of the SDQC support classification task.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subtask A -SDQC Support/ Rumour stance classification", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "determine how other users in social media regard the rumour (Procter et al., 2013b) . We propose to tackle this analysis by looking at the conversation stemming from direct and nested replies to the tweet originating the rumour (source tweet).", |
|
"cite_spans": [ |
|
{ |
|
"start": 60, |
|
"end": 83, |
|
"text": "(Procter et al., 2013b)", |
|
"ref_id": "BIBREF17" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subtask A -SDQC Support/ Rumour stance classification", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "To this effect RumourEval provided participants with a tree-structured conversation formed of tweets replying to the originating rumourous tweet, directly or indirectly. Each tweet presents its own type of support with respect to the rumour (see Figure 1 ). We frame this in terms of supporting, denying, querying or commenting on (SDQC) the original rumour (Zubiaga et al., 2016b) . Therefore, we introduce a subtask where the goal is to label the type of interaction between a given statement (rumourous tweet) and a reply tweet (the latter can be either direct or nested replies).", |
|
"cite_spans": [ |
|
{ |
|
"start": 358, |
|
"end": 381, |
|
"text": "(Zubiaga et al., 2016b)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 246, |
|
"end": 254, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Subtask A -SDQC Support/ Rumour stance classification", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "We note that superficially this subtask may bear similarity to SemEval-2016 Task 6 on stance detection from tweets (Mohammad et al., 2016) , where participants are asked to determine whether a tweet is in favour, against or neither, of a given target entity (e.g. Hillary Clinton) or topic (e.g. climate change). Our SQDC subtask differs in two aspects. Firstly, participants needed to determine the objective support towards a rumour, an entire statement, rather than individual target concepts. Moreover, they are asked to determine additional response types to the rumourous tweet that are relevant to the discourse, such as a request for more information (questioning, Q) and making a com-ment (C), where the latter doesn't directly address support or denial towards the rumour, but provides an indication of the conversational context surrounding rumours. For example, certain patterns of comments and questions can be indicative of false rumours and others indicative of rumours that turn out to be true.", |
|
"cite_spans": [ |
|
{ |
|
"start": 115, |
|
"end": 138, |
|
"text": "(Mohammad et al., 2016)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subtask A -SDQC Support/ Rumour stance classification", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "Secondly, participants need to determine the type of response towards a rumourous tweet from a tree-structured conversation, where each tweet is not necessarily sufficiently descriptive on its own, but needs to be viewed in the context of an aggregate discussion consisting of tweets preceding it in the thread. This is more closely aligned with stance classification as defined in other domains, such as public debates (Anand et al., 2011) . The latter also relates somewhat to the SemEval-2015 Task 3 on Answer Selection in Community Question Answering (Moschitti et al., 2015) , where the task was to determine the quality of responses in tree-structured threads in CQA platforms. Responses to questions are classified as 'good', 'potential' or 'bad'. Both tasks are related to textual entailment and textual similarity. However, Semeval-2015 Task3 is clearly a question answering task, the platform itself supporting a QA format in contrast with the more free-form format of conversations in Twitter. Moreover, as a question answering task Semeval-2015 Task 3 is more concerned with relevance and retrieval whereas the task we propose here is about whether support or denial can be inferred towards the original statement (source tweet) from the reply tweets.", |
|
"cite_spans": [ |
|
{ |
|
"start": 420, |
|
"end": 440, |
|
"text": "(Anand et al., 2011)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 555, |
|
"end": 579, |
|
"text": "(Moschitti et al., 2015)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subtask A -SDQC Support/ Rumour stance classification", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "Each tweet in the tree-structured thread is categorised into one of the following four categories, following Procter et al. (2013b):", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subtask A -SDQC Support/ Rumour stance classification", |
|
"sec_num": "1.1" |
|
}, |
|
{ |
|
"text": "\u2022 Support: the author of the response supports the veracity of the rumour. \u2022 Deny: the author of the response denies the veracity of the rumour. \u2022 Query: the author of the response asks for additional evidence in relation to the veracity of the rumour. \u2022 Comment: the author of the response makes their own comment without a clear contribution to assessing the veracity of the rumour. Prior work in the area has found the task difficult, compounded by the variety present in language use between different stories (Lukasik et al., 2015; Zubiaga et al., 2017) . This indicates it is challenging enough to make for an interesting Se-mEval shared task.", |
|
"cite_spans": [ |
|
{ |
|
"start": 514, |
|
"end": 536, |
|
"text": "(Lukasik et al., 2015;", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 537, |
|
"end": 558, |
|
"text": "Zubiaga et al., 2017)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subtask A -SDQC Support/ Rumour stance classification", |
|
"sec_num": "1.1" |
|
}, |
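{

"text": "As a concrete illustration of the task input, the following is a minimal sketch of how a tree-structured thread with SDQC labels could be represented and flattened into classification instances. This is an illustration only: the field names (text, label, replies) are assumptions made for this sketch, not the official distribution format.\n\n# Hypothetical thread representation: each tweet carries an SDQC label\n# and a list of (possibly nested) replies.\nthread = {\n    'text': 'We understand there are two gunmen and up to a dozen hostages ...',\n    'label': 'support',\n    'replies': [\n        {'text': '@u1 not ISIS flags', 'label': 'deny', 'replies': []},\n        {'text': '@u1 how do you know that is an ISIS flag?', 'label': 'query', 'replies': []},\n    ],\n}\n\ndef flatten(node, source_text):\n    # Yield one (source, tweet, label) instance per tweet, depth-first, so\n    # every nested reply is paired with the rumourous source tweet.\n    yield source_text, node['text'], node['label']\n    for child in node['replies']:\n        yield from flatten(child, source_text)\n\ninstances = list(flatten(thread, thread['text']))",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Subtask A -SDQC Support/ Rumour stance classification",

"sec_num": "1.1"

},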
|
{ |
|
"text": "The goal of this subtask is to predict the veracity of a given rumour. The rumour is presented as a tweet, reporting an update associated with a newsworthy event, but deemed unsubstantiated at the time of release. Given such a tweet/claim, and a set of other resources provided, systems should return a label describing the anticipated veracity of the rumour as true or false -see Figure 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 381, |
|
"end": 389, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Subtask B -Veracity prediction", |
|
"sec_num": "1.2" |
|
}, |
|
{ |
|
"text": "The ground truth of this task has been manually established by journalist members of the team who identified official statements or other trustworthy sources of evidence that resolved the veracity of the given rumour. Examples of tweets annotated for veracity are shown in Figure 2 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 273, |
|
"end": 281, |
|
"text": "Figure 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Subtask B -Veracity prediction", |
|
"sec_num": "1.2" |
|
}, |
|
{ |
|
"text": "The participants in this subtask chose between two variants. In the first case -the closed variant -the veracity of a rumour had to be predicted solely from the tweet itself (for example (Liu et al., 2015) rely only on the content of tweets to assess the veracity of tweets in real time, while systems such as Tweet-Cred (Gupta et al., 2014 ) follow a tweet level analysis for a similar task where the credibility of a tweet is predicted). In the second case -the open variant -additional context was provided as input to veracity prediction systems; this context consists of a Wikipedia dump. Critically, no external resources could be used that contained information from after the rumour's resolu-tion. To control this, we specified precise versions of external information that participants could use. This was important to make sure we introduced time sensitivity into the task of veracity prediction. In a practical system, the classified conversation threads from Subtask A could be used as context.", |
|
"cite_spans": [ |
|
{ |
|
"start": 187, |
|
"end": 205, |
|
"text": "(Liu et al., 2015)", |
|
"ref_id": "BIBREF12" |
|
}, |
|
{ |
|
"start": 321, |
|
"end": 340, |
|
"text": "(Gupta et al., 2014", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subtask B -Veracity prediction", |
|
"sec_num": "1.2" |
|
}, |
|
{ |
|
"text": "We take a simple approach to this task, using only true/false labels for rumours. In practice, however, many claims are hard to verify; for example, there were many rumours concerning Vladimir Putin's activities in early 2015, many wholly unsubstantiable. Therefore, we also expect systems to return a confidence value in the range of 0-1 for each rumour; if the rumour is unverifiable, a confidence of 0 should be returned.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Subtask B -Veracity prediction", |
|
"sec_num": "1.2" |
|
}, |
|
{ |
|
"text": "Identifying the veracity of claims made on the web is an increasingly important task (Zubiaga et al., 2015b) . Decision support, digital journalism and disaster response already rely on picking out such claims (Procter et al., 2013b) . Additionally, web and social media are a more challenging environment than e.g. newswire, which has traditionally provided the mainstay of similar tasks (such as RTE (Bentivogli et al., 2011) ). Last year we ran a workshop at WWW 2015, Rumors and Deception in Social Media: Detection, Tracking, and Visualization (RDSM 2015) 1 which garnered interest from researchers coming from a variety of backgrounds, including natural language processing, web science and computational journalism.", |
|
"cite_spans": [ |
|
{ |
|
"start": 85, |
|
"end": 108, |
|
"text": "(Zubiaga et al., 2015b)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 210, |
|
"end": 233, |
|
"text": "(Procter et al., 2013b)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 402, |
|
"end": 427, |
|
"text": "(Bentivogli et al., 2011)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Impact", |
|
"sec_num": "1.3" |
|
}, |
|
{ |
|
"text": "To capture web claims and the community reaction around them, we take data from the \"model organism\" of social media, Twitter (Tufekci, 2014) . Data for the task is available in the form of online discussion threads, each pertaining to a particular event and the rumours around it. These threads form a tree, where each tweet has a parent tweet it responds to. Together these form a conversation, initiated by a source tweet (see Figure 1) . The data has already been annotated for veracity and SDQC following a published annotation scheme (Zubiaga et al., 2016b) , as part of the PHEME project (Derczynski and Bontcheva, 2014) , in which the task organisers are partners. Figure 2: Examples of source tweets with a veracity value, which has to be predicted in the veracity prediction task. ", |
|
"cite_spans": [ |
|
{ |
|
"start": 126, |
|
"end": 141, |
|
"text": "(Tufekci, 2014)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 540, |
|
"end": 563, |
|
"text": "(Zubiaga et al., 2016b)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 595, |
|
"end": 627, |
|
"text": "(Derczynski and Bontcheva, 2014)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 430, |
|
"end": 439, |
|
"text": "Figure 1)", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data & Resources", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Our training dataset comprises 297 rumourous threads collected for 8 events in total, which include 297 source and 4,222 reply tweets, amounting to 4,519 tweets in total. These events include well-known breaking news such as the Charlie Hebdo shooting in Paris, the Ferguson unrest in the US, and the Germanwings plane crash in the French Alps. The size of the dataset means it can be distributed without modifications, according to Twitter's current data usage policy, as JSON files. This dataset is already publicly available (Zubiaga et al., 2016a) and constitutes the training and development data.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Training Data", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "For the test data, we annotated 28 additional threads. These include 20 threads extracted from the same events as the training set, and 8 threads from two newly collected events: (1) a rumour that Hillary Clinton was diagnosed with pneumonia during the 2016 US election campaign, and (2) a rumour that Youtuber Marina Joyce had been kidnapped.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Test Data", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The test dataset includes, in total, 1,080 tweets, 28 of which are source tweets and 1,052 replies. The distribution of labels in the training and test datasets is summarised in Table 1.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Test Data", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Along with the tweet threads, we also provided additional context that participants could make use of. The context we provided was two-fold: (1) Wikipedia articles associated with the event in question. We provided the last revision of the article prior to the source tweet being posted, and (2) content of linked URLs, using the Internet Archive to retrieve the latest revision prior to the link being tweeted, where available.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Context Data", |
|
"sec_num": "2.3" |
|
}, |
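{

"text": "As one illustration of how such time-sensitive context can be retrieved, the following is a minimal sketch using the Internet Archive's Wayback Machine availability API; it is an assumption-laden sketch for illustration, not the organisers' actual pipeline.\n\nimport requests\n\ndef snapshot_before(url, tweet_time):\n    # tweet_time in Wayback timestamp format, e.g. '20150114093000'.\n    resp = requests.get('http://archive.org/wayback/available',\n                        params={'url': url, 'timestamp': tweet_time})\n    closest = resp.json().get('archived_snapshots', {}).get('closest')\n    # The API returns the snapshot *closest* to the timestamp, which may\n    # post-date the tweet, so keep it only if it is not later.\n    if closest and closest.get('timestamp', '') <= tweet_time:\n        return closest['url']\n    return None",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "Context Data",

"sec_num": "2.3"

},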
|
{ |
|
"text": "The annotation of rumours and their subsequent interactions was performed in two steps. In the first step, we sampled a subset of likely rumourous tweets from all the tweets associated with the event in question, where we used the high number of retweets as an indication of a tweet being potentially rumourous. These sampled tweets were fed to an annotation tool, by means of which our expert journalist annotators members manually identified the ones that did indeed report unverified updates and were considered to be rumours. Whenever possible, they also annotated rumours that had ultimately been proven true or the ones that had been debunked as false stories; the rest were annotated as \"unverified\". In the second step, we collected conversations associated with those rumourous tweets, which included all replies succeeding a rumourous source tweet. The type of support (SDQC) expressed by each participant in the conversation was then annotated through crowdsourcing. The methodology for performing this crowdsourced annotation process has been previously assessed and validated (Zubiaga et al., 2015a) , and is further detailed in (Zubiaga et al., 2016b) . The overall inter-annotator agreement rate of 63.7% showed the task to be challenging, and easier for source tweets (81.1%) than for replying tweets (62.2%).", |
|
"cite_spans": [ |
|
{ |
|
"start": 1089, |
|
"end": 1112, |
|
"text": "(Zubiaga et al., 2015a)", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 1142, |
|
"end": 1165, |
|
"text": "(Zubiaga et al., 2016b)", |
|
"ref_id": "BIBREF30" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Annotation", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "The evaluation data was not available to those participating in any way in the task, and selec-tion decisions were taken only by organisers not connected with any submission, to retain fairness across submissions. Figure 1 shows an example of what a data instance looks like, where the source tweet in the tree presents a rumourous statement that is supported, denied, queried and commented on by others. Note that replies are nested, where some tweets reply directly to the source, while other tweets reply to earlier replies, e.g., u4 and u5 engage in a short conversation replying to each other in the second example. The input to the veracity prediction task is simpler than this; here participants had to determine if a rumour was true or false by only looking at the source tweet (see Figure 2), and optionally making use of the additional context provided by the organisers.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 214, |
|
"end": 222, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 791, |
|
"end": 797, |
|
"text": "Figure", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data Annotation", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "To prepare the evaluation resources, we collected and sampled the tweets around which there is most interaction, placed these in an existing annotation tool to be annotated as rumour vs. nonrumour, categorised them into rumour sub-stories, and labelled them for veracity.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Annotation", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "For Subtask A, the extra annotation for support / deny / question / comment at the tweet level within the conversations were performed through crowdsourcing -as performed to satisfactory quality already with the existing training data (Zubiaga et al., 2015a) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 235, |
|
"end": 258, |
|
"text": "(Zubiaga et al., 2015a)", |
|
"ref_id": "BIBREF27" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data Annotation", |
|
"sec_num": "2.4" |
|
}, |
|
{ |
|
"text": "The two subtasks were evaluated as follows.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Evaluation", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The evaluation of the SDQC needed careful consideration, as the distribution of the categories is clearly skewed towards comments. Evaluation is through classification accuracy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SDQC stance classification:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Veracity prediction: The evaluation of the predicted veracity, which is either true or false for each instance, was done using macroaveraged accuracy, hence measuring the ratio of instances for which a correct prediction was made. Additionally, we calculated RMSE \u03c1 for the difference between system and reference confidence in correct examples and provided the mean of these scores. Incorrect examples have an RMSE of 1. This is normalised and combined with the macroaveraged accuracy to give a final score; e.g. acc = (1 \u2212 \u03c1)acc.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SDQC stance classification:", |
|
"sec_num": null |
|
}, |
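{

"text": "The following is a minimal sketch of how this combined score could be computed. It is a reconstruction from the description above, not the official scoring script, and the exact handling of the reference confidence is an assumption.\n\nimport math\n\ndef subtask_b_score(gold, pred, conf, ref_conf):\n    # gold/pred: per-rumour 'true'/'false' labels; conf/ref_conf in [0, 1].\n    correct = [g == p for g, p in zip(gold, pred)]\n    # Macroaveraged accuracy: mean of per-class accuracies, so the rarer\n    # class counts as much as the frequent one.\n    per_class = []\n    for label in set(gold):\n        idx = [i for i, g in enumerate(gold) if g == label]\n        per_class.append(sum(correct[i] for i in idx) / len(idx))\n    acc = sum(per_class) / len(per_class)\n    # Confidence RMSE: squared error against the reference confidence on\n    # correct predictions, and an error of 1 on incorrect ones.\n    errs = [(c - r) ** 2 if ok else 1.0\n            for ok, c, r in zip(correct, conf, ref_conf)]\n    rho = math.sqrt(sum(errs) / len(errs))\n    return (1 - rho) * acc  # combined final score",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "SDQC stance classification:",

"sec_num": null

},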
|
{ |
|
"text": "The baseline is the most common class. For Table 2 : Results for Task A: support/deny/query/comment classification.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 43, |
|
"end": 50, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "SDQC stance classification:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Task A, we also introduce a baseline excluding the common, low-impact \"comment\" class, considering accuracy over only support, deny and query. This is included as the SDQ baseline.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "SDQC stance classification:", |
|
"sec_num": null |
|
}, |
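{

"text": "A minimal sketch of how these baselines could be computed, under one reading of the description above (treating the SDQ baseline as accuracy over only the items whose gold label is support, deny or query); this is illustrative, not the official evaluation code.\n\ndef accuracy(gold, pred, classes=None):\n    # Accuracy over items whose gold label is in `classes` (all items if None).\n    pairs = [(g, p) for g, p in zip(gold, pred)\n             if classes is None or g in classes]\n    return sum(g == p for g, p in pairs) / len(pairs)\n\n# Most-common-class baseline: predict the majority training label everywhere.\n# majority = max(set(gold_train), key=gold_train.count)\n# pred = [majority] * len(gold_test)\n# accuracy(gold_test, pred)                                        # full baseline\n# accuracy(gold_test, pred, classes={'support', 'deny', 'query'})  # SDQ baseline",

"cite_spans": [],

"ref_spans": [],

"eq_spans": [],

"section": "SDQC stance classification:",

"sec_num": null

},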
|
{ |
|
"text": "We have had 13 system submissions at Ru-mourEval, eight submissions for Subtask A (Kochkina et al., 2017; Bahuleyan and Vechtomova, 2017; Srivastava et al., 2017; Wang et al., 2017; Singh et al., 2017; Chen et al., 2017; Garc\u00eda Lozano et al., 2017; Enayet and El-Beltagy, 2017) , the identification of stance towards rumours, and five submissions for Subtask B (Srivastava et al., 2017; Wang et al., 2017; Singh et al., 2017; Chen et al., 2017; Enayet and El-Beltagy, 2017) , the rumour veracity classification task, with participant teams coming from four continents (Europe: Germany, Sweden, UK; North America: Canada; Asia: China, India, Taiwan; Africa: Egypt), showing the global reach of the issue of rumour veracity on social media.", |
|
"cite_spans": [ |
|
{ |
|
"start": 82, |
|
"end": 105, |
|
"text": "(Kochkina et al., 2017;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 106, |
|
"end": 137, |
|
"text": "Bahuleyan and Vechtomova, 2017;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 138, |
|
"end": 162, |
|
"text": "Srivastava et al., 2017;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 163, |
|
"end": 181, |
|
"text": "Wang et al., 2017;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 182, |
|
"end": 201, |
|
"text": "Singh et al., 2017;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 202, |
|
"end": 220, |
|
"text": "Chen et al., 2017;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 221, |
|
"end": 248, |
|
"text": "Garc\u00eda Lozano et al., 2017;", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 249, |
|
"end": 277, |
|
"text": "Enayet and El-Beltagy, 2017)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 361, |
|
"end": 386, |
|
"text": "(Srivastava et al., 2017;", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 387, |
|
"end": 405, |
|
"text": "Wang et al., 2017;", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 406, |
|
"end": 425, |
|
"text": "Singh et al., 2017;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 426, |
|
"end": 444, |
|
"text": "Chen et al., 2017;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 445, |
|
"end": 473, |
|
"text": "Enayet and El-Beltagy, 2017)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Participant Systems and Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Most participants tackled Subtask A, which involves classifying a tweet in a conversation thread as either supporting (S), denying (D), querying (Q) or commenting on (C) a rumour. Results are given in Table 2 The distribution of SDQC labels in the training, development and test sets favours comments (see Table 1 . Including and recognising the items that fit in this class is important for reducing noise in the other, information-bearing classifications (support, deny and query). In actual fact, comments are often express implicit support; the absence of dispute is a soft signal of agreement. Systems generally viewed this task as a fourway single tweet classification task, with the ex-ception of the best performing system (Turing), which addressed it as a sequential classification problem, where the SDQC label of each tweet depends on the features and labels of the previous tweets, and the ECNU and IITP systems. The IITP system takes as input pairs of source and reply tweets whereas the ECNU system addressed class imbalance by decomposing the problem into a two step classification task (comment vs. non-comment), and all non-comment tweets classified as SDQ. Half of the systems employed ensemble classifiers, where classification was obtained through majority voting (ECNU, MamaEdha, UWaterloo, DFKI-DKT). In some cases the ensembles were hybrid, consisting both of machine learning classifiers and manually created rules, with differential weighting of classifiers for different class labels (ECNU, MamaEdha, DFKI-DKT). Three systems used deep learning, with team Turing employing LSTMs for sequential classification, team IKM using convolutional neural networks (CNN) for obtaining the representation of each tweet, assigned a probability for a class by a softmax classifier and team Mama Edha using CNN as one of the classifiers in their hybrid conglomeration. The remaining two systems NileTMRG and IITP used support vector machines with linear and polynomial kernel respectively. Half of the systems invested in elaborate feature engineering including cue words and expressions denoting Belief, Knowledge, Doubt and Denial (UWaterloo) as well as Tweet domain features including meta-data about users, hashtags and event specific keywords (ECNU, UWaterloo, IITP, NileTMRG). The systems with the least elaborate features were IKM and Mama Edha for CNNs (word embeddings), DFKI-DKT (sparse word vectors as input to logistic regression) and Turing (average word vectors, punctuation, similarity between word vectors in current tweet, source tweet and previous tweet, presence of negation, picture, URL). Five out of the eight systems used pre-trained word embeddings, mostly Google News word2vec embeddings, while ECNU used four different types of embeddings. Overall, elaborate feature engineering and a strategy for addressing class imbalance seemed to pay off, as can be seen by the success of the high performance of the UWaterloo and ECNU systems. The success of the best performing system (Turing) can be attributed both to the use of LSTM to address Team Score Confidence RMSE IITP 0.393 0.746 the problem as a sequential task and the choice of word embeddings. Subtask B, veracity classification of a source tweet, was viewed as either a threeway (NileTMRG, ECNU, IITP) or two-way (IKM, DFKI-DKT) single tweet classification task. Results are given in Table 3 for the open variant, where external resources may be used, 2 and Table 4 for the closed variant -with no external resource use permitted. 
The systems used mostly similar features and classifiers to those in Subtask A, though some added features more specific to the distribution of SDQC labels in replies to the source tweet (e.g. the best performing system in this task, NileTMRG, considered the percentage of reply tweets classified as either S, D or Q).", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 201, |
|
"end": 208, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 306, |
|
"end": 313, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
}, |
|
{ |
|
"start": 3378, |
|
"end": 3385, |
|
"text": "Table 3", |
|
"ref_id": "TABREF3" |
|
}, |
|
{ |
|
"start": 3452, |
|
"end": 3459, |
|
"text": "Table 4", |
|
"ref_id": "TABREF4" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Participant Systems and Results", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Detecting and verifying rumours is a critical task and in the current media landscape, vital to populations so they can make decisions based on the truth. This shared task brought together many approaches to fixing veracity in real media, working through community interactions and claims made on the web. Many systems were able to achieve good results on unravelling the argument around various claims, finding out whether a discussion supports, denies, questions or comments on rumours.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "The commentary around a story often helps determine how true that story is, so this advance is a great positive. However, finding out accurately whether a story is false or true remains really tough. Systems did not reach the most-commonclass baseline, despite the data not being exceptionally skewed. even the best systems could have the wrong level of confidence in a true/false judgment, weakly verifying stories that are true and so on. This tells us that we are making progress, but that the problem is so far very hard.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "RumourEval leaves behind competitive results, a large number of approaches to be dissected by future researchers, and a benchmark dataset of thousands of documents and novel news stories. This sets a good baseline for the next steps in the area of fake news detection, as well as the material anyone needs to get started on the problem and evaluate and improve their systems.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "http://www.pheme.eu/events/rdsm2015/", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Namely, the 20160901 English Wikipedia dump.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "This work is supported by the European Commission's 7th Framework Programme for research, under grant No. 611223 PHEME. This work is also supported by the European Unions Horizon 2020 research and innovation programme under grant agreement No. 687847 COMRADES. We are grateful to Swissinfo.ch for their extended support in the form of journalistic advice, keeping the task well-grounded, and annotation and task design efforts. We also extend our thanks to the SemEval organisers for their sustained hard work, and to our participants for bearing with us during the first shared task of this nature and all the joy and trouble that comes with it.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Cats rule and dogs drool!: Classifying stance in online debate", |
|
"authors": [ |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Anand", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marilyn", |
|
"middle": [], |
|
"last": "Walker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rob", |
|
"middle": [], |
|
"last": "Abbott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jean", |
|
"middle": [ |
|
"E" |
|
], |
|
"last": "Fox Tree", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Robeson", |
|
"middle": [], |
|
"last": "Bowmani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Minor", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the 2Nd Workshop on Computational Approaches to Subjectivity and Sentiment Analysis", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--9", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Pranav Anand, Marilyn Walker, Rob Abbott, Jean E. Fox Tree, Robeson Bowmani, and Michael Minor. 2011. Cats rule and dogs drool!: Clas- sifying stance in online debate. In Proceedings of the 2Nd Workshop on Computational Ap- proaches to Subjectivity and Sentiment Analy- sis. Association for Computational Linguistics, Stroudsburg, PA, USA, WASSA '11, pages 1-9.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "UWaterloo at SemEval-2017 Task 8: Detecting Stance towards Rumours with Topic Independent Features", |
|
"authors": [ |
|
{ |
|
"first": "Hareesh", |
|
"middle": [], |
|
"last": "Bahuleyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Olga", |
|
"middle": [], |
|
"last": "Vechtomova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of SemEval. ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hareesh Bahuleyan and Olga Vechtomova. 2017. UWaterloo at SemEval-2017 Task 8: Detecting Stance towards Rumours with Topic Independent Features. In Proceedings of SemEval. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "The seventh Pascal Recognizing Textual Entailment challenge", |
|
"authors": [ |
|
{ |
|
"first": "Luisa", |
|
"middle": [], |
|
"last": "Bentivogli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Clark", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ido", |
|
"middle": [], |
|
"last": "Dagan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hoa", |
|
"middle": [], |
|
"last": "Dang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danilo", |
|
"middle": [], |
|
"last": "Giampiccolo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the Text Analysis Conference. NIST", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luisa Bentivogli, Peter Clark, Ido Dagan, Hoa Dang, and Danilo Giampiccolo. 2011. The seventh Pascal Recognizing Textual Entailment challenge. In Pro- ceedings of the Text Analysis Conference. NIST.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "IKM at SemEval-2017 Task 8: Convolutional Neural Networks for Stance Detection and Rumor Verification", |
|
"authors": [ |
|
{ |
|
"first": "Yi-Chin", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhao-Yand", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hung-Yu", |
|
"middle": [], |
|
"last": "Kao", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of SemEval. ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yi-Chin Chen, Zhao-Yand Liu, and Hung-Yu Kao. 2017. IKM at SemEval-2017 Task 8: Convolutional Neural Networks for Stance Detection and Rumor Verification. In Proceedings of SemEval. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Pheme: Veracity in digital social networks", |
|
"authors": [ |
|
{ |
|
"first": "Leon", |
|
"middle": [], |
|
"last": "Derczynski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kalina", |
|
"middle": [], |
|
"last": "Bontcheva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "UMAP Workshops", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Leon Derczynski and Kalina Bontcheva. 2014. Pheme: Veracity in digital social networks. In UMAP Work- shops.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "NileTMRG at SemEval-2017 Task 8: Determining Rumour and Veracity Support for Rumours on Twitter", |
|
"authors": [ |
|
{ |
|
"first": "Omar", |
|
"middle": [], |
|
"last": "Enayet", |
|
"suffix": "" |
|
}, |
|
{

"first": "Samhaa",

"middle": [

"R"

],

"last": "El-Beltagy",

"suffix": ""

}
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of SemEval. ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Omar Enayet and Samhaa R. El-Beltagy. 2017. NileTMRG at SemEval-2017 Task 8: Determining Rumour and Veracity Support for Rumours on Twit- ter. In Proceedings of SemEval. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Mama Edha at SemEval-2017 Task 8: Stance Classification with CNN and Rules", |
|
"authors": [ |
|
{ |
|
"first": "Marianela", |
|
"middle": [ |
|
"Garc\u00eda" |
|
], |
|
"last": "Lozano", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hanna", |
|
"middle": [], |
|
"last": "Lilja", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Edward", |
|
"middle": [], |
|
"last": "Tj\u00f6rnhammar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maja Maja", |
|
"middle": [], |
|
"last": "Karasalo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of SemEval. ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marianela Garc\u00eda Lozano, Hanna Lilja, Edward Tj\u00f6rnhammar, and Maja Maja Karasalo. 2017. Mama Edha at SemEval-2017 Task 8: Stance Clas- sification with CNN and Rules. In Proceedings of SemEval. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Tweetcred: Real-time credibility assessment of content on twitter", |
|
"authors": [ |
|
{ |
|
"first": "Aditi", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ponnurangam", |
|
"middle": [], |
|
"last": "Kumaraguru", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Carlos", |
|
"middle": [], |
|
"last": "Castillo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Patrick", |
|
"middle": [], |
|
"last": "Meier", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "SocInfo", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "228--243", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Aditi Gupta, Ponnurangam Kumaraguru, Carlos Castillo, and Patrick Meier. 2014. Tweet- cred: Real-time credibility assessment of con- tent on twitter. In SocInfo. pages 228-243.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Tweets and truth: Journalism as a discipline of collaborative verification", |
|
"authors": [ |
|
{ |
|
"first": "Alfred", |
|
"middle": [], |
|
"last": "Hermida", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Journalism Practice", |
|
"volume": "6", |
|
"issue": "5-6", |
|
"pages": "659--668", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alfred Hermida. 2012. Tweets and truth: Journalism as a discipline of collaborative verification. Journalism Practice 6(5-6):659-668.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Turing at SemEval-2017 Task 8: Sequential Approach to Rumour Stance Classification with Branch-LSTM", |
|
"authors": [ |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Kochkina", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Liakata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabelle", |
|
"middle": [], |
|
"last": "Augenstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of SemEval. ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Elena Kochkina, Maria Liakata, and Isabelle Augen- stein. 2017. Turing at SemEval-2017 Task 8: Se- quential Approach to Rumour Stance Classification with Branch-LSTM. In Proceedings of SemEval. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Detecting misinformation in online social networks using cognitive psychology", |
|
"authors": [ |
|
{

"first": "KP Krishna",

"middle": [],

"last": "Kumar",

"suffix": ""

},

{

"first": "G",

"middle": [],

"last": "Geethakumari",

"suffix": ""

}
|
], |
|
"year": 2014, |
|
"venue": "Human-centric Computing and Information Sciences", |
|
"volume": "4", |
|
"issue": "1", |
|
"pages": "1--22", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "KP Krishna Kumar and G Geethakumari. 2014. De- tecting misinformation in online social networks us- ing cognitive psychology. Human-centric Comput- ing and Information Sciences 4(1):1-22.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Real-time rumor debunking on twitter", |
|
"authors": [ |
|
{ |
|
"first": "Xiaomo", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Armineh", |
|
"middle": [], |
|
"last": "Nourbakhsh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quanzhi", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rui", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sameena", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 24th ACM International on Conference on Information and Knowledge Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1867--1870", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaomo Liu, Armineh Nourbakhsh, Quanzhi Li, Rui Fang, and Sameena Shah. 2015. Real-time rumor debunking on twitter. In Proceedings of the 24th ACM International on Conference on Information and Knowledge Management. ACM, pages 1867- 1870.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Classifying tweet level judgements of rumours in social media", |
|
"authors": [ |
|
{ |
|
"first": "Michal", |
|
"middle": [], |
|
"last": "Lukasik", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Trevor", |
|
"middle": [], |
|
"last": "Cohn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kalina", |
|
"middle": [], |
|
"last": "Bontcheva", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "2590--2595", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michal Lukasik, Trevor Cohn, and Kalina Bontcheva. 2015. Classifying tweet level judgements of ru- mours in social media. In Proceedings of the Con- ference on Empirical Methods in Natural Language Processing. volume 2, pages 2590-2595.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "SemEval-2016 Task 6: Detecting Stance in Tweets", |
|
"authors": [ |
|
{

"first": "Saif",

"middle": [

"M"

],

"last": "Mohammad",

"suffix": ""

},

{

"first": "Svetlana",

"middle": [],

"last": "Kiritchenko",

"suffix": ""

},

{

"first": "Parinaz",

"middle": [],

"last": "Sobhani",

"suffix": ""

},

{

"first": "Xiaodan",

"middle": [],

"last": "Zhu",

"suffix": ""

},

{

"first": "Colin",

"middle": [],

"last": "Cherry",

"suffix": ""

}
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the Workshop on Semantic Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saif M Mohammad, Svetlana Kiritchenko, Parinaz Sobhani, Xiaodan Zhu, and Colin Cherry. 2016. SemEval-2016 Task 6: Detecting Stance in Tweets. In Proceedings of the Workshop on Semantic Evalu- ation.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Semeval-2015 task 3: Answer selection in community question answering", |
|
"authors": [ |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Moschitti", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llu\u0131s", |
|
"middle": [], |
|
"last": "Marquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Walid", |
|
"middle": [], |
|
"last": "Magdy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Glass", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bilal", |
|
"middle": [], |
|
"last": "Randeree", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alessandro Moschitti, Preslav Nakov, Llu\u0131s Marquez, Walid Magdy, James Glass, and Bilal Randeree. 2015. Semeval-2015 task 3: Answer selection in community question answering. SemEval-2015 page 269.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Reading the riots: What were the Police doing on Twitter?", |
|
"authors": [ |
|
{ |
|
"first": "Rob", |
|
"middle": [], |
|
"last": "Procter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Crump", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Susanne", |
|
"middle": [], |
|
"last": "Karstedt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Voss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marta", |
|
"middle": [], |
|
"last": "Cantijoch", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "Policing and Society", |
|
"volume": "23", |
|
"issue": "4", |
|
"pages": "413--436", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rob Procter, Jeremy Crump, Susanne Karstedt, Alex Voss, and Marta Cantijoch. 2013a. Reading the ri- ots: What were the Police doing on Twitter? Polic- ing and Society 23(4):413-436.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Reading the riots on twitter: methodological innovation for the analysis of big data", |
|
"authors": [ |
|
{ |
|
"first": "Rob", |
|
"middle": [], |
|
"last": "Procter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Farida", |
|
"middle": [], |
|
"last": "Vis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Voss", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "International journal of social research methodology", |
|
"volume": "16", |
|
"issue": "3", |
|
"pages": "197--214", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Rob Procter, Farida Vis, and Alex Voss. 2013b. Read- ing the riots on twitter: methodological innovation for the analysis of big data. International journal of social research methodology 16(3):197-214.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Rumor has it: Identifying misinformation in microblogs", |
|
"authors": [ |
|
{

"first": "Vahed",

"middle": [],

"last": "Qazvinian",

"suffix": ""

},

{

"first": "Emily",

"middle": [],

"last": "Rosengren",

"suffix": ""

},

{

"first": "Dragomir",

"middle": [

"R"

],

"last": "Radev",

"suffix": ""

},

{

"first": "Qiaozhu",

"middle": [],

"last": "Mei",

"suffix": ""

}
|
], |
|
"year": 2011, |
|
"venue": "Proceedings of the Conference on Empirical Methods in Natural Language Processing. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1589--1599", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vahed Qazvinian, Emily Rosengren, Dragomir R Radev, and Qiaozhu Mei. 2011. Rumor has it: Iden- tifying misinformation in microblogs. In Proceed- ings of the Conference on Empirical Methods in Nat- ural Language Processing. Association for Compu- tational Linguistics, pages 1589-1599.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Hoaxy: A platform for tracking online misinformation", |
|
"authors": [ |
|
{ |
|
"first": "Chengcheng", |
|
"middle": [], |
|
"last": "Shao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giovanni", |
|
"middle": [ |
|
"Luca" |
|
], |
|
"last": "Ciampaglia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Flammini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Filippo", |
|
"middle": [], |
|
"last": "Menczer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1603.01511" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chengcheng Shao, Giovanni Luca Ciampaglia, Alessandro Flammini, and Filippo Menczer. 2016. Hoaxy: A platform for tracking online misinformation. arXiv preprint arXiv:1603.01511 .", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "IITP at SemEval-2017 Task 8: A Supervised Approach for Rumour Evaluation", |
|
"authors": [ |
|
{ |
|
"first": "Vikram", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sunny", |
|
"middle": [], |
|
"last": "Narayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Md", |
|
"middle": [ |
|
"Shad" |
|
], |
|
"last": "Akhtar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Asif", |
|
"middle": [], |
|
"last": "Ekbal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pushpak", |
|
"middle": [], |
|
"last": "Bhattacharya", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of SemEval. ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vikram Singh, Sunny Narayan, Md Shad Akhtar, Asif Ekbal, and Pushpak Bhattacharya. 2017. IITP at SemEval-2017 Task 8: A Supervised Approach for Rumour Evaluation. In Proceedings of SemEval. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "DFKI-DKT at SemEval-2017 Task 8: Rumour Detection and Classification using Cascading Heuristics", |
|
"authors": [ |
|
{ |
|
"first": "Ankit", |
|
"middle": [], |
|
"last": "Srivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rehm", |
|
"middle": [], |
|
"last": "Rehm", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julian Moreno", |
|
"middle": [], |
|
"last": "Schneider", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of SemEval. ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ankit Srivastava, Rehm Rehm, and Julian Moreno Schneider. 2017. DFKI-DKT at SemEval- 2017 Task 8: Rumour Detection and Classification using Cascading Heuristics. In Proceedings of SemEval. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "Big questions for social media big data: Representativeness, validity and other methodological pitfalls", |
|
"authors": [ |
|
{ |
|
"first": "Zeynep", |
|
"middle": [], |
|
"last": "Tufekci", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the AAAI International Conference on Weblogs and Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zeynep Tufekci. 2014. Big questions for social me- dia big data: Representativeness, validity and other methodological pitfalls. In Proceedings of the AAAI International Conference on Weblogs and Social Media.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "A work-in-process literature review: Incorporating social media in risk and crisis communication", |
|
"authors": [ |
|
{ |
|
"first": "Shari", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Veil", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tara", |
|
"middle": [], |
|
"last": "Buehner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Palenchar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2011, |
|
"venue": "Journal of contingencies and crisis management", |
|
"volume": "19", |
|
"issue": "2", |
|
"pages": "110--122", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shari R Veil, Tara Buehner, and Michael J Palenchar. 2011. A work-in-process literature review: Incor- porating social media in risk and crisis communica- tion. Journal of contingencies and crisis manage- ment 19(2):110-122.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "ECNU at SemEval-2017 Task 8: Rumour Evaluation Using Effective Features and Supervised Ensemble Models", |
|
"authors": [ |
|
{ |
|
"first": "Feixiang", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Man", |
|
"middle": [], |
|
"last": "Lan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yuanbin", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of SemEval. ACL", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Feixiang Wang, Man Lan, and Yuanbin Wu. 2017. ECNU at SemEval-2017 Task 8: Rumour Evalua- tion Using Effective Features and Supervised En- semble Models. In Proceedings of SemEval. ACL.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "Automatic detection of rumor on social network", |
|
"authors": [ |
|
{ |
|
"first": "Qiao", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shuiyuan", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jian", |
|
"middle": [], |
|
"last": "Dong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jinhua", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xueqi", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Natural Language Processing and Chinese Computing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "113--122", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Qiao Zhang, Shuiyuan Zhang, Jian Dong, Jinhua Xiong, and Xueqi Cheng. 2015. Automatic de- tection of rumor on social network. In Natu- ral Language Processing and Chinese Computing, Springer, pages 113-122.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Detection and resolution of rumours in social media: A survey", |
|
"authors": [ |
|
{ |
|
"first": "Arkaitz", |
|
"middle": [], |
|
"last": "Zubiaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmet", |
|
"middle": [], |
|
"last": "Aker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kalina", |
|
"middle": [], |
|
"last": "Bontcheva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Liakata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rob", |
|
"middle": [], |
|
"last": "Procter", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1704.00656" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arkaitz Zubiaga, Ahmet Aker, Kalina Bontcheva, Maria Liakata, and Rob Procter. 2017. Detection and resolution of rumours in social media: A survey. arXiv preprint arXiv:1704.00656 .", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Crowdsourcing the annotation of rumourous conversations in social media", |
|
"authors": [ |
|
{ |
|
"first": "Arkaitz", |
|
"middle": [], |
|
"last": "Zubiaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Liakata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rob", |
|
"middle": [], |
|
"last": "Procter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kalina", |
|
"middle": [], |
|
"last": "Bontcheva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Tolmie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 24th International Conference on World Wide Web: Companion volume. International World Wide Web Conferences Steering Committee", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "347--353", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arkaitz Zubiaga, Maria Liakata, Rob Procter, Kalina Bontcheva, and Peter Tolmie. 2015a. Crowdsourc- ing the annotation of rumourous conversations in social media. In Proceedings of the 24th Interna- tional Conference on World Wide Web: Companion volume. International World Wide Web Conferences Steering Committee, pages 347-353.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Towards detecting rumours in social media", |
|
"authors": [ |
|
{ |
|
"first": "Arkaitz", |
|
"middle": [], |
|
"last": "Zubiaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Liakata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rob", |
|
"middle": [], |
|
"last": "Procter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kalina", |
|
"middle": [], |
|
"last": "Bontcheva", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Tolmie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the AAAI Workshop on AI for Cities", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arkaitz Zubiaga, Maria Liakata, Rob Procter, Kalina Bontcheva, and Peter Tolmie. 2015b. Towards de- tecting rumours in social media. In Proceedings of the AAAI Workshop on AI for Cities.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Geraldine Wong Sak Hoi, and Peter Tolmie. 2016a. PHEME rumour scheme dataset: Journalism use case", |
|
"authors": [ |
|
{ |
|
"first": "Arkaitz", |
|
"middle": [], |
|
"last": "Zubiaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Liakata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rob", |
|
"middle": [], |
|
"last": "Procter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geraldine", |
|
"middle": [], |
|
"last": "Wong Sak Hoi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Tolmie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.6084/m9.figshare.2068650.v1" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arkaitz Zubiaga, Maria Liakata, Rob Procter, Geral- dine Wong Sak Hoi, and Peter Tolmie. 2016a. PHEME rumour scheme dataset: Journalism use case. doi:10.6084/m9.figshare.2068650.v1.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "Analysing how people orient to and spread rumours in social media by looking at conversational threads", |
|
"authors": [ |
|
{ |
|
"first": "Arkaitz", |
|
"middle": [], |
|
"last": "Zubiaga", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Maria", |
|
"middle": [], |
|
"last": "Liakata", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rob", |
|
"middle": [], |
|
"last": "Procter", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Geraldine", |
|
"middle": [], |
|
"last": "Wong Sak Hoi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Tolmie", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "PLoS ONE", |
|
"volume": "11", |
|
"issue": "3", |
|
"pages": "1--29", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arkaitz Zubiaga, Maria Liakata, Rob Procter, Geral- dine Wong Sak Hoi, and Peter Tolmie. 2016b. Analysing how people orient to and spread ru- mours in social media by looking at con- versational threads. PLoS ONE 11(3):1-29.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"text": "Veracity prediction examples: u1: Hostage-taker in supermarket siege killed, reports say. #ParisAttacks -LINK-[true] u1: OMG. #Prince rumoured to be performing in Toronto today. Exciting! [false]", |
|
"num": null, |
|
"uris": null, |
|
"type_str": "figure" |
|
}, |
|
"TABREF1": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "", |
|
"content": "<table/>" |
|
}, |
|
"TABREF3": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "Results for Task B: Rumour veracityopen variant.", |
|
"content": "<table><tr><td>Team</td><td colspan=\"2\">Score Confidence RMSE</td></tr><tr><td colspan=\"2\">DFKI DKT 0.393</td><td>0.845</td></tr><tr><td>ECNU</td><td>0.464</td><td>0.736</td></tr><tr><td>IITP</td><td>0.286</td><td>0.807</td></tr><tr><td>IKM</td><td>0.536</td><td>0.763</td></tr><tr><td colspan=\"2\">NileTMRG 0.536</td><td>0.672</td></tr><tr><td>Baseline</td><td>0.571</td><td>-</td></tr></table>" |
|
}, |
|
"TABREF4": { |
|
"html": null, |
|
"num": null, |
|
"type_str": "table", |
|
"text": "", |
|
"content": "<table><tr><td>: Results for Task B: Rumour veracity -</td></tr><tr><td>closed variant.</td></tr></table>" |
|
} |
|
} |
|
} |
|
} |