|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T13:12:36.505286Z" |
|
}, |
|
"title": "Detecting the Role of an Entity in Harmful Memes: Techniques and Their Limitations", |
|
"authors": [ |
|
{ |
|
"first": "Rabindra", |
|
"middle": [], |
|
"last": "Nath", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "BJIT Limited", |
|
"location": { |
|
"settlement": "Dhaka", |
|
"country": "Bangladesh" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Firoj", |
|
"middle": [], |
|
"last": "Alam", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "HBKU", |
|
"location": { |
|
"settlement": "Doha", |
|
"country": "Qatar" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "HBKU", |
|
"location": { |
|
"settlement": "Doha", |
|
"country": "Qatar" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Harmful or abusive online content has been increasing over time, raising concerns for social media platforms, government agencies, and policymakers. Such harmful or abusive content can have major negative impact on society, e.g., cyberbullying can lead to suicides, rumors about COVID-19 can cause vaccine hesitance, promotion of fake cures for COVID-19 can cause health harms and deaths. The content that is posted and shared online can be textual, visual, or a combination of both, e.g., in a meme. Here, we describe our experiments in detecting the roles of the entities (hero, villain, victim) in harmful memes, which is part of the CONSTRAINT-2022 shared task, as well as our system for the task. We further provide a comparative analysis of different experimental settings (i.e., unimodal, multimodal, attention, and augmentation). For reproducibility, we make our experimental code publicly available. 1", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Harmful or abusive online content has been increasing over time, raising concerns for social media platforms, government agencies, and policymakers. Such harmful or abusive content can have major negative impact on society, e.g., cyberbullying can lead to suicides, rumors about COVID-19 can cause vaccine hesitance, promotion of fake cures for COVID-19 can cause health harms and deaths. The content that is posted and shared online can be textual, visual, or a combination of both, e.g., in a meme. Here, we describe our experiments in detecting the roles of the entities (hero, villain, victim) in harmful memes, which is part of the CONSTRAINT-2022 shared task, as well as our system for the task. We further provide a comparative analysis of different experimental settings (i.e., unimodal, multimodal, attention, and augmentation). For reproducibility, we make our experimental code publicly available. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Social media have become one of the main communication channels for sharing information online. Unfortunately, they have been abused by malicious actors to promote their agenda using manipulative content, thus continuously plaguing political events, and the public debate, e.g., regarding the ongoing COVID-19 infodemic (Alam et al., 2021d; . Such type of content includes harm and hostility (Brooke, 2019; Joksimovic et al., 2019) , hate speech (Fortuna and Nunes, 2018) , offensive language (Zampieri et al., 2019; Rosenthal et al., 2021) , abusive language (Mubarak et al., 2017) , propaganda (Da San Martino et al., 2019 Martino et al., , 2020 , cyberbullying (Van Hee et al., 2015) , cyberaggression (Kumar et al., 2018) , and other kinds of harmful content (Pramanick et al., 2021; Sharma et al., 2022b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 320, |
|
"end": 340, |
|
"text": "(Alam et al., 2021d;", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 392, |
|
"end": 406, |
|
"text": "(Brooke, 2019;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 407, |
|
"end": 431, |
|
"text": "Joksimovic et al., 2019)", |
|
"ref_id": "BIBREF28" |
|
}, |
|
{ |
|
"start": 446, |
|
"end": 471, |
|
"text": "(Fortuna and Nunes, 2018)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 493, |
|
"end": 516, |
|
"text": "(Zampieri et al., 2019;", |
|
"ref_id": "BIBREF63" |
|
}, |
|
{ |
|
"start": 517, |
|
"end": 540, |
|
"text": "Rosenthal et al., 2021)", |
|
"ref_id": "BIBREF45" |
|
}, |
|
{ |
|
"start": 560, |
|
"end": 582, |
|
"text": "(Mubarak et al., 2017)", |
|
"ref_id": "BIBREF38" |
|
}, |
|
{ |
|
"start": 604, |
|
"end": 624, |
|
"text": "Martino et al., 2019", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 625, |
|
"end": 647, |
|
"text": "Martino et al., , 2020", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 664, |
|
"end": 686, |
|
"text": "(Van Hee et al., 2015)", |
|
"ref_id": "BIBREF59" |
|
}, |
|
{ |
|
"start": 705, |
|
"end": 725, |
|
"text": "(Kumar et al., 2018)", |
|
"ref_id": "BIBREF32" |
|
}, |
|
{ |
|
"start": 763, |
|
"end": 787, |
|
"text": "(Pramanick et al., 2021;", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 788, |
|
"end": 809, |
|
"text": "Sharma et al., 2022b)", |
|
"ref_id": "BIBREF49" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The propagation of such content is often done by coordinated groups (Hristakieva et al., 2022) using automated tools and targeting specific individuals, communities, and companies. There have been many research efforts to develop automated tools to detect such kind of content. Several recent surveys have highlighted these aspects, which include fake news , misinformation and disinformation (Alam et al., 2021c; Hardalov et al., 2022) , rumours (Bondielli and Marcelloni, 2019) , propaganda (Da San Martino et al., 2020) , hate speech (Fortuna and Nunes, 2018; Schmidt and Wiegand, 2017) , cyberbullying (Haidar et al., 2016) , offensive (Husain and Uzuner, 2021) and harmful content (Sharma et al., 2022b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 68, |
|
"end": 94, |
|
"text": "(Hristakieva et al., 2022)", |
|
"ref_id": "BIBREF24" |
|
}, |
|
{ |
|
"start": 393, |
|
"end": 413, |
|
"text": "(Alam et al., 2021c;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 414, |
|
"end": 436, |
|
"text": "Hardalov et al., 2022)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 447, |
|
"end": 479, |
|
"text": "(Bondielli and Marcelloni, 2019)", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 501, |
|
"end": 522, |
|
"text": "Martino et al., 2020)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 537, |
|
"end": 562, |
|
"text": "(Fortuna and Nunes, 2018;", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 563, |
|
"end": 589, |
|
"text": "Schmidt and Wiegand, 2017)", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 606, |
|
"end": 627, |
|
"text": "(Haidar et al., 2016)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 640, |
|
"end": 665, |
|
"text": "(Husain and Uzuner, 2021)", |
|
"ref_id": "BIBREF25" |
|
}, |
|
{ |
|
"start": 686, |
|
"end": 708, |
|
"text": "(Sharma et al., 2022b)", |
|
"ref_id": "BIBREF49" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The content shared on social media comes in different forms: textual, visual, or audio-visual. Among other social media content, recently, internet memes became popular. Memes are defined as \"a group of digital items sharing common characteristics of content, form, or stance, which were created by associating them and were circulated, imitated, or transformed via the Internet by many users\" (Shifman, 2013) . Memes typically consist of images containing some text (Shifman, 2013; Suryawanshi et al., 2020a,b) . They are often shared for the purpose of having fun. However, memes can also be created and shared with bad intentions. This includes attacks on people based on characteristics such as ethnicity, race, sex, gender identity, disability, disease, nationality, and immigration status (Zannettou et al., 2018; Kiela et al., 2020) . There has been research effort to develop computational methods to detect such memes, such as detecting hateful memes (Kiela et al., 2020) , propaganda (Dimitrov et al., 2021a) , offensiveness (Suryawanshi et al., 2020a) , sexist memes (Fersini et al., 2019) , troll memes (Suryawanshi and Chakravarthi, 2021) , and generally harmful memes (Pramanick et al., 2021; Sharma et al., 2022a) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 394, |
|
"end": 409, |
|
"text": "(Shifman, 2013)", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 467, |
|
"end": 482, |
|
"text": "(Shifman, 2013;", |
|
"ref_id": "BIBREF50" |
|
}, |
|
{ |
|
"start": 483, |
|
"end": 511, |
|
"text": "Suryawanshi et al., 2020a,b)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 795, |
|
"end": 819, |
|
"text": "(Zannettou et al., 2018;", |
|
"ref_id": "BIBREF64" |
|
}, |
|
{ |
|
"start": 820, |
|
"end": 839, |
|
"text": "Kiela et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 960, |
|
"end": 980, |
|
"text": "(Kiela et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 994, |
|
"end": 1018, |
|
"text": "(Dimitrov et al., 2021a)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1035, |
|
"end": 1062, |
|
"text": "(Suryawanshi et al., 2020a)", |
|
"ref_id": "BIBREF56" |
|
}, |
|
{ |
|
"start": 1078, |
|
"end": 1100, |
|
"text": "(Fersini et al., 2019)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1115, |
|
"end": 1151, |
|
"text": "(Suryawanshi and Chakravarthi, 2021)", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 1182, |
|
"end": 1206, |
|
"text": "(Pramanick et al., 2021;", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 1207, |
|
"end": 1228, |
|
"text": "Sharma et al., 2022a)", |
|
"ref_id": "BIBREF48" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Harmful memes often target individuals, organizations, or social entities. Pramanick et al. (2021) developed a dataset where the annotation consists of (i) whether a meme is harmful or not, and (ii) whether it targets an individual, an organization, a community, or society. The CONSTRAINT-2022 shared task follows a similar line of research (Sharma et al., 2022c) . The entities in a meme are first identified and then the task asks participants to predict which entities are glorified, vilified, or victimized in the meme. The task is formulated as \"Given a meme and an entity, determine the role of the entity in the meme: hero vs. villain vs. victim vs. other.\" More details are given in Section 3.", |
|
"cite_spans": [ |
|
{ |
|
"start": 75, |
|
"end": 98, |
|
"text": "Pramanick et al. (2021)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 342, |
|
"end": 364, |
|
"text": "(Sharma et al., 2022c)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Memes are multimodal in nature, but the textual and the visual content in a meme are sometimes unrelated, which can make them hard to analyze for traditional multimodal approaches. Moreover, context (e.g., where the meme was posted) plays an important role for understanding its content. Another important factor is that since the text in the meme is overlaid on top of the image, the text needs to be extracted using OCR, which can result in errors that require additional manual post-editing (Dimitrov et al., 2021a) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 494, |
|
"end": 518, |
|
"text": "(Dimitrov et al., 2021a)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Here, we address a task about entity role labeling for harmful memes based on the dataset released in the CONSTRAINT-2022 shared task; see the task overview paper for more detail (Sharma et al., 2022c) . This task is different from traditional semantic role labeling in NLP (Palmer et al., 2010) , where understanding who did what to whom, when, where, and why is typically addressed as a sequence labeling problem (He et al., 2017) . Recently, this has also been studied for visual content (Sadhu et al., 2021) , i.e., situation recognition (Yatskar et al., 2016; Pratt et al., 2020) , visual semantic role labeling (Gupta and Malik, 2015; Silberer and Pinkal, 2018; , and human-object interaction (Chao et al., 2015 (Chao et al., , 2018 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 179, |
|
"end": 201, |
|
"text": "(Sharma et al., 2022c)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 274, |
|
"end": 295, |
|
"text": "(Palmer et al., 2010)", |
|
"ref_id": "BIBREF41" |
|
}, |
|
{ |
|
"start": 415, |
|
"end": 432, |
|
"text": "(He et al., 2017)", |
|
"ref_id": "BIBREF23" |
|
}, |
|
{ |
|
"start": 491, |
|
"end": 511, |
|
"text": "(Sadhu et al., 2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 542, |
|
"end": 564, |
|
"text": "(Yatskar et al., 2016;", |
|
"ref_id": "BIBREF62" |
|
}, |
|
{ |
|
"start": 565, |
|
"end": 584, |
|
"text": "Pratt et al., 2020)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 617, |
|
"end": 640, |
|
"text": "(Gupta and Malik, 2015;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 641, |
|
"end": 667, |
|
"text": "Silberer and Pinkal, 2018;", |
|
"ref_id": "BIBREF51" |
|
}, |
|
{ |
|
"start": 699, |
|
"end": 717, |
|
"text": "(Chao et al., 2015", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 718, |
|
"end": 738, |
|
"text": "(Chao et al., , 2018", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To address the entity role labeling for a potentially harmful meme, we investigate textual, visual, and multimodal content using different pretrained models such as BERT (Devlin et al., 2019) , VGG16 (Simonyan and Zisserman, 2015) , and other visionlanguage models (Ben-younes et al., 2019) . We further explore different textual data augmentation techniques and attention methods. For the shared task participation, we used only the image modality, which resulted in an underperforming system in the leaderboard.", |
|
"cite_spans": [ |
|
{ |
|
"start": 170, |
|
"end": 191, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 200, |
|
"end": 230, |
|
"text": "(Simonyan and Zisserman, 2015)", |
|
"ref_id": "BIBREF52" |
|
}, |
|
{ |
|
"start": 265, |
|
"end": 290, |
|
"text": "(Ben-younes et al., 2019)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Further studies using other modalities and approaches improved the performance of our system, but it is still lower (0.464 macro F1) than the best system (0.586). Yet, our investigation might be useful to understand which approaches are useful for detecting the role of an entity in harmful memes.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Our contributions can be summarized as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 we addressed the problem both as sequence labeling and as classification;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 we investigated different pretrained models for text and images;", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 we explored several combinations of multimodal models, as well as attention mechanisms, and various augmentation techniques.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The rest of the paper is organized as follows: Section 2 presents previous work, Section 3 describes the task and the dataset, Section 4 formulates our experiments, Section 5 discusses the evaluation results. Finally, Section 6 concludes and points to possible directions for future work.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Below, we discuss previous work on semantic role labeling and harmful content detection, both in general and in a multimodal context.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "Textual semantic role labeling has been widely studied in NLP, where the idea is to understand who did what to whom, when, where, and why. Traditionally, the task has been addressed using sequence labeling, e.g., FitzGerald et al. (2015) used local and structured learning, experimenting with PropBank and FrameNet, and Larionov et al. (2019) investigated recent transformer models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 213, |
|
"end": 237, |
|
"text": "FitzGerald et al. (2015)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 320, |
|
"end": 342, |
|
"text": "Larionov et al. (2019)", |
|
"ref_id": "BIBREF34" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Role Labeling", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "Visual semantic role labeling has been explored for images and video. Yatskar et al. (2016) addressed situation recognition, and developed a large-scale dataset containing over 500 activities, 1,700 roles, 11,000 objects, 125,000 images, and 200,000 unique situations. The images were collected from Google and the authors addressed the task as a situation recognition problem. Pratt et al. (2020) developed a dataset for situation recognition consisting of 278,336 bounding-box groundings to the 11,538 entity classes. Gupta and Malik (2015) developed a dataset of 16K examples in 10K images with actions and associated objects in the scene with different semantic roles for each action. Yang et al. (2016) worked on integrating language and vision with explicit and implicit roles. Silberer and Pinkal (2018) learned frame-semantic representations of the images. Sadhu et al. (2021) approached the same problem for video, developing a dataset of 29K 10-second movie clips, annotated with verbs and semantics roles for every two seconds of video content.", |
|
"cite_spans": [ |
|
{ |
|
"start": 70, |
|
"end": 91, |
|
"text": "Yatskar et al. (2016)", |
|
"ref_id": "BIBREF62" |
|
}, |
|
{ |
|
"start": 378, |
|
"end": 397, |
|
"text": "Pratt et al. (2020)", |
|
"ref_id": "BIBREF43" |
|
}, |
|
{ |
|
"start": 520, |
|
"end": 542, |
|
"text": "Gupta and Malik (2015)", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 689, |
|
"end": 707, |
|
"text": "Yang et al. (2016)", |
|
"ref_id": "BIBREF60" |
|
}, |
|
{ |
|
"start": 784, |
|
"end": 810, |
|
"text": "Silberer and Pinkal (2018)", |
|
"ref_id": "BIBREF51" |
|
}, |
|
{ |
|
"start": 865, |
|
"end": 884, |
|
"text": "Sadhu et al. (2021)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Semantic Role Labeling", |
|
"sec_num": "2.1" |
|
}, |
|
{ |
|
"text": "There has been significant effort for identifying misinformation, disinformation, and malinformation online (Schmidt and Wiegand, 2017; Bondielli and Marcelloni, 2019; Da San Martino et al., 2020; Alam et al., 2021c; Afridi et al., 2020; Hristakieva et al., 2022; . Most of these studies focused on textual and multimodal content. Compared to that, modeling the harmful aspects of memes has not received much attention.", |
|
"cite_spans": [ |
|
{ |
|
"start": 108, |
|
"end": 135, |
|
"text": "(Schmidt and Wiegand, 2017;", |
|
"ref_id": "BIBREF47" |
|
}, |
|
{ |
|
"start": 136, |
|
"end": 167, |
|
"text": "Bondielli and Marcelloni, 2019;", |
|
"ref_id": "BIBREF3" |
|
}, |
|
{ |
|
"start": 168, |
|
"end": 196, |
|
"text": "Da San Martino et al., 2020;", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 197, |
|
"end": 216, |
|
"text": "Alam et al., 2021c;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 217, |
|
"end": 237, |
|
"text": "Afridi et al., 2020;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 238, |
|
"end": 263, |
|
"text": "Hristakieva et al., 2022;", |
|
"ref_id": "BIBREF24" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Harmful Content Detection in Memes", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Recent effort in this direction include categorizing hateful memes (Kiela et al., 2020) , detecting antisemitism (Chandra et al., 2021) , detecting the propagandistic techniques used in a meme (Dimitrov et al., 2021a), detecting harmful memes and the target of the harm (Pramanick et al., 2021) , identifying the protected categories that were attacked (Zia et al., 2021), and identifying offensive content (Suryawanshi et al., 2020a) . Among these studies, the most notable low-level efforts that advanced research by providing high-quality datasets to experiment with include shared tasks such as the Hateful Memes Challenge (Kiela et al., 2020) , the SemEval-2021 shared task on detecting persuasion techniques in memes (Dimitrov et al., 2021b) , and the troll meme classification task (Suryawanshi and Chakravarthi, 2021) . Chandra et al. (2021) investigated antisemitism along with its types as a binary and a multi-class classification problem using pretrained transformers and convolutional neural networks (CNNs) as modality-specific encoders along with various multimodal fusion strategies. Dimitrov et al. (2021a) developed a dataset with 22 propaganda techniques and investigated the different state-of-the-art pretrained models, demonstrating that joint visionlanguage models performed better than unimodal ones. Pramanick et al. (2021) addressed two tasks: detecting harmful memes and identifying the social entities they target, using a multimodal model with local and global information. Zia et al. (2021) went one step further than a binary classification of hateful memes, focusing on a more fine-grained categorization based on the protected category that was being attacked (i.e., race, disability, religion, nationality, sex) and the type of attack (i.e., contempt, mocking, inferiority, slurs, exclusion, dehumanizing, inciting violence) using the dataset released in the WOAH 2020 Shared Task. 2 Fersini et al. 
(2019) studied sexist memes and investigated the textual cues using late fusion. They also developed a dataset of 800 misogynistic memes covering different manifestations of hatred against women (e.g., body shaming, stereotyping, objectification, and violence), collected from different social media (Gasparini et al., 2021) . Kiela et al. (2021) summarized the participating systems in the Hateful Memes Challenge, where the best systems fine-tuned unimodal and multimodal pre-training transformer models such as Vi-sualBERT (Li et al., 2019) VL-BERT (Su et al., 2020) , UNITER , VILLA , and built ensembles on top of them.", |
|
"cite_spans": [ |
|
{ |
|
"start": 67, |
|
"end": 87, |
|
"text": "(Kiela et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 113, |
|
"end": 135, |
|
"text": "(Chandra et al., 2021)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 270, |
|
"end": 294, |
|
"text": "(Pramanick et al., 2021)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 407, |
|
"end": 434, |
|
"text": "(Suryawanshi et al., 2020a)", |
|
"ref_id": "BIBREF56" |
|
}, |
|
{ |
|
"start": 627, |
|
"end": 647, |
|
"text": "(Kiela et al., 2020)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 723, |
|
"end": 747, |
|
"text": "(Dimitrov et al., 2021b)", |
|
"ref_id": "BIBREF15" |
|
}, |
|
{ |
|
"start": 789, |
|
"end": 825, |
|
"text": "(Suryawanshi and Chakravarthi, 2021)", |
|
"ref_id": "BIBREF55" |
|
}, |
|
{ |
|
"start": 828, |
|
"end": 849, |
|
"text": "Chandra et al. (2021)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 1100, |
|
"end": 1123, |
|
"text": "Dimitrov et al. (2021a)", |
|
"ref_id": "BIBREF14" |
|
}, |
|
{ |
|
"start": 1325, |
|
"end": 1348, |
|
"text": "Pramanick et al. (2021)", |
|
"ref_id": "BIBREF42" |
|
}, |
|
{ |
|
"start": 2233, |
|
"end": 2257, |
|
"text": "(Gasparini et al., 2021)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 2260, |
|
"end": 2279, |
|
"text": "Kiela et al. (2021)", |
|
"ref_id": "BIBREF30" |
|
}, |
|
{ |
|
"start": 2459, |
|
"end": 2476, |
|
"text": "(Li et al., 2019)", |
|
"ref_id": "BIBREF35" |
|
}, |
|
{ |
|
"start": 2485, |
|
"end": 2502, |
|
"text": "(Su et al., 2020)", |
|
"ref_id": "BIBREF54" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Harmful Content Detection in Memes", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "The SemEval-2021 propaganda detection shared task (Dimitrov et al., 2021b ) focused on detecting the use of propaganda techniques in the meme, and the participants' systems showed that multimodal cues were very important.", |
|
"cite_spans": [ |
|
{ |
|
"start": 50, |
|
"end": 73, |
|
"text": "(Dimitrov et al., 2021b", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Harmful Content Detection in Memes", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "In the troll meme classification shared task (Suryawanshi and Chakravarthi, 2021) , the best system used ResNet152 and BERT with multimodal attention, and most systems used pretrained transformers for the text, CNNs for the images, and early fusion to combine the two modalities.", |
|
"cite_spans": [ |
|
{ |
|
"start": 45, |
|
"end": 81, |
|
"text": "(Suryawanshi and Chakravarthi, 2021)", |
|
"ref_id": "BIBREF55" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Harmful Content Detection in Memes", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Combining modalities causes several challenges, which arise due to representation issues (i.e., symbolic representation for language vs. signal representation for the visual modality), misalignment between the modalities, and fusion and transferring knowledge between the modalities. In order to address multimodal problems, a lot of effort has been paid to developing different fusion techniques such as (i) early fusion, where lowlevel features from different modalities are learned, fused, and fed into a single prediction model (Jin et al., 2017b; Yang et al., 2018; Zhang et al., 2019; Singhal et al., 2019; Kang et al., 2020) , (ii) late fusion, where unimodal decisions are fused with some mechanisms such as averaging and voting (Agrawal et al., 2017; Qi et al., 2019) , and (iii) hybrid fusion, where a subset of the learned features are passed to the final classifier (early fusion), and the remaining modalities are fed to the classifier later (late fusion) (Jin et al., 2017a) . Here, we use early fusion and joint learning for fusion.", |
|
"cite_spans": [ |
|
{ |
|
"start": 532, |
|
"end": 551, |
|
"text": "(Jin et al., 2017b;", |
|
"ref_id": "BIBREF27" |
|
}, |
|
{ |
|
"start": 552, |
|
"end": 570, |
|
"text": "Yang et al., 2018;", |
|
"ref_id": "BIBREF61" |
|
}, |
|
{ |
|
"start": 571, |
|
"end": 590, |
|
"text": "Zhang et al., 2019;", |
|
"ref_id": "BIBREF65" |
|
}, |
|
{ |
|
"start": 591, |
|
"end": 612, |
|
"text": "Singhal et al., 2019;", |
|
"ref_id": "BIBREF53" |
|
}, |
|
{ |
|
"start": 613, |
|
"end": 631, |
|
"text": "Kang et al., 2020)", |
|
"ref_id": "BIBREF29" |
|
}, |
|
{ |
|
"start": 737, |
|
"end": 759, |
|
"text": "(Agrawal et al., 2017;", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 760, |
|
"end": 776, |
|
"text": "Qi et al., 2019)", |
|
"ref_id": "BIBREF44" |
|
}, |
|
{ |
|
"start": 969, |
|
"end": 988, |
|
"text": "(Jin et al., 2017a)", |
|
"ref_id": "BIBREF26" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Harmful Content Detection in Memes", |
|
"sec_num": "2.2" |
|
}, |
|
{ |
|
"text": "Below, we describe the CONSTRAINT 2022 shared task and the corresponding dataset provided by the task organizers. More detail can be found in the shared task report (Sharma et al., 2022c) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 165, |
|
"end": 187, |
|
"text": "(Sharma et al., 2022c)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task and Dataset", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "The CONSTRAINT 2022 shared task asked participating systems to detect the role of the entities in the meme, given the meme and a list of these entities. Figure 1 shows an example of an image with the extracted OCR text, implicit (image showing Salman Khan, who is not mentioned in the text), and explicit entities and their roles. The example illustrates various challenges: (i) an implicit entity, (ii) text extracted from the label of the vial, which has little connection to the overlaid written text, (iii) unclear target entity in the meme (Vladimir Putin). Such complexities are not common in the multimodal tasks we discussed above. The textual representation of the entities and their roles are different than for typical CoNLL-style semantic role labeling tasks (Carreras and M\u00e0rquez, 2005) , which makes it more difficult to address the problem in the same formulation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 771, |
|
"end": 799, |
|
"text": "(Carreras and M\u00e0rquez, 2005)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 153, |
|
"end": 161, |
|
"text": "Figure 1", |
|
"ref_id": "FIGREF0" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Task", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "By observing these challenges, we first attempted to address the problem in the same formulation: as a sequence labeling problem by converting the data to CoNLL format (see Section 4.1). Then, we further tried to address it as a classification task, i.e., predict the role of each entity in a given meme-entity pair.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Task", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "We use the dataset provided for the CONSTRAINT 2022 shared task. It contains harmful memes, OCRextracted text from these memes, and manually annotated entities with four roles: hero, villian, victim, and other. The datasets cover two domains: COVID-19 and US Politics. The COVID-19 domain consists of 2,700 training and 300 validation examples, while US Politics has 2,852 training and 350 validation examples. The test dataset combines examples from both domains, COVID-19 and US Politics, and has a total of 718 examples. For the experiments, we combined the two domains, COVID-19 and US Politics, which resulted in 5,552 training and 650 validation examples.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The class distribution of the entity roles, aggregated over all memes, in the combined COVID-19 + US Politics dataset is highly imbalanced as shown in Table 1 . We can see that overall the role of hero represents only 2%, and the role of victim covers only 5% of the entities. We can further see that the vast majority of the entities are labeled with the other role. This skewed distribution adds additional complexity to the modeling task.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 151, |
|
"end": 158, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Data", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Settings: We addressed the problem both as a sequence labeling and as a classification task. Below, we discuss each of them in detail.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "Evaluation measures: In our experiments, we used accuracy, macro-average precision, recall, and F 1 score. The latter was the official evaluation measure for the shared task. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Experiments", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "For the sequence labeling experiments, we first converted the OCR text and the entities to the CoNLL BIO-format. An example is shown in Figure 2 . To convert them, we matched the entities in the text and we assigned the same tag (role label) to the token in the text. For the implicit entity that is not in the text, we added them at the end of the text and we assigned them the annotated role; we labeled all other tokens with the O-tag. We trained the model using Conditional Random Fields (CRFs) (Lafferty et al., 2001) , which has been widely used in earlier work. As features, we used part-of-speech tags, token length, tri-grams, presence of digits, use of special characters, token shape, w2vcluster, LDA topics, words present in a vocabulary list built on the training set, and in a name list, etc. 3 We ran two sets of experiments: (i) using the same format, and (ii) using only entities as shown in Figure 2 .", |
|
"cite_spans": [ |
|
{ |
|
"start": 499, |
|
"end": 522, |
|
"text": "(Lafferty et al., 2001)", |
|
"ref_id": "BIBREF33" |
|
}, |
|
{ |
|
"start": 807, |
|
"end": 808, |
|
"text": "3", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 136, |
|
"end": 144, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
}, |
|
{ |
|
"start": 909, |
|
"end": 917, |
|
"text": "Figure 2", |
|
"ref_id": "FIGREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Sequence Labeling", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "For the classification experiments, we first converted the dataset into a classification problem. As it contains all examples with one or more entities, we reorganized the dataset so that an example contains an entity, OCR text, image, and entity role. Hence, the dataset size is now the same as the number of entity instances rather than memes. We ended up with 17,514 training examples, which is the number of training entities as shown in Table 1 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 442, |
|
"end": 449, |
|
"text": "Table 1", |
|
"ref_id": "TABREF1" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Classification", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "We then ran different unimodal and multimodal experiments: (i) only text, (ii) only meme, and (iii) text and meme together. For each setting, we also ran several baseline experiments. We further ran advanced experiments such as adding attention to the network and text-based data augmentation. Figure 3 shows our experimental pipeline for this classification task. For the unimodal experiments, we used individual modalities, and we trained them using different pre-trained models.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 294, |
|
"end": 302, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Classification", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "3 More details about the feature set can be found at https: //github.com/moejoe95/crf-vs-rnn-ner Note that for the text modality, we ran several combinations of fusion (e.g., text and entity) experiments. For the multimodal experiments, we combined embedding from both modalities, and we ran the classification on the fused embedding, as shown in Figure 3 .", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 347, |
|
"end": 355, |
|
"text": "Figure 3", |
|
"ref_id": "FIGREF2" |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Classification", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "For the text modality, we experimented using BERT (Devlin et al., 2019) and XLM-RoBERTa (Liu et al., 2019) . We performed ten reruns for each experiment using different random seeds, and then we picked the model that performed best on the development set. We used a batch size of 8, a learning rate of 2e-5, a maximum sequence length of 128, three epochs, and categorical cross-entropy as the loss function. We used the Transformer toolkit to train the transformer-based models.", |
|
"cite_spans": [ |
|
{ |
|
"start": 50, |
|
"end": 71, |
|
"text": "(Devlin et al., 2019)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 88, |
|
"end": 106, |
|
"text": "(Liu et al., 2019)", |
|
"ref_id": "BIBREF37" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Modality", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "Using the text-only modality, we also ran a different combination of experiments using the text and the entities, where we used bilinear fusion to combine them. We discuss this fusion technique in more detail in Section 4.2.3.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Text Modality", |
|
"sec_num": "4.2.1" |
|
}, |
|
{ |
|
"text": "For our experiments using the image modality, we extract features from a pre-trained model, and then we trained an SVM classifier using these features. In particular, we extracted features from the penultimate layer of the EfficientNet-b1 (EffNet) model (Tan and Le, 2019) , which was trained using the ImageNet dataset. For training the model using the extracted features, we used SVM with its default parameter settings, with no further optimization of its hyper-parameter values. We chose EffNet as it was shown to achieve better performance for some social media image classification tasks (Alam et al., 2021a,b) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 254, |
|
"end": 272, |
|
"text": "(Tan and Le, 2019)", |
|
"ref_id": "BIBREF58" |
|
}, |
|
{ |
|
"start": 594, |
|
"end": 616, |
|
"text": "(Alam et al., 2021a,b)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Image Modality", |
|
"sec_num": "4.2.2" |
|
}, |
|
{ |
|
"text": "For the multimodal experiments, we used the BLOCK Fusion (Ben-younes et al., 2019) approach, which was originally proposed for question answering (QA). Our motivation is that an entity can be seen like a question about the meme context, asking for its role as an answer. In a QA setting, there are three elements: (i) a context (image or text), (ii) a question, and (iii) a list of answers. The goal is to select the right answer from the answer list. Similarly, we have four types of answers (i.e., roles). The task formation is that for an entity and a context (image or text), we need to determine the role of the entity in that context. BLOCK fusion is a multi-modal framework based on block-superdiagonal tensor decomposition, where tensor blocks are decomposed into blocks of smaller sizes, with the size characterized by a set of mode-n ranks (De Lathauwer, 2008) . It is a bilinear model that takes two vectors x 1 \u2208 R I and x 2 \u2208 R J as input and then projects them to a K-dimensional space with tensor products:", |
|
"cite_spans": [ |
|
{ |
|
"start": 850, |
|
"end": 870, |
|
"text": "(De Lathauwer, 2008)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multimodal: Text and Image", |
|
"sec_num": "4.2.3" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "y = \\mathcal{T} \\times_1 x_1 \\times_2 x_2, \\text{ where } y \\in \\mathbb{R}^K. \\text{ Each component of } y \\text{ is a quadratic form of the inputs, } \\forall k \\in [1; K]: y_k = \\sum_{i=1}^{I} \\sum_{j=1}^{J} \\mathcal{T}_{ijk}\\, x_1^i\\, x_2^j", |
|
"eq_num": "(1)" |
|
} |
|
], |
|
"section": "Multimodal: Text and Image", |
|
"sec_num": "4.2.3" |
|
}, |
|
{ |
|
"text": "BLOCK fusion can model bilinear interactions between groups of features, while limiting the complexity of the model, but keeping expressive high dimensional mono-model representations (Benyounes et al., 2019) . We used BLOCK fusion in different settings: (i) for image and entity, (ii) for text and entity, and (iii) for text, image with entity.", |
|
"cite_spans": [ |
|
{ |
|
"start": 184, |
|
"end": 208, |
|
"text": "(Benyounes et al., 2019)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multimodal: Text and Image", |
|
"sec_num": "4.2.3" |
|
}, |
|
{ |
|
"text": "Text and entity: We extracted embedding representation for the entity and the text using a pretrained BERT model. We then fed both embedding representations into linear layers of 512 neurons each. The output of two linear layers is taken as input to the trainable block fusion network. Then, a regularization layer and linear layer are used before the final layer.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multimodal: Text and Image", |
|
"sec_num": "4.2.3" |
|
}, |
|
{ |
|
"text": "Image and entity: To build embedding representations for the image and the entity, we used a vision transformer (ViT) (Dosovitskiy et al., 2021) and BERT pretrained models. The output of two different modalities was then used as input to the block fusion network.", |
|
"cite_spans": [ |
|
{ |
|
"start": 118, |
|
"end": 144, |
|
"text": "(Dosovitskiy et al., 2021)", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multimodal: Text and Image", |
|
"sec_num": "4.2.3" |
|
}, |
|
{ |
|
"text": "Image, text, and entity: In this setting, we first built embedding representations for the text and the image using pretrained BERT and ViT models, respectively. Then, we concatenated these representations (text + image) and we passed them to a linear layer with 512 neurons. We then extracted embedding representation for the target entity using the pretrained BERT model. Afterwards, we merged the text + image and the entity representations and we fed them into the fusion layer. In this way, we combined the image and the text representations as a unified context, aiming to predict the role of the target entity in this context.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multimodal: Text and Image", |
|
"sec_num": "4.2.3" |
|
}, |
|
{ |
|
"text": "In all the experiments, we used a learning rate of 1e\u22126, a batch size of 8, and a maximum text length of 512.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Multimodal: Text and Image", |
|
"sec_num": "4.2.3" |
|
}, |
|
{ |
|
"text": "We ran two additional sets of experiments using attention mechanism and augmentation, as using such approaches has been shown to help in many natural language processing (NLP) tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additional Experiments", |
|
"sec_num": "4.2.4" |
|
}, |
|
{ |
|
"text": "Attention: In the entity + image block fusion network, we used block fusion to merge the entity and the image representations. Instead of using the image representation directly, we used attention mechanism on the image and then we fed the attended features along with the entity representation into the entity + image block. To compute the attention, we used the PyTorchNLP library. 4 In a similar fashion, we applied the attention mechanism to the text and to the combined text + image representation.", |
|
"cite_spans": [ |
|
{ |
|
"start": 384, |
|
"end": 385, |
|
"text": "4", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additional Experiments", |
|
"sec_num": "4.2.4" |
|
}, |
|
{ |
|
"text": "Augmentation: Text data augmentation has recently gained a lot of popularity as a way to address data scarceness and class imbalance (Feng et al., 2021) . We used three types of text augmentation techniques to balance the distribution of the different class: (i) synonym augmentation using Word-Net, (ii) word substitution using BERT, and (iii) a combination thereof. In our experiments, we used the NLPAug data augmentation package. 5 Note that we applied six times augmentation for the hero class, twice for the villain class, and three times for the victim class. These numbers were empirically set and require further investigation in future work.", |
|
"cite_spans": [ |
|
{ |
|
"start": 133, |
|
"end": 152, |
|
"text": "(Feng et al., 2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 434, |
|
"end": 435, |
|
"text": "5", |
|
"ref_id": null |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additional Experiments", |
|
"sec_num": "4.2.4" |
|
}, |
|
{ |
|
"text": "Exp.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Additional Experiments", |
|
"sec_num": "4.2.4" |
|
}, |
|
{ |
|
"text": "All tokens 0.51 0.32 0.21 0.24 Only entities 0.77 0.40 0.27 0.25 Table 2 : Evaluation results on the test set for the sequence labeling reformulation of the problem.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 65, |
|
"end": 72, |
|
"text": "Table 2", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Acc P R F1", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Below, we first discuss our sequence labeling and classification experiments. We then perform some analysis, and finally, we put our results in a broader perspective in the context of the shared task. Table 2 shows the evaluation results on the test set for our sequence labeling reformulation of the problem. We performed two experiments: one where we used as input the entire meme text (i.e., all tokens), and another one where we used the concatenation of the target entities only. We can see that the latter performed marginally better, but overall the macro-F1 score is quite low in both cases. Table 3 shows the evaluation results on the test set for our classification reformulation of the problem. We computed the majority class baseline (row 0), which always predicts the most frequent label in the training set. Due to time limitations, our official submission used the image modality only, which resulted in a very low macro-F1 score of 0.23, as shown in row 1. For our text modality experiments, we used the meme text and the entities. We experimented with BERT and XLM-RoBERTa, obtaining better results using the former. Using the BLOCK fusion technique on unimodal (text + entity) and multimodality (text + image + entity) yielded sizable improvements. The combination of image + text (rows 6 and 9) did not yield much better results compared to using text only (row 4). Next, we added attention on top of block fusion, which improved the performance, but there was no much difference between the different combinations (rows 7-9). Considering only the text and the entity, we observe an improvement using text augmentation. Among the different augmentation techniques, there was no performance difference between WordNet and BERT, and combining them yielded worse results. Table 3 : Evaluation results on the test set for our classification reformulation of the problem. Our official submission for the shared task is shown in italic.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 201, |
|
"end": 208, |
|
"text": "Table 2", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 600, |
|
"end": 607, |
|
"text": "Table 3", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 1788, |
|
"end": 1795, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results and Discussion", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Next, we studied the impact of using attention and data augmentation on the individual entity roles: hero, villain, victim, and other. Table 4 shows the impact of using attention on (a) entity + image (left side), and (b) entity + [image + text] (right side) combinations. We can observe a sizable gain for the hero (+0.09), the villain (+0.06), and the victim (+0.07) roles in the former case (a). However, for case (b), there is an improvement for the victim role only; yet, this improvement is quite sizable: +0.16. Table 5 shows the impact of data augmentation using WordNet or BERT on the individual roles. We can observe sizable performance gains of +0.11 for the hero role, and +0.04 for the villain role, when using WordNet-based data augmentation. Similarly, BERT-based data augmentation yields +0.12 for the hero role, and +0.02 for the villain role. However, the impact of either augmentation on the victim and on the other role is negligible. Table 5 : Role-level results on the test set for the entity + text combination with and without augmentation.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 135, |
|
"end": 142, |
|
"text": "Table 4", |
|
"ref_id": "TABREF4" |
|
}, |
|
{ |
|
"start": 519, |
|
"end": 526, |
|
"text": "Table 5", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 955, |
|
"end": 962, |
|
"text": "Table 5", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Role-Level Analysis", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "For our official submission for the task, we used the image modality system from line 1 in Table 3 , which was quite weak, with a macro-F1 score of 0.23. Our subsequent experiments and analysis pointed to several promising directions: (i) combining the textual and the image modalities, (ii) using attention, (iii) performing data augmentation. As a result, we managed to improve our results to 0.46. Yet, this is still far behind the F1-score of the winning system: 0.5867.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 91, |
|
"end": 98, |
|
"text": "Table 3", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Official Submission", |
|
"sec_num": "5.4" |
|
}, |
|
{ |
|
"text": "We addressed the problem of understanding the role of the entities in harmful memes, as part of the CONSTRAINT-2022 shared task. We presented a comparative analysis of the importance of different modalities: the text and the image. We further experimented with two task reformulations \u2013 sequence labeling and classification \u2013 and we found the latter to work better. Overall, we obtained improvements when using BLOCK fusion, attention between the image and the text representations, and data augmentation.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "In future work, we plan to combine the sequence and the classification formulations in a joint multimodal setting. We further want to experiment with multi-task learning using other meme analysis tasks and datasets. Last but not least, we plan to develop better data augmentation techniques to improve the performance on the low-frequency roles.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion and Future Work", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "https://github.com/robi56/harmful_ memes_block_fusion", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://github.com/facebookresearch/ fine_grained_hateful_memes", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "http://github.com/PetrochukM/ PyTorch-NLP 5 https://github.com/makcedward/nlpaug", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Shivam Sharma, Tharun Suresh, Atharva Jitendra, Himanshi Mathur, Preslav Nakov, Md. Shad Akhtar, and Tanmoy Chakraborty. 2022c. Findings of the constraint 2022 shared task on detecting the hero, the villain, and the victim in memes. In Proceedings of the Workshop on Combating Online Hostile Posts in Regional Languages during Emergency Situations, CONSTRAINT '22, Dublin, Ireland. Association for Computational Linguistics.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The work is part of the Tanbih mega-project, which is developed at the Qatar Computing Research Institute, HBKU, and aims to limit the impact of \"fake news,\" propaganda, and media bias by making users aware of what they are reading, thus promoting media literacy and critical thinking.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgments", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Hamed Firooz, and Preslav Nakov. 2021c. A survey on multimodal disinformation detection", |
|
"authors": [ |
|
{ |
|
"first": "Shaden", |
|
"middle": [], |
|
"last": "Martino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Shaar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2103.12541" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martino, Shaden Shaar, Hamed Firooz, and Preslav Nakov. 2021c. A survey on multimodal disinforma- tion detection. arXiv preprint arXiv:2103.12541.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Fighting the COVID-19 infodemic: Modeling the perspective of journalists, fact-checkers, social media platforms, policy makers, and the society", |
|
"authors": [ |
|
{ |
|
"first": "Firoj", |
|
"middle": [], |
|
"last": "Alam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaden", |
|
"middle": [], |
|
"last": "Shaar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fahim", |
|
"middle": [], |
|
"last": "Dalvi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hassan", |
|
"middle": [], |
|
"last": "Sajjad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Nikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hamdy", |
|
"middle": [], |
|
"last": "Mubarak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giovanni", |
|
"middle": [], |
|
"last": "Da San", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [], |
|
"last": "Martino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nadir", |
|
"middle": [], |
|
"last": "Abdelali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kareem", |
|
"middle": [], |
|
"last": "Durrani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Abdulaziz", |
|
"middle": [], |
|
"last": "Darwish", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wajdi", |
|
"middle": [], |
|
"last": "Al-Homaid", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tommaso", |
|
"middle": [], |
|
"last": "Zaghouani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gijs", |
|
"middle": [], |
|
"last": "Caselli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Friso", |
|
"middle": [], |
|
"last": "Danoe", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Britt", |
|
"middle": [], |
|
"last": "Stolk", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Bruntink", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Findings of the Association for Computational Linguistics, EMNLP (Findings) '21", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "611--649", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Firoj Alam, Shaden Shaar, Fahim Dalvi, Hassan Saj- jad, Alex Nikolov, Hamdy Mubarak, Giovanni Da San Martino, Ahmed Abdelali, Nadir Durrani, Kareem Darwish, Abdulaziz Al-Homaid, Wajdi Za- ghouani, Tommaso Caselli, Gijs Danoe, Friso Stolk, Britt Bruntink, and Preslav Nakov. 2021d. Fighting the COVID-19 infodemic: Modeling the perspective of journalists, fact-checkers, social media platforms, policy makers, and the society. In Findings of the Association for Computational Linguistics, EMNLP (Findings) '21, pages 611-649, Punta Cana, Domini- can Republic. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "BLOCK: Bilinear superdiagonal fusion for visual question answering and visual relationship detection", |
|
"authors": [ |
|
{ |
|
"first": "Hedi", |
|
"middle": [], |
|
"last": "Ben-Younes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Remi", |
|
"middle": [], |
|
"last": "Cadene", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nicolas", |
|
"middle": [], |
|
"last": "Thome", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthieu", |
|
"middle": [], |
|
"last": "Cord", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the AAAI Conference on Artificial Intelligence, AAAI '19", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1609/aaai.v33i01.33018102" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hedi Ben-younes, Remi Cadene, Nicolas Thome, and Matthieu Cord. 2019. BLOCK: Bilinear superdiag- onal fusion for visual question answering and visual relationship detection. In Proceedings of the AAAI Conference on Artificial Intelligence, AAAI '19, Honolulu, Hawaii, USA. AAAI Press.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "A survey on fake news and rumour detection techniques", |
|
"authors": [ |
|
{ |
|
"first": "Alessandro", |
|
"middle": [], |
|
"last": "Bondielli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Francesco", |
|
"middle": [], |
|
"last": "Marcelloni", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Information Sciences", |
|
"volume": "497", |
|
"issue": "", |
|
"pages": "38--55", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alessandro Bondielli and Francesco Marcelloni. 2019. A survey on fake news and rumour detection tech- niques. Information Sciences, 497:38-55.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Condescending, Rude, Assholes\": Framing gender and hostility on Stack Overflow", |
|
"authors": [ |
|
{ |
|
"first": "Sian", |
|
"middle": [], |
|
"last": "Brooke", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Third Workshop on Abusive Language Online", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "172--180", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-3519" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sian Brooke. 2019. \"Condescending, Rude, Assholes\": Framing gender and hostility on Stack Overflow. In Proceedings of the Third Workshop on Abusive Lan- guage Online, pages 172-180, Florence, Italy. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Introduction to the CoNLL-2005 shared task: Semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "Xavier", |
|
"middle": [], |
|
"last": "Carreras", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Llu\u00eds", |
|
"middle": [], |
|
"last": "M\u00e0rquez", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2005, |
|
"venue": "Proceedings of the 9th Conference on Computational Natural Language Learning, CoNLL '05", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "152--164", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xavier Carreras and Llu\u00eds M\u00e0rquez. 2005. Introduction to the CoNLL-2005 shared task: Semantic role label- ing. In Proceedings of the 9th Conference on Com- putational Natural Language Learning, CoNLL '05, pages 152-164, Ann Arbor, Michigan, USA.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Subverting the Jewtocracy\": Online antisemitism detection using multimodal deep learning", |
|
"authors": [ |
|
{ |
|
"first": "Mohit", |
|
"middle": [], |
|
"last": "Chandra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dheeraj", |
|
"middle": [], |
|
"last": "Reddy Pailla", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Himanshu", |
|
"middle": [], |
|
"last": "Bhatia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aadilmehdi", |
|
"middle": [ |
|
"J" |
|
], |
|
"last": "Sanchawala", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manish", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manish", |
|
"middle": [], |
|
"last": "Shrivastava", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ponnurangam", |
|
"middle": [], |
|
"last": "Kumaraguru", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 13th ACM Web Science Conference 2021, WebSci '21", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "148--157", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3447535.3462502" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mohit Chandra, Dheeraj Reddy Pailla, Himanshu Bhatia, AadilMehdi J. Sanchawala, Manish Gupta, Manish Shrivastava, and Ponnurangam Kumaraguru. 2021. \"Subverting the Jewtocracy\": Online anti- semitism detection using multimodal deep learning. In Proceedings of the 13th ACM Web Science Con- ference 2021, WebSci '21, pages 148-157. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Learning to detect humanobject interactions", |
|
"authors": [ |
|
{ |
|
"first": "Yu-Wei", |
|
"middle": [], |
|
"last": "Chao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yunfan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xieyang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Huayi", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jia", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 IEEE Winter Conference on Applications of Computer Vision, WACV '18", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "381--389", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu-Wei Chao, Yunfan Liu, Xieyang Liu, Huayi Zeng, and Jia Deng. 2018. Learning to detect human- object interactions. In Proceedings of the 2018 IEEE Winter Conference on Applications of Com- puter Vision, WACV '18, pages 381-389, Lake Tahoe, Nevada, USA. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "HICO: A benchmark for recognizing human-object interactions in images", |
|
"authors": [ |
|
{ |
|
"first": "Yu-Wei", |
|
"middle": [], |
|
"last": "Chao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yugeng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiaxuan", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jia", |
|
"middle": [], |
|
"last": "Deng", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the IEEE International Conference on Computer Vision, ICCV '15", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1017--1025", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yu-Wei Chao, Zhan Wang, Yugeng He, Jiaxuan Wang, and Jia Deng. 2015. HICO: A benchmark for rec- ognizing human-object interactions in images. In Proceedings of the IEEE International Conference on Computer Vision, ICCV '15, pages 1017-1025, Santiago, Chile. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "UNITER: UNiversal Image-TExt Representation learning", |
|
"authors": [ |
|
{ |
|
"first": "Yen-Chun", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linjie", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Licheng", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ahmed", |
|
"middle": [ |
|
"El" |
|
], |
|
"last": "Kholy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Faisal", |
|
"middle": [], |
|
"last": "Ahmed", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Gan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingjing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the European Conference on Computer Vision, ECCV '20", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "104--120", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. 2020. UNITER: UNiversal Image- TExt Representation learning. In Proceedings of the European Conference on Computer Vision, ECCV '20, pages 104-120, Cham. Springer Inter- national Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "A survey on computational propaganda detection", |
|
"authors": [ |

{ |

"first": "Giovanni", |

"middle": [], |

"last": "Da San Martino", |

"suffix": "" |

}, |

{ |

"first": "Stefano", |

"middle": [], |

"last": "Cresci", |

"suffix": "" |

}, |

{ |

"first": "Alberto", |

"middle": [], |

"last": "Barr\u00f3n-Cede\u00f1o", |

"suffix": "" |

}, |

{ |

"first": "Seunghak", |

"middle": [], |

"last": "Yu", |

"suffix": "" |

}, |

{ |

"first": "Roberto", |

"middle": [ |

"Di" |

], |

"last": "Pietro", |

"suffix": "" |

}, |

{ |

"first": "Preslav", |

"middle": [], |

"last": "Nakov", |

"suffix": "" |

} |

], |
|
"year": 2020, |
|
"venue": "Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, IJCAI '20", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4826--4832", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.24963/ijcai.2020/672" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Giovanni Da San Martino, Stefano Cresci, Alberto Barr\u00f3n-Cede\u00f1o, Seunghak Yu, Roberto Di Pietro, and Preslav Nakov. 2020. A survey on computa- tional propaganda detection. In Proceedings of the Twenty-Ninth International Joint Conference on Ar- tificial Intelligence, IJCAI '20, pages 4826-4832, Online. IJCAI.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Fine-grained analysis of propaganda in news article", |
|
"authors": [ |

{ |

"first": "Giovanni", |

"middle": [], |

"last": "Da San Martino", |

"suffix": "" |

}, |

{ |

"first": "Seunghak", |

"middle": [], |

"last": "Yu", |

"suffix": "" |

}, |

{ |

"first": "Alberto", |

"middle": [], |

"last": "Barr\u00f3n-Cede\u00f1o", |

"suffix": "" |

}, |

{ |

"first": "Rostislav", |

"middle": [], |

"last": "Petrov", |

"suffix": "" |

}, |

{ |

"first": "Preslav", |

"middle": [], |

"last": "Nakov", |

"suffix": "" |

} |

], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing, EMNLP-IJCNLP '19", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5636--5646", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D19-1565" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Giovanni Da San Martino, Seunghak Yu, Alberto Barr\u00f3n-Cede\u00f1o, Rostislav Petrov, and Preslav Nakov. 2019. Fine-grained analysis of propaganda in news article. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing, EMNLP- IJCNLP '19, pages 5636-5646, Hong Kong, China. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Decompositions of a higher-order tensor in block terms-part ii: Definitions and uniqueness", |
|
"authors": [ |
|
{ |
|
"first": "Lieven", |
|
"middle": [], |
|
"last": "De Lathauwer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "SIAM Journal on Matrix Analysis and Applications", |
|
"volume": "30", |
|
"issue": "3", |
|
"pages": "1033--1066", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Lieven De Lathauwer. 2008. Decompositions of a higher-order tensor in block terms-part ii: Defini- tions and uniqueness. SIAM Journal on Matrix Anal- ysis and Applications, 30(3):1033-1066.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
|
"authors": [ |
|
{ |
|
"first": "Jacob", |
|
"middle": [], |
|
"last": "Devlin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ming-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kristina", |
|
"middle": [], |
|
"last": "Toutanova", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT '19", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4171--4186", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1423" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, NAACL-HLT '19, pages 4171-4186, Min- neapolis, Minnesota, USA. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Detecting propaganda techniques in memes", |
|
"authors": [ |
|
{ |
|
"first": "Dimitar", |
|
"middle": [], |
|
"last": "Dimitrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaden", |
|
"middle": [], |
|
"last": "Bishr Bin Ali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Firoj", |
|
"middle": [], |
|
"last": "Shaar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabrizio", |
|
"middle": [], |
|
"last": "Alam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hamed", |
|
"middle": [], |
|
"last": "Silvestri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Firooz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giovanni Da San", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Martino", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing, ACL-IJCNLP '21", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6603--6617", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.acl-long.516" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dimitar Dimitrov, Bishr Bin Ali, Shaden Shaar, Firoj Alam, Fabrizio Silvestri, Hamed Firooz, Preslav Nakov, and Giovanni Da San Martino. 2021a. De- tecting propaganda techniques in memes. In Pro- ceedings of the 59th Annual Meeting of the Associa- tion for Computational Linguistics and the 11th In- ternational Joint Conference on Natural Language Processing, ACL-IJCNLP '21, pages 6603-6617, Online. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Task 6 at SemEval-2021: Detection of persuasion techniques in texts and images", |
|
"authors": [ |
|
{ |
|
"first": "Dimitar", |
|
"middle": [], |
|
"last": "Dimitrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaden", |
|
"middle": [], |
|
"last": "Bishr Bin Ali", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Firoj", |
|
"middle": [], |
|
"last": "Shaar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabrizio", |
|
"middle": [], |
|
"last": "Alam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hamed", |
|
"middle": [], |
|
"last": "Silvestri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Firooz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giovanni Da San", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Martino", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 15th International Workshop on Semantic Evaluation, SemEval '21", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Dimitar Dimitrov, Bishr Bin Ali, Shaden Shaar, Firoj Alam, Fabrizio Silvestri, Hamed Firooz, Preslav Nakov, and Giovanni Da San Martino. 2021b. Task 6 at SemEval-2021: Detection of persuasion tech- niques in texts and images. In Proceedings of the 15th International Workshop on Semantic Evalua- tion, SemEval '21, Bangkok, Thailand. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Jakob Uszkoreit, and Neil Houlsby. 2021. An image is worth 16x16 words: Transformers for image recognition at scale", |
|
"authors": [ |
|
{ |
|
"first": "Alexey", |
|
"middle": [], |
|
"last": "Dosovitskiy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lucas", |
|
"middle": [], |
|
"last": "Beyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alexander", |
|
"middle": [], |
|
"last": "Kolesnikov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dirk", |
|
"middle": [], |
|
"last": "Weissenborn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaohua", |
|
"middle": [], |
|
"last": "Zhai", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Unterthiner", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mostafa", |
|
"middle": [], |
|
"last": "Dehghani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthias", |
|
"middle": [], |
|
"last": "Minderer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georg", |
|
"middle": [], |
|
"last": "Heigold", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sylvain", |
|
"middle": [], |
|
"last": "Gelly", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the International", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. 2021. An image is worth 16x16 words: Transformers for image recog- nition at scale. In Proceedings of the International", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "A survey on automatic detection of hate speech in text", |
|
"authors": [ |
|
{ |
|
"first": "Paula", |
|
"middle": [], |
|
"last": "Fortuna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "S\u00e9rgio", |
|
"middle": [], |
|
"last": "Nunes", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "ACM Computing Surveys", |
|
"volume": "51", |
|
"issue": "4", |
|
"pages": "1--30", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Paula Fortuna and S\u00e9rgio Nunes. 2018. A survey on au- tomatic detection of hate speech in text. ACM Com- puting Surveys, 51(4):1-30.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Large-scale adversarial training for vision-and-language representation learning", |
|
"authors": [ |
|
{ |
|
"first": "Zhe", |
|
"middle": [], |
|
"last": "Gan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yen-Chun", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Linjie", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yu", |
|
"middle": [], |
|
"last": "Cheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingjing", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Advances in Neural Information Processing Systems", |
|
"volume": "33", |
|
"issue": "", |
|
"pages": "6616--6628", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhe Gan, Yen-Chun Chen, Linjie Li, Chen Zhu, Yu Cheng, and Jingjing Liu. 2020. Large-scale ad- versarial training for vision-and-language represen- tation learning. Advances in Neural Information Processing Systems, 33:6616-6628.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Benchmark dataset of memes with text transcriptions for automatic detection of multi-modal misogynistic content", |
|
"authors": [ |
|
{ |
|
"first": "Francesca", |
|
"middle": [], |
|
"last": "Gasparini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giulia", |
|
"middle": [], |
|
"last": "Rizzi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aurora", |
|
"middle": [], |
|
"last": "Saibene", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elisabetta", |
|
"middle": [], |
|
"last": "Fersini", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2106.08409" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Francesca Gasparini, Giulia Rizzi, Aurora Saibene, and Elisabetta Fersini. 2021. Benchmark dataset of memes with text transcriptions for automatic detection of multi-modal misogynistic content. arXiv:2106.08409.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Visual semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "Saurabh", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jitendra", |
|
"middle": [], |
|
"last": "Malik", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1505.04474" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saurabh Gupta and Jitendra Malik. 2015. Visual se- mantic role labeling. arXiv:1505.04474.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Cyberbullying detection: A survey on multilingual techniques", |
|
"authors": [ |

{ |

"first": "Batoul", |

"middle": [], |

"last": "Haidar", |

"suffix": "" |

}, |

{ |

"first": "Maroun", |

"middle": [], |

"last": "Chamoun", |

"suffix": "" |

}, |

{ |

"first": "Fadi", |

"middle": [], |

"last": "Yamout", |

"suffix": "" |

} |

], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 European Modelling Symposium, EMS '2016", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "165--171", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1109/EMS.2016.037" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Batoul Haidar, Maroun Chamoun, and Fadi Yamout. 2016. Cyberbullying detection: A survey on multi- lingual techniques. In Proceedings of the 2016 Euro- pean Modelling Symposium, EMS '2016, pages 165- 171, Pisa, Italy. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "A survey on stance detection for mis-and disinformation identification", |
|
"authors": [ |
|
{ |
|
"first": "Momchil", |
|
"middle": [], |
|
"last": "Hardalov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arnav", |
|
"middle": [], |
|
"last": "Arora", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Isabelle", |
|
"middle": [], |
|
"last": "Augenstein", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT '2022", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Momchil Hardalov, Arnav Arora, Preslav Nakov, and Isabelle Augenstein. 2022. A survey on stance de- tection for mis-and disinformation identification. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, NAACL-HLT '2022, Seattle, Washington, USA.", |
|
"links": null |
|
}, |
|
"BIBREF23": { |
|
"ref_id": "b23", |
|
"title": "Deep semantic role labeling: What works and what's next", |
|
"authors": [ |
|
{ |
|
"first": "Luheng", |
|
"middle": [], |
|
"last": "He", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kenton", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics, ACL '17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "473--483", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/P17-1044" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Luheng He, Kenton Lee, Mike Lewis, and Luke Zettle- moyer. 2017. Deep semantic role labeling: What works and what's next. In Proceedings of the 55th Annual Meeting of the Association for Computa- tional Linguistics, ACL '17, pages 473-483, Van- couver, Canada. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF24": { |
|
"ref_id": "b24", |
|
"title": "The spread of propaganda by coordinated communities on social media", |
|
"authors": [ |

{ |

"first": "Kristina", |

"middle": [], |

"last": "Hristakieva", |

"suffix": "" |

}, |

{ |

"first": "Stefano", |

"middle": [], |

"last": "Cresci", |

"suffix": "" |

}, |

{ |

"first": "Giovanni", |

"middle": [], |

"last": "Da San Martino", |

"suffix": "" |

}, |

{ |

"first": "Mauro", |

"middle": [], |

"last": "Conti", |

"suffix": "" |

}, |

{ |

"first": "Preslav", |

"middle": [], |

"last": "Nakov", |

"suffix": "" |

} |

], |
|
"year": 2022, |
|
"venue": "Proceedings of the 14th International ACM Conference on Web Science, WebSci '2022", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Kristina Hristakieva, Stefano Cresci, Giovanni Da San Martino, Mauro Conti, and Preslav Nakov. 2022. The spread of propaganda by coordinated communities on social media. In Proceedings of the 14th International ACM Conference on Web Science, WebSci '2022, Barcelona, Spain. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF25": { |
|
"ref_id": "b25", |
|
"title": "A survey of offensive language detection for the Arabic language", |
|
"authors": [ |
|
{ |
|
"first": "Fatemah", |
|
"middle": [], |
|
"last": "Husain", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ozlem", |
|
"middle": [], |
|
"last": "Uzuner", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "ACM Transactions on Asian and Low-Resource Language Information Processing (TALLIP)", |
|
"volume": "20", |
|
"issue": "1", |
|
"pages": "1--44", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Fatemah Husain and Ozlem Uzuner. 2021. A survey of offensive language detection for the arabic language. ACM Transactions on Asian and Low-Resource Lan- guage Information Processing (TALLIP), 20(1):1- 44.", |
|
"links": null |
|
}, |
|
"BIBREF26": { |
|
"ref_id": "b26", |
|
"title": "Multimodal fusion with recurrent neural networks for rumor detection on microblogs", |
|
"authors": [ |
|
{ |
|
"first": "Zhiwei", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Han", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yongdong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiebo", |
|
"middle": [], |
|
"last": "Luo", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the 25th ACM international conference on Multimedia, MM '17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "795--816", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhiwei Jin, Juan Cao, Han Guo, Yongdong Zhang, and Jiebo Luo. 2017a. Multimodal fusion with recurrent neural networks for rumor detection on microblogs. In Proceedings of the 25th ACM international con- ference on Multimedia, MM '17, pages 795-816, California, USA. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF27": { |
|
"ref_id": "b27", |
|
"title": "Novel visual and statistical image features for microblogs news verification", |
|
"authors": [ |
|
{ |
|
"first": "Zhiwei", |
|
"middle": [], |
|
"last": "Jin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yongdong", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jianshe", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Tian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "IEEE Transactions on Multimedia", |
|
"volume": "19", |
|
"issue": "3", |
|
"pages": "598--608", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zhiwei Jin, Juan Cao, Yongdong Zhang, Jianshe Zhou, and Qi Tian. 2017b. Novel visual and statistical im- age features for microblogs news verification. IEEE Transactions on Multimedia, 19(3):598-608.", |
|
"links": null |
|
}, |
|
"BIBREF28": { |
|
"ref_id": "b28", |
|
"title": "Automated identification of verbally abusive behaviors in online discussions", |
|
"authors": [ |
|
{ |
|
"first": "Srecko", |
|
"middle": [], |
|
"last": "Joksimovic", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ryan", |
|
"middle": [ |
|
"S" |
|
], |
|
"last": "Baker", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jaclyn", |
|
"middle": [], |
|
"last": "Ocumpaugh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [], |
|
"last": "Miguel", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "L", |
|
"middle": [], |
|
"last": "Andres", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Tot", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elle", |
|
"middle": [ |
|
"Yuan" |
|
], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shane", |
|
"middle": [], |
|
"last": "Dawson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the Third Workshop on Abusive Language Online", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "36--45", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W19-3505" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Srecko Joksimovic, Ryan S. Baker, Jaclyn Ocumpaugh, Juan Miguel L. Andres, Ivan Tot, Elle Yuan Wang, and Shane Dawson. 2019. Automated identification of verbally abusive behaviors in online discussions. In Proceedings of the Third Workshop on Abusive Language Online, pages 36-45, Florence, Italy. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF29": { |
|
"ref_id": "b29", |
|
"title": "Multi-modal component embedding for fake news detection", |
|
"authors": [ |
|
{ |
|
"first": "Seongku", |
|
"middle": [], |
|
"last": "Kang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junyoung", |
|
"middle": [], |
|
"last": "Hwang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hwanjo", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 14th International Conference on Ubiquitous Information Management and Communication, IMCOM '20", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--6", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "SeongKu Kang, Junyoung Hwang, and Hwanjo Yu. 2020. Multi-modal component embedding for fake news detection. In Proceedings of the 14th Interna- tional Conference on Ubiquitous Information Man- agement and Communication, IMCOM '20, pages 1-6, Taichung, Taiwan. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF30": { |
|
"ref_id": "b30", |
|
"title": "The hateful memes challenge: competition report", |
|
"authors": [ |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hamed", |
|
"middle": [], |
|
"last": "Firooz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aravind", |
|
"middle": [], |
|
"last": "Mohan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vedanuj", |
|
"middle": [], |
|
"last": "Goswami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "A", |
|
"middle": [], |
|
"last": "Casey", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Peter", |
|
"middle": [], |
|
"last": "Fitzpatrick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Greg", |
|
"middle": [], |
|
"last": "Bull", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tony", |
|
"middle": [], |
|
"last": "Lipstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ron", |
|
"middle": [], |
|
"last": "Nelli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 35th International Conference on Neural Information Processing Systems: Competition and Demonstration Track, NeurIPS '21", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "344--360", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Douwe Kiela, Hamed Firooz, Aravind Mohan, Vedanuj Goswami, Amanpreet Singh, Casey A Fitzpatrick, Peter Bull, Greg Lipstein, Tony Nelli, Ron Zhu, et al. 2021. The hateful memes challenge: competition re- port. In Proceedings of the 35th International Con- ference on Neural Information Processing Systems: Competition and Demonstration Track, NeurIPS '21, pages 344-360, Online.", |
|
"links": null |
|
}, |
|
"BIBREF31": { |
|
"ref_id": "b31", |
|
"title": "Pratik Ringshia, and Davide Testuggine. 2020. The hateful memes challenge: Detecting hate speech in multimodal memes", |
|
"authors": [ |
|
{ |
|
"first": "Douwe", |
|
"middle": [], |
|
"last": "Kiela", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hamed", |
|
"middle": [], |
|
"last": "Firooz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aravind", |
|
"middle": [], |
|
"last": "Mohan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Vedanuj", |
|
"middle": [], |
|
"last": "Goswami", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanpreet", |
|
"middle": [], |
|
"last": "Singh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "Proceedings of the 34th International Conference on Neural Information Processing Systems, NIPS '20", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Douwe Kiela, Hamed Firooz, Aravind Mohan, Vedanuj Goswami, Amanpreet Singh, Pratik Ringshia, and Davide Testuggine. 2020. The hateful memes chal- lenge: Detecting hate speech in multimodal memes. In Proceedings of the 34th International Confer- ence on Neural Information Processing Systems, NIPS '20, NY, USA. Curran Associates Inc.", |
|
"links": null |
|
}, |
|
"BIBREF32": { |
|
"ref_id": "b32", |
|
"title": "Benchmarking aggression identification in social media", |
|
"authors": [ |

{ |

"first": "Ritesh", |

"middle": [], |

"last": "Kumar", |

"suffix": "" |

}, |

{ |

"first": "Atul", |

"middle": [ |

"Kr." |

], |

"last": "Ojha", |

"suffix": "" |

}, |

{ |

"first": "Shervin", |

"middle": [], |

"last": "Malmasi", |

"suffix": "" |

}, |

{ |

"first": "Marcos", |

"middle": [], |

"last": "Zampieri", |

"suffix": "" |

} |

], |
|
"year": 2018, |
|
"venue": "Proceedings of the First Workshop on Trolling, Aggression and Cyberbullying, TRAC", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--11", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ritesh Kumar, Atul Kr. Ojha, Shervin Malmasi, and Marcos Zampieri. 2018. Benchmarking aggression identification in social media. In Proceedings of the First Workshop on Trolling, Aggression and Cyber- bullying, TRAC'2018, pages 1-11, Santa Fe, New Mexico, USA. Association for Computational Lin- guistics.", |
|
"links": null |
|
}, |
|
"BIBREF33": { |
|
"ref_id": "b33", |
|
"title": "Conditional random fields: Probabilistic models for segmenting and labeling sequence data", |
|
"authors": [ |
|
{ |
|
"first": "John", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Lafferty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Mccallum", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fernando", |
|
"middle": [ |
|
"C N" |
|
], |
|
"last": "Pereira", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2001, |
|
"venue": "Proceedings of the Eighteenth International Conference on Machine Learning, ICML '01", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "282--289", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "John D. Lafferty, Andrew McCallum, and Fernando C. N. Pereira. 2001. Conditional random fields: Probabilistic models for segmenting and labeling sequence data. In Proceedings of the Eighteenth International Conference on Machine Learning, ICML '01, pages 282-289, San Francisco, Califor- nia, USA. Morgan Kaufmann Publishers Inc.", |
|
"links": null |
|
}, |
|
"BIBREF34": { |
|
"ref_id": "b34", |
|
"title": "Semantic role labeling with pretrained language models for known and unknown predicates", |
|
"authors": [ |
|
{ |
|
"first": "Daniil", |
|
"middle": [], |
|
"last": "Larionov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Artem", |
|
"middle": [], |
|
"last": "Shelmanov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Elena", |
|
"middle": [], |
|
"last": "Chistova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ivan", |
|
"middle": [], |
|
"last": "Smirnov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the International Conference on Recent Advances in Natural Language Processing", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "619--628", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.26615/978-954-452-056-4_073" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Daniil Larionov, Artem Shelmanov, Elena Chistova, and Ivan Smirnov. 2019. Semantic role labeling with pretrained language models for known and un- known predicates. In Proceedings of the Interna- tional Conference on Recent Advances in Natural Language Processing, RANLP '2019, pages 619- 628, Varna, Bulgaria. INCOMA Ltd.", |
|
"links": null |
|
}, |
|
"BIBREF35": { |
|
"ref_id": "b35", |
|
"title": "VisualBERT: A simple and performant baseline for vision and language", |
|
"authors": [ |
|
{ |
|
"first": "Liunian Harold", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Da", |
|
"middle": [], |
|
"last": "Yin", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Cho-Jui", |
|
"middle": [], |
|
"last": "Hsieh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai-Wei", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1908.03557" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, and Kai-Wei Chang. 2019. VisualBERT: A simple and performant baseline for vision and lan- guage. arXiv:1908.03557.", |
|
"links": null |
|
}, |
|
"BIBREF36": { |
|
"ref_id": "b36", |
|
"title": "Cross-media structured common space for multimedia event extraction", |
|
"authors": [ |
|
{ |
|
"first": "Manling", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alireza", |
|
"middle": [], |
|
"last": "Zareian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qi", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Spencer", |
|
"middle": [], |
|
"last": "Whitehead", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Di", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ji", |
|
"middle": [], |
|
"last": "Heng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shih-Fu", |
|
"middle": [], |
|
"last": "Chang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, ACL '20", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2557--2568", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.acl-main.230" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Manling Li, Alireza Zareian, Qi Zeng, Spencer White- head, Di Lu, Heng Ji, and Shih-Fu Chang. 2020. Cross-media structured common space for multime- dia event extraction. In Proceedings of the 58th An- nual Meeting of the Association for Computational Linguistics, ACL '20, pages 2557-2568, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF37": { |
|
"ref_id": "b37", |
|
"title": "RoBERTa: A robustly optimized BERT pretraining approach", |
|
"authors": [ |
|
{ |
|
"first": "Yinhan", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Myle", |
|
"middle": [], |
|
"last": "Ott", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Naman", |
|
"middle": [], |
|
"last": "Goyal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingfei", |
|
"middle": [], |
|
"last": "Du", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mandar", |
|
"middle": [], |
|
"last": "Joshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Danqi", |
|
"middle": [], |
|
"last": "Chen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Omer", |
|
"middle": [], |
|
"last": "Levy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mike", |
|
"middle": [], |
|
"last": "Lewis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Veselin", |
|
"middle": [], |
|
"last": "Stoyanov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1907.11692" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized BERT pretraining approach. arXiv preprint arXiv:1907.11692.", |
|
"links": null |
|
}, |
|
"BIBREF38": { |
|
"ref_id": "b38", |
|
"title": "Abusive language detection on Arabic social media", |
|
"authors": [ |
|
{ |
|
"first": "Hamdy", |
|
"middle": [], |
|
"last": "Mubarak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kareem", |
|
"middle": [], |
|
"last": "Darwish", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Walid", |
|
"middle": [], |
|
"last": "Magdy", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the First Workshop on Abusive Language Online, WALO '17", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "52--56", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-3008" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hamdy Mubarak, Kareem Darwish, and Walid Magdy. 2017. Abusive language detection on Arabic social media. In Proceedings of the First Workshop on Abusive Language Online, WALO '17, pages 52-56, Vancouver, BC, Canada. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF39": { |
|
"ref_id": "b39", |
|
"title": "The CLEF-2022 CheckThat! lab on fighting the COVID-19 infodemic and fake news detection", |
|
"authors": [ |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alberto", |
|
"middle": [], |
|
"last": "Barr\u00f3n-Cede\u00f1o", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giovanni", |
|
"middle": [], |
|
"last": "Da San", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Firoj", |
|
"middle": [], |
|
"last": "Martino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julia", |
|
"middle": [ |
|
"Maria" |
|
], |
|
"last": "Alam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Stru\u00df", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rub\u00e9n", |
|
"middle": [], |
|
"last": "Mandl", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tommaso", |
|
"middle": [], |
|
"last": "M\u00edguez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mucahid", |
|
"middle": [], |
|
"last": "Caselli", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Wajdi", |
|
"middle": [], |
|
"last": "Kutlu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chengkai", |
|
"middle": [], |
|
"last": "Zaghouani", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shaden", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Shaar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hamdy", |
|
"middle": [], |
|
"last": "Gautam Kishore Shahi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alex", |
|
"middle": [], |
|
"last": "Mubarak", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nikolay", |
|
"middle": [], |
|
"last": "Nikolov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Babulkov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Advances in Information Retrieval, CLEF '2022", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "416--428", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Preslav Nakov, Alberto Barr\u00f3n-Cede\u00f1o, Giovanni Da San Martino, Firoj Alam, Julia Maria Stru\u00df, Thomas Mandl, Rub\u00e9n M\u00edguez, Tommaso Caselli, Mucahid Kutlu, Wajdi Zaghouani, Chengkai Li, Shaden Shaar, Gautam Kishore Shahi, Hamdy Mubarak, Alex Nikolov, Nikolay Babulkov, Yavuz Selim Kartal, and Javier Beltr\u00e1n. 2022. The CLEF-2022 CheckThat! lab on fighting the COVID-19 infodemic and fake news detection. In Advances in Information Retrieval, CLEF '2022, pages 416-428. Springer International Publishing.", |
|
"links": null |
|
}, |
|
"BIBREF40": { |
|
"ref_id": "b40", |
|
"title": "A survey on predicting the factuality and the bias of news media", |
|
"authors": [ |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
 |
"first": "Husrev Taha", |
 |
"middle": [], |
 |
"last": "Sencar", |
 |
"suffix": "" |
 |
}, |
 |
{ |
 |
"first": "Jisun", |
 |
"middle": [], |
 |
"last": "An", |
 |
"suffix": "" |
 |
}, |
 |
{ |
 |
"first": "Haewoon", |
 |
"middle": [], |
 |
"last": "Kwak", |
 |
"suffix": "" |
 |
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2103.12506" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Preslav Nakov, Husrev Taha Sencar, Jisun An, and Hae- woon Kwak. 2021. A survey on predicting the factu- ality and the bias of news media. arXiv:2103.12506.", |
|
"links": null |
|
}, |
|
"BIBREF41": { |
|
"ref_id": "b41", |
|
"title": "Semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "Martha", |
|
"middle": [], |
|
"last": "Palmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "Gildea", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nianwen", |
|
"middle": [], |
|
"last": "Xue", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "Synthesis Lectures on Human Language Technologies", |
|
"volume": "3", |
|
"issue": "1", |
|
"pages": "1--103", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martha Palmer, Daniel Gildea, and Nianwen Xue. 2010. Semantic role labeling. Synthesis Lectures on Hu- man Language Technologies, 3(1):1-103.", |
|
"links": null |
|
}, |
|
"BIBREF42": { |
|
"ref_id": "b42", |
|
"title": "MOMENTA: A multimodal framework for detecting harmful memes and their targets", |
|
"authors": [ |
|
{ |
|
"first": "Shraman", |
|
"middle": [], |
|
"last": "Pramanick", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shivam", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitar", |
|
"middle": [], |
|
"last": "Dimitrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Md", |
|
"middle": [ |
|
"Shad" |
|
], |
|
"last": "Akhtar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tanmoy", |
|
"middle": [], |
|
"last": "Chakraborty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Findings of the Association for Computational Linguistics, EMNLP (Findings) '21", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "4439--4455", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.findings-emnlp.379" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shraman Pramanick, Shivam Sharma, Dimitar Dim- itrov, Md. Shad Akhtar, Preslav Nakov, and Tanmoy Chakraborty. 2021. MOMENTA: A multimodal framework for detecting harmful memes and their targets. In Findings of the Association for Compu- tational Linguistics, EMNLP (Findings) '21, pages 4439-4455, Punta Cana, Dominican Republic. Asso- ciation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF43": { |
|
"ref_id": "b43", |
|
"title": "Grounded situation recognition", |
|
"authors": [ |
|
{ |
|
"first": "Sarah", |
|
"middle": [], |
|
"last": "Pratt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luca", |
|
"middle": [], |
|
"last": "Weihs", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Farhadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aniruddha", |
|
"middle": [], |
|
"last": "Kembhavi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the European Conference on Computer Vision, ECCV '20", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "314--332", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sarah Pratt, Mark Yatskar, Luca Weihs, Ali Farhadi, and Aniruddha Kembhavi. 2020. Grounded situa- tion recognition. In Proceedings of the European Conference on Computer Vision, ECCV '20, pages 314-332, Online. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF44": { |
|
"ref_id": "b44", |
|
"title": "Exploiting multi-domain visual information for fake news detection", |
|
"authors": [ |
|
{ |
|
"first": "Peng", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Juan", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tianyun", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Junbo", |
|
"middle": [], |
|
"last": "Guo", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jintao", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the IEEE International Conference on Data Mining, ICDM '19", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "518--527", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Peng Qi, Juan Cao, Tianyun Yang, Junbo Guo, and Jin- tao Li. 2019. Exploiting multi-domain visual infor- mation for fake news detection. In Proceedings of the IEEE International Conference on Data Mining, ICDM '19, pages 518-527, Beijing, China. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF45": { |
|
"ref_id": "b45", |
|
"title": "SOLID: A Large-Scale Semi-Supervised Dataset for Offensive Language Identification", |
|
"authors": [ |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Rosenthal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pepa", |
|
"middle": [], |
|
"last": "Atanasova", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Georgi", |
|
"middle": [], |
|
"last": "Karadzhov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Findings of the Association for Computational Linguistics, ACL-IJCNLP (Findings) '21", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "915--928", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Sara Rosenthal, Pepa Atanasova, Georgi Karadzhov, Marcos Zampieri, and Preslav Nakov. 2021. SOLID: A Large-Scale Semi-Supervised Dataset for Offen- sive Language Identification. In Findings of the Association for Computational Linguistics, ACL- IJCNLP (Findings) '21, pages 915-928, Online. As- sociation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF46": { |
|
"ref_id": "b46", |
|
"title": "Visual semantic role labeling for video understanding", |
|
"authors": [ |
|
{ |
|
"first": "Arka", |
|
"middle": [], |
|
"last": "Sadhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tanmay", |
|
"middle": [], |
|
"last": "Gupta", |
|
"suffix": "" |
|
}, |
|
{ |
 |
"first": "Mark", |
 |
"middle": [], |
 |
"last": "Yatskar", |
 |
"suffix": "" |
 |
}, |
 |
{ |
 |
"first": "Ram", |
 |
"middle": [], |
 |
"last": "Nevatia", |
 |
"suffix": "" |
 |
}, |
 |
{ |
 |
"first": "Aniruddha", |
 |
"middle": [], |
 |
"last": "Kembhavi", |
 |
"suffix": "" |
 |
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR '21", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "5589--5600", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Arka Sadhu, Tanmay Gupta, Mark Yatskar, Ram Neva- tia, and Aniruddha Kembhavi. 2021. Visual seman- tic role labeling for video understanding. In Pro- ceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR '21, pages 5589-5600, Online. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF47": { |
|
"ref_id": "b47", |
|
"title": "A survey on hate speech detection using natural language processing", |
|
"authors": [ |
|
{ |
|
"first": "Anna", |
|
"middle": [], |
|
"last": "Schmidt", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Wiegand", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2017, |
|
"venue": "Proceedings of the Fifth International Workshop on Natural Language Processing for Social Media", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--10", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/W17-1101" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Anna Schmidt and Michael Wiegand. 2017. A survey on hate speech detection using natural language pro- cessing. In Proceedings of the Fifth International Workshop on Natural Language Processing for So- cial Media, pages 1-10, Valencia, Spain. Associa- tion for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF48": { |
|
"ref_id": "b48", |
|
"title": "DISARM: Detecting the victims targeted by harmful memes", |
|
"authors": [ |
|
{ |
|
"first": "Shivam", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
 |
"first": "Md", |
 |
"middle": [ |
 |
"Shad" |
 |
], |
 |
"last": "Akhtar", |
 |
"suffix": "" |
 |
}, |
 |
{ |
 |
"first": "Preslav", |
 |
"middle": [], |
 |
"last": "Nakov", |
 |
"suffix": "" |
 |
}, |
 |
{ |
 |
"first": "Tanmoy", |
 |
"middle": [], |
 |
"last": "Chakraborty", |
 |
"suffix": "" |
 |
} |
|
], |
|
"year": 2022, |
|
"venue": "Findings of the North American Chapter of the Association for Computational Linguistics, NAACL (Findings) '22", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shivam Sharma, Md Shad Akhtar, Preslav Nakov, and Tanmoy Chakraborty. 2022a. DISARM: Detecting the victims targeted by harmful memes. In Findings of North American Chapter of the Association for Computational Linguistics, EMNLP (Findings) '22, Seattle, Washington, USA. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF49": { |
|
"ref_id": "b49", |
|
"title": "Detecting and understanding harmful memes: A survey", |
|
"authors": [ |
|
{ |
|
"first": "Shivam", |
|
"middle": [], |
|
"last": "Sharma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Firoj", |
|
"middle": [], |
|
"last": "Alam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Md", |
|
"middle": [ |
|
"Shad" |
|
], |
|
"last": "Akhtar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dimitar", |
|
"middle": [], |
|
"last": "Dimitrov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Giovanni", |
|
"middle": [], |
|
"last": "Da San", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hamed", |
|
"middle": [], |
|
"last": "Martino", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Firooz", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Fabrizio", |
|
"middle": [], |
|
"last": "Halevy", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Silvestri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tanmoy", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Chakraborty", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2022, |
|
"venue": "Proceedings of the 31st International Joint Conference on Artificial Intelligence, IJCAI-ECAI '22", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shivam Sharma, Firoj Alam, Md. Shad Akhtar, Dimitar Dimitrov, Giovanni Da San Martino, Hamed Firooz, Alon Halevy, Fabrizio Silvestri, Preslav Nakov, and Tanmoy Chakraborty. 2022b. Detecting and under- standing harmful memes: A survey. In Proceedings of the 31st International Joint Conference on Artifi- cial Intelligence, IJCAI-ECAI '22, Vienna, Austria.", |
|
"links": null |
|
}, |
|
"BIBREF50": { |
|
"ref_id": "b50", |
|
"title": "Memes in digital culture", |
|
"authors": [ |
|
{ |
|
"first": "Limor", |
|
"middle": [], |
|
"last": "Shifman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Limor Shifman. 2013. Memes in digital culture. MIT press.", |
|
"links": null |
|
}, |
|
"BIBREF51": { |
|
"ref_id": "b51", |
|
"title": "Grounding semantic roles in images", |
|
"authors": [ |
|
{ |
|
"first": "Carina", |
|
"middle": [], |
|
"last": "Silberer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Manfred", |
|
"middle": [], |
|
"last": "Pinkal", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
|
"volume": "18", |
|
"issue": "", |
|
"pages": "2616--2626", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/D18-1282" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Carina Silberer and Manfred Pinkal. 2018. Ground- ing semantic roles in images. In Proceedings of the 2018 Conference on Empirical Methods in Natu- ral Language Processing, EMNLP '18, pages 2616- 2626, Brussels, Belgium. Association for Computa- tional Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF52": { |
|
"ref_id": "b52", |
|
"title": "Very deep convolutional networks for large-scale image recognition", |
|
"authors": [ |
|
{ |
|
"first": "Karen", |
|
"middle": [], |
|
"last": "Simonyan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andrew", |
|
"middle": [], |
|
"last": "Zisserman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the 3rd International Conference on Learning Representations, ICLR '15", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Karen Simonyan and Andrew Zisserman. 2015. Very deep convolutional networks for large-scale image recognition. In Proceedings of the 3rd International Conference on Learning Representations, ICLR '15, San Diego, CA, USA.", |
|
"links": null |
|
}, |
|
"BIBREF53": { |
|
"ref_id": "b53", |
|
"title": "SpotFake: A multi-modal framework for fake news detection", |
|
"authors": [ |
|
{ |
|
"first": "Shivangi", |
|
"middle": [], |
|
"last": "Singhal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rajiv Ratn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tanmoy", |
|
"middle": [], |
|
"last": "Shah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ponnurangam", |
|
"middle": [], |
|
"last": "Chakraborty", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shin'ichi", |
|
"middle": [], |
|
"last": "Kumaraguru", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Satoh", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 IEEE fifth international conference on multimedia big data, BigMM '19", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "39--47", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shivangi Singhal, Rajiv Ratn Shah, Tanmoy Chakraborty, Ponnurangam Kumaraguru, and Shin'ichi Satoh. 2019. SpotFake: A multi-modal framework for fake news detection. In Proceedings of the 2019 IEEE fifth international conference on multimedia big data, BigMM '19, pages 39-47. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF54": { |
|
"ref_id": "b54", |
|
"title": "VL-BERT: pre-training of generic visual-linguistic representations", |
|
"authors": [ |
|
{ |
|
"first": "Weijie", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xizhou", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yue", |
|
"middle": [], |
|
"last": "Cao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bin", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lewei", |
|
"middle": [], |
|
"last": "Lu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Furu", |
|
"middle": [], |
|
"last": "Wei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jifeng", |
|
"middle": [], |
|
"last": "Dai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 8th International Conference on Learning Representations, ICLR '20", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Weijie Su, Xizhou Zhu, Yue Cao, Bin Li, Lewei Lu, Furu Wei, and Jifeng Dai. 2020. VL-BERT: pre-training of generic visual-linguistic representa- tions. In Proceedings of the 8th International Con- ference on Learning Representations, ICLR '20, Ad- dis Ababa, Ethiopia. OpenReview.net.", |
|
"links": null |
|
}, |
|
"BIBREF55": { |
|
"ref_id": "b55", |
|
"title": "Findings of the shared task on Troll Meme Classification in Tamil", |
|
"authors": [ |
|
{ |
|
"first": "Shardul", |
|
"middle": [], |
|
"last": "Suryawanshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages, Kyiv. Association for Computational Linguistics", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shardul Suryawanshi and Bharathi Raja Chakravarthi. 2021. Findings of the shared task on Troll Meme Classification in Tamil. In Proceedings of the First Workshop on Speech and Language Technologies for Dravidian Languages, Kyiv. Association for Compu- tational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF56": { |
|
"ref_id": "b56", |
|
"title": "Multimodal meme dataset (MultiOFF) for identifying offensive content in image and text", |
|
"authors": [ |
|
{ |
|
"first": "Shardul", |
|
"middle": [], |
|
"last": "Suryawanshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihael", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Arcan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Buitelaar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Second Workshop on Trolling, Aggression and Cyberbullying", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "32--41", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shardul Suryawanshi, Bharathi Raja Chakravarthi, Mi- hael Arcan, and Paul Buitelaar. 2020a. Multimodal meme dataset (MultiOFF) for identifying offensive content in image and text. In Proceedings of the Sec- ond Workshop on Trolling, Aggression and Cyber- bullying, pages 32-41, Marseille, France. European Language Resources Association.", |
|
"links": null |
|
}, |
|
"BIBREF57": { |
|
"ref_id": "b57", |
|
"title": "A dataset for troll classification of TamilMemes", |
|
"authors": [ |
|
{ |
|
"first": "Shardul", |
|
"middle": [], |
|
"last": "Suryawanshi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Bharathi Raja Chakravarthi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mihael", |
|
"middle": [], |
|
"last": "Verma", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "John", |
|
"middle": [], |
|
"last": "Arcan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Paul", |
|
"middle": [], |
|
"last": "Philip Mccrae", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Buitelaar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 5th Workshop on Indian Language Data: Resources and Evaluation, WILDRE '20", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "7--13", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shardul Suryawanshi, Bharathi Raja Chakravarthi, Pranav Verma, Mihael Arcan, John Philip McCrae, and Paul Buitelaar. 2020b. A dataset for troll classi- fication of TamilMemes. In Proceedings of the 5th Workshop on Indian Language Data: Resources and Evaluation, WILDRE '20, pages 7-13, Marseille, France. European Language Resources Association.", |
|
"links": null |
|
}, |
|
"BIBREF58": { |
|
"ref_id": "b58", |
|
"title": "EfficientNet: Rethinking model scaling for convolutional neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Mingxing", |
|
"middle": [], |
|
"last": "Tan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quoc", |
|
"middle": [], |
|
"last": "Le", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the International Conference on Machine Learning, ICML '19", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "6105--6114", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mingxing Tan and Quoc Le. 2019. EfficientNet: Re- thinking model scaling for convolutional neural net- works. In Proceedings of the International Confer- ence on Machine Learning, ICLR '19, pages 6105- 6114, CA, USA.", |
|
"links": null |
|
}, |
|
"BIBREF59": { |
|
"ref_id": "b59", |
|
"title": "Detection and fine-grained classification of cyberbullying events", |
|
"authors": [ |
|
{ |
|
"first": "Cynthia", |
|
"middle": [], |
|
"last": "Van Hee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Els", |
|
"middle": [], |
|
"last": "Lefever", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ben", |
|
"middle": [], |
|
"last": "Verhoeven", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Julie", |
|
"middle": [], |
|
"last": "Mennes", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bart", |
|
"middle": [], |
|
"last": "Desmet", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guy", |
|
"middle": [ |
|
"De" |
|
], |
|
"last": "Pauw", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Proceedings of the International Conference Recent Advances in Natural Language Processing, RANLP '15", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "672--680", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cynthia Van Hee, Els Lefever, Ben Verhoeven, Julie Mennes, Bart Desmet, Guy De Pauw, Walter Daele- mans, and Veronique Hoste. 2015. Detection and fine-grained classification of cyberbullying events. In Proceedings of the International Conference Re- cent Advances in Natural Language Processing, RANLP '15, pages 672-680, Hissar, Bulgaria. IN- COMA Ltd. Shoumen, Bulgaria.", |
|
"links": null |
|
}, |
|
"BIBREF60": { |
|
"ref_id": "b60", |
|
"title": "Grounded semantic role labeling", |
|
"authors": [ |
|
{ |
|
"first": "Shaohua", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qiaozi", |
|
"middle": [], |
|
"last": "Gao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changsong", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Song-Chun", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Joyce", |
|
"middle": [], |
|
"last": "Chai", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT '16", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "149--159", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shaohua Yang, Qiaozi Gao, Changsong Liu, Caim- ing Xiong, Song-Chun Zhu, and Joyce Chai. 2016. Grounded semantic role labeling. In Proceedings of the 2016 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT '16, pages 149-159, San Diego, California. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF61": { |
|
"ref_id": "b61", |
|
"title": "TI-CNN: Convolutional neural networks for fake news detection", |
|
"authors": [ |
|
{ |
|
"first": "Yang", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Lei", |
|
"middle": [], |
|
"last": "Zheng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jiawei", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Qingcai", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhoujun", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Philip S", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1806.00749" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Yang Yang, Lei Zheng, Jiawei Zhang, Qingcai Cui, Zhoujun Li, and Philip S Yu. 2018. TI-CNN: Con- volutional neural networks for fake news detection. arXiv:1806.00749.", |
|
"links": null |
|
}, |
|
"BIBREF62": { |
|
"ref_id": "b62", |
|
"title": "Situation recognition: Visual semantic role labeling for image understanding", |
|
"authors": [ |
|
{ |
|
"first": "Mark", |
|
"middle": [], |
|
"last": "Yatskar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Luke", |
|
"middle": [], |
|
"last": "Zettlemoyer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ali", |
|
"middle": [], |
|
"last": "Farhadi", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, CVPR '16", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Mark Yatskar, Luke Zettlemoyer, and Ali Farhadi. 2016. Situation recognition: Visual semantic role labeling for image understanding. In Proceedings of the IEEE Conference on Computer Vision and Pat- tern Recognition, CVPR '16, San Juan, PR, USA. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF63": { |
|
"ref_id": "b63", |
|
"title": "Predicting the type and target of offensive posts in social media", |
|
"authors": [ |
|
{ |
|
"first": "Marcos", |
|
"middle": [], |
|
"last": "Zampieri", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shervin", |
|
"middle": [], |
|
"last": "Malmasi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Preslav", |
|
"middle": [], |
|
"last": "Nakov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sara", |
|
"middle": [], |
|
"last": "Rosenthal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Noura", |
|
"middle": [], |
|
"last": "Farra", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ritesh", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL '19", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1415--1420", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/N19-1144" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Marcos Zampieri, Shervin Malmasi, Preslav Nakov, Sara Rosenthal, Noura Farra, and Ritesh Kumar. 2019. Predicting the type and target of offensive posts in social media. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL '19, pages 1415- 1420, Minneapolis, Minnesota, USA. Association for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF64": { |
|
"ref_id": "b64", |
|
"title": "On the origins of memes by means of fringe web communities", |
|
"authors": [ |
|
{ |
|
"first": "Savvas", |
|
"middle": [], |
|
"last": "Zannettou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tristan", |
|
"middle": [], |
|
"last": "Caulfield", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeremy", |
|
"middle": [], |
|
"last": "Blackburn", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emiliano", |
|
"middle": [], |
|
"last": "De Cristofaro", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Sirivianos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gianluca", |
|
"middle": [], |
|
"last": "Stringhini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Guillermo", |
|
"middle": [], |
|
"last": "Suarez-Tangil", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the Internet Measurement Conference, IMC '18", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "188--202", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Savvas Zannettou, Tristan Caulfield, Jeremy Black- burn, Emiliano De Cristofaro, Michael Sirivianos, Gianluca Stringhini, and Guillermo Suarez-Tangil. 2018. On the origins of memes by means of fringe web communities. In Proceedings of the Internet Measurement Conference, IMC '18, pages 188-202, Boston, USA. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF65": { |
|
"ref_id": "b65", |
|
"title": "Multi-modal knowledgeaware event memory network for social media rumor detection", |
|
"authors": [ |
|
{ |
|
"first": "Huaiwen", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Quan", |
|
"middle": [], |
|
"last": "Fang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shengsheng", |
|
"middle": [], |
|
"last": "Qian", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Changsheng", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Proceedings of the 27th ACM International Conference on Multimedia, MM '19", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1942--1951", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.1145/3343031.3350850" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Huaiwen Zhang, Quan Fang, Shengsheng Qian, and Changsheng Xu. 2019. Multi-modal knowledge- aware event memory network for social media rumor detection. In Proceedings of the 27th ACM Inter- national Conference on Multimedia, MM '19, page 1942-1951, Nice, France. ACM.", |
|
"links": null |
|
}, |
|
"BIBREF66": { |
|
"ref_id": "b66", |
|
"title": "SAFE: Similarity-aware multi-modal fake news detection", |
|
"authors": [ |
|
{ |
|
"first": "Xinyi", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jindi", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Reza", |
|
"middle": [], |
|
"last": "Zafarani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the Pacific-Asia Conference on Knowledge Discovery and Data Mining, PAKDD '20", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "354--367", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xinyi Zhou, Jindi Wu, and Reza Zafarani. 2020. SAFE: Similarity-aware multi-modal fake news de- tection. In Proceedings of the Pacific-Asia Con- ference on Knowledge Discovery and Data Mining, PAKDD '20, pages 354-367, Singapore. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF67": { |
|
"ref_id": "b67", |
|
"title": "A survey of fake news: Fundamental theories, detection methods, and opportunities", |
|
"authors": [ |
|
{ |
|
"first": "Xinyi", |
|
"middle": [], |
|
"last": "Zhou", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Reza", |
|
"middle": [], |
|
"last": "Zafarani", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "53", |
|
"issue": "", |
|
"pages": "1--40", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xinyi Zhou and Reza Zafarani. 2020. A survey of fake news: Fundamental theories, detection methods, and opportunities. ACM Computing Surveys, 53(5):1- 40.", |
|
"links": null |
|
}, |
|
"BIBREF68": { |
|
"ref_id": "b68", |
|
"title": "Racist or sexist meme? Classifying memes beyond hateful", |
|
"authors": [ |
|
{ |
 |
"first": "Haris", |
 |
"middle": [ |
 |
"Bin" |
 |
], |
 |
"last": "Zia", |
 |
"suffix": "" |
 |
}, |
 |
{ |
 |
"first": "Ignacio", |
 |
"middle": [], |
 |
"last": "Castro", |
 |
"suffix": "" |
 |
}, |
 |
{ |
 |
"first": "Gareth", |
 |
"middle": [], |
 |
"last": "Tyson", |
 |
"suffix": "" |
 |
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 5th Workshop on Online Abuse and Harms, WOAH '21", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "215--219", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2021.woah-1.23" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Haris Bin Zia, Ignacio Castro, and Gareth Tyson. 2021. Racist or sexist meme? Classifying memes beyond hateful. In Proceedings of the 5th Workshop on On- line Abuse and Harms, WOAH '21, pages 215-219, Online. Association for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"FIGREF0": { |
|
"num": null, |
|
"text": "An example image showing the implicit (Salman Khan) and the explicit entities (from a text perspective) and their roles.", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF1": { |
|
"num": null, |
|
"text": "Example with text in BIO format.", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"FIGREF2": { |
|
"num": null, |
|
"text": "Diagram of our experimental pipeline.", |
|
"type_str": "figure", |
|
"uris": null |
|
}, |
|
"TABREF1": { |
|
"type_str": "table", |
|
"content": "<table/>", |
|
"num": null, |
|
"text": "", |
|
"html": null |
|
}, |
|
"TABREF3": { |
|
"type_str": "table", |
|
"content": "<table><tr><td>Role</td><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td></tr></table>", |
|
"num": null, |
|
"text": "E+I, w/o Att. E+I, w/ Att. E+[I+T], w/o Att. E+[I+T], w/ Att. Hero 0.06 0.02 0.03 0.09 0.15 0.12 0.22 0.12 0.15 0.09 0.21 0.12 Villain 0.35 0.44 0.39 0.40 0.51 0.45 0.39 0.54 0.45 0.39 0.54 0.45 Victim 0.30 0.25 0.28 0.33 0.39 0.35 0.23 0.18 0.20 0.31 0.45 0.36 Other 0.86 0.84 0.85 0.88 0.81 0.84 0.87 0.84 0.85 0.89 0.77 0.82", |
|
"html": null |
|
}, |
|
"TABREF4": { |
|
"type_str": "table", |
|
"content": "<table><tr><td/><td/><td>No Aug.</td><td/><td colspan=\"3\">Aug. WordNet</td><td colspan=\"3\">Aug. BERT</td></tr><tr><td>Role</td><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td></tr><tr><td colspan=\"10\">Hero 0.21 0.12 0.15 0.33 0.21 0.26 0.30 0.25 0.27 Villain 0.36 0.49 0.42 0.41 0.52 0.46 0.39 0.51 0.44 Victim 0.31 0.27 0.29 0.30 0.27 0.29 0.29 0.27 0.28</td></tr><tr><td colspan=\"10\">Other 0.87 0.83 0.85 0.87 0.84 0.86 0.87 0.83 0.85</td></tr></table>", |
|
"num": null, |
|
"text": "Role-level results on the test set with (w/) or without (w/o) attention between the context (text, image) and the entity. (E: Entity, I: Image, Att.: Attention, T: Text)", |
|
"html": null |
|
} |
|
} |
|
} |
|
} |