{
"paper_id": "S16-1002",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T15:26:59.437269Z"
},
"title": "SemEval-2016 Task 5: Aspect Based Sentiment Analysis",
"authors": [
{
"first": "Maria",
"middle": [],
"last": "Pontiki",
"suffix": "",
"affiliation": {},
"email": "[email protected]."
},
{
"first": "Dimitrios",
"middle": [],
"last": "Galanis",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Haris",
"middle": [],
"last": "Papageorgiou",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Ion",
"middle": [],
"last": "Androutsopoulos",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Suresh",
"middle": [],
"last": "Manandhar",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of York",
"location": {
"country": "UK"
}
},
"email": ""
},
{
"first": "Mohammad",
"middle": [],
"last": "Al-Smadi",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of Science and Technology Irbid",
"location": {
"country": "Jordan, Jordan"
}
},
"email": ""
},
{
"first": "Mahmoud",
"middle": [],
"last": "Al-Ayyoub",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of Science and Technology Irbid",
"location": {
"country": "Jordan, Jordan"
}
},
"email": ""
},
{
"first": "Yanyan",
"middle": [],
"last": "Zhao",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Harbin Institute of Technology",
"location": {
"addrLine": "Heilongjiang, P.R. China, 6 LT3",
"settlement": "Harbin"
}
},
"email": ""
},
{
"first": "Bing",
"middle": [],
"last": "Qin",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Harbin Institute of Technology",
"location": {
"addrLine": "Heilongjiang, P.R. China, 6 LT3",
"settlement": "Harbin"
}
},
"email": ""
},
{
"first": "Orph\u00e9e",
"middle": [],
"last": "De Clercq",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "V\u00e9ronique",
"middle": [],
"last": "Hoste",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Marianna",
"middle": [],
"last": "Apidianaki",
"suffix": "",
"affiliation": {
"laboratory": "LIMSI, CNRS, Univ. Paris-Sud",
"institution": "Universit\u00e9 Paris-Saclay",
"location": {
"settlement": "Orsay",
"country": "France"
}
},
"email": ""
},
{
"first": "Xavier",
"middle": [],
"last": "Tannier",
"suffix": "",
"affiliation": {
"laboratory": "LIMSI, CNRS, Univ. Paris-Sud",
"institution": "Universit\u00e9 Paris-Saclay",
"location": {
"settlement": "Orsay",
"country": "France"
}
},
"email": ""
},
{
"first": "Natalia",
"middle": [],
"last": "Loukachevitch",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Lomonosov Moscow State University",
"location": {
"settlement": "Moscow"
}
},
"email": ""
},
{
"first": "Evgeny",
"middle": [],
"last": "Kotelnikov",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Vyatka State University",
"location": {
"addrLine": "Russian Federation, 10 Universitat Pompeu Fabra",
"settlement": "Kirov, Barcelona",
"country": "Spain"
}
},
"email": ""
},
{
"first": "Nuria",
"middle": [],
"last": "Bel",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Salud",
"middle": [],
"last": "Mar\u00eda Jim\u00e9nez-Zafra",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Universidad de Ja\u00e9n",
"location": {
"country": "Spain"
}
},
"email": ""
},
{
"first": "G\u00fcl\u015fen",
"middle": [],
"last": "Eryi\u011fit",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "Istanbul Technical University",
"location": {
"country": "Turkey"
}
},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "This paper describes the SemEval 2016 shared task on Aspect Based Sentiment Analysis (ABSA), a continuation of the respective tasks of 2014 and 2015. In its third year, the task provided 19 training and 20 testing datasets for 8 languages and 7 domains, as well as a common evaluation procedure. From these datasets, 25 were for sentence-level and 14 for text-level ABSA; the latter was introduced for the first time as a subtask in SemEval. The task attracted 245 submissions from 29 teams.",
"pdf_parse": {
"paper_id": "S16-1002",
"_pdf_hash": "",
"abstract": [
{
"text": "This paper describes the SemEval 2016 shared task on Aspect Based Sentiment Analysis (ABSA), a continuation of the respective tasks of 2014 and 2015. In its third year, the task provided 19 training and 20 testing datasets for 8 languages and 7 domains, as well as a common evaluation procedure. From these datasets, 25 were for sentence-level and 14 for text-level ABSA; the latter was introduced for the first time as a subtask in SemEval. The task attracted 245 submissions from 29 teams.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Many consumers use the Web to share their experiences about products, services or travel destinations (Yoo and Gretzel, 2008) . Online opinionated texts (e.g., reviews, tweets) are important for consumer decision making (Chevalier and Mayzlin, 2006) and constitute a source of valuable customer feedback that can help companies to measure satisfaction and improve their products or services. In this setting, Aspect Based Sentiment Analysis (ABSA) -i.e., mining opinions from text about specific entities and their aspects (Liu, 2012) -can provide valuable insights to both consumers and businesses. An ABSA method can analyze large amounts of unstructured texts and extract (coarse-or fine-grained) information not included in the user ratings that are available in some review sites (e.g., Fig. 1 ).",
"cite_spans": [
{
"start": 102,
"end": 125,
"text": "(Yoo and Gretzel, 2008)",
"ref_id": "BIBREF38"
},
{
"start": 220,
"end": 249,
"text": "(Chevalier and Mayzlin, 2006)",
"ref_id": "BIBREF11"
},
{
"start": 523,
"end": 534,
"text": "(Liu, 2012)",
"ref_id": "BIBREF19"
}
],
"ref_spans": [
{
"start": 792,
"end": 798,
"text": "Fig. 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Sentiment Analysis (SA) touches every aspect (e.g., entity recognition, coreference resolution, negation handling) of Natural Language Processing (Liu, 2012) and as Cambria et al. (2013) mention \"it requires a deep understanding of the explicit and implicit, regular and irregular, and syntactic and semantic language rules\". Within the last few years several SA-related shared tasks have been organized in the context of workshops and conferences focus-ing on somewhat different research problems (Seki et al., 2007; Seki et al., 2008; Seki et al., 2010; Mitchell, 2013; Nakov et al., 2013; Rosenthal et al., 2014; Pontiki et al., 2014; Rosenthal et al., 2015; Ghosh et al., 2015; Pontiki et al., 2015; Mohammad et al., 2016; Recupero and Cambria, 2014; Ruppenhofer et al., 2014; Loukachevitch et al., 2015) . Such competitions provide training datasets and the opportunity for direct comparison of different approaches on common test sets.",
"cite_spans": [
{
"start": 146,
"end": 157,
"text": "(Liu, 2012)",
"ref_id": "BIBREF19"
},
{
"start": 165,
"end": 186,
"text": "Cambria et al. (2013)",
"ref_id": "BIBREF9"
},
{
"start": 498,
"end": 517,
"text": "(Seki et al., 2007;",
"ref_id": "BIBREF31"
},
{
"start": 518,
"end": 536,
"text": "Seki et al., 2008;",
"ref_id": "BIBREF32"
},
{
"start": 537,
"end": 555,
"text": "Seki et al., 2010;",
"ref_id": "BIBREF33"
},
{
"start": 556,
"end": 571,
"text": "Mitchell, 2013;",
"ref_id": "BIBREF22"
},
{
"start": 572,
"end": 591,
"text": "Nakov et al., 2013;",
"ref_id": "BIBREF24"
},
{
"start": 592,
"end": 615,
"text": "Rosenthal et al., 2014;",
"ref_id": "BIBREF28"
},
{
"start": 616,
"end": 637,
"text": "Pontiki et al., 2014;",
"ref_id": "BIBREF25"
},
{
"start": 638,
"end": 661,
"text": "Rosenthal et al., 2015;",
"ref_id": "BIBREF29"
},
{
"start": 662,
"end": 681,
"text": "Ghosh et al., 2015;",
"ref_id": "BIBREF15"
},
{
"start": 682,
"end": 703,
"text": "Pontiki et al., 2015;",
"ref_id": "BIBREF26"
},
{
"start": 704,
"end": 726,
"text": "Mohammad et al., 2016;",
"ref_id": "BIBREF23"
},
{
"start": 727,
"end": 754,
"text": "Recupero and Cambria, 2014;",
"ref_id": "BIBREF27"
},
{
"start": 755,
"end": 780,
"text": "Ruppenhofer et al., 2014;",
"ref_id": "BIBREF30"
},
{
"start": 781,
"end": 808,
"text": "Loukachevitch et al., 2015)",
"ref_id": "BIBREF20"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Currently, most of the available SA-related datasets, whether released in the context of shared tasks or not (Socher et al., 2013; Ganu et al., 2009) , are monolingual and usually focus on English texts. Multilingual datasets (Klinger and Cimiano, 2014; Jim\u00e9nez-Zafra et al., 2015) provide additional benefits enabling the development and testing of crosslingual methods (Lambert, 2015) . Following this direction, this year the SemEval ABSA task provided datasets in a variety of languages.",
"cite_spans": [
{
"start": 109,
"end": 130,
"text": "(Socher et al., 2013;",
"ref_id": "BIBREF35"
},
{
"start": 131,
"end": 149,
"text": "Ganu et al., 2009)",
"ref_id": "BIBREF14"
},
{
"start": 226,
"end": 253,
"text": "(Klinger and Cimiano, 2014;",
"ref_id": "BIBREF17"
},
{
"start": 254,
"end": 281,
"text": "Jim\u00e9nez-Zafra et al., 2015)",
"ref_id": "BIBREF16"
},
{
"start": 371,
"end": 386,
"text": "(Lambert, 2015)",
"ref_id": "BIBREF18"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "ABSA was introduced as a shared task for the first time in the context of SemEval in 2014; SemEval-2014 Task 4 1 (SE-ABSA14) provided datasets of English reviews annotated at the sentence level with aspect terms (e.g., \"mouse\", \"pizza\") and their polarity for the laptop and restaurant domains, as well as coarser aspect categories (e.g., \"food\") and their polarity only for restaurants (Pontiki et al., 2014) . SemEval-2015 Task 12 2 (SE-ABSA15) built upon SE-ABSA14 and consolidated its subtasks into a unified framework in which all the identified constituents of the expressed opinions (i.e., aspects, opinion target expressions and sentiment polarities) meet a set of guidelines and are linked to each other within sentence-level tuples (Pontiki et al., 2015) . These tuples are important since they indicate the part of text within which a specific opinion is expressed. However, a user might also be interested in the overall rating of the text towards a particular aspect. Such ratings can be used to estimate the mean sentiment per aspect from multiple reviews (McAuley et al., 2012) . Therefore, in addition to sentence-level annotations, SE-ABSA16 3 accommodated also text-level ABSA annotations and provided the respective training and testing data. Fur-1 http://alt.qcri.org/semeval2014/task4/ 2 http://alt.qcri.org/semeval2015/task12/ 3 http://alt.qcri.org/semeval2016/task5/ thermore, the SE-ABSA15 annotation framework was extended to new domains and applied to languages other than English (Arabic, Chinese, Dutch, French, Russian, Spanish, and Turkish) .",
"cite_spans": [
{
"start": 387,
"end": 409,
"text": "(Pontiki et al., 2014)",
"ref_id": "BIBREF25"
},
{
"start": 742,
"end": 764,
"text": "(Pontiki et al., 2015)",
"ref_id": "BIBREF26"
},
{
"start": 1070,
"end": 1092,
"text": "(McAuley et al., 2012)",
"ref_id": "BIBREF21"
},
{
"start": 1507,
"end": 1570,
"text": "(Arabic, Chinese, Dutch, French, Russian, Spanish, and Turkish)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The remainder of this paper is organized as follows: the task set-up is described in Section 2. Section 3 provides information about the datasets and the annotation process, while Section 4 presents the evaluation measures and the baselines. General information about participation in the task is provided in Section 5. The evaluation scores of the participating systems are presented and discussed in Section 6. The paper concludes with an overall assessment of the task.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "The SE-ABSA16 task consisted of the following subtasks and slots. Participants were free to choose the subtasks, slots, domains and languages they wished to participate in.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Task Description",
"sec_num": "2"
},
{
"text": "Subtask 1 (SB1): Sentence-level ABSA. Given an opinionated text about a target entity, identify all the opinion tuples with the following types (tuple slots) of information:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Task Description",
"sec_num": "2"
},
{
"text": "\u2022 Slot1: Aspect Category. Identification of the entity E and attribute A pairs towards which an opinion is expressed in a given sentence. E and A should be chosen from predefined inventories 4 of entity types (e.g., \"restaurant\", \"food\") and attribute labels (e.g., \"price\", \"quality\").",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Task Description",
"sec_num": "2"
},
{
"text": "\u2022 Slot2: Opinion Target Expression (OTE). Extraction of the linguistic expression used in the given text to refer to the reviewed entity E of each E#A pair. The OTE is defined by its starting and ending offsets. When there is no explicit mention of the entity, the slot takes the value \"null\". The identification of Slot2 values was required only in the restaurants, hotels, museums and telecommunications domains.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Task Description",
"sec_num": "2"
},
{
"text": "\u2022 Slot3: Sentiment Polarity. Each identified E#A pair has to be assigned one of the following polarity labels: \"positive\", \"negative\", \"neutral\" (mildly positive or mildly negative). An example of opinion tuples with Slot1-3 values from the restaurants domain is shown below: \"Their sake list was extensive, but we were looking for Purple Haze, which wasn't listed but made for us upon request!\" \u2192 {cat: \"drinks#style_options\", trg: \"sake list\", fr: \"6\", to: \"15\", pol: \"positive\"}, {cat: \"service#general\", trg: \"null\", fr: \"0\", to: \"0\", pol: \"positive\"}. The variable cat indicates the aspect category (Slot1), pol the polarity (Slot3), and trg the ote (Slot2); f r, to are the starting/ending offsets of ote.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Task Description",
"sec_num": "2"
},
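The opinion-tuple format above maps naturally onto a small record type. Below is a minimal, hypothetical Python sketch (the OpinionTuple class and its field names are illustrative, not part of the official task tooling) showing the two example tuples and how the fr/to offsets index into the sentence:

```python
# Hypothetical representation of SB1 opinion tuples; the fields mirror the
# cat/trg/fr/to/pol variables used in the example above.
from dataclasses import dataclass

@dataclass
class OpinionTuple:
    cat: str   # Slot1: entity#attribute pair, e.g. "drinks#style_options"
    trg: str   # Slot2: opinion target expression, or "null" if implicit
    fr: int    # start offset of the OTE in the sentence (0 when trg is "null")
    to: int    # end offset of the OTE in the sentence
    pol: str   # Slot3: "positive", "negative" or "neutral"

sentence = ("Their sake list was extensive, but we were looking for Purple Haze, "
            "which wasn't listed but made for us upon request!")
tuples = [
    OpinionTuple("drinks#style_options", "sake list", 6, 15, "positive"),
    OpinionTuple("service#general", "null", 0, 0, "positive"),
]

# The offsets index into the sentence text: characters 6..15 are "sake list".
assert sentence[tuples[0].fr:tuples[0].to] == "sake list"
```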
{
"text": "Subtask 2 (SB2): Text-level ABSA. Given a customer review about a target entity, the goal was to identify a set of {cat, pol} tuples that summarize the opinions expressed in the review. cat can be assigned the same values as in SB1 (E#A tuple), while pol can be set to \"positive\", \"negative\", \"neutral\", or \"conflict\". For example, for the review text \"The So called laptop Runs to Slow and I hate it! Do not buy it! It is the worst laptop ever \", a system should return the following opinion tuples: {cat: \"laptop#general\", pol: \"negative\"}, {cat: \"laptop#operation_performance\", pol: \"negative\"} .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Task Description",
"sec_num": "2"
},
{
"text": "Subtask 3 (SB3): Out-of-domain ABSA. In SB3 participants had the opportunity to test their systems in domains for which no training data was made available; the domains remained unknown until the start of the evaluation period. Test data for SB3 were provided only for the museums domain in French.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Task Description",
"sec_num": "2"
},
{
"text": "A total of 39 datasets were provided in the context of the SE-ABSA16 task; 19 for training and 20 for testing. The texts were from 7 domains and 8 languages; English (en), Arabic (ar), Chinese (ch), Dutch (du), French (fr), Russian (ru), Spanish (es) and Turkish (tu). The datasets for the domains of restaurants (rest), laptops (lapt), mobile phones (phns), digital cameras (came), hotels (hote) and museums (muse) consist of customer reviews, whilst the telecommunication domain (telc) data consists of tweets. A total of 70790 manually annotated ABSA tuples were provided for training and testing; sentencelevel annotations (SB1) in 8 languages for 7 domains, and 23136 text-level annotations (SB2) in 6 languages for 3 domains. Table 1 provides more information on the distribution of texts, sentences and annotated tuples per dataset.",
"cite_spans": [],
"ref_spans": [
{
"start": 732,
"end": 739,
"text": "Table 1",
"ref_id": "TABREF1"
}
],
"eq_spans": [],
"section": "Data Collection and Annotation",
"sec_num": "3.1"
},
{
"text": "The rest, hote, and lapt datasets were annotated at the sentence-level (SB1) following the respective annotation schemas of SE-ABSA15 (Pontiki et al., 2015) . Below are examples 5 of annotated sentences for the aspect category \"service#general\" in en (1), du (2), fr (3), ru (4), es (5), and tu (6) for the rest domain and in ar (7) for the hote domain:",
"cite_spans": [
{
"start": 134,
"end": 156,
"text": "(Pontiki et al., 2015)",
"ref_id": "BIBREF26"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Data Collection and Annotation",
"sec_num": "3.1"
},
{
"text": "1. Service was slow, but the people were friendly. \u2192 {trg: \"Service\", pol: \"negative\"}, {trg: \"people\", pol: \"positive\"} 2. Snelle bediening en vriendelijk personeel moet ook gemeld worden!! \u2192 {trg: \"bediening\", pol: \"positive\"}, {trg: \"personeel\", pol: \"positive\"} 3. Le service est impeccable, personnel agr\u00e9able. \u2192 {trg: \"service\" , pol: \"positive\"}, {trg: \"personnel\", pol: \"positive\"} 4. \u041f\u0440\u043e \u0441\u0435\u0440\u0432\u0438\u0441 \u043d\u0438\u0447\u0435\u0433\u043e \u043d\u0435\u0433\u0430\u0442\u0438\u0432\u043d\u043e\u0433\u043e \u043d\u0435 \u0441\u043a\u0430\u0436\u0435\u0448\u044c\u0431\u044b\u0441\u0442\u0440\u043e \u043f\u043e\u0434\u0445\u043e\u0434\u044f\u0442, \u0432\u0441\u0435 \u0443\u043b\u044f\u0431\u0430\u044e\u0442\u0441\u044f, \u043f\u043e\u0434\u0445\u043e\u0434\u044f\u0442 \u0441\u043f\u0440\u0430\u0448\u0438\u0432\u0430\u044e\u0442, \u0432\u0441\u0451 \u043b\u0438 \u043d\u0440\u0430\u0432\u0438\u0442\u0441\u044f. \u2192 {trg: \"\u0441\u0435\u0440\u0432\u0438\u0441\", pol: \"neutral\" } 5. Tambi\u00e9n la rapidez en el servicio. \u2192 {trg: \"servicio\", pol: \"positive\" } 6. Servisi h\u0131zl\u0131 valesi var. \u2192 {trg: \"Servisi\", pol: \"positive\"} 7. .. \u202b\ufeb3\ufeae\ufbfe\ufecc\ufe94\u202c \u202b\u0648\u202c \u202b\ufe9f\ufeaa\u0627\u202c \u202b\ufe9f\ufbff\ufeaa\u0629\u202c \u202b\u0627\ufedf\ufea8\ufeaa\ufee3\ufe94\u202c \u2192 {trg: \u202b\"\u0627\ufedf\ufea8\ufeaa\ufee3\ufe94\"\u202c , pol: \"positive\"} The lapt annotation schema was extended to two other domains of consumer electronics, came and phns. Examples of annotated sentences in the lapt (en), phns (du and ch) and came (ch) domains are shown below:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data Collection and Annotation",
"sec_num": "3.1"
},
{
"text": "1. It is extremely portable and easily connects to WIFI at the library and elsewhere. \u2192 {cat: \"laptop#portability\", pol: \"positive\"} , {cat: \"laptop#connectivity\", pol: \"positive\"} 2. Apps starten snel op en werken vlot, internet gaat prima.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data Collection and Annotation",
"sec_num": "3.1"
},
{
"text": "\u2192 {cat: \"software#operation_performance\", pol: \"positive\"}, {cat: \"phone#connectivity\", pol: \"positive\"} 5 The offsets of the opinion target expressions are omitted.",
"cite_spans": [
],
"ref_spans": [],
"eq_spans": [],
"section": "Data Collection and Annotation",
"sec_num": "3.1"
},
{
"text": "3. \u5f53\u7136\u5c4f\u5e55\u8fd9\u4e48\u597d \u2192{cat: \"display#quality\", pol: \"positive\"} 4. \u66f4 \u8f7b \u4fbf \u7684 \u673a \u8eab \u4e5f \u4fbf \u4e8e \u643a \u5e26\u3002\u2192 {cat: \"camera# portability\", pol: \"positive\"}",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data Collection and Annotation",
"sec_num": "3.1"
},
{
"text": "In addition, the SE-ABSA15 framework was extended to two new domains for which annotation guidelines were compiled: telc for tu and muse for fr. Below are two examples:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data Collection and Annotation",
"sec_num": "3.1"
},
{
"text": "1. #Internet kopuyor s\u00fcrekli :( @turkcell \u2192 {cat:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data Collection and Annotation",
"sec_num": "3.1"
},
{
"text": "\"internet#coverage\", trg: \"Internet\", pol: \"positive\"} 2. 5\u20ac pour les \u00e9tudiants, \u00e7a vaut le coup. \u2192 {cat: \"museum#prices\", \"null\", \"positive\"}",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data Collection and Annotation",
"sec_num": "3.1"
},
{
"text": "The text-level (SB2) annotation task was based on the sentence-level annotations; given a customer review about a target entity (e.g., a restaurant) that included sentence-level annotations of ABSA tuples, the goal was to identify a set of {cat, pol} tuples that summarize the opinions expressed in it. This was not a simple summation/aggregation of the sentence-level annotations since an aspect may be discussed with different sentiment in different parts of the review. In such cases the dominant sentiment had to be identified. In case of conflicting opinions where the dominant sentiment was not clear, the \"conflict\" label was assigned. In addition, each review was assigned an overall sentiment label about the target entity (e.g., \"restaurant#general\", \"laptop#general\"), even if it was not included in the sentence-level annotations.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Data Collection and Annotation",
"sec_num": "3.1"
},
{
"text": "All datasets for each language were prepared by one or more research groups as shown in Table 2 . The en, du, fr, ru and es datasets were annotated using brat (Stenetorp et al., 2012) , a web-based annotation tool, which was configured appropriately for the needs of the task. The tu datasets were annotated using a customized version of turksent (Eryigit et al., 2013) , a sentiment annotation tool for social media. For the ar and the ch data in-house tools 6 were used. Below are some further details about the annotation process for each language.",
"cite_spans": [
{
"start": 159,
"end": 183,
"text": "(Stenetorp et al., 2012)",
"ref_id": "BIBREF36"
},
{
"start": 347,
"end": 369,
"text": "(Eryigit et al., 2013)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [
{
"start": 88,
"end": 95,
"text": "Table 2",
"ref_id": "TABREF3"
}
],
"eq_spans": [],
"section": "Annotation Process",
"sec_num": "3.2"
},
{
"text": "English. The SE-ABSA15 (Pontiki et al., 2015) training and test datasets (with some minor corrections) were merged and provided for training (rest and lapt domains). New data was collected and annotated from scratch for testing. In a first phase, the rest test data was annotated by an experienced 7 linguist (annotator A), and the lapt data by 5 undergraduate computer science students. The resulting annotations for both domains were then inspected and corrected (if needed) by a second expert linguist, one of the task organizers (annotator B). Borderline cases were resolved collaboratively by annotators A and B.",
"cite_spans": [
{
"start": 23,
"end": 45,
"text": "(Pontiki et al., 2015)",
"ref_id": "BIBREF26"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Annotation Process",
"sec_num": "3.2"
},
{
"text": "Arabic. The hote dataset was annotated in repeated cycles. In a first phase, the data was annotated by three native Arabic speakers, all with a computer science background; then the output was validated by a senior researcher, one of the task organizers. If needed (e.g. when inconsistencies were found) they were given back to the annotators.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Annotation Process",
"sec_num": "3.2"
},
{
"text": "Chinese. The datasets presented by Zhao et al. (2015) were re-annotated by three native Chinese speakers according to the SE-ABSA16 annotation schema and were provided for training and testing (phns and came domains).",
"cite_spans": [
{
"start": 35,
"end": 53,
"text": "Zhao et al. (2015)",
"ref_id": "BIBREF39"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Annotation Process",
"sec_num": "3.2"
},
{
"text": "Dutch. The rest and phns datasets (De Clercq and Hoste, 2016) were initially annotated by a trained linguist, native speaker of Dutch. Then, the output was verified by another Dutch linguist and disagreements were resolved between them. Fi-7 Also annotator for SE-ABSA14 and 15. nally, the task organizers inspected collaboratively all the annotated data and corrections were made when needed.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Annotation Process",
"sec_num": "3.2"
},
{
"text": "French. The train (rest) and test (rest, muse) datasets were annotated from scratch by a linguist, native speaker of French. When the annotator was not confident, a decision was made collaboratively with the organizers. In a second phase, the task organizers checked all the annotations for mistakes and inconsistencies and corrected them, when necessary. For more information on the French datasets consult Apidianaki et al. (2016).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Annotation Process",
"sec_num": "3.2"
},
{
"text": "The rest datasets of the SentiRuEval-2015 task (Loukachevitch et al., 2015) were automatically converted to the SE-ABSA16 annotation schema; then a linguist, native speaker of Russian, checked them and added missing information. Finally, the datasets were inspected by a second linguist annotator (also native speaker of Russian) for mistakes and inconsistencies, which were resolved along with one of the task organizers.",
"cite_spans": [
{
"start": 47,
"end": 75,
"text": "(Loukachevitch et al., 2015)",
"ref_id": "BIBREF20"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Russian.",
"sec_num": null
},
{
"text": "Spanish. Initially, 50 texts (134 sentences) from the whole available data were annotated by 4 annotators. The inter-anotator agreement (IAA) in terms of F-1 was 91% for the identification of OTE, 88% for the aspect category detection (E#A pair), and 80% for opinion tuples extraction (E#A, OTE, polarity). Provided that the IAA was substantially high for all slots, the rest of the data was divided into 4 parts and each one was annotated by a different native Spanish speakers (2 linguists and 2 software engineers). Subsequently, the resulting annotations were validated and corrected (if needed) by the task organizers.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Russian.",
"sec_num": null
},
{
"text": "Turkish. The telc dataset was based on the data used in (Y\u0131ld\u0131r\u0131m et al., 2015) , while the rest dataset was created from scratch. Both datasets were annotated simultaneously by two linguists. Then, one of the organizers validated/inspected the resulting annotations and corrected them when needed.",
"cite_spans": [
{
"start": 56,
"end": 79,
"text": "(Y\u0131ld\u0131r\u0131m et al., 2015)",
"ref_id": "BIBREF37"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Russian.",
"sec_num": null
},
{
"text": "Similarly to SE-ABSA14 and SE-ABSA15, the datasets 8 of SE-ABSA16 were provided in an XML format and they are available under specific license terms through META-SHARE 9 , a repository devoted to the sharing and dissemination of language resources (Piperidis, 2012).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Datasets Format and Availability",
"sec_num": "3.3"
},
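Since the datasets are distributed as XML, a short parsing sketch may help. This is a rough, hedged example: the element and attribute names used here (sentence, text, Opinion, target, category, polarity, from, to) are assumptions that follow the general pattern of the released files; the authoritative schema is the one shipped with the data on META-SHARE.

```python
# Sketch only: element/attribute names are assumptions, not the official schema.
import xml.etree.ElementTree as ET

def load_sentence_tuples(path):
    """Return (sentence_id, text, opinions) triples from an ABSA-style XML file."""
    root = ET.parse(path).getroot()
    data = []
    for sent in root.iter("sentence"):              # assumed element name
        text = sent.findtext("text", default="")
        opinions = [
            {
                "category": op.get("category"),     # E#A pair (Slot1)
                "target": op.get("target"),         # OTE or "null" (Slot2)
                "polarity": op.get("polarity"),     # Slot3
                "from": int(op.get("from", 0)),
                "to": int(op.get("to", 0)),
            }
            for op in sent.iter("Opinion")          # assumed element name
        ]
        data.append((sent.get("id"), text, opinions))
    return data
```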
{
"text": "The evaluation ran in two phases. In the first phase (Phase A), the participants were asked to return separately the aspect categories (Slot1), the OTEs (Slot2), and the {Slot1, Slot2} tuples for SB1. For SB2 the respective text-level categories had to be identified. In the second phase (Phase B), the gold annotations for the test sets of Phase A were provided and participants had to return the respective sentiment polarity values (Slot3). Similarly to SE-ABSA15, F-1 scores were calculated for Slot1, Slot2 and {Slot1, Slot2} tuples, by comparing the annotations that a system returned to the gold annotations (using micro-averaging). For Slot1 evaluation, duplicate occurrences of categories were ignored in both SB1 and SB2. For Slot2, the calculation for each sentence considered only distinct targets and discarded \"null\" targets, since they do not correspond to explicit mentions. To evaluate sentiment polarity classification (Slot3) in Phase B, we calculated the accuracy of each system, defined as the number of correctly predicted polarity labels of the (gold) aspect categories, divided by the total number of the gold aspect categories. Furthermore, we implemented and provided baselines for all slots of SB1 and SB2. In particular, the SE-ABSA15 baselines that were implemented for the English language 8 The data are available at: http://metashare.ilsp. gr:8080/repository/search/?q=semeval+2016 9 META-SHARE (http://www.metashare.org/) was implemented in the framework of the META-NET Network of Excellence (http://www.meta-net.eu/). (Pontiki et al., 2015) , were adapted for the other languages by using appropriate stopword lists and tokenization functions. The baselines are briefly discussed below: SB1-Slot1: For category (E#A) extraction, a Support Vector Machine (SVM) with a linear kernel is trained. In particular, n unigram features are extracted from the respective sentence of each tuple that is encountered in the training data. The category value (e.g., \"service#general\") of the tuple is used as the correct label of the feature vector. Similarly, for each test sentence s, a feature vector is built and the trained SVM is used to predict the probabilities of assigning each possible category to s (e.g., {\"service#general\", 0.2}, {\"restaurant#general\", 0.4}. Then, a threshold 10 t is used to decide which of the categories will be assigned 11 to s. As features, we use the 1,000 most frequent unigrams of the training data excluding stopwords. SB1-Slot2: The baseline uses the training reviews to create for each category c (e.g., \"service#general\") a list of OTEs (e.g., \"service#general\" \u2192 {\"staff\", \"waiter\"}). These are extracted from the (training) opinion tuples whose category value is c . Then, given a test sentence s and an assigned category c, the baseline finds in s the first occurrence of each OTE of c's list. The OTE slot is filled with the first of the target occurrences found in s. If no target occurrences are found, the slot is assigned the value \"null\". SB1-Slot3: For polarity prediction we trained a SVM classifier with a linear kernel. Again, as in Slot1, n unigram features are extracted from the respective sentence of each tuple of the training data. In addition, an integer-valued feature 12 that indicates the category of the tuple is used. The correct label for the extracted training feature vector is the corresponding polarity value (e.g., \"positive\"). 
Then, for each tuple {category, OTE} of a test sentence s, a feature vector is built and classified using the trained SVM.",
"cite_spans": [
{
"start": 1553,
"end": 1575,
"text": "(Pontiki et al., 2015)",
"ref_id": "BIBREF26"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation Measures and Baselines",
"sec_num": "4"
},
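As a concrete reading of the measures just described, the sketch below computes micro-averaged F-1 over per-sentence category sets (duplicates ignored, as for Slot1) and the Slot3 accuracy. It is an illustrative re-implementation with our own function names, not the official Java scorer.

```python
# Illustrative evaluation sketch (the official scorer is the released Java package).

def slot1_micro_f1(predicted, gold):
    """predicted, gold: lists of per-sentence category collections."""
    tp = fp = fn = 0
    for pred_cats, gold_cats in zip(predicted, gold):
        p, g = set(pred_cats), set(gold_cats)   # duplicate categories are ignored
        tp += len(p & g)
        fp += len(p - g)
        fn += len(g - p)
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    return 2 * precision * recall / (precision + recall) if precision + recall else 0.0

def slot3_accuracy(predicted_labels, gold_labels):
    """Polarity labels predicted for the gold aspect categories, in gold order."""
    correct = sum(p == g for p, g in zip(predicted_labels, gold_labels))
    return correct / len(gold_labels) if gold_labels else 0.0
```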
{
"text": "SB2-Slot1: The sentence-level tuples returned by the SB1 baseline are copied to the text level and duplicates are removed. ",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation Measures and Baselines",
"sec_num": "4"
},
{
"text": "For each text-level aspect category c the baseline traverses the predicted sentence-level tuples of the same category returned by the respective SB1 baseline and counts the polarity labels (positive, negative, neutral). Finally, the polarity label with the highest frequency is assigned to the textlevel category c. If there are no sentence-level tuples for the same c, the polarity label is determined based on all tuples regardless of c.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "SB2-Slot3:",
"sec_num": null
},
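The SB2-Slot3 rule above (majority polarity among the sentence-level tuples of a text-level category, with a fallback to all tuples when the category never appears at sentence level) is small enough to show directly. This is an illustrative Python re-implementation, not the released Java baseline.

```python
# Sketch of the SB2-Slot3 baseline logic; sentence_tuples is assumed to be a
# list of (category, polarity) pairs predicted by the SB1 baselines.
from collections import Counter

def text_level_polarity(category, sentence_tuples):
    labels = [pol for cat, pol in sentence_tuples if cat == category]
    if not labels:
        # No sentence-level tuple for this category: use all tuples regardless of category.
        labels = [pol for _, pol in sentence_tuples]
    # "neutral" is an arbitrary default for the empty case, chosen only for this sketch.
    return Counter(labels).most_common(1)[0][0] if labels else "neutral"

# Example: the dominant sentiment for "food#quality" below is "negative".
preds = [("food#quality", "positive"), ("food#quality", "negative"),
         ("food#quality", "negative"), ("service#general", "positive")]
print(text_level_polarity("food#quality", preds))   # -> negative
```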
{
"text": "The baseline systems and evaluation scripts are implemented in Java and are available for download from the SE-ABSA16 website 13 . The LibSVM package 14 (Chang and Lin, 2011) is used for SVM training and prediction. The scores of the baselines 13 http://alt.qcri.org/semeval2016/task5/index. php?id=data-and-tools 14 http://www.csie.ntu.edu.tw/~cjlin/libsvm/ in the test datasets are presented in Section 6 along with the system scores.",
"cite_spans": [
{
"start": 153,
"end": 174,
"text": "(Chang and Lin, 2011)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "SB2-Slot3:",
"sec_num": null
},
{
"text": "The task attracted in total 245 submissions from 29 teams. The majority of the submissions (216 runs) were for SB1. The newly introduced SB2 attracted 29 submissions from 5 teams in 2 languages (en and sp). Most of the submissions (168) were runs for the rest domain. This was expected, mainly for two reasons; first, the rest classification schema is less fine-grained (complex) compared to the other domains (e.g., lapt). Secondly, this domain was supported for 6 languages enabling also multilingual or language-agnostic approaches. The remaining submissions were distributed as follows: 54 in lapt, 12 in phns, 7 in came and 4 in hote.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Participation",
"sec_num": "5"
},
{
"text": "The evaluation results are presented in Tables 3 (SB1 : rest-en), 4 (SB1: rest-es, fr, ru, du, tu & hote-ar), 6 (SB1: lapt, came, phns), and 7 (SB2) 15 . Each participating team was allowed to submit up to two runs per slot and domain in each phase; one constrained (C), where only the provided training data could be used, and one unconstrained (U), where other resources (e.g., publicly available Language Teams Submissions English 27 156 Arabic 3 4 Chinese 3 14 Dutch 4 16 French 5 13 Russian 5 15 Spanish 6 21 Turkish 3 6 All 29 245 lexica) and additional data of any kind could be used for training. In the latter case, the teams had to report the resources used. Delayed submissions (i.e., runs submitted after the deadline and the release of the gold annotations) are marked with \"*\". As revealed by the results, in both SB1 and SB2 the majority of the systems surpassed the baseline by a small or large margin and, as expected, the unconstrained systems achieved better results than the constrained ones. In SB1, the teams with the highest scores for Slot1 and Slot2 achieved similar F-1 scores (see Table 3 ) in most cases (e.g., en/rest, es/rest, du/rest, fr/rest), which shows that the two slots have a similar level of difficulty. However, as expected, the {Slot1, Slot2} scores were significantly lower since the linking of the target expressions to the corresponding aspects is also required. The highest scores in SB1 for all slots (Slot1, Slot2, {Slot1, Slot2}, Slot3) were achieved in the en/rest; this is probably due to the high participation and to the lower complexity of the rest annotation schema compared to the other domains. If we compare the results for SB1 and SB2, we notice that the SB2 scores for Slot1 are significantly higher (e.g., en/lapt, en/rest, es/rest) even though the respective annotations are for the same (or almost the same) set of texts. This is due to the fact that it is easier to identify whether a whole text discusses an aspect c than finding all the sentences in the text discussing c . On the other hand, for Slot3, the SB2 scores are lower (e.g., en/rest, es/rest, ru/rest, en/lapt) than the respective SB1 scores. This is mainly because an aspect may be discussed at different points in a text and often with different sentiment. In such cases a system has to identify the dominant sentiment, which usually is not trivial.",
"cite_spans": [],
"ref_spans": [
{
"start": 40,
"end": 54,
"text": "Tables 3 (SB1",
"ref_id": "TABREF1"
},
{
"start": 400,
"end": 555,
"text": "Language Teams Submissions English 27 156 Arabic 3 4 Chinese 3 14 Dutch 4 16 French 5 13 Russian 5 15 Spanish 6 21 Turkish 3 6 All",
"ref_id": "TABREF1"
},
{
"start": 1134,
"end": 1141,
"text": "Table 3",
"ref_id": "TABREF4"
}
],
"eq_spans": [],
"section": "Evaluation Results",
"sec_num": "6"
},
{
"text": "In its third year, the SemEval ABSA task provided 19 training and 20 testing datasets, from 7 domains and 8 languages, attracting 245 submissions from 29 teams. The use of the same annotation guidelines for domains addressed in different languages gives the opportunity to experiment also with crosslingual or language-agnostic approaches. In addition, SE-ABSA16 included for the first time a text- level subtask. Future work will address the creation of datasets in more languages and domains and the enrichment of the annotation schemas with other types of SA-related information like topics, events and figures of speech (e.g., irony, metaphor).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusions",
"sec_num": "7"
},
{
"text": "The full inventories of the aspect category labels for each domain are provided in Appendix A.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "The ar annotation tool was developed by the technical team of the Advanced Arabic Text Mining group at Jordan University of Science and Technology. The ch tool was developed by the Research Center for Social Computing and Information Retrieval at Harbin Institute of Technology.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "The threshold t was set to 0.2 for all datasets.11 We use the -b 1 option of LibSVM to obtain probabilities.12 Each E#A pair has been assigned a distinct integer value.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "No submissions were made for sb3-muse-fr & sb1-telctu.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "The authors are grateful to all the annotators and contributors for their valuable support to the task: Konstantina Papanikolaou, Juli Bakagianni, Omar Qwasmeh, Nesreen Alqasem, Areen Magableh, Saja Alzoubi, Bashar Talafha, Zekui Li, Binbin Li, Shengqiu Li, Aaron Gevaert, Els Lefever, C\u00e9cile Richart, Pavel Blinov, Maria Shatalova, M. Teresa Mart\u00edn-Valdivia, Pilar Santolaria, Fatih Samet \u00c7etin, Ezgi Y\u0131ld\u0131r\u0131m, Can \u00d6zbey, Leonidas Valavanis, Stavros Giorgis, Dionysios Xenos, Panos Theodor-akakos, and Apostolos Rousas. The work described in this paper is partially funded by the projects EOX GR07/3712 and \"Research Programs for Excellence 2014-2016 / CitySense-ATHENA R.I.C.\". The Arabic track was partially supported by the Jordan University of Science and Technology, Research Grant Number: 20150164. The Dutch track has been partly funded by the PARIS project (IWT-SBO-Nr. 110067). The French track was partially supported by the French National Research Agency under project ANR-12-CORD-0015/TransRead. The Russian track was partially supported by the Russian Foundation for Basic Research (RFBR) according to the research projects No. 14-07-00682a, 16-07-00342a, and No. 16-37-00311mol_a. The Spanish track has been partially supported by a grant from the Ministerio de Educaci\u00f3n, Cultura y Deporte (MECD -scholarship FPU014/00983) and REDES project (TIN2015-65136-C2-1-R) from the Ministerio de Econom\u00eda y Competitividad. The Turkish track was partially supported by TUBITAK-TEYDEB (The Scientific and Technological Research Council of Turkey -Technology and Innovation Funding Programs Directorate) project (grant number: 3140671).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgments",
"sec_num": null
},
{
"text": "An interesting observation is that, unlike SE-ABSA15, Slot1 (aspect category detection) attracted significantly more submissions than Slot2 (OTE extraction); this may indicate a shift towards conceptlevel approaches. Regarding participation per language, the majority of the submissions (156/245) were for en; see more information in Table 5 . Most teams (20) submitted results only for one language (18 for en and 2 for ru). Of the remaining teams, Appendix A. Aspect inventories for all domains Entity Labels laptop, display, keyboard, mouse, motherboard, cpu, fans_cooling, ports, memory, power_supply optical_drives, battery, graphics, hard_disk, multimedia_devices, hardware, software, os, warranty, shipping, support, company Attribute Labels general, price, quality, design_features, operation_performance, usability, portability, connectivity, miscellaneous Entity Labels phone, display, keyboard, cpu, ports, memory, power_supply, hard_disk, multimedia_devices, battery, hardware, software, os, warranty, shipping, support, company Attribute Labels Same as in Laptops (Table 8 ) with the exception of portability that is included in the design_features label and does not apply as a separate attribute type. camera, display, keyboard, cpu, ports, memory, power_supply, battery, multimedia_devices, hardware, software, os, warranty, shipping, support, company, lens, photo, focus Attribute Labels Same as in Laptops (Table 8 ). ",
"cite_spans": [
{
"start": 1217,
"end": 1387,
"text": "camera, display, keyboard, cpu, ports, memory, power_supply, battery, multimedia_devices, hardware, software, os, warranty, shipping, support, company, lens, photo, focus",
"ref_id": null
}
],
"ref_spans": [
{
"start": 334,
"end": 341,
"text": "Table 5",
"ref_id": null
},
{
"start": 1077,
"end": 1085,
"text": "(Table 8",
"ref_id": null
},
{
"start": 1424,
"end": 1432,
"text": "(Table 8",
"ref_id": null
}
],
"eq_spans": [],
"section": "annex",
"sec_num": null
}
],
"bib_entries": {
"BIBREF8": {
"ref_id": "b8",
"title": "A Dataset for Aspect-Based Sentiment Analysis in French",
"authors": [
{
"first": "Marianna",
"middle": [],
"last": "Apidianaki",
"suffix": ""
},
{
"first": "Xavier",
"middle": [],
"last": "Tannier",
"suffix": ""
},
{
"first": "C\u00e9cile",
"middle": [],
"last": "Richart",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the International Conference on Language Resources and Evaluation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "References Marianna Apidianaki, Xavier Tannier, and C\u00e9cile Richart. 2016. A Dataset for Aspect-Based Sentiment Analysis in French. In Proceedings of the Interna- tional Conference on Language Resources and Eval- uation.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "New Avenues in Opinion Mining and Sentiment Analysis",
"authors": [
{
"first": "Erik",
"middle": [],
"last": "Cambria",
"suffix": ""
},
{
"first": "Bj\u00f6rn",
"middle": [
"W"
],
"last": "Schuller",
"suffix": ""
},
{
"first": "Yunqing",
"middle": [],
"last": "Xia",
"suffix": ""
},
{
"first": "Catherine",
"middle": [],
"last": "Havasi",
"suffix": ""
}
],
"year": 2013,
"venue": "IEEE Intelligent Systems",
"volume": "28",
"issue": "2",
"pages": "15--21",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Erik Cambria, Bj\u00f6rn W. Schuller, Yunqing Xia, and Catherine Havasi. 2013. New Avenues in Opinion Mining and Sentiment Analysis. IEEE Intelligent Sys- tems, 28(2):15-21.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "LIBSVM: A library for support vector machines",
"authors": [
{
"first": "Chih-Chung",
"middle": [],
"last": "Chang",
"suffix": ""
},
{
"first": "Chih-Jen",
"middle": [],
"last": "Lin",
"suffix": ""
}
],
"year": 2011,
"venue": "ACM TIST",
"volume": "2",
"issue": "3",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Chih-Chung Chang and Chih-Jen Lin. 2011. LIBSVM: A library for support vector machines. ACM TIST, 2(3):27.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "The effect of word of mouth on sales: Online book reviews",
"authors": [
{
"first": "Judith",
"middle": [],
"last": "Chevalier",
"suffix": ""
},
{
"first": "Dina",
"middle": [],
"last": "Mayzlin",
"suffix": ""
}
],
"year": 2006,
"venue": "J. Marketing Res",
"volume": "",
"issue": "",
"pages": "345--354",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Judith Chevalier and Dina Mayzlin. 2006. The effect of word of mouth on sales: Online book reviews. J. Marketing Res, pages 345-354.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Rude waiter but mouthwatering pastries! An exploratory study into Dutch Aspect-Based Sentiment Analysis",
"authors": [
{
"first": "Orph\u00e9e",
"middle": [],
"last": "De Clercq",
"suffix": ""
},
{
"first": "V\u00e9ronique",
"middle": [],
"last": "Hoste",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the 10th International Conference on Language Resources and Evaluation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Orph\u00e9e De Clercq and V\u00e9ronique Hoste. 2016. Rude waiter but mouthwatering pastries! An exploratory study into Dutch Aspect-Based Sentiment Analysis. In Proceedings of the 10th International Conference on Language Resources and Evaluation.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "TURKSENT: A Sentiment Annotation Tool for Social Media",
"authors": [
{
"first": "G\u00fclsen",
"middle": [],
"last": "Eryigit",
"suffix": ""
},
{
"first": "Fatih",
"middle": [],
"last": "Samet Cetin",
"suffix": ""
},
{
"first": "Meltem",
"middle": [],
"last": "Yan\u0131k",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the 7th Linguistic Annotation Workshop and Interoperability",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "G\u00fclsen Eryigit, Fatih Samet Cetin, Meltem Yan\u0131k, Turk- cell Global Bilgi, Tanel Temel, and Ilyas Ci\u00e7ekli. 2013. TURKSENT: A Sentiment Annotation Tool for Social Media. In Proceedings of the 7th Linguistic Annotation Workshop and Interoperability with Dis- course.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Beyond the Stars: Improving Rating Predictions using Review Text Content",
"authors": [
{
"first": "Gayatree",
"middle": [],
"last": "Ganu",
"suffix": ""
},
{
"first": "Noemie",
"middle": [],
"last": "Elhadad",
"suffix": ""
},
{
"first": "Am\u00e9lie",
"middle": [],
"last": "Marian",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of WebDB",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Gayatree Ganu, Noemie Elhadad, and Am\u00e9lie Marian. 2009. Beyond the Stars: Improving Rating Predic- tions using Review Text Content. In Proceedings of WebDB.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "SemEval-2015 Task 11: Sentiment Analysis of Figurative Language in Twitter",
"authors": [
{
"first": "Aniruddha",
"middle": [],
"last": "Ghosh",
"suffix": ""
},
{
"first": "Guofu",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Tony",
"middle": [],
"last": "Veale",
"suffix": ""
},
{
"first": "Paolo",
"middle": [],
"last": "Rosso",
"suffix": ""
},
{
"first": "Ekaterina",
"middle": [],
"last": "Shutova",
"suffix": ""
},
{
"first": "John",
"middle": [],
"last": "Barnden",
"suffix": ""
},
{
"first": "Antonio",
"middle": [],
"last": "Reyes",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of the 9th International Workshop on Semantic Evaluation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Aniruddha Ghosh, Guofu Li, Tony Veale, Paolo Rosso, Ekaterina Shutova, John Barnden, and Antonio Reyes. 2015. SemEval-2015 Task 11: Sentiment Analysis of Figurative Language in Twitter. In Proceedings of the 9th International Workshop on Semantic Evaluation, Denver, Colorado.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "A Multilingual Annotated Dataset for Aspect-Oriented Opinion Mining",
"authors": [
{
"first": "M",
"middle": [],
"last": "Salud",
"suffix": ""
},
{
"first": "Giacomo",
"middle": [],
"last": "Jim\u00e9nez-Zafra",
"suffix": ""
},
{
"first": "Andrea",
"middle": [],
"last": "Berardi",
"suffix": ""
},
{
"first": "Diego",
"middle": [],
"last": "Esuli",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Marcheggiani",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "2533--2538",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Salud M. Jim\u00e9nez-Zafra, Giacomo Berardi, Andrea Esuli, Diego Marcheggiani, Mar\u00eda Teresa Mart\u00edn-Valdivia, and Alejandro Moreo Fern\u00e1ndez. 2015. A Multi- lingual Annotated Dataset for Aspect-Oriented Opin- ion Mining. In Proceedings of Empirical Methods in Natural Language Processing, pages 2533-2538.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "The USAGE Review Corpus for Fine Grained Multi Lingual Opinion Analysis",
"authors": [
{
"first": "Roman",
"middle": [],
"last": "Klinger",
"suffix": ""
},
{
"first": "Philipp",
"middle": [],
"last": "Cimiano",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the Ninth International Conference on Language Resources and Evaluation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Roman Klinger and Philipp Cimiano. 2014. The USAGE Review Corpus for Fine Grained Multi Lingual Opin- ion Analysis. In Proceedings of the Ninth Interna- tional Conference on Language Resources and Eval- uation, Reykjavik, Iceland.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Aspect-Level Cross-lingual Sentiment Classification with Constrained SMT",
"authors": [
{
"first": "Patrik",
"middle": [],
"last": "Lambert",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of the Association for Computational Linguistics and the International Joint Conference on Natural Language Processing",
"volume": "",
"issue": "",
"pages": "781--787",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Patrik Lambert. 2015. Aspect-Level Cross-lingual Sen- timent Classification with Constrained SMT. In Pro- ceedings of the Association for Computational Linguis- tics and the International Joint Conference on Natu- ral Language Processing, 2015, Beijing, China, pages 781-787.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Synthesis Lectures on Human Language Technologies",
"authors": [
{
"first": "Bing",
"middle": [],
"last": "Liu",
"suffix": ""
}
],
"year": 2012,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Bing Liu. 2012. Sentiment Analysis and Opinion Mining. Synthesis Lectures on Human Language Technologies. Morgan & Claypool Publishers.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "SentiRuEval: Testing Objectoriented Sentiment Analysis Systems in Russian",
"authors": [
{
"first": "Natalia",
"middle": [],
"last": "Loukachevitch",
"suffix": ""
},
{
"first": "Pavel",
"middle": [],
"last": "Blinov",
"suffix": ""
},
{
"first": "Evgeny",
"middle": [],
"last": "Kotelnikov",
"suffix": ""
},
{
"first": "Yulia",
"middle": [],
"last": "Rubtsova",
"suffix": ""
},
{
"first": "Vladimir",
"middle": [],
"last": "Ivanov",
"suffix": ""
},
{
"first": "Elena",
"middle": [],
"last": "Tutubalina",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of International Conference Dialog",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Natalia Loukachevitch, Pavel Blinov, Evgeny Kotel- nikov, Yulia Rubtsova, Vladimir Ivanov, and Elena Tutubalina. 2015. SentiRuEval: Testing Object- oriented Sentiment Analysis Systems in Russian. In Proceedings of International Conference Dialog.",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "Learning Attitudes and Attributes from Multiaspect Reviews",
"authors": [
{
"first": "Julian",
"middle": [
"J"
],
"last": "Mcauley",
"suffix": ""
},
{
"first": "Jure",
"middle": [],
"last": "Leskovec",
"suffix": ""
},
{
"first": "Dan",
"middle": [],
"last": "Jurafsky",
"suffix": ""
}
],
"year": 2012,
"venue": "12th IEEE International Conference on Data Mining, ICDM 2012",
"volume": "",
"issue": "",
"pages": "1020--1025",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Julian J. McAuley, Jure Leskovec, and Dan Jurafsky. 2012. Learning Attitudes and Attributes from Multi- aspect Reviews. In 12th IEEE International Confer- ence on Data Mining, ICDM 2012, Brussels, Belgium, December 10-13, 2012, pages 1020-1025.",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "Overview of the TAC2013 Knowledge Base Population Evaluation English Sentiment Slot Filling",
"authors": [
{
"first": "Margaret",
"middle": [],
"last": "Mitchell",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the 6th Text Analysis Conference",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Margaret Mitchell. 2013. Overview of the TAC2013 Knowledge Base Population Evaluation English Senti- ment Slot Filling. In Proceedings of the 6th Text Anal- ysis Conference, Gaithersburg, Maryland, USA.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "SemEval-2016 Task 6: Detecting Stance in Tweets",
"authors": [
{
"first": "M",
"middle": [],
"last": "Saif",
"suffix": ""
},
{
"first": "Svetlana",
"middle": [],
"last": "Mohammad",
"suffix": ""
},
{
"first": "Parinaz",
"middle": [],
"last": "Kiritchenko",
"suffix": ""
},
{
"first": "Xiaodan",
"middle": [],
"last": "Sobhani",
"suffix": ""
},
{
"first": "Colin",
"middle": [],
"last": "Zhu",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Cherry",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the 10th International Workshop on Semantic Evaluation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Saif M Mohammad, Svetlana Kiritchenko, Parinaz Sob- hani, Xiaodan Zhu, and Colin Cherry. 2016. SemEval- 2016 Task 6: Detecting Stance in Tweets. In Proceed- ings of the 10th International Workshop on Semantic Evaluation, San Diego, California.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "Stelios Piperidis. 2012. The META-SHARE Language Resources Sharing Infrastructure: Principles, Challenges, Solutions",
"authors": [
{
"first": "Preslav",
"middle": [],
"last": "Nakov",
"suffix": ""
},
{
"first": "Sara",
"middle": [],
"last": "Rosenthal",
"suffix": ""
},
{
"first": "Zornitsa",
"middle": [],
"last": "Kozareva",
"suffix": ""
},
{
"first": "Veselin",
"middle": [],
"last": "Stoyanov",
"suffix": ""
},
{
"first": "Alan",
"middle": [],
"last": "Ritter",
"suffix": ""
},
{
"first": "Theresa",
"middle": [],
"last": "Wilson",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the 8th International Conference on Language Resources and Evaluation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Preslav Nakov, Sara Rosenthal, Zornitsa Kozareva, Veselin Stoyanov, Alan Ritter, and Theresa Wilson. 2013. SemEval-2013 Task 2: Sentiment Analysis in Twitter. In Proceedings of the 7th International Work- shop on Semantic Evaluation, Atlanta, Georgia. Stelios Piperidis. 2012. The META-SHARE Language Resources Sharing Infrastructure: Principles, Chal- lenges, Solutions. In Proceedings of the 8th Interna- tional Conference on Language Resources and Evalu- ation.",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "Harris Papageorgiou, Ion Androutsopoulos, and Suresh Manandhar",
"authors": [
{
"first": "Maria",
"middle": [],
"last": "Pontiki",
"suffix": ""
},
{
"first": "Dimitrios",
"middle": [],
"last": "Galanis",
"suffix": ""
},
{
"first": "John",
"middle": [],
"last": "Pavlopoulos",
"suffix": ""
},
{
"first": "Harris",
"middle": [],
"last": "Papageorgiou",
"suffix": ""
},
{
"first": "Ion",
"middle": [],
"last": "Androutsopoulos",
"suffix": ""
},
{
"first": "Suresh",
"middle": [],
"last": "Manandhar",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the 8th International Workshop on Semantic Evaluation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Maria Pontiki, Dimitrios Galanis, John Pavlopoulos, Har- ris Papageorgiou, Ion Androutsopoulos, and Suresh Manandhar. 2014. SemEval-2014 Task 4: Aspect Based Sentiment Analysis. In Proceedings of the 8th International Workshop on Semantic Evaluation, Dublin, Ireland.",
"links": null
},
"BIBREF26": {
"ref_id": "b26",
"title": "SemEval-2015 Task 12: Aspect Based Sentiment Analysis",
"authors": [
{
"first": "Maria",
"middle": [],
"last": "Pontiki",
"suffix": ""
},
{
"first": "Dimitrios",
"middle": [],
"last": "Galanis",
"suffix": ""
},
{
"first": "Harris",
"middle": [],
"last": "Papageorgiou",
"suffix": ""
},
{
"first": "Suresh",
"middle": [],
"last": "Manandhar",
"suffix": ""
},
{
"first": "Ion",
"middle": [],
"last": "Androutsopoulos",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of the 9th International Workshop on Semantic Evaluation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Maria Pontiki, Dimitrios Galanis, Harris Papageorgiou, Suresh Manandhar, and Ion Androutsopoulos. 2015. SemEval-2015 Task 12: Aspect Based Sentiment Analysis. In Proceedings of the 9th International Workshop on Semantic Evaluation, Denver, Colorado.",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "Eswc'14 challenge on concept-level sentiment analysis",
"authors": [
{
"first": "Diego",
"middle": [],
"last": "Reforgiato Recupero",
"suffix": ""
},
{
"first": "Erik",
"middle": [],
"last": "Cambria",
"suffix": ""
}
],
"year": 2014,
"venue": "Semantic Web Evaluation Challenge -SemWe-bEval",
"volume": "",
"issue": "",
"pages": "3--20",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Diego Reforgiato Recupero and Erik Cambria. 2014. Eswc'14 challenge on concept-level sentiment analy- sis. In Semantic Web Evaluation Challenge -SemWe- bEval 2014 at ESWC 2014, Anissaras, Crete, Greece, pages 3-20.",
"links": null
},
"BIBREF28": {
"ref_id": "b28",
"title": "SemEval-2014 Task 4: Sentiment Analysis in Twitter",
"authors": [
{
"first": "Sara",
"middle": [],
"last": "Rosenthal",
"suffix": ""
},
{
"first": "Alan",
"middle": [],
"last": "Ritter",
"suffix": ""
},
{
"first": "Preslav",
"middle": [],
"last": "Nakov",
"suffix": ""
},
{
"first": "Veselin",
"middle": [],
"last": "Stoyanov",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the 8th International Workshop on Semantic Evaluation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sara Rosenthal, Alan Ritter, Preslav Nakov, and Veselin Stoyanov. 2014. SemEval-2014 Task 4: Sentiment Analysis in Twitter. In Proceedings of the 8th Interna- tional Workshop on Semantic Evaluation, Dublin, Ire- land.",
"links": null
},
"BIBREF29": {
"ref_id": "b29",
"title": "SemEval-2015 Task 10: Sentiment Analysis in Twitter",
"authors": [
{
"first": "Sara",
"middle": [],
"last": "Rosenthal",
"suffix": ""
},
{
"first": "Preslav",
"middle": [],
"last": "Nakov",
"suffix": ""
},
{
"first": "Svetlana",
"middle": [],
"last": "Kiritchenko",
"suffix": ""
},
{
"first": "Saif",
"middle": [
"M"
],
"last": "Mohammad",
"suffix": ""
},
{
"first": "Alan",
"middle": [],
"last": "Ritter",
"suffix": ""
},
{
"first": "Veselin",
"middle": [],
"last": "Stoyanov",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of the 9th International Workshop on Semantic Evaluation",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sara Rosenthal, Preslav Nakov, Svetlana Kiritchenko, Saif M Mohammad, Alan Ritter, and Veselin Stoy- anov. 2015. SemEval-2015 Task 10: Sentiment Anal- ysis in Twitter. In Proceedings of the 9th International Workshop on Semantic Evaluation, Denver, Colorado.",
"links": null
},
"BIBREF30": {
"ref_id": "b30",
"title": "IG-GSA Shared Tasks on German Sentiment Analysis (GESTALT)",
"authors": [
{
"first": "Josef",
"middle": [],
"last": "Ruppenhofer",
"suffix": ""
},
{
"first": "Roman",
"middle": [],
"last": "Klinger",
"suffix": ""
},
{
"first": "Julia",
"middle": [
"Maria"
],
"last": "Stru\u00df",
"suffix": ""
},
{
"first": "Jonathan",
"middle": [],
"last": "Sonntag",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Wiegand",
"suffix": ""
}
],
"year": 2014,
"venue": "Workshop Proceedings of the 12th Edition of the KONVENS Conference",
"volume": "",
"issue": "",
"pages": "164--173",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Josef Ruppenhofer, Roman Klinger, Julia Maria Stru\u00df, Jonathan Sonntag, and Michael Wiegand. 2014. IG- GSA Shared Tasks on German Sentiment Analysis (GESTALT). In Workshop Proceedings of the 12th Edition of the KONVENS Conference, pages 164-173.",
"links": null
},
"BIBREF31": {
"ref_id": "b31",
"title": "Overview of Opinion Analysis Pilot Task at NTCIR-6",
"authors": [
{
"first": "Yohei",
"middle": [],
"last": "Seki",
"suffix": ""
},
{
"first": "David",
"middle": [
"Kirk"
],
"last": "Evans",
"suffix": ""
},
{
"first": "Lun-Wei",
"middle": [],
"last": "Ku",
"suffix": ""
},
{
"first": "Hsin-Hsi",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Noriko",
"middle": [],
"last": "Kando",
"suffix": ""
},
{
"first": "Chin-Yew",
"middle": [],
"last": "Lin",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 6th NTCIR Workshop",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yohei Seki, David Kirk Evans, Lun-Wei Ku, Hsin-Hsi Chen, Noriko Kando, and Chin-Yew Lin. 2007. Overview of Opinion Analysis Pilot Task at NTCIR- 6. In Proceedings of the 6th NTCIR Workshop, Tokyo, Japan.",
"links": null
},
"BIBREF32": {
"ref_id": "b32",
"title": "Overview of Multilingual Opinion Analysis Task at NTCIR-7",
"authors": [
{
"first": "Yohei",
"middle": [],
"last": "Seki",
"suffix": ""
},
{
"first": "David",
"middle": [
"Kirk"
],
"last": "Evans",
"suffix": ""
},
{
"first": "Lun-Wei",
"middle": [],
"last": "Ku",
"suffix": ""
},
{
"first": "Le",
"middle": [],
"last": "Sun",
"suffix": ""
},
{
"first": "Hsin-Hsi",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Noriko",
"middle": [],
"last": "Kando",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the 7th NTCIR Workshop",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yohei Seki, David Kirk Evans, Lun-Wei Ku, Le Sun, Hsin-Hsi Chen, and Noriko Kando. 2008. Overview of Multilingual Opinion Analysis Task at NTCIR-7. In Proceedings of the 7th NTCIR Workshop, Tokyo, Japan.",
"links": null
},
"BIBREF33": {
"ref_id": "b33",
"title": "Overview of Multilingual Opinion Analysis Task at NTCIR-8: A Step Toward Cross",
"authors": [
{
"first": "Yohei",
"middle": [],
"last": "Seki",
"suffix": ""
},
{
"first": "Lun-Wei",
"middle": [],
"last": "Ku",
"suffix": ""
},
{
"first": "Le",
"middle": [],
"last": "Sun",
"suffix": ""
},
{
"first": "Hsin-Hsi",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Noriko",
"middle": [],
"last": "Kando",
"suffix": ""
}
],
"year": 2010,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yohei Seki, Lun-Wei Ku, Le Sun, Hsin-Hsi Chen, and Noriko Kando. 2010. Overview of Multilingual Opin- ion Analysis Task at NTCIR-8: A Step Toward Cross",
"links": null
},
"BIBREF34": {
"ref_id": "b34",
"title": "Lingual Opinion Analysis",
"authors": [],
"year": null,
"venue": "Proceedings of the 8th NTCIR Workshop",
"volume": "",
"issue": "",
"pages": "209--220",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lingual Opinion Analysis. In Proceedings of the 8th NTCIR Workshop, Tokyo, Japan, pages 209-220.",
"links": null
},
"BIBREF35": {
"ref_id": "b35",
"title": "Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank",
"authors": [
{
"first": "Richard",
"middle": [],
"last": "Socher",
"suffix": ""
},
{
"first": "Alex",
"middle": [],
"last": "Perelygin",
"suffix": ""
},
{
"first": "Jean",
"middle": [],
"last": "Wu",
"suffix": ""
},
{
"first": "Jason",
"middle": [],
"last": "Chuang",
"suffix": ""
},
{
"first": "Christopher",
"middle": [
"D"
],
"last": "Manning",
"suffix": ""
},
{
"first": "Andrew",
"middle": [
"Y"
],
"last": "Ng",
"suffix": ""
},
{
"first": "Christopher",
"middle": [],
"last": "Potts",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "1631--1642",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D. Manning, Andrew Y. Ng, and Christo- pher Potts. 2013. Recursive Deep Models for Semantic Compositionality Over a Sentiment Tree- bank. In Proceedings of Empirical Methods in Natural Language Processing, pages 1631-1642, Stroudsburg, PA.",
"links": null
},
"BIBREF36": {
"ref_id": "b36",
"title": "BRAT: A Web-based Tool for NLP-Assisted Text Annotation",
"authors": [
{
"first": "Pontus",
"middle": [],
"last": "Stenetorp",
"suffix": ""
},
{
"first": "Sampo",
"middle": [],
"last": "Pyysalo",
"suffix": ""
},
{
"first": "Goran",
"middle": [],
"last": "Topic",
"suffix": ""
},
{
"first": "Tomoko",
"middle": [],
"last": "Ohta",
"suffix": ""
},
{
"first": "Sophia",
"middle": [],
"last": "Ananiadou",
"suffix": ""
},
{
"first": "Jun'ichi",
"middle": [],
"last": "Tsujii",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the European Chapter of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "102--107",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Pontus Stenetorp, Sampo Pyysalo, Goran Topic, Tomoko Ohta, Sophia Ananiadou, and Jun'ichi Tsujii. 2012. BRAT: A Web-based Tool for NLP-Assisted Text An- notation. In Proceedings of the European Chapter of the Association for Computational Linguistics, pages 102-107.",
"links": null
},
"BIBREF37": {
"ref_id": "b37",
"title": "The impact of nlp on turkish sentiment analysis. T\u00dcRK\u0130YE B\u0130L\u0130\u015e\u0130M VAKFI B\u0130LG\u0130SAYAR B\u0130L\u0130MLER\u0130 ve M\u00dcHEND\u0130SL\u0130\u011e\u0130 DERG\u0130S\u0130",
"authors": [
{
"first": "Ezgi",
"middle": [],
"last": "Y\u0131ld\u0131r\u0131m",
"suffix": ""
},
{
"first": "Fatih",
"middle": [
"Samet"
],
"last": "\u00c7etin",
"suffix": ""
},
{
"first": "G\u00fcl\u015fen",
"middle": [],
"last": "Eryi\u011fit",
"suffix": ""
},
{
"first": "Tanel",
"middle": [],
"last": "Temel",
"suffix": ""
}
],
"year": 2015,
"venue": "",
"volume": "7",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ezgi Y\u0131ld\u0131r\u0131m, Fatih Samet \u00c7etin, G\u00fcl\u015fen Eryi\u011fit, and Tanel Temel. 2015. The impact of nlp on turkish sentiment analysis. T\u00dcRK\u0130YE B\u0130L\u0130\u015e\u0130M VAKFI B\u0130LG\u0130SAYAR B\u0130L\u0130MLER\u0130 ve M\u00dcHEND\u0130SL\u0130\u011e\u0130 DERG\u0130S\u0130, 7(1 (Bas\u0131l\u0131 8).",
"links": null
},
"BIBREF38": {
"ref_id": "b38",
"title": "What Motivates Consumers to Write Online Travel Reviews?",
"authors": [
{
"first": "Kyung",
"middle": [
"Hyan"
],
"last": "Yoo",
"suffix": ""
},
{
"first": "Ulrike",
"middle": [],
"last": "Gretzel",
"suffix": ""
}
],
"year": 2008,
"venue": "J. of IT & Tourism",
"volume": "10",
"issue": "4",
"pages": "283--295",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kyung Hyan Yoo and Ulrike Gretzel. 2008. What Moti- vates Consumers to Write Online Travel Reviews? J. of IT & Tourism, 10(4):283-295.",
"links": null
},
"BIBREF39": {
"ref_id": "b39",
"title": "Creating a Fine-Grained Corpus for Chinese Sentiment Analysis",
"authors": [
{
"first": "Yanyan",
"middle": [],
"last": "Zhao",
"suffix": ""
},
{
"first": "Bing",
"middle": [],
"last": "Qin",
"suffix": ""
},
{
"first": "Ting",
"middle": [],
"last": "Liu",
"suffix": ""
}
],
"year": 2015,
"venue": "IEEE Intelligent Systems",
"volume": "30",
"issue": "1",
"pages": "36--43",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yanyan Zhao, Bing Qin, and Ting Liu. 2015. Creating a Fine-Grained Corpus for Chinese Sentiment Analysis. IEEE Intelligent Systems, 30(1):36-43.",
"links": null
}
},
"ref_entries": {
"TABREF1": {
"html": null,
"text": "Datasets provided for SE-ABSA16.",
"type_str": "table",
"num": null,
"content": "<table/>"
},
"TABREF2": {
"html": null,
"text": "Language and Speech Processing, Athena R.C., Athens, Greece Dept. of Informatics, Athens University of Economics and Business, Greece Arabic Computer Science Dept., Jordan University of Science and Technology Irbid, Jordan Chinese Harbin Institute of Technology, Harbin, Heilongjiang, P.R.",
"type_str": "table",
"num": null,
"content": "<table><tr><td>Lang.</td><td>Research team(s)</td></tr><tr><td>English</td><td>Institute for China</td></tr><tr><td>Dutch</td><td>LT3, Ghent University, Ghent, Belgium</td></tr><tr><td>French</td><td>LIMSI, CNRS, Univ. Paris-Sud, Universit\u00e9 Paris-Saclay, Orsay, France</td></tr><tr><td>Russian</td><td>Lomonosov Moscow State University, Moscow, Russian Federation Vyatka State University, Kirov, Russian Federation</td></tr><tr><td>Spanish</td><td>Universitat Pompeu Fabra, Barcelona, Spain SINAI, Universidad de Ja\u00e9n, Spain</td></tr><tr><td colspan=\"2\">Turkish Dept. of Computer Engineering, Istanbul Technical University, Turkey</td></tr><tr><td/><td>Turkcell Global Bilgi, Turkey</td></tr></table>"
},
"TABREF3": {
"html": null,
"text": "Research teams that contributed to the creation of the datasets for each language.",
"type_str": "table",
"num": null,
"content": "<table/>"
},
"TABREF4": {
"html": null,
"text": "English REST results for SB1.",
"type_str": "table",
"num": null,
"content": "<table/>"
},
"TABREF5": {
"html": null,
"text": "Number of participating teams and submitted runs per language.",
"type_str": "table",
"num": null,
"content": "<table/>"
},
"TABREF6": {
"html": null,
"text": "LAPT, CAME, and PHNS results for SB1.",
"type_str": "table",
"num": null,
"content": "<table/>"
},
"TABREF8": {
"html": null,
"text": "Results for SB2.",
"type_str": "table",
"num": null,
"content": "<table/>"
}
}
}
}