|
{ |
|
"paper_id": "S18-1049", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T15:45:26.560692Z" |
|
}, |
|
"title": "CENNLP at SemEval-2018 Task 1: Constrained Vector Space Model in Affects in Tweets", |
|
"authors": [ |
|
{ |
|
"first": "J", |
|
"middle": [ |
|
"R" |
|
], |
|
"last": "Naveen", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Coimbatore Amrita Vishwa Vidyapeetham", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Barathi", |
|
"middle": [], |
|
"last": "Ganesh", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Coimbatore Amrita Vishwa Vidyapeetham", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
}, |
|
{ |
|
"first": "Anand", |
|
"middle": [], |
|
"last": "Kumar", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Coimbatore Amrita Vishwa Vidyapeetham", |
|
"location": { |
|
"country": "India" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "This paper discusses task 1, the \"Affect in Tweets\" sharedtask conducted in SemEval-2018. This task comprises various subtasks, which required participants to analyse different emotions and sentiments based on the provided tweet data and also measure the intensity of these emotions for subsequent subtasks. Our approach is to come up with a model for all the subtasks on count based representation and use machine learning techniques for regression and classification related tasks. In this work, we use the bag of words technique for supervised text classification and regression. Further, by fine tuning various parameters of the bag of words representation model, we acquired better scores over various other baseline models (Vinayan et al.) that participated in the sharedtask.",
|
"pdf_parse": { |
|
"paper_id": "S18-1049", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "This paper discusses task 1, the \"Affect in Tweets\" sharedtask conducted in SemEval-2018. This task comprises various subtasks, which required participants to analyse different emotions and sentiments based on the provided tweet data and also measure the intensity of these emotions for subsequent subtasks. Our approach is to come up with a model for all the subtasks on count based representation and use machine learning techniques for regression and classification related tasks. In this work, we use the bag of words technique for supervised text classification and regression. Further, by fine tuning various parameters of the bag of words representation model, we acquired better scores over various other baseline models (Vinayan et al.) that participated in the sharedtask.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "A huge portion of analysis in natural language processing tries to better understand and process various kinds of information in text. Day by day, the development of social websites, blogging and the consumption of technologies gives a vast amount of text data on the internet, which has opened a space to study people's feelings, reviews, and emotions from their own written language, called sentiment analysis. Sentiment analysis has many attractions and much research has been done in this area.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Sentiment analysis remains a sequence of techniques, approaches, and tools about sensing and mining subjective info (such as opinion and attitudes) from language (Bravo-Marquez et al., 2014) . Traditional approaches Mohammad et al., 2013) are finding out the polarity of the positive, negative, neutral classification problem Bravo-Marquez et al., 2015) . Recent research in sentimental analysis (Mohammad and are done on the data-driven algorithm view point. But at the same time combination of good linguistic awareness data can increase the performance and insights about the task. We used machine learning techniques to build the model. Linear regression, random forest methods are used respectively for prediction and classification tasks. A mathematical system or an algorithm needs some form of numeric representation to work with. The naive way of representing a word in vector form is one hot representation but it is a very ineffective way for representing a large corpus. In a more effective way, we need some semantic similarities to nearby points, thus creating the representation brings beneficial info about the word actual meaning, called word embedding models that are categorized based on count and predictive word embedding models. Both embedding models at least some way share semantic meaning. We used here count based word embedding methods for inputting the word. More specifically, feature representation is done based on the term-document matrix (TDM) and term frequency-inverse document frequency (TFIDF) matrix. The optimum value of n-gram range, depth of classifier, min-df are obtained by hyper parameter tuning.",
|
"cite_spans": [ |
|
{ |
|
"start": 162, |
|
"end": 190, |
|
"text": "(Bravo-Marquez et al., 2014)", |
|
"ref_id": "BIBREF1" |
|
}, |
|
{ |
|
"start": 216, |
|
"end": 238, |
|
"text": "Mohammad et al., 2013)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 326, |
|
"end": 353, |
|
"text": "Bravo-Marquez et al., 2015)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Dataset provided by shared task was sourced from Twitter API by focusing emotion-correlated words. The tweets were annotated separately for 4 emotions namely anger, joy, fear and sadness. The data provided were annotated with best-worst scaling technique (Kiritchenko and Mohammad, 2016) that gave better annotation consistency and emotion intensity scores for tweets. There were 5 subtasks in task1 . For each sub-tasks, separate training and testing data sets are given for Spanish, English, and Arabic. Subtasks 1 and 3 focused on emotion intensity and sentiment intensity tasks respectively which were categorized into regression tasks (EI-reg and V-reg ). In that emotion intensity and sentimental intensity is a real-valued scale between 0 and 1, where 0 represents least and 1 represents the most intensity of the tweeters from written tweets. Rest of the subtasks EI-oc, V-oc, E-c were multi-class classifications problems that are emotion intensity ordinal classification, sentiment analysis ordinal classification, emotion classification subtasks respectively. For subtask 2(EI-oc) distinct training and testing, dataset are provided for anger, fear, joy, and sadness. Subtask 4(V-oc) gives 7 ordinal classes, according to different levels of positive and negative valence state of the tweeter. Ta 3 Background", |
|
"cite_spans": [ |
|
{ |
|
"start": 255, |
|
"end": 287, |
|
"text": "(Kiritchenko and Mohammad, 2016)", |
|
"ref_id": "BIBREF4" |
|
} |
|
], |
|
"ref_spans": [ |
|
{ |
|
"start": 1305, |
|
"end": 1307, |
|
"text": "Ta", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Corpus", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "TD is the most basic method of representation of a text used in NLP. In this technique, for every individual document present in a corpus, we take the raw count of the words present in that document over all the unique words present in the entire corpus as its representation (Larson, 2010) . That is to say, a vocabulary is created using all the words in the entire corpus and for a single document representation, the count of the words is incremented in view of their occurrence only for that document. The drawback of this method is that this creates a very sparse matrix where only a few of the columns are accumulated with numbers whereas the rest of the columns are all zeros, thus bringing us to the term frequency method.",
|
"cite_spans": [ |
|
{ |
|
"start": 276, |
|
"end": 290, |
|
"text": "(Larson, 2010)", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TDM", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "One of the problems that occur due to the term document representation is that, it takes a raw count of all the words present in the document where most frequently occurring words like conjunction, preposition appear very often across most of the articles, thus not adding any significant importance to the individual article. On the other hand, seldom occurring words, like proper nouns give a more individual identification to the article. Thus, coming to a method where we take in the frequency of the words over the entire corpus, this method is termed as term-frequency(tf).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TF-IDF", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "In language processing technique a collection of commonly appearing words with apparently less significance to a document are called as 'stop words', these can be removed at pre-processing level. Whereas, more often than not a list of stop words is not a sophisticated approach to adjusting term frequency for commonly used words. Inverse document frequency (idf ) is a technique (Ramos et al., 2003) wherein, less weight age is given to more commonly occurring words (not restricted to only stop words) and vice-verse for seldomly used words across the entire corpus.", |
|
"cite_spans": [ |
|
{ |
|
"start": 380, |
|
"end": 400, |
|
"text": "(Ramos et al., 2003)", |
|
"ref_id": "BIBREF12" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TF-IDF", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "idf t = ln N tot docs n docs containing t Combining the two ideologies (tf-idf ) brings, the rarity of the term intended to measure how important a word can be to the document in a collection (or corpus) of documents. it can be considered as a heuristic quantity. The term inverse document frequency for any given term is defined as", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TF-IDF", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "tf \u2212idf t,d = tf t,d * idf t", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "TF-IDF", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Linear regression is a commonly used supervised learning approach for prediction. The key goal is to fit a best fit line between a dependent and independent variable so as to minimize the error sum of squares between the actual and predicted value using the model. The model for linear regression is usually fitted using least square approach, or by minimizing the error sum of squares between the actual and predicted value. In certain cases, the model can also be framed by adding a regularization term. The regularization term is added to avoid overfitting (Fran\u00e7ois and Miltsakaki, 2012) .", |
|
"cite_spans": [ |
|
{ |
|
"start": 560, |
|
"end": 591, |
|
"text": "(Fran\u00e7ois and Miltsakaki, 2012)", |
|
"ref_id": "BIBREF2" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Linear Regression", |
|
"sec_num": "3.3" |
|
}, |
|
{ |
|
"text": "Random forest, an ensemble decision tree based classifier which averages various combination of trees created on arbitrary samples from the data set. A decision tree breakdown the data into minor sub-classes while instantaneously construe a tree using decision and leaf nodes. The category is embodied by leafs nodes. A decision node takes two or extra divisions with choices or leafs. Every tree in the RF is made on an arbitrary decent subclass of features present (Liaw et al., 2002) on the entire data. The RF algorithm medians trees to generate a system with short variance and insignificant trees are canceled out, left trees produce the output.", |
|
"cite_spans": [ |
|
{ |
|
"start": 467, |
|
"end": 486, |
|
"text": "(Liaw et al., 2002)", |
|
"ref_id": "BIBREF7" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Random Forest", |
|
"sec_num": "3.4" |
|
}, |
|
{ |
|
"text": "The model will be effective based on how it is extracting meaningful information from raw text. The system is created with the help of scikit-learn library 1 which is a python based library very much useful for classification, regression, clustering, data preparation, dimensionality reduction etc.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "1 http://scikit-learn.org/stable/ The training, development and test data set are taken from SemEval18 website.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "1. Importing training and cross-validation from the given data set 2. Removes all the stop words from data that are insignificant.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "3. Create a bag of words model which is a simple numeric representation of piece of text that is easy to classify. We just count the frequency of each word in the piece of text and created a dictionary of them which is called tokenization process in NLP which is then passed to countvectorize object in scikit learn package to create a set of maximum features. We use fit transform method to model (Ganesh et al., 2016) the bag of words feature vector which are stored in an array.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "4. Same tools and methods are followed for creating TDM matrix as mentioned in step 3", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "5. We created a classifier or prediction with the help of machine learning model. Here we used random forest classifiers consisting of one hundred trees. RF is a set of decision trees graphs that model all possibility of certain outcomes. ", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Methodology", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The group of tasks is particularly focusing on automatic detection of the intensity of emotion (EIreg) and sentiment (V-reg) of the tweeter. In this task, they have presented with the problem of classifying multi-classed emotion of tweets, such as EI-oc, V-oc, E-c . We have approached these tasks with a count based representation model, where every individual tweet is represented based on varied vocabulary size, and how these will perform for different category of subtasks over three different language dataset namely English, Spanish and Arabic. We base the model, considering in mind that an algorithm should not be narrowed down to a certain problem. That is it should not be biased towards a particular problem overall, this inference is made on the fact that all subtasks under task1 are focused on understanding the effect of tweets from the same corpora. As all the subtasks under task1 follow a generic grid search models, which are varied over min-df, n-gram parameters. The EI-reg task was tuned on mean square error and variance for all 3 languages. EI-reg gave comparatively better accuracy in TF-IDF matrix than TDM matrix, so we used TF-IDF for creating feature matrix. V-reg is a regression task where sentiment intensity was predicted. Spanish and English used TF-IDF and Arabic corpora used term document matrix for feature input matrix. These features are found by the grid search method. Arabic and Spanish data give 58 % prediction and English data give a slightly higher result, which is 62",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Result", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Pearson (all instances) Valence English 0.622 Arabic 0.583 Spanish 0.580 Subtasks 2,4,5 are multi-label classification problems whose models are also generated by bag of words method. But the classification which was done by random forest did not yield expected result comparing to regression tasks.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Result", |
|
"sec_num": "5" |
|
}, |
|
{ |
|
"text": "Affect in tweets has been found out by the bag of words representation and classical machine learning algorithms. Random Forest and linear regression were used as machine learning tasks for predicting classification tasks and regression tasks respectively, in which the regression task gave fairly good results while the classification task yielded not so favorable results. TF-IDF seems to give better results for English and Spanish languages whereas TDM gave better results for the Arabic language. Emotion intensity and valence were captured by our model for the given validation data. Algorithms performed nearly the same with TF-IDF and TDM but with slightly better results while using TF-IDF.",
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
} |
|
], |
|
"back_matter": [], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Positive, negative, or neutral: Learning an expanded opinion lexicon from emoticon-annotated tweets", |
|
"authors": [ |
|
{ |
|
"first": "Felipe", |
|
"middle": [], |
|
"last": "Bravo-Marquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eibe", |
|
"middle": [], |
|
"last": "Frank", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Bernhard", |
|
"middle": [], |
|
"last": "Pfahringer", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "IJCAI", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1229--1235", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Felipe Bravo-Marquez, Eibe Frank, and Bernhard Pfahringer. 2015. Positive, negative, or neu- tral: Learning an expanded opinion lexicon from emoticon-annotated tweets. In IJCAI, pages 1229- 1235.", |
|
"links": null |
|
}, |
|
"BIBREF1": { |
|
"ref_id": "b1", |
|
"title": "Meta-level sentiment models for big social data analysis. Knowledge-Based Systems", |
|
"authors": [ |
|
{ |
|
"first": "Felipe", |
|
"middle": [], |
|
"last": "Bravo-Marquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marcelo", |
|
"middle": [], |
|
"last": "Mendoza", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Barbara", |
|
"middle": [], |
|
"last": "Poblete", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "", |
|
"volume": "69", |
|
"issue": "", |
|
"pages": "86--99", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Felipe Bravo-Marquez, Marcelo Mendoza, and Bar- bara Poblete. 2014. Meta-level sentiment models for big social data analysis. Knowledge-Based Systems, 69:86-99.", |
|
"links": null |
|
}, |
|
"BIBREF2": { |
|
"ref_id": "b2", |
|
"title": "Do nlp and machine learning improve traditional readability formulas?", |
|
"authors": [ |
|
{ |
|
"first": "Thomas", |
|
"middle": [], |
|
"last": "Fran\u00e7ois", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eleni", |
|
"middle": [], |
|
"last": "Miltsakaki", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2012, |
|
"venue": "Proceedings of the First Workshop on Predicting and Improving Text Readability for target reader populations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "49--57", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Thomas Fran\u00e7ois and Eleni Miltsakaki. 2012. Do nlp and machine learning improve traditional readabil- ity formulas? In Proceedings of the First Work- shop on Predicting and Improving Text Readability for target reader populations, pages 49-57. Associ- ation for Computational Linguistics.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "From vector space models to vector space models of semantics", |
|
"authors": [ |
|
{ |
|
"first": "Anand", |
|
"middle": [], |
|
"last": "Hb Barathi Ganesh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Soman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Forum for Information Retrieval Evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "50--60", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "HB Barathi Ganesh, M Anand Kumar, and KP Soman. 2016. From vector space models to vector space models of semantics. In Forum for Information Re- trieval Evaluation, pages 50-60. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Sentiment composition of words with opposing polarities", |
|
"authors": [ |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Saif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1102--1108", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Svetlana Kiritchenko and Saif M Mohammad. 2016. Sentiment composition of words with opposing po- larities. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, pages 1102-1108.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "Sentiment analysis of short informal texts", |
|
"authors": [ |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saif M", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Journal of Artificial Intelligence Research", |
|
"volume": "50", |
|
"issue": "", |
|
"pages": "723--762", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Svetlana Kiritchenko, Xiaodan Zhu, and Saif M Mo- hammad. 2014. Sentiment analysis of short in- formal texts. Journal of Artificial Intelligence Re- search, 50:723-762.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Introduction to information retrieval", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Ray R Larson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2010, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ray R Larson. 2010. Introduction to information re- trieval.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Classification and regression by randomforest. R news", |
|
"authors": [ |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Liaw", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Wiener", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2002, |
|
"venue": "", |
|
"volume": "2", |
|
"issue": "", |
|
"pages": "18--22", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Andy Liaw, Matthew Wiener, et al. 2002. Classifi- cation and regression by randomforest. R news, 2(3):18-22.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "Word affect intensities", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Saif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 11th Edition of the Language Resources and Evaluation Conference (LREC-2018)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saif M. Mohammad. 2018. Word affect intensities. In Proceedings of the 11th Edition of the Language Re- sources and Evaluation Conference (LREC-2018), Miyazaki, Japan.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Semeval-2018 Task 1: Affect in tweets", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Saif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Felipe", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mohammad", |
|
"middle": [], |
|
"last": "Bravo-Marquez", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Salameh", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of International Workshop on Semantic Evaluation (SemEval-2018)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saif M. Mohammad, Felipe Bravo-Marquez, Mo- hammad Salameh, and Svetlana Kiritchenko. 2018. Semeval-2018 Task 1: Affect in tweets. In Proceed- ings of International Workshop on Semantic Evalu- ation (SemEval-2018), New Orleans, LA, USA.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Understanding emotions: A dataset of tweets to study interactions between affect categories", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Saif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "Proceedings of the 11th Edition of the Language Resources and Evaluation Conference (LREC-2018)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saif M. Mohammad and Svetlana Kiritchenko. 2018. Understanding emotions: A dataset of tweets to study interactions between affect categories. In Pro- ceedings of the 11th Edition of the Language Re- sources and Evaluation Conference (LREC-2018), Miyazaki, Japan.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Nrc-canada: Building the stateof-the-art in sentiment analysis of tweets", |
|
"authors": [ |
|
{ |
|
"first": "M", |
|
"middle": [], |
|
"last": "Saif", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaodan", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2013, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:1308.6242" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Saif M Mohammad, Svetlana Kiritchenko, and Xiao- dan Zhu. 2013. Nrc-canada: Building the state- of-the-art in sentiment analysis of tweets. arXiv preprint arXiv:1308.6242.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Using tf-idf to determine word relevance in document queries", |
|
"authors": [ |
|
{ |
|
"first": "Juan", |
|
"middle": [], |
|
"last": "Ramos", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2003, |
|
"venue": "Proceedings of the first instructional conference on machine learning", |
|
"volume": "242", |
|
"issue": "", |
|
"pages": "133--142", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Juan Ramos et al. 2003. Using tf-idf to determine word relevance in document queries. In Proceedings of the first instructional conference on machine learn- ing, volume 242, pages 133-142.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Amrita cen at semeval-2016 task 1: Semantic relation from word embeddings in higher dimension", |
|
"authors": [ |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Kp Soman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 10th International Workshop on Semantic Evaluation (SemEval-2016)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "706--711", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "KP Soman et al. 2016. Amrita cen at semeval- 2016 task 1: Semantic relation from word embed- dings in higher dimension. In Proceedings of the 10th International Workshop on Semantic Evalua- tion (SemEval-2016), pages 706-711.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Amritanlp@ panrusprofiling: Author profiling using machine learning techniques", |
|
"authors": [ |
|
{ |
|
"first": "Vivek", |
|
"middle": [], |
|
"last": "Vinayan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Naveen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Anand", |
|
"middle": [], |
|
"last": "Nb Harikrishnan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "K", |
|
"middle": [ |
|
"P" |
|
], |
|
"last": "Kumar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Soman", |
|
"suffix": "" |
|
} |
|
], |
|
"year": null, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Vivek Vinayan, JR Naveen, NB Harikrishnan, M Anand Kumar, and KP Soman. Amritanlp@ pan- rusprofiling: Author profiling using machine learn- ing techniques.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Nrc-canada-2014: Recent improvements in the sentiment analysis of tweets", |
|
"authors": [ |
|
{ |
|
"first": "Xiaodan", |
|
"middle": [], |
|
"last": "Zhu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Svetlana", |
|
"middle": [], |
|
"last": "Kiritchenko", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Saif", |
|
"middle": [], |
|
"last": "Mohammad", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Proceedings of the 8th international workshop on semantic evaluation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "443--447", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xiaodan Zhu, Svetlana Kiritchenko, and Saif Moham- mad. 2014. Nrc-canada-2014: Recent improve- ments in the sentiment analysis of tweets. In Pro- ceedings of the 8th international workshop on se- mantic evaluation (SemEval 2014), pages 443-447.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": { |
|
"TABREF1": { |
|
"num": null, |
|
"text": "", |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF3": { |
|
"num": null, |
|
"text": "V-reg vocabulary size with variation of parameters.", |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF5": { |
|
"num": null, |
|
"text": "EI-reg cross validation results.", |
|
"content": "<table><tr><td>lang Rep</td><td colspan=\"2\">min df n-gram</td><td>depth of tree</td><td>acc</td></tr><tr><td colspan=\"2\">Sp TDM 1</td><td>3</td><td>18</td><td>41.74</td></tr><tr><td colspan=\"2\">Sp TFIDF 1</td><td>2</td><td>19</td><td>42.62</td></tr><tr><td colspan=\"2\">En TDM 1</td><td>8</td><td>18</td><td>46.51</td></tr><tr><td colspan=\"2\">En TFIDF 1 Ara TDM 3</td><td>12 6</td><td>19 18</td><td>46.58 35.09</td></tr><tr><td colspan=\"2\">Ara TFIDF 3</td><td>14</td><td>15</td><td>33.73</td></tr></table>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF6": { |
|
"num": null, |
|
"text": "", |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF7": { |
|
"num": null, |
|
"text": "", |
|
"content": "<table><tr><td>-5 show the representation we adopted</td></tr><tr><td>for making our model in each sub-tasks. Same</td></tr></table>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF8": { |
|
"num": null, |
|
"text": "", |
|
"content": "<table><tr><td/><td colspan=\"4\">: V-reg cross validation results.</td></tr><tr><td>lang Rep</td><td colspan=\"4\">min df n-gram depth acc</td></tr><tr><td colspan=\"2\">Sp TDM 3</td><td>9</td><td>17</td><td>30</td></tr><tr><td colspan=\"2\">SP TFIDF 2</td><td>10</td><td>13</td><td>31.4</td></tr><tr><td colspan=\"2\">En TDM 2</td><td>13</td><td>18</td><td>29.4</td></tr><tr><td colspan=\"2\">En TFIDF 1 Ara TDM 1</td><td>9 14</td><td>17 16</td><td>29.4 26</td></tr><tr><td colspan=\"2\">Ara TFIDF 2</td><td>9</td><td>15</td><td>25.36</td></tr></table>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF9": { |
|
"num": null, |
|
"text": "", |
|
"content": "<table><tr><td>: V-oc cross validation results.</td></tr><tr><td>model is created 3 different languages, Spanish,</td></tr><tr><td>English and Arabic. Both TDM and TFID fea-</td></tr><tr><td>ture matrix are tuned on the basis of accuracy and</td></tr><tr><td>Fscore values. Accuracy shown in bold letter are</td></tr><tr><td>used for making prediction and classification task</td></tr><tr><td>model.</td></tr></table>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF11": { |
|
"num": null, |
|
"text": "", |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF13": { |
|
"num": null, |
|
"text": "EI-reg result.", |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
}, |
|
"TABREF14": { |
|
"num": null, |
|
"text": "V-reg result.", |
|
"content": "<table/>", |
|
"html": null, |
|
"type_str": "table" |
|
} |
|
} |
|
} |
|
} |