asahi417 committed
Commit 2954102 · 1 Parent(s): cbc6a9c
This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +43 -0
  2. data/tweet_hate/test.jsonl +3 -0
  3. data/tweet_hate/test_1.jsonl +3 -0
  4. data/tweet_hate/test_2.jsonl +3 -0
  5. data/tweet_hate/test_3.jsonl +3 -0
  6. data/tweet_hate/test_4.jsonl +3 -0
  7. data/tweet_hate/train.jsonl +3 -0
  8. data/tweet_hate/validation.jsonl +3 -0
  9. data/tweet_hate_test0_seed0/test.jsonl +3 -0
  10. data/tweet_hate_test0_seed0/train.jsonl +3 -0
  11. data/tweet_hate_test0_seed0/validation.jsonl +3 -0
  12. data/tweet_hate_test0_seed1/test.jsonl +3 -0
  13. data/tweet_hate_test0_seed1/train.jsonl +3 -0
  14. data/tweet_hate_test0_seed1/validation.jsonl +3 -0
  15. data/tweet_hate_test0_seed2/test.jsonl +3 -0
  16. data/tweet_hate_test0_seed2/train.jsonl +3 -0
  17. data/tweet_hate_test0_seed2/validation.jsonl +3 -0
  18. data/tweet_hate_test1_seed0/test.jsonl +3 -0
  19. data/tweet_hate_test1_seed0/train.jsonl +3 -0
  20. data/tweet_hate_test1_seed0/validation.jsonl +3 -0
  21. data/tweet_hate_test1_seed1/test.jsonl +3 -0
  22. data/tweet_hate_test1_seed1/train.jsonl +3 -0
  23. data/tweet_hate_test1_seed1/validation.jsonl +3 -0
  24. data/tweet_hate_test1_seed2/test.jsonl +3 -0
  25. data/tweet_hate_test1_seed2/train.jsonl +3 -0
  26. data/tweet_hate_test1_seed2/validation.jsonl +3 -0
  27. data/tweet_hate_test2_seed0/test.jsonl +3 -0
  28. data/tweet_hate_test2_seed0/train.jsonl +3 -0
  29. data/tweet_hate_test2_seed0/validation.jsonl +3 -0
  30. data/tweet_hate_test2_seed1/test.jsonl +3 -0
  31. data/tweet_hate_test2_seed1/train.jsonl +3 -0
  32. data/tweet_hate_test2_seed1/validation.jsonl +3 -0
  33. data/tweet_hate_test2_seed2/test.jsonl +3 -0
  34. data/tweet_hate_test2_seed2/train.jsonl +3 -0
  35. data/tweet_hate_test2_seed2/validation.jsonl +3 -0
  36. data/tweet_hate_test3_seed0/test.jsonl +3 -0
  37. data/tweet_hate_test3_seed0/train.jsonl +3 -0
  38. data/tweet_hate_test3_seed0/validation.jsonl +3 -0
  39. data/tweet_hate_test3_seed1/test.jsonl +3 -0
  40. data/tweet_hate_test3_seed1/train.jsonl +3 -0
  41. data/tweet_hate_test3_seed1/validation.jsonl +3 -0
  42. data/tweet_hate_test3_seed2/test.jsonl +3 -0
  43. data/tweet_hate_test3_seed2/train.jsonl +3 -0
  44. data/tweet_hate_test3_seed2/validation.jsonl +3 -0
  45. experiments/analysis/topic_2.csv +0 -0
  46. experiments/analysis_prediction_topic.py +79 -0
  47. process/tweet_emoji.py +81 -0
  48. process/tweet_hate.py +51 -0
  49. process/tweet_nerd.py +8 -8
  50. process/tweet_sentiment_small.py +0 -1
.gitattributes CHANGED
@@ -515,3 +515,46 @@ data/tweet_sentiment_small/test_1.jsonl filter=lfs diff=lfs merge=lfs -text
 data/tweet_sentiment_small/test_2.jsonl filter=lfs diff=lfs merge=lfs -text
 data/tweet_sentiment_small/test_3.jsonl filter=lfs diff=lfs merge=lfs -text
 data/tweet_sentiment_small/test_4.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate/train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test2_seed0/test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test2_seed0/train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test2_seed2/validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test3_seed1/test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test3_seed2/train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate/test_1.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test0_seed1/validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test0_seed0/test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test0_seed2/test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test0_seed2/train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test1_seed0/test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test1_seed2/test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test0_seed0/validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test1_seed0/validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test1_seed1/train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test2_seed2/train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test3_seed0/validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test3_seed2/test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate/test_4.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate/validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test0_seed0/train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test0_seed2/validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test1_seed0/train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test1_seed1/validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test2_seed0/validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test2_seed1/validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate/test_2.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test0_seed1/test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test0_seed1/train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test2_seed1/train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test3_seed0/test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test3_seed1/validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test3_seed2/validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate/test_3.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test2_seed1/test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test3_seed0/train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate/test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test1_seed1/test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test1_seed2/train.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test1_seed2/validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test2_seed2/test.jsonl filter=lfs diff=lfs merge=lfs -text
+data/tweet_hate_test3_seed1/train.jsonl filter=lfs diff=lfs merge=lfs -text
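These rules route every new tweet_hate JSONL file through Git LFS, so a plain clone holds only three-line pointer stubs (version, oid, size), as the file diffs below show. A minimal sketch, not part of this commit, for checking whether a file has been materialized before trying to parse it:

```python
# Hedged helper: detect whether a file is still a Git LFS pointer stub
# rather than the real JSONL payload. Pointer files begin with the spec
# line recorded in the diffs below.
def is_lfs_pointer(path: str) -> bool:
    with open(path, "rb") as f:
        return f.read(32).startswith(b"version https://git-lfs")

print(is_lfs_pointer("data/tweet_hate/train.jsonl"))  # True until LFS objects are fetched
```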
data/tweet_hate/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43b9d4a9db852d8da4e299981096a83557cbbe28dde9184e781e89ea86008fb7
+size 1068405
data/tweet_hate/test_1.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95ed6597a73c19371997a316d75cd864944c3c407cca334e7d791d8392510cf7
+size 267275
data/tweet_hate/test_2.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fc150221e8da1c8375a4c02040a0a116847df2784e435927406236775e04d8a
+size 269030
data/tweet_hate/test_3.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3222984613f3776a1b7238551b150623f1949e6ecf427810d0a0c452f0380b4
+size 263958
data/tweet_hate/test_4.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89b7a3ef3b33df6cf9f852548cd1e5f17feb2f06583cbcebe2e4393065feed1a
+size 268139
data/tweet_hate/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c18e35554c1a640ac1e8a9a715288153146267d7f6cd8f5b73c6e9ddaa7ed7e
+size 815849
data/tweet_hate/validation.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3834b8a9257daf7a0c3452ca30e9743727d227cd073497a9effaa21941730b36
+size 203960
data/tweet_hate_test0_seed0/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95ed6597a73c19371997a316d75cd864944c3c407cca334e7d791d8392510cf7
+size 267275
data/tweet_hate_test0_seed0/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:174544a9026d4119a2e7022617eb9c5d5e2ca0960e49ea4f211015a7a547f30d
+size 840880
data/tweet_hate_test0_seed0/validation.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a399256c2a55aea7f884b704ad8a7aab2720318f6e536f717d6e117466fa0538
+size 210897
data/tweet_hate_test0_seed1/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95ed6597a73c19371997a316d75cd864944c3c407cca334e7d791d8392510cf7
+size 267275
data/tweet_hate_test0_seed1/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2484567154aa9abbe66e77f936af53879b71136b2a4dcacd8343e77aeb3b8061
+size 840660
data/tweet_hate_test0_seed1/validation.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f9bfaa47f0d1e4fab93a2af581257e2e9df56347f67315981288ce77172b9a0
+size 209114
data/tweet_hate_test0_seed2/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95ed6597a73c19371997a316d75cd864944c3c407cca334e7d791d8392510cf7
+size 267275
data/tweet_hate_test0_seed2/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd7f623f6204d08e70cfa4ada9d15aa53448c22ab0999f2dc661b9772d5158b1
+size 841027
data/tweet_hate_test0_seed2/validation.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:351806667abfb3801f417e8b2f932d137302853fa4146fa2df59388782a82c74
+size 211196
data/tweet_hate_test1_seed0/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fc150221e8da1c8375a4c02040a0a116847df2784e435927406236775e04d8a
+size 269030
data/tweet_hate_test1_seed0/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6017139e0b986dd0ef2457dc0bdfa6f4ab1f22b3969fa65170331d97e9c5539
+size 839166
data/tweet_hate_test1_seed0/validation.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f00239f7d2efb1b426cde73b151ef81b4b6c0275275a3e85d19c817d5395259
+size 209502
data/tweet_hate_test1_seed1/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fc150221e8da1c8375a4c02040a0a116847df2784e435927406236775e04d8a
+size 269030
data/tweet_hate_test1_seed1/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ac2f0fa53e488333c4bac8aecbf61ffea86159075a109d920d52129e6ac70bea
+size 838927
data/tweet_hate_test1_seed1/validation.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f455382ee8ba826d0ba941a3a25836f6694d1ec194f07962b17e1727c1dde228
+size 209959
data/tweet_hate_test1_seed2/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fc150221e8da1c8375a4c02040a0a116847df2784e435927406236775e04d8a
+size 269030
data/tweet_hate_test1_seed2/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1d5c7efa5db8ab2ecd6131c6958f4f0a41cdb5806808d45de34dc753c697723
+size 841009
data/tweet_hate_test1_seed2/validation.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43ad0ede79283aae0c0c58116e45503618ac11a475e39729e9d19c88aee243f0
+size 210602
data/tweet_hate_test2_seed0/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3222984613f3776a1b7238551b150623f1949e6ecf427810d0a0c452f0380b4
+size 263958
data/tweet_hate_test2_seed0/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6c3413462ad64727c505eb8cb6572671aa5b05ea2140a0e227c5d564a22d4bbe
+size 844785
data/tweet_hate_test2_seed0/validation.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7aed7f2e55776d1e59a870387a868575168016683c7b8c1d3cc1669786b822c2
+size 211090
data/tweet_hate_test2_seed1/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3222984613f3776a1b7238551b150623f1949e6ecf427810d0a0c452f0380b4
+size 263958
data/tweet_hate_test2_seed1/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1761a32da30f2a6cfa17995bcbc5f5f131fb94ae85b020d455c31236825c1610
+size 838534
data/tweet_hate_test2_seed1/validation.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df48ca1357eacb231bcc2e3bb77509e3b41c7f7e513eb4c301b50728f40b068b
+size 212723
data/tweet_hate_test2_seed2/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3222984613f3776a1b7238551b150623f1949e6ecf427810d0a0c452f0380b4
+size 263958
data/tweet_hate_test2_seed2/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a2f2207f3bcb451d067743f16866693524bfc2bb16bdbaab813c5ce106f3b40
+size 841369
data/tweet_hate_test2_seed2/validation.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a93b57db080ef7cb71be8606b07f5cf9b5431f16b5a0ae52d4387af13b330c2c
+size 211008
data/tweet_hate_test3_seed0/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89b7a3ef3b33df6cf9f852548cd1e5f17feb2f06583cbcebe2e4393065feed1a
+size 268139
data/tweet_hate_test3_seed0/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:216a53acac01a66030236282738da43adb48ac26935eee415fd3a1e678936316
+size 841157
data/tweet_hate_test3_seed0/validation.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6bc31bfcbcc0f0f3d6212c98a22ff142909ee58fe736b63eda1e08c72872a15d
+size 213049
data/tweet_hate_test3_seed1/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89b7a3ef3b33df6cf9f852548cd1e5f17feb2f06583cbcebe2e4393065feed1a
+size 268139
data/tweet_hate_test3_seed1/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c47d6e191f32f757bcc6f58ba89e1ff9adb3d24eeaf192a596c5c86e5f1f2dd3
+size 840179
data/tweet_hate_test3_seed1/validation.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:864db3e45cd644e2f552488dc06a76ad973533ba04eddaf9cb5d87cab1ab5c07
+size 210892
data/tweet_hate_test3_seed2/test.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89b7a3ef3b33df6cf9f852548cd1e5f17feb2f06583cbcebe2e4393065feed1a
+size 268139
data/tweet_hate_test3_seed2/train.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd43253ba78ce975eceb84a130b13b39d6192794257941a3b90f78741ba680d5
+size 842557
data/tweet_hate_test3_seed2/validation.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:78fddd1e3e29b93c91889b8f6e5b48077e593ca9c6bf1ff2c267c9215f7f6369
+size 211761
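All 43 files above are LFS pointers; once the objects are fetched, each split is plain JSONL with one record per line. A minimal loading sketch, mirroring how the process/ scripts in this commit read these files:

```python
import json

def read_jsonl(path: str) -> list:
    # One JSON record per line; skip blank lines, as the process/ scripts do.
    with open(path) as f:
        return [json.loads(line) for line in f if line.strip()]

train = read_jsonl("data/tweet_hate/train.jsonl")
print(len(train))
```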
experiments/analysis/topic_2.csv ADDED
The diff for this file is too large to render. See raw diff
 
experiments/analysis_prediction_topic.py ADDED
@@ -0,0 +1,79 @@
+import os
+import json
+
+import numpy as np
+import pandas as pd
+from datasets import load_dataset
+
+os.makedirs("experiments/analysis_topic", exist_ok=True)
+root_dir = "experiments/prediction_files"
+id_to_label = {
+    '0': 'arts_&_culture',
+    '1': 'business_&_entrepreneurs',
+    '2': 'celebrity_&_pop_culture',
+    '3': 'diaries_&_daily_life',
+    '4': 'family',
+    '5': 'fashion_&_style',
+    '6': 'film_tv_&_video',
+    '7': 'fitness_&_health',
+    '8': 'food_&_dining',
+    '9': 'gaming',
+    '10': 'learning_&_educational',
+    '11': 'music',
+    '12': 'news_&_social_concern',
+    '13': 'other_hobbies',
+    '14': 'relationships',
+    '15': 'science_&_technology',
+    '16': 'sports',
+    '17': 'travel_&_adventure',
+    '18': 'youth_&_student_life'
+}
+splits = ["test_1", "test_2", "test_3", "test_4"]
+model_list = [
+    "roberta-base",
+    "bertweet-base",
+    "bernice",
+    "roberta-large",
+    "bertweet-large",
+    "twitter-roberta-base-2019-90m",
+    "twitter-roberta-base-dec2020",
+    "twitter-roberta-base-2021-124m",
+    "twitter-roberta-base-2022-154m",
+    "twitter-roberta-large-2022-154m"
+]
+references = {}
+for s in splits:
+    data = load_dataset("tweettemposhift/tweet_temporal_shift", "topic_temporal", split=s)
+    references[s] = [{id_to_label[str(n)] for n, k in enumerate(i) if k == 1} for i in data['gold_label_list']]
+
+count = {}
+pred_tmp = {}
+for model_m in model_list:
+    flags = []
+    pred_all = []
+    for s in splits:
+        with open(f"{root_dir}/topic-topic_temporal-{model_m}/{s}.jsonl") as f:
+            pred = [set(json.loads(i)["label"]) for i in f.read().split('\n') if len(i)]
+        flags += [len(a.intersection(b)) > 0 for a, b in zip(references[s], pred)]
+        pred_all += pred
+    for seed_s in range(3):
+        flags_rand = []
+        for random_r in range(4):
+            with open(f"{root_dir}/topic-topic_random{random_r}_seed{seed_s}-{model_m}/test_{random_r + 1}.jsonl") as f:
+                pred = [set(json.loads(i)["label"]) for i in f.read().split('\n') if len(i)]
+            label = references[f"test_{random_r + 1}"]
+            flags_rand += [len(a.intersection(b)) > 0 for a, b in zip(label, pred)]
+        tmp_flag = [not x and y for x, y in zip(flags, flags_rand)]
+        count[f"{model_m}_{seed_s}"] = tmp_flag
+        pred_tmp[f"{model_m}_{seed_s}"] = [list(x) if y else [] for x, y in zip(pred_all, tmp_flag)]
+
+df_tmp = pd.DataFrame([[dict(zip(*np.unique(i, return_counts=True))) for i in pd.DataFrame(pred_tmp).sum(1).values]], index=["errors"]).T
+df_tmp["error_count"] = pd.DataFrame(count).sum(1).values
+gold_label = []
+text = []
+for s in splits:
+    gold_label += load_dataset("tweettemposhift/tweet_temporal_shift", "topic_temporal", split=s)['gold_label_list']
+    text += load_dataset("tweettemposhift/tweet_temporal_shift", "topic_temporal", split=s)['text']
+df_tmp["true_label"] = [", ".join([id_to_label[str(n)] for n, k in enumerate(i) if k == 1]) for i in gold_label]
+df_tmp["text"] = text
+df_tmp.sort_values("error_count", ascending=False).to_csv("experiments/analysis/topic_2.csv")
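The script appears to flag examples that a model classifies correctly under a random resampling but incorrectly under the temporal split, writing them to experiments/analysis/topic_2.csv sorted by how many model/seed pairs made that error. A hedged sketch for inspecting the output, with column names taken from the script above:

```python
import pandas as pd

# "error_count" = number of model/seed pairs failing only on the temporal split;
# "true_label" and "text" are joined in by the script above.
df = pd.read_csv("experiments/analysis/topic_2.csv", index_col=0)
print(df[["error_count", "true_label", "text"]].head())
```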
process/tweet_emoji.py ADDED
@@ -0,0 +1,81 @@
+# TODO
+import json
+import os
+from random import shuffle, seed
+
+
+with open("data/tweet_sentiment/test.jsonl") as f:
+    test = [json.loads(i) for i in f if len(i)]
+with open("data/tweet_sentiment/test_1.jsonl") as f:
+    test_1 = [json.loads(i) for i in f if len(i)]
+with open("data/tweet_sentiment/test_2.jsonl") as f:
+    test_2 = [json.loads(i) for i in f if len(i)]
+with open("data/tweet_sentiment/test_3.jsonl") as f:
+    test_3 = [json.loads(i) for i in f if len(i)]
+with open("data/tweet_sentiment/test_4.jsonl") as f:
+    test_4 = [json.loads(i) for i in f if len(i)]
+with open("data/tweet_sentiment/train.jsonl") as f:
+    train = [json.loads(i) for i in f if len(i)]
+with open("data/tweet_sentiment/validation.jsonl") as f:
+    validation = [json.loads(i) for i in f if len(i)]
+
+
+os.makedirs("data/tweet_sentiment_small", exist_ok=True)
+with open("data/tweet_sentiment_small/test.jsonl", "w") as f:
+    f.write("\n".join([json.dumps(i) for i in test]))
+
+with open("data/tweet_sentiment_small/test_1.jsonl", "w") as f:
+    f.write("\n".join([json.dumps(i) for i in test_1]))
+with open("data/tweet_sentiment_small/test_2.jsonl", "w") as f:
+    f.write("\n".join([json.dumps(i) for i in test_2]))
+with open("data/tweet_sentiment_small/test_3.jsonl", "w") as f:
+    f.write("\n".join([json.dumps(i) for i in test_3]))
+with open("data/tweet_sentiment_small/test_4.jsonl", "w") as f:
+    f.write("\n".join([json.dumps(i) for i in test_4]))
+
+with open("data/tweet_sentiment_small/validation.jsonl", "w") as f:
+    f.write("\n".join([json.dumps(i) for i in validation]))
+
+# down sample training set
+n_train_p = 2500
+n_train_n = 2500
+seed(123)
+shuffle(train)
+train_p = [i for i in train if i["gold_label_binary"] == 0][:n_train_p]
+train_n = [i for i in train if i["gold_label_binary"] == 1][:n_train_n]
+train = train_p + train_n
+shuffle(train)
+with open("data/tweet_sentiment_small/train.jsonl", "w") as f:
+    f.write("\n".join([json.dumps(i) for i in train]))
+
+
+n_train = len(train)
+n_validation = len(validation)
+n_test = int(len(test)/4)
+
+def sampler(dataset_test, r_seed):
+    seed(r_seed)
+    shuffle(dataset_test)
+    shuffle(train)
+    shuffle(validation)
+    test_tr = dataset_test[:int(n_train / 2)]
+    test_vl = dataset_test[int(n_train / 2): int(n_train / 2) + int(n_validation / 2)]
+    new_train = test_tr + train[:n_train - len(test_tr)]
+    new_validation = test_vl + validation[:n_validation - len(test_vl)]
+    return new_train, new_validation
+
+id2test = {n: t for n, t in enumerate([test_1, test_2, test_3, test_4])}
+for n, _test in enumerate([
+        test_4 + test_2 + test_3,
+        test_1 + test_4 + test_3,
+        test_1 + test_2 + test_4,
+        test_1 + test_2 + test_3]):
+    for s in range(3):
+        os.makedirs(f"data/tweet_sentiment_small_test{n}_seed{s}", exist_ok=True)
+        _train, _valid = sampler(_test, s)
+        with open(f"data/tweet_sentiment_small_test{n}_seed{s}/train.jsonl", "w") as f:
+            f.write("\n".join([json.dumps(i) for i in _train]))
+        with open(f"data/tweet_sentiment_small_test{n}_seed{s}/validation.jsonl", "w") as f:
+            f.write("\n".join([json.dumps(i) for i in _valid]))
+        with open(f"data/tweet_sentiment_small_test{n}_seed{s}/test.jsonl", "w") as f:
+            f.write("\n".join([json.dumps(i) for i in id2test[n]]))
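This script downsamples the training set to 2,500 examples per binary label before building the seeded splits. A quick hedged check of the resulting balance, assuming gold_label_binary takes the 0/1 values filtered on above:

```python
import json
from collections import Counter

# Count labels in the downsampled train split written by the script above.
with open("data/tweet_sentiment_small/train.jsonl") as f:
    labels = Counter(json.loads(line)["gold_label_binary"] for line in f if line.strip())
print(labels)  # expected: Counter({0: 2500, 1: 2500})
```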
process/tweet_hate.py ADDED
@@ -0,0 +1,51 @@
+import json
+import os
+from random import shuffle, seed
+
+
+with open("data/tweet_hate/test.jsonl") as f:
+    test = [json.loads(i) for i in f if len(i)]
+with open("data/tweet_hate/test_1.jsonl") as f:
+    test_1 = [json.loads(i) for i in f if len(i)]
+with open("data/tweet_hate/test_2.jsonl") as f:
+    test_2 = [json.loads(i) for i in f if len(i)]
+with open("data/tweet_hate/test_3.jsonl") as f:
+    test_3 = [json.loads(i) for i in f if len(i)]
+with open("data/tweet_hate/test_4.jsonl") as f:
+    test_4 = [json.loads(i) for i in f if len(i)]
+with open("data/tweet_hate/train.jsonl") as f:
+    train = [json.loads(i) for i in f if len(i)]
+with open("data/tweet_hate/validation.jsonl") as f:
+    validation = [json.loads(i) for i in f if len(i)]
+
+n_train = len(train)
+n_validation = len(validation)
+n_test = int(len(test)/4)
+
+
+def sampler(dataset_test, r_seed):
+    seed(r_seed)
+    shuffle(dataset_test)
+    shuffle(train)
+    shuffle(validation)
+    test_tr = dataset_test[:int(n_train / 2)]
+    test_vl = dataset_test[int(n_train / 2): int(n_train / 2) + int(n_validation / 2)]
+    new_train = test_tr + train[:n_train - len(test_tr)]
+    new_validation = test_vl + validation[:n_validation - len(test_vl)]
+    return new_train, new_validation
+
+id2test = {n: t for n, t in enumerate([test_1, test_2, test_3, test_4])}
+for n, _test in enumerate([
+        test_4 + test_2 + test_3,
+        test_1 + test_4 + test_3,
+        test_1 + test_2 + test_4,
+        test_1 + test_2 + test_3]):
+    for s in range(3):
+        os.makedirs(f"data/tweet_hate_test{n}_seed{s}", exist_ok=True)
+        _train, _valid = sampler(_test, s)
+        with open(f"data/tweet_hate_test{n}_seed{s}/train.jsonl", "w") as f:
+            f.write("\n".join([json.dumps(i) for i in _train]))
+        with open(f"data/tweet_hate_test{n}_seed{s}/validation.jsonl", "w") as f:
+            f.write("\n".join([json.dumps(i) for i in _valid]))
+        with open(f"data/tweet_hate_test{n}_seed{s}/test.jsonl", "w") as f:
+            f.write("\n".join([json.dumps(i) for i in id2test[n]]))
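The sampler swaps the first half of train and validation for temporally shifted test examples while preserving split sizes: len(new_train) should always equal n_train, since test_tr contributes at most n_train/2 items and the remainder is topped up from the original train. A hedged sanity check, not part of the commit, over the twelve generated directories:

```python
# Every resampled split should match the size of the corresponding original split.
def n_records(path: str) -> int:
    with open(path) as f:
        return sum(1 for line in f if line.strip())

orig_train = n_records("data/tweet_hate/train.jsonl")
for n in range(4):
    for s in range(3):
        assert n_records(f"data/tweet_hate_test{n}_seed{s}/train.jsonl") == orig_train
```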
process/tweet_nerd.py CHANGED
@@ -48,20 +48,20 @@ test_1 = test[:n_test]
 test_2 = test[n_test:n_test*2]
 test_3 = test[n_test*2:n_test*3]
 test_4 = test[n_test*3:]
-os.makedirs("data/tweet_nerd_new", exist_ok=True)
-with open("data/tweet_nerd_new/test.jsonl", "w") as f:
+os.makedirs("data/tweet_hate", exist_ok=True)
+with open("data/tweet_hate/test.jsonl", "w") as f:
     f.write("\n".join([json.dumps(i) for i in test]))
-with open("data/tweet_nerd_new/test_1.jsonl", "w") as f:
+with open("data/tweet_hate/test_1.jsonl", "w") as f:
     f.write("\n".join([json.dumps(i) for i in test_1]))
-with open("data/tweet_nerd_new/test_2.jsonl", "w") as f:
+with open("data/tweet_hate/test_2.jsonl", "w") as f:
     f.write("\n".join([json.dumps(i) for i in test_2]))
-with open("data/tweet_nerd_new/test_3.jsonl", "w") as f:
+with open("data/tweet_hate/test_3.jsonl", "w") as f:
     f.write("\n".join([json.dumps(i) for i in test_3]))
-with open("data/tweet_nerd_new/test_4.jsonl", "w") as f:
+with open("data/tweet_hate/test_4.jsonl", "w") as f:
     f.write("\n".join([json.dumps(i) for i in test_4]))
-with open("data/tweet_nerd_new/train.jsonl", "w") as f:
+with open("data/tweet_hate/train.jsonl", "w") as f:
     f.write("\n".join([json.dumps(i) for i in train]))
-with open("data/tweet_nerd_new/validation.jsonl", "w") as f:
+with open("data/tweet_hate/validation.jsonl", "w") as f:
     f.write("\n".join([json.dumps(i) for i in valid]))
process/tweet_sentiment_small.py CHANGED
@@ -1,4 +1,3 @@
-# TODO
 import json
 import os
 from random import shuffle, seed