asahi417 committed
Commit cc76dca · 1 Parent(s): f504e2e
.gitattributes CHANGED
@@ -511,3 +511,7 @@ data/tweet_sentiment_small_test2_seed1/train.jsonl filter=lfs diff=lfs merge=lfs -text
  data/tweet_sentiment_small_test2_seed1/validation.jsonl filter=lfs diff=lfs merge=lfs -text
  data/tweet_sentiment_small_test3_seed0/validation.jsonl filter=lfs diff=lfs merge=lfs -text
  data/tweet_sentiment_small_test3_seed1/train.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/tweet_sentiment_small/test_1.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/tweet_sentiment_small/test_2.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/tweet_sentiment_small/test_3.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/tweet_sentiment_small/test_4.jsonl filter=lfs diff=lfs merge=lfs -text
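The four new entries extend the Git LFS tracking rules so the added test splits are stored as LFS objects instead of plain text. A minimal sketch, purely illustrative and not part of the commit, that checks the new paths are covered when run from the repository root:

# Illustrative check only: confirm the new test splits appear among the LFS rules.
new_files = [f"data/tweet_sentiment_small/test_{k}.jsonl" for k in range(1, 5)]
with open(".gitattributes") as f:
    tracked = {line.split()[0] for line in f if "filter=lfs" in line}
print(all(path in tracked for path in new_files))  # expected: True after this commit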
data/tweet_sentiment_small/test_1.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09b915a486b09ab99730b2177ed285467e9db60f2c782768411d241d28f50a21
+ size 47187

data/tweet_sentiment_small/test_2.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1bf7c917640a83060e28a6450999bca55da3fead2a522993c1c721380700ffc3
+ size 47265

data/tweet_sentiment_small/test_3.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26bdf025b4e171101f303be4dcead3e75e5e7461124132abaab9ed7bdc6ac3c7
+ size 47824

data/tweet_sentiment_small/test_4.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:395be43c04fa28853568e1c0771e450e0aac1dfaf55b986ed77cde76662fb51e
+ size 46171
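Each ADDED file above is committed as a Git LFS pointer (version, oid, size), not as the JSONL data itself; the real content is fetched by git-lfs on checkout. A small sketch of parsing that pointer format, shown only for illustration and assuming the pointer text (not the resolved data) is what sits on disk:

# Parse a Git LFS pointer file into its version / oid / size fields.
# Assumes the file still holds the pointer text, i.e. the LFS object
# has not yet been smudged into the real JSONL content.
def read_lfs_pointer(path):
    meta = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                meta[key] = value
    return meta

print(read_lfs_pointer("data/tweet_sentiment_small/test_1.jsonl"))
# e.g. {'version': 'https://git-lfs.github.com/spec/v1', 'oid': 'sha256:09b9...', 'size': '47187'}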
process/tweet_sentiment_small.py CHANGED
@@ -23,6 +23,16 @@ with open("data/tweet_sentiment/validation.jsonl") as f:
  os.makedirs(f"data/tweet_sentiment_small", exist_ok=True)
  with open(f"data/tweet_sentiment_small/test.jsonl", "w") as f:
      f.write("\n".join([json.dumps(i) for i in test]))
+
+ with open(f"data/tweet_sentiment_small/test_1.jsonl", "w") as f:
+     f.write("\n".join([json.dumps(i) for i in test_1]))
+ with open(f"data/tweet_sentiment_small/test_2.jsonl", "w") as f:
+     f.write("\n".join([json.dumps(i) for i in test_2]))
+ with open(f"data/tweet_sentiment_small/test_3.jsonl", "w") as f:
+     f.write("\n".join([json.dumps(i) for i in test_3]))
+ with open(f"data/tweet_sentiment_small/test_4.jsonl", "w") as f:
+     f.write("\n".join([json.dumps(i) for i in test_4]))
+
  with open(f"data/tweet_sentiment_small/validation.jsonl", "w") as f:
      f.write("\n".join([json.dumps(i) for i in validation]))
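The hunk writes test_1 through test_4 but ends before showing how those lists are built. One plausible construction, given only as an assumption about the surrounding script (the names test_1 ... test_4 come from the diff, the split logic does not):

# Hypothetical split of the held-out test list into four roughly equal chunks;
# not taken from this commit, shown only to make the new writes concrete.
n = len(test) // 4
test_1 = test[:n]
test_2 = test[n:2 * n]
test_3 = test[2 * n:3 * n]
test_4 = test[3 * n:]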
 
statistics.py CHANGED
@@ -5,7 +5,7 @@ from transformers import AutoTokenizer

  tokenizer = AutoTokenizer.from_pretrained("roberta-base")
  stats = []
- for i in ["nerd_temporal", "ner_temporal", "topic_temporal", "sentiment_temporal"]:
+ for i in ["nerd_temporal", "ner_temporal", "topic_temporal", "sentiment_small_temporal"]:
      for s in ["train", "validation", "test"]:
          dataset = load_dataset("tweettemposhift/tweet_temporal_shift", i, split=s)
          df = dataset.to_pandas()
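The hunk stops at df = dataset.to_pandas(), so the statistic actually collected is not visible here. A hedged sketch of the kind of per-split summary the loop body plausibly appends (the "text" column name and the token-count metric are assumptions, not shown in this diff):

# Assumed continuation of the loop body: token-length statistics per split.
lengths = [len(tokenizer(t)["input_ids"]) for t in df["text"]]
stats.append({
    "config": i,
    "split": s,
    "size": len(df),
    "mean_tokens": sum(lengths) / len(lengths),
})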