asahi417 committed
Commit 7784c61 · 1 Parent(s): bc6fa4c
Files changed (3):
  1. experiments/main.sh +5 -5
  2. process/tweet_nerd.py +5 -0
  3. statistics.py +5 -3
experiments/main.sh CHANGED
@@ -12,16 +12,16 @@ MODEL="jhu-clsp/bernice"
 
 
 # NER
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_temporal"
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_temporal" --skip-train --skip-test
 python model_finetuning_ner.py -m "${MODEL}" -d "ner_random0_seed0"
 python model_finetuning_ner.py -m "${MODEL}" -d "ner_random1_seed0"
 python model_finetuning_ner.py -m "${MODEL}" -d "ner_random2_seed0"
 python model_finetuning_ner.py -m "${MODEL}" -d "ner_random3_seed0"
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random0_seed1"
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random1_seed1"
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random2_seed1"
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_random0_seed1" --skip-train --skip-test
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_random1_seed1" --skip-train --skip-test
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_random2_seed1" --skip-train --skip-test
 python model_finetuning_ner.py -m "${MODEL}" -d "ner_random3_seed1"
-python model_finetuning_ner.py -m "${MODEL}" -d "ner_random0_seed2"
+python model_finetuning_ner.py -m "${MODEL}" -d "ner_random0_seed2" --skip-train --skip-test
 python model_finetuning_ner.py -m "${MODEL}" -d "ner_random1_seed2"
 python model_finetuning_ner.py -m "${MODEL}" -d "ner_random2_seed2"
 python model_finetuning_ner.py -m "${MODEL}" -d "ner_random3_seed2"
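The added --skip-train --skip-test flags rerun only part of the pipeline for configs whose fine-tuning presumably already finished. The argument parser of model_finetuning_ner.py is not part of this commit, so the sketch below is a hypothetical reading of how such flags are commonly wired with argparse; everything beyond -m, -d, and the two flag names is an assumption.

# Hypothetical sketch of the CLI consumed by experiments/main.sh.
# Only -m, -d, --skip-train and --skip-test appear in the script above;
# all other names and the control flow are assumptions.
import argparse

parser = argparse.ArgumentParser(description="Fine-tune and evaluate a model on one dataset config.")
parser.add_argument("-m", "--model", required=True)        # e.g. jhu-clsp/bernice
parser.add_argument("-d", "--dataset", required=True)      # e.g. ner_random0_seed1
parser.add_argument("--skip-train", action="store_true")   # reuse an existing checkpoint
parser.add_argument("--skip-test", action="store_true")    # skip the main test evaluation
args = parser.parse_args()

if not args.skip_train:
    print(f"fine-tuning {args.model} on {args.dataset}")       # placeholder for training
if not args.skip_test:
    print(f"evaluating {args.model} on {args.dataset} test")   # placeholder for evaluation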
process/tweet_nerd.py CHANGED
@@ -23,8 +23,13 @@ while True:
     if dist_date[:n].sum() > total_n/2:
         break
 split_date = dist_date.index[n]
+input(split_date)
+
 train = df[df["date_dt"] <= split_date]
 test = df[df["date_dt"] > split_date]
+print(train.date_dt.min(), train.date_dt.max())
+print(test.date_dt.min(), test.date_dt.max())
+input()
 train.pop("date_dt")
 test.pop("date_dt")
 train = list(train.T.to_dict().values())
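For context on the code being instrumented: the loop walks the per-date tweet counts until the cumulative count passes half the corpus, then splits train/test at that date. A minimal self-contained sketch on synthetic data, assuming dist_date is a date-sorted count series and n starts at zero (both are built before this hunk, so their construction here is an assumption):

# Sketch of the median-date split instrumented above, on synthetic data.
import pandas as pd

df = pd.DataFrame({
    "date_dt": pd.to_datetime(
        ["2020-01-01", "2020-01-01", "2020-01-02", "2020-01-03", "2020-01-04"]
    ),
    "text": ["a", "b", "c", "d", "e"],
})
dist_date = df["date_dt"].value_counts().sort_index()  # tweets per date, ascending
total_n = len(df)

n = 0
while True:
    n += 1
    if dist_date[:n].sum() > total_n / 2:  # integer slice on a Series is positional here
        break
split_date = dist_date.index[n]  # first date past the halfway point

train = df[df["date_dt"] <= split_date]
test = df[df["date_dt"] > split_date]
print(train.date_dt.min(), train.date_dt.max())  # 2020-01-01 ... 2020-01-03
print(test.date_dt.min(), test.date_dt.max())    # 2020-01-04 ... 2020-01-04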
statistics.py CHANGED
@@ -5,23 +5,25 @@ from transformers import AutoTokenizer
 
 tokenizer = AutoTokenizer.from_pretrained("roberta-base")
 stats = []
-for i in ["topic_temporal", "nerd_temporal", "ner_temporal"]:
+for i in ["nerd_temporal", "ner_temporal", "topic_temporal"]:
     # for s in ["train", "validation", "test", "test_1", "test_2", "test_3", "test_4"]:
     for s in ["train", "validation", "test"]:
-        dataset = load_dataset("tweettemposhift/tweet_temporal_shift", i, split=s, download_mode="force_redownload")
+        dataset = load_dataset("tweettemposhift/tweet_temporal_shift", i, split=s)
         df = dataset.to_pandas()
         if i != "nerd_temporal":
             token_length = [len(tokenizer.tokenize(t)) for t in dataset['text']]
         else:
             token_length = [len(tokenizer.tokenize(f"{d['target']} {tokenizer.sep_token} {d['definition']} {tokenizer.sep_token} {d['text']}")) for d in dataset]
         token_length_in = [i for i in token_length if i <= 126]
+        date = pd.to_datetime(df.date).sort_values().values
         stats.append({
            "data": i,
            "split": s,
            "size": len(dataset),
            "size (token length < 128)": len(token_length_in),
            "mean_token_length": sum(token_length)/len(token_length),
+           "date": f'{str(date[0]).split(" ")[0]} / {str(date[-1]).split(" ")[0]}',
         })
+    break
 df = pd.DataFrame(stats)
 print(df)
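The date range is now taken from the sorted numpy values rather than from min()/max() Timestamps. One behavioral note: str() on a pandas Timestamp separates date and time with a space ("2020-01-01 00:00:00"), which is what the removed split(" ") relied on, while str() on a numpy datetime64[ns] uses "T" ("2020-01-01T00:00:00.000000000"), so the committed split(" ")[0] likely returns the full timestamp string. A minimal sketch of the same computation on synthetic data, splitting on "T" instead:

# Sketch of the date-range string built in statistics.py, on synthetic data.
import pandas as pd

df = pd.DataFrame({"date": ["2020-03-01", "2019-12-31", "2021-07-15"]})
date = pd.to_datetime(df.date).sort_values().values  # datetime64[ns], ascending

# A numpy datetime64 stringifies as "2019-12-31T00:00:00.000000000",
# so split on "T" to keep only the calendar date.
span = f'{str(date[0]).split("T")[0]} / {str(date[-1]).split("T")[0]}'
print(span)  # 2019-12-31 / 2021-07-15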