rain1024 committed
Commit 32de449 · 1 Parent(s): 7de6350
Files changed (4)
  1. README.md +1 -1
  2. data/UTS_Text_v1.txt +0 -0
  3. eval.py +18 -0
  4. generate_dataset.py +12 -6
README.md CHANGED
@@ -39,7 +39,7 @@ task_categories:
 
  The UTS_Text_v1 dataset is a collection of 10,000 sentences sourced from various news articles.
 
- Out of the 10,000 sentences in the dataset, 5,000 sentences have a length greater than 50, while the other 5,000 sentences have a length ranging from 20 to 50. This distribution of sentence lengths provides a diverse range of text samples that can be used to train and test natural language processing models.
+ Out of the 10,000 sentences in the dataset, 5,000 sentences have a length ranging from 50 to 150, while the other 5,000 sentences have a length ranging from 20 to 50. This distribution of sentence lengths provides a diverse range of text samples that can be used to train and test natural language processing models.
 
  ### Dataset Summary
 
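The corrected README claim is easy to spot-check against the published data. The following is a minimal verification sketch, not part of this commit; it assumes "length" means character count (as in eval.py's len(s)) and that the band boundary at 50 follows the generator's filter, which puts 50 in the long band.

from datasets import load_dataset

# Count sentences in each length band claimed by the README.
# Assumes length = character count; the boundary at 50 follows the
# generator's len(line) >= NUM_LONG_TOKENS filter (50 falls in the long band).
dataset = load_dataset("undertheseanlp/UTS_Text_v1")
lengths = [len(s) for s in dataset["train"]["text"]]

n_short = sum(20 <= n < 50 for n in lengths)   # "20 to 50" band
n_long = sum(50 <= n <= 150 for n in lengths)  # "50 to 150" band
print(f"short: {n_short}, long: {n_long}, total: {len(lengths)}")

If the sampling in generate_dataset.py behaves as described, both counts should come out to 5,000.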
data/UTS_Text_v1.txt CHANGED
The diff for this file is too large to render. See raw diff
 
eval.py ADDED
@@ -0,0 +1,18 @@
+ from datasets import load_dataset
+ import matplotlib.pyplot as plt
+
+ dataset = load_dataset("undertheseanlp/UTS_Text_v1")
+ sentences = dataset["train"]["text"]
+
+ # compute histogram of sentence lengths with bin size = 10
+ lengths = [len(s) for s in sentences]
+ plt.hist(lengths, bins=range(0, max(lengths) + 10, 10))
+ plt.show()
+
+ # n_sample = 0
+ # for s in sentences:
+ #     if len(s) > 150:
+ #         print(s)
+ #         n_sample += 1
+ #         if n_sample == 10:
+ #             break
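As a usage note: run with the datasets and matplotlib packages installed, eval.py plots the character-length histogram of all 10,000 sentences, which should show the mass confined to the 20-150 range described in the README. The commented-out loop is presumably a leftover debugging aid: it would print up to ten sentences longer than 150 characters, a quick way to confirm that the new NUM_MAX_TOKENS cap in generate_dataset.py took effect.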
generate_dataset.py CHANGED
@@ -3,17 +3,23 @@ from underthesea.file_utils import DATASETS_FOLDER
  import random
 
  random.seed(10)
- # sampling data
  text_file = join(DATASETS_FOLDER, "VNESES", "VNESEScorpus.txt")
  with open(text_file) as f:
      lines = f.read().splitlines()
  NUM_LONG_TOKENS = 50
  NUM_SHORT_TOKENS = 20
- long_lines = [
-     line
-     for line in lines
-     if len(line) >= NUM_LONG_TOKENS and line[0].isupper() and line[-1] == "."
- ]
+ NUM_MAX_TOKENS = 150
+
+
+ def longline_conditions(line):
+     if len(line) < NUM_LONG_TOKENS or len(line) > NUM_MAX_TOKENS:
+         return False
+     if not (line[0].isupper() and line[-1] == "."):
+         return False
+     return True
+
+
+ long_lines = [line for line in lines if longline_conditions(line)]
  # get random 1000 lines
  random_long_lines = random.sample(long_lines, 5000)
  for line in random_long_lines[:20]:
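The hunk ends at the long-line half; the selection of the 5,000 short lines is not shown in this diff. By analogy with longline_conditions, the short half might be selected along these lines (a hypothetical sketch continuing the script above; shortline_conditions and its exact band boundaries are assumptions, not code from this commit):

# Hypothetical counterpart for the short half, assuming the same
# capitalization/punctuation filter and the 20-50 character band.
def shortline_conditions(line):
    if len(line) < NUM_SHORT_TOKENS or len(line) >= NUM_LONG_TOKENS:
        return False
    if not (line[0].isupper() and line[-1] == "."):
        return False
    return True


short_lines = [line for line in lines if shortline_conditions(line)]
random_short_lines = random.sample(short_lines, 5000)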