tmvllrrl committed
Commit 154a40d · verified · 1 Parent(s): 8711427

Upload tvillarr.py

Files changed (1)
  1. tvillarr.py +129 -0
tvillarr.py ADDED
@@ -0,0 +1,129 @@
+ import json, re
+ import gzip
+ import requests
+ import bibtexparser
+
+ from tqdm import tqdm
+ from urlextract import URLExtract
+
+ utid: str = 'tvillarr'
+ base: dict[str, str] = {
+     'model': 'https://huggingface.co/',
+     'data': 'https://huggingface.co/datasets/',
+     'source': 'https://raw.githubusercontent.com/'
+ }
+
+ # Path to README file in a HuggingFace repo
+ hf_readme_path: str = '/raw/main/README.md'
+ # Path to raw README file in a GitHub repo
+ gh_readme_path: str = '/master/README.md'
+ gh_readme_path_alt: str = '/main/README.md'
+
+ extU = URLExtract()
+
+ # Earlier POSIX-style draft, kept for reference ([[:graph:]] is not valid in Python's re):
+ # r'\b(10[.][0-9]{4,}(?:[.][0-9]+)*/(?:(?!["&\'<>])[[:graph:]])+)\b'
+ # Python version; (?i) replaces the trailing '/i' flag, which re does not support
+ DOIpattern = r'(?i)\b(10\.\d{4,9}/[-._;()/:A-Z0-9]+)\b'
+
+ def extract_urls(content: str) -> list:
+     '''
+     Extracts URLs from provided text content
+
+     Args:
+         content (str): text to extract URLs from
+
+     Returns:
+         list: list of URLs found
+     '''
+     res = extU.find_urls(content)
+     return res
+
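+ # Illustrative check (not part of the original commit), assuming URLExtract's
+ # default settings:
+ #   extract_urls('docs at https://example.com and http://arxiv.org/abs/1234.5678')
+ #   -> ['https://example.com', 'http://arxiv.org/abs/1234.5678']
+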
+ def extract_dois(content: str) -> list:
+     '''
+     Extracts DOIs from provided content following the regexp
+
+     Args:
+         content (str): text to extract DOIs from
+
+     Returns:
+         list: list of DOIs found
+     '''
+     res = re.findall(DOIpattern, content)
+     return res
+
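+ # Illustrative check (not part of the original commit): the pattern captures
+ # only the DOI body, even out of a full resolver URL, e.g.
+ #   extract_dois('see https://doi.org/10.1000/xyz123')  ->  ['10.1000/xyz123']
+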
+ def extract_bibs(content: str) -> list:
+     '''
+     Extracts BibTeX entries from the provided content using bibtexparser
+
+     Args:
+         content (str): text to extract BibTeX entries from
+
+     Returns:
+         list: list of BibTeX entries found
+     '''
+     try:
+         b = bibtexparser.loads(content)
+         bibs = b.entries
+     except Exception:
+         bibs = []
+
+     return bibs
+
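+ # Illustrative check (not part of the original commit): with the v1
+ # bibtexparser API used above, entries come back as dicts, roughly
+ #   extract_bibs('@article{key1, title={A Title}, year={2020}}')
+ #   -> [{'ENTRYTYPE': 'article', 'ID': 'key1', 'title': 'A Title', 'year': '2020'}]
+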
+ def run(source_type: str) -> None:
+     with open(f"input/{utid}_{source_type}", 'r') as f:
+         for line in tqdm(f):
+             line = line.strip()
+
+             # Source (GitHub repos)
+             if source_type == "source":
+                 # Splitting the line, as the source file format is num;repo
+                 npapers, line = line.split(';')
+
+                 # Dropping the leading "github.com/" (11 characters) from line
+                 current_url = f'{base[source_type]}{line[11:]}{gh_readme_path}'
+
+             # Model and Data
+             else:
+                 current_url = f'{base[source_type]}{line}{hf_readme_path}'
+
+             r = requests.get(current_url)
+
+             # If the request isn't OK (mainly GitHub repos whose default branch
+             # is main rather than master), retry on the main branch; either way
+             # we fall through and record the response we ended up with
+             if r.status_code != 200 and source_type == "source":
+                 current_url = f'{base[source_type]}{line[11:]}{gh_readme_path_alt}'
+                 r = requests.get(current_url)
+
+             # print(f"Current URL: {current_url}")
+             content = r.text
+
+             urls = extract_urls(content)
+             # print(f"URLS: {urls}")
+             dois = extract_dois(content)
+             # print(f"DOIs: {dois}")
+             bibs = extract_bibs(content)
+             # print(f"BIBs: {bibs}")
+
+             res = {'ID': line, 'type': source_type, 'url': current_url,
+                    'content': content, 'links': urls, 'dois': dois, 'bibs': bibs}
+
+             out = json.dumps(res, ensure_ascii=False)
+             output_file.write((out + "\n").encode())
+
+
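+ # Input layout (inferred from run() above; not documented in the commit):
+ #   input/tvillarr_model   one HF model id per line, e.g. "bert-base-uncased"
+ #   input/tvillarr_data    one HF dataset id per line, e.g. "squad"
+ #   input/tvillarr_source  lines of the form "<npapers>;github.com/<owner>/<repo>"
+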
+ output_file = gzip.open(f"output/{utid}.json.gz", 'w')
+
+ run("model")
+ run("data")
+ run("source")
+
+ # Close the gzip stream so the final block is flushed to disk
+ output_file.close()
+
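+ # Each line of output/tvillarr.json.gz is one JSON object shaped like
+ # (illustrative, values abridged):
+ #   {"ID": "...", "type": "model", "url": "...", "content": "...",
+ #    "links": [...], "dois": [...], "bibs": [...]}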