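"""Collect PMC IDs for phys.org breaking-news articles.

Pipeline: pull each category's RSS feed, scrape every article for the DOI
it cites, convert those DOIs to PMC IDs with the NCBI ID Converter API,
and record up to two previously unseen IDs per topic in phys.txt.
"""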
import json
import xml.etree.ElementTree as ET
from bs4 import BeautifulSoup
import requests
import threading
import utils
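# utils is a project-local helper module; from its usage below it is assumed
# to provide fetch_page(url) -> str, download_datafile(name) -> bool,
# check_data_in_file(value, name) -> bool, write_data_to_file(value, name),
# and upload_datafile(name) -> bool.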

def fetch_links(feed_url):
    """Return the article URLs listed in a phys.org RSS feed."""
    links = []
    xml_data = utils.fetch_page(feed_url)
    # RSS entries live under <channel><item>; each article URL is in <link>.
    for item in ET.fromstring(xml_data).findall('channel/item'):
        link = item.find('link')
        if link is not None and link.text:
            links.append(link.text)
    return links

def fetch_all_links():
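    """Fetch article links for every phys.org category feed, as a JSON string."""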
    category_link_data = {
        "Earth": "https://phys.org/rss-feed/breaking/earth-news/",
        "Science": "https://phys.org/rss-feed/breaking/science-news/",
        "Nano-technology": "https://phys.org/rss-feed/breaking/nanotech-news/",
        "Physics": "https://phys.org/rss-feed/breaking/physics-news/",
        "Astronomy & Space": "https://phys.org/rss-feed/breaking/space-news/",
        "Biology": "https://phys.org/rss-feed/breaking/biology-news/",
        "Chemistry": "https://phys.org/rss-feed/breaking/chemistry-news/",
    }
    sd_links_data = {}
    for category, link in category_link_data.items():
        links = fetch_links(link)
        sd_links_data[category] = links
    return json.dumps(sd_links_data, indent=4, ensure_ascii=False)

def fetch_dois():
    """Scrape each collected article page and extract the DOIs it cites."""
    doi_data = {}
    data = json.loads(fetch_all_links())
    for topic, links in data.items():
        doi_list = []
        for link in links:
            page_content = utils.fetch_page(link)
            # phys.org tags journal links with data-doi="1" inside the
            # "article-main__more" block at the foot of each article.
            sections = BeautifulSoup(page_content, 'html.parser').find_all(
                "div", class_="article-main__more")
            for section in sections:
                doi_link = section.find("a", attrs={"data-doi": "1"})
                if doi_link:
                    doi = doi_link.text.split('DOI: ')[-1].strip()
                    if doi.startswith('10.'):  # every DOI begins with "10."
                        doi_list.append(doi)
        doi_data[topic] = doi_list
    return json.dumps(doi_data, indent=4, ensure_ascii=False)

def fetch_doi_data():
    """Run fetch_dois on a worker thread and return its JSON string."""
    result = []
    def fetch_and_store():
        result.append(fetch_dois())
    thread = threading.Thread(target=fetch_and_store)
    thread.start()
    thread.join()
    # Fall back to an empty JSON object (not a dict) so callers can
    # always pass the return value straight to json.loads().
    return result[0] if result else "{}"

def doi_to_pmc():
    """Convert the collected DOIs to PMC IDs via the NCBI ID Converter API."""
    data = json.loads(fetch_doi_data())
    pmc_data = {}
    for topic, dois in data.items():
        if not dois:
            continue
        doi_list = ",".join(dois)
        try:
            # idconv takes comma-separated ids, a format, and an optional
            # contact email (redacted here).
            url = ("https://www.ncbi.nlm.nih.gov/pmc/utils/idconv/v1.0/"
                   f"?email=[email protected]&ids={doi_list}&format=json")
            doi_pmc_data = requests.get(url, timeout=30).json()

            if doi_pmc_data['status'] == 'ok':
                # The converter marks unresolvable records with live == "false"
                # (a string), so compare explicitly instead of truth-testing.
                pmc_list = [record['pmcid'] for record in doi_pmc_data['records']
                            if 'pmcid' in record and record.get('live') != 'false']
                pmc_data[topic] = pmc_list[:2]
        except Exception as e:
            print(f"Error: {e}")
    return json.dumps(pmc_data, indent=4, ensure_ascii=False)

def extract_phys_data():
    """Record up to two previously unseen PMC IDs per topic in phys.txt."""
    if not utils.download_datafile('phys.txt'):
        raise RuntimeError("Failed to download datafile")
    pmc_data = {}
    pmcid_data = json.loads(doi_to_pmc())
    for topic, pmcids in pmcid_data.items():
        pmc_ids = []
        for pmcid in pmcids:
            if len(pmc_ids) >= 2:
                break
            # Skip IDs already recorded in phys.txt so reruns stay deduplicated.
            if not utils.check_data_in_file(pmcid, 'phys.txt'):
                utils.write_data_to_file(pmcid, 'phys.txt')
                pmc_ids.append(pmcid)
        pmc_data[topic] = {"ids": pmc_ids, "count": len(pmc_ids)}
    if not utils.upload_datafile('phys.txt'):
        raise RuntimeError("Failed to upload datafile")
    return json.dumps(pmc_data, indent=4, ensure_ascii=False)

if __name__ == "__main__":
    data = extract_phys_data()
    # ensure_ascii=False above means the JSON may contain non-ASCII
    # characters, so pin the file encoding to UTF-8.
    with open('phys_data.json', 'w', encoding='utf-8') as f:
        f.write(data)