|
"""TODO: Add a description here.""" |
|
|
|
from __future__ import absolute_import, division, print_function

import datetime
import json
import os
from pathlib import Path
from typing import Optional

import numpy as np
import pandas as pd

import datasets
|
|
|
|
|
|
|
# BibTeX citation for the dataset (still the HF template placeholder —
# replace with the real citation before release).
_CITATION = """\

@InProceedings{huggingface:dataset,

title = {A great new dataset},

authors={huggingface, Inc.

},

year={2020}

}

"""


# Human-readable dataset summary (template placeholder; TODO fill in).
_DESCRIPTION = """TODO: Add description"""


# Remote feather file with one metadata row per patent application.
_METADATA_URL = "https://patentdiag.blob.core.windows.net/patent-data/metadata-2021-02-10.feather"

# Tar archive with one JSON file per application; it unpacks into the
# subfolder named by _DATA_SUBFOLDER_NAME.
_DATA_URL = "https://patentdiag.blob.core.windows.net/patent-data/distilled-2021-01-07.tar"

_DATA_SUBFOLDER_NAME = 'distilled'


# Seed for every pandas shuffle/sample so splits are reproducible.
RANDOM_STATE = 1729


# Names of the example fields; each is exposed as a string feature.
_FEATURES = [

    "patent_number",

    "decision",

    "title",

    "abstract",

    "claims",

    "background",

    "summary",

    "description",

    "cpc_label",

    "ipc_label",

    "filing_date",

    "patent_issue_date",

    "date_published",

    "examiner_id"

]
|
|
|
|
|
def str_to_date(s):
    """Parse a ``YYYY-MM-DD`` string into a ``datetime.datetime``.

    Raises ``ValueError`` if *s* does not match the expected format.
    """
    date_format = '%Y-%m-%d'
    return datetime.datetime.strptime(s, date_format)
|
|
|
|
|
class PatentsConfig(datasets.BuilderConfig):
    """BuilderConfig for Patents"""

    def __init__(
        self,
        ipcr_label: Optional[str] = None,
        cpc_label: Optional[str] = None,
        train_filing_start_date: Optional[str] = None,
        train_filing_end_date: Optional[str] = None,
        val_filing_start_date: Optional[str] = None,
        val_filing_end_date: Optional[str] = None,
        query_string: Optional[str] = None,
        val_set_balancer: bool = False,
        uniform_split: bool = False,
        train_only: bool = False,
        **kwargs
    ):
        """
        If train_filing_end_date is None, then a random train-val split will be used. If it is
        specified, then the specified date range will be used for the split. If train_filing_end_date
        is specified and val_filing_start_date is not specified, then val_filing_start_date defaults to
        train_filing_end_date.

        Args:
            ipcr_label: International Patent Classification code
            cpc_label: Cooperative Patent Classification code
            train_filing_start_date: Start date for patents in train set (and val set if random split is used)
            train_filing_end_date: End date for patents in train set
            val_filing_start_date: Start date for patents in val set
            val_filing_end_date: End date for patents in val set (and train set if random split is used)
            query_string: pandas ``DataFrame.query`` expression applied to the metadata frame
            val_set_balancer: if True, downsample the majority class so the val set has
                equal numbers of accepted and rejected applications
            uniform_split: if True, use a random 85/15 train/val split instead of a
                date-based one
            train_only: if True, produce only a train split
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.ipcr_label = ipcr_label
        self.cpc_label = cpc_label
        self.train_filing_start_date = train_filing_start_date
        self.train_filing_end_date = train_filing_end_date
        self.val_filing_start_date = val_filing_start_date
        self.val_filing_end_date = val_filing_end_date
        self.query_string = query_string
        self.val_set_balancer = val_set_balancer
        self.uniform_split = uniform_split
        self.train_only = train_only
|
|
|
|
|
class Patents(datasets.GeneratorBasedBuilder):
    """Patent-application dataset builder.

    Joins a metadata feather file (one row per application) with
    per-application JSON files containing the full text, and yields one
    string-valued example per application.
    """

    VERSION = datasets.Version("1.0.1")

    # All split/filter options live on the config object.
    BUILDER_CONFIG_CLASS = PatentsConfig

    def _info(self):
        """Declare the dataset schema; every feature is a plain string."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {k: datasets.Value("string") for k in _FEATURES}
            ),
            # Default (input, label) pair for supervised training.
            supervised_keys=("claims", "decision"),
            homepage="https://huggingface.co/great-new-dataset",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Returns SplitGenerators.

        Downloads (or locates) the metadata file and the JSON data directory,
        filters the metadata according to the config, then returns a single
        train split when ``config.train_only`` is set, otherwise a
        train/validation pair (random 85/15 when ``config.uniform_split``,
        date-range based otherwise).
        """
        print(f'Loading dataset with config: {self.config}')

        # Metadata: download unless the user pointed us at a local file.
        if self.config.data_files is None:
            print(f'Loading / downloading metadata file: {_METADATA_URL}')
            metadata_file = dl_manager.download_and_extract(_METADATA_URL)
        else:
            print(f'Using metadata file: {self.config.data_files}')
            metadata_file = Path(self.config.data_files)

        # Full-text JSON files: download + extract the tar unless a local
        # directory was supplied via data_dir.
        if self.config.data_dir is None:
            print('Loading / downloading data. This is a big file (360GB)!')
            json_dir = Path(dl_manager.download_and_extract(_DATA_URL))
            # The archive unpacks into a fixed subfolder.
            json_dir = json_dir / _DATA_SUBFOLDER_NAME
        else:
            json_dir = Path(self.config.data_dir)

        print(f'Reading metadata file: {metadata_file}')
        df = pd.read_feather(metadata_file)

        # Optional classification-code prefix filters (IPCR wins if both set).
        if self.config.ipcr_label:
            print(f'Filtering by IPCR label: {self.config.ipcr_label}')
            df = df[df['main_ipcr_label'].str.startswith(self.config.ipcr_label)]
        elif self.config.cpc_label:
            print(f'Filtering by CPC label: {self.config.cpc_label}')
            df = df[df['main_cpc_label'].str.startswith(self.config.cpc_label)]

        # Arbitrary user-supplied pandas query on the metadata.
        if self.config.query_string:
            df = df.query(self.config.query_string)

        # Train-only mode: one split, optionally date-bounded.
        if self.config.train_only:
            if self.config.train_filing_start_date:
                print(f'Filtering by train filing start date: {self.config.train_filing_start_date}')
                df = df[df['filing_date'] >= self.config.train_filing_start_date]
            if self.config.train_filing_end_date:
                print(f'Filtering by train filing end date: {self.config.train_filing_end_date}')
                df = df[df['filing_date'] <= self.config.train_filing_end_date]
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs=dict(
                        df=df,
                        json_dir=json_dir,
                        split='train',
                    ),
                )
            ]

        if self.config.uniform_split:
            # Random 85/15 split over the (optionally date-bounded) frame.
            if self.config.train_filing_start_date:
                df = df[df['filing_date'] >= self.config.train_filing_start_date]
            if self.config.val_filing_end_date:
                df = df[df['filing_date'] <= self.config.val_filing_end_date]
            df = df.sample(frac=1.0, random_state=RANDOM_STATE)
            num_train_samples = int(len(df) * 0.85)
            train_df = df.iloc[:num_train_samples]
            # FIX: was `df.iloc[num_train_samples:-1]`, which silently dropped
            # the last (shuffled) row from the validation set.
            val_df = df.iloc[num_train_samples:]
        else:
            # Date-range split. Start from the full frame so an omitted bound
            # means "unbounded"; previously an omitted start/end date left
            # train_df/val_df unassigned and raised NameError.
            train_df = df
            if self.config.train_filing_start_date:
                print(f'Filtering by train filing start date: {self.config.train_filing_start_date}')
                train_df = train_df[train_df['filing_date'] >= self.config.train_filing_start_date]
            if self.config.train_filing_end_date:
                print(f'Filtering by train filing end date: {self.config.train_filing_end_date}')
                train_df = train_df[train_df['filing_date'] <= self.config.train_filing_end_date]

            val_df = df
            if self.config.val_filing_start_date:
                print(f'Filtering by val filing start date: {self.config.val_filing_start_date}')
                val_df = val_df[val_df['filing_date'] >= self.config.val_filing_start_date]
            if self.config.val_filing_end_date:
                print(f'Filtering by val filing end date: {self.config.val_filing_end_date}')
                val_df = val_df[val_df['filing_date'] <= self.config.val_filing_end_date]

        if self.config.val_set_balancer:
            # Downsample the majority class so the validation set has equal
            # numbers of accepted and rejected applications.
            # NOTE(review): this reads a 'status' column while the examples
            # expose 'decision' — confirm the metadata schema has 'status'.
            rejected_df = val_df[val_df.status == 'REJECTED']
            num_rejected = len(rejected_df)
            accepted_df = val_df[val_df.status == 'ACCEPTED']
            num_accepted = len(accepted_df)
            if num_rejected < num_accepted:
                accepted_df = accepted_df.sample(frac=1.0, random_state=RANDOM_STATE)
                accepted_df = accepted_df[:num_rejected]
            else:
                rejected_df = rejected_df.sample(frac=1.0, random_state=RANDOM_STATE)
                rejected_df = rejected_df[:num_accepted]
            val_df = pd.concat([rejected_df, accepted_df])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs=dict(
                    df=train_df,
                    json_dir=json_dir,
                    split='train',
                ),
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs=dict(
                    df=val_df,
                    json_dir=json_dir,
                    split='val',
                ),
            ),
        ]

    def _generate_examples(self, df, json_dir, split):
        """ Yields examples by loading JSON files containing patent applications. """
        for id_, x in enumerate(df.itertuples()):
            # One JSON file per application, named by its application number.
            application_number = x.application_number
            filepath = json_dir / (application_number + '.json')
            try:
                with open(filepath, 'r') as f:
                    patent = json.load(f)
            except Exception as e:
                # Best-effort: emit a placeholder example for unreadable files
                # instead of aborting the whole generation pass.
                print('------------')
                print(f'ERROR WITH {filepath}\n')
                print(repr(e))
                print()
                yield id_, {k: "error" for k in _FEATURES}
                # FIX: without this `continue`, the loop fell through and
                # yielded a second example under the same key, built from an
                # undefined (first iteration) or stale `patent` dict.
                continue

            decision = x.decision
            yield id_, {
                "patent_number": application_number,
                "decision": decision,
                "title": patent["title"],
                "abstract": patent["abstract"],
                "claims": patent["claims"],
                "description": patent["full_description"],
                "background": patent["background"],
                "summary": patent["summary"],
                "cpc_label": patent["main_cpc_label"],
                'filing_date': patent['filing_date'],
                'patent_issue_date': patent['patent_issue_date'],
                'date_published': patent['date_published'],
                'examiner_id': patent['examiner_id'],
                "ipc_label": patent["main_ipcr_label"],
            }
|
|