import json

import datasets


_DESCRIPTION = """\
The dataset contains the annual reports of US public firms filing with the SEC EDGAR system.
Each annual report (10-K filing) is broken into 20 sections. Each section is split into individual sentences.
Sentiment labels are provided on a per-filing basis, derived from the market reaction around the filing date.
Additional metadata for each filing is included in the dataset.
"""

_LICENSE = "apache-2.0"

# Each split is stored as this many JSON Lines shards under data/<split>/.
_NOS_SHARDS = 10

# Sentence limits applied by the "small_*" configurations.
_SMALL_THRESHOLD_TRAIN = 200_000
_SMALL_THRESHOLD_OTHERS = 20_000

_URLS = {
    split: [f"data/{split}/shard_{shard}.jsonl" for shard in range(_NOS_SHARDS)]
    for split in ["test", "train", "validate"]
}
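
# The shards are JSON Lines files: one firm (CIK) per line, with all of that
# firm's filings nested inside. A sketch of the record layout expected by
# _generate_examples below (field values are illustrative, not real data):
#
#   {
#       "cik": "...", "name": "...", "tickers": ["..."], "exchanges": ["..."],
#       "entityType": "...", "sic": "...", "stateOfIncorporation": "...", "tickerCount": 1,
#       "filings": [
#           {
#               "form": "10-K", "filingDate": "...", "reportDate": "...", "acceptanceDateTime": "...",
#               "labels": {"1d": "positive", "5d": "negative", "30d": "positive"},
#               "returns": {"1d": {"startDate": "...", "endDate": "...",
#                                  "closePriceStartDate": 0.0, "closePriceEndDate": 0.0, "ret": 0.0},
#                           "5d": {...}, "30d": {...}},
#               "report": {"section_1": ["first sentence", "second sentence"], "section_1A": ["..."]}
#           }
#       ]
#   }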

_ALL_FEATURES = {
    "cik": datasets.Value("string"),
    "sentence": datasets.Value("string"),
    "section": datasets.ClassLabel(
        num_classes=20,
        names=[
            "section_1", "section_10", "section_11", "section_12",
            "section_13", "section_14", "section_15", "section_1A",
            "section_1B", "section_2", "section_3", "section_4",
            "section_5", "section_6", "section_7", "section_7A",
            "section_8", "section_9", "section_9A", "section_9B",
        ],
    ),
    # Per-filing sentiment labels from the market reaction over three horizons.
    "labels": {
        "1d": datasets.ClassLabel(num_classes=2, names=["positive", "negative"]),
        "5d": datasets.ClassLabel(num_classes=2, names=["positive", "negative"]),
        "30d": datasets.ClassLabel(num_classes=2, names=["positive", "negative"]),
    },
    "filingDate": datasets.Value("string"),
    "name": datasets.Value("string"),
    "tickers": [datasets.Value("string")],
    "exchanges": [datasets.Value("string")],
    "entityType": datasets.Value("string"),
    "sic": datasets.Value("string"),
    "stateOfIncorporation": datasets.Value("string"),
    "tickerCount": datasets.Value("int32"),
    "acceptanceDateTime": datasets.Value("string"),
    "form": datasets.Value("string"),
    "reportDate": datasets.Value("string"),
    # Raw returns underlying the labels, over the same three horizons.
    "returns": {
        "1d": {
            "closePriceEndDate": datasets.Value("float32"),
            "closePriceStartDate": datasets.Value("float32"),
            "endDate": datasets.Value("string"),
            "startDate": datasets.Value("string"),
            "ret": datasets.Value("float32"),
        },
        "5d": {
            "closePriceEndDate": datasets.Value("float32"),
            "closePriceStartDate": datasets.Value("float32"),
            "endDate": datasets.Value("string"),
            "startDate": datasets.Value("string"),
            "ret": datasets.Value("float32"),
        },
        "30d": {
            "closePriceEndDate": datasets.Value("float32"),
            "closePriceStartDate": datasets.Value("float32"),
            "endDate": datasets.Value("string"),
            "startDate": datasets.Value("string"),
            "ret": datasets.Value("float32"),
        },
    },
}


class FinancialReportsSec(datasets.GeneratorBasedBuilder):
    """Sentences from 10-K filings on SEC EDGAR, with per-filing market-reaction sentiment labels."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="lite", version=VERSION, description="This returns the dataset with only the critical data needed for analysis."),
        datasets.BuilderConfig(name="full", version=VERSION, description="This returns the dataset with all metadata included."),
        datasets.BuilderConfig(name="small_lite", version=VERSION, description="This returns a smaller version of the dataset with only the critical data needed for analysis."),
        datasets.BuilderConfig(name="small_full", version=VERSION, description="This returns a smaller version of the dataset with all metadata included."),
    ]

    def _info(self):
        # The "lite" configurations keep only the fields needed for sentence-level sentiment analysis.
        lite_features = datasets.Features(
            {k: v for k, v in _ALL_FEATURES.items() if k in ["cik", "sentence", "section", "labels", "filingDate"]}
        )
        full_features = datasets.Features(_ALL_FEATURES)

        features = full_features if self.config.name in ("full", "small_full") else lite_features
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": data_dir["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepaths": data_dir["validate"], "split": "validate"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepaths": data_dir["test"], "split": "test"},
            ),
        ]

    def _generate_examples(self, filepaths, split):
        reads = 0
        # The "small_*" configurations stop early after a fixed number of sentences per split.
        threshold = _SMALL_THRESHOLD_TRAIN if split == "train" else _SMALL_THRESHOLD_OTHERS
        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                # One firm (CIK) per line, with all of its filings nested inside.
                for row in f:
                    data = json.loads(row)
                    for filing in data["filings"]:
                        for sec_id, section in filing["report"].items():
                            for idx, sentence in enumerate(section):
                                reads += 1
                                if self.config.name in ("small_full", "small_lite") and reads > threshold:
                                    return
                                key = f'{data["cik"]}_{filing["form"]}_{filing["reportDate"].split("-")[0]}_{sec_id}_{idx}'
                                if self.config.name in ("lite", "small_lite"):
                                    yield key, {
                                        "cik": data["cik"],
                                        "sentence": sentence,
                                        "section": sec_id,
                                        "labels": filing["labels"],
                                        "filingDate": filing["filingDate"],
                                    }
                                else:
                                    yield key, {
                                        "cik": data["cik"],
                                        "sentence": sentence,
                                        "section": sec_id,
                                        "labels": filing["labels"],
                                        "filingDate": filing["filingDate"],
                                        "name": data["name"],
                                        "tickers": data["tickers"],
                                        "exchanges": data["exchanges"],
                                        "entityType": data["entityType"],
                                        "sic": data["sic"],
                                        "stateOfIncorporation": data["stateOfIncorporation"],
                                        "tickerCount": data["tickerCount"],
                                        "acceptanceDateTime": filing["acceptanceDateTime"],
                                        "form": filing["form"],
                                        "reportDate": filing["reportDate"],
                                        "returns": filing["returns"],
                                    }
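

# A minimal usage sketch (assumptions: this script is saved locally as
# financial_reports_sec.py; recent versions of `datasets` additionally require
# trust_remote_code=True to run script-based loaders):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("financial_reports_sec.py", "small_lite", split="train")
#   print(ds[0]["sentence"], ds[0]["labels"]["1d"])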