Dannong Wang committed
Commit · 734e80e · 1 Parent(s): d642557
add dataset

Files changed:
- README.md +8 -0
- generate_xbrl_extract_hf_split.py +8 -8
README.md
CHANGED
@@ -27,3 +27,11 @@ configs:
 ---
 
 # XBRL Extraction Dataset
+
+This is the official dataset introduced in the paper [FinLoRA: Benchmarking LoRA Methods for Fine-Tuning LLMs on Financial Datasets](https://arxiv.org/abs/2505.19819).
+
+<p>
+<a href="https://huggingface.co/spaces/wangd12/xbrl_llm_demo"><img src="https://raw.githubusercontent.com/wangd12rpi/FinLoRA/main/static/demo_btn.svg"></a>
+<a href="https://huggingface.co/spaces/wangd12/xbrl_llm_demo"><img src="https://raw.githubusercontent.com/wangd12rpi/FinLoRA/main/static/models_btn.svg"></a>
+<a href="https://finlora-docs.readthedocs.io/en/latest/"><img src="https://raw.githubusercontent.com/wangd12rpi/FinLoRA/main/static/doc_btn.svg"></a>
+</p>
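For readers of the dataset card above, a minimal sketch of loading the data with the `datasets` library. The repo id and config name below are illustrative placeholders, not values confirmed by this commit; the actual configurations are listed in the README's `configs:` section.

```python
# Minimal sketch, assuming the dataset is published on the Hugging Face Hub.
# "wangd12/xbrl_extract" and "xbrl_tags_extract" are hypothetical placeholders.
from datasets import load_dataset

ds = load_dataset("wangd12/xbrl_extract", name="xbrl_tags_extract")
print(ds)              # shows the available splits
print(ds["train"][0])  # first training example
```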
generate_xbrl_extract_hf_split.py
CHANGED
@@ -100,14 +100,14 @@ def gen_xbrl(cat):
 
     print(f"train size: {len(train_data)}, test size: {len(test_data)}\n")
 
-    with open(f"{cat}_test.csv", "w", newline="") as f:
-        w = csv.DictWriter(f, test_data[0].keys(), quoting=csv.QUOTE_ALL)
-        w.writeheader()
-        w.writerows(test_data)
-    with open(f"{cat}_train.csv", "w", newline="") as f:
-        w = csv.DictWriter(f, train_data[0].keys(), quoting=csv.QUOTE_ALL)
-        w.writeheader()
-        w.writerows(train_data)
+    # with open(f"{cat}_test.csv", "w", newline="") as f:
+    #     w = csv.DictWriter(f, test_data[0].keys(), quoting=csv.QUOTE_ALL)
+    #     w.writeheader()
+    #     w.writerows(test_data)
+    # with open(f"{cat}_train.csv", "w", newline="") as f:
+    #     w = csv.DictWriter(f, train_data[0].keys(), quoting=csv.QUOTE_ALL)
+    #     w.writeheader()
+    #     w.writerows(train_data)
 
     return train_data, test_data
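The hunk above disables the script's local CSV export (`csv.DictWriter`) while `gen_xbrl` still returns the raw split lists. As a hedged illustration only, and not necessarily what the rest of this script does, lists of row dicts like these can be wrapped into Hugging Face `Dataset` objects for upload:

```python
# Illustrative sketch only: convert the lists of dicts returned by gen_xbrl()
# into a DatasetDict. Nothing in the hunk above confirms the script does this.
from datasets import Dataset, DatasetDict

def to_dataset_dict(train_data, test_data):
    # Dataset.from_list builds a Dataset from a list of row dicts.
    return DatasetDict({
        "train": Dataset.from_list(train_data),
        "test": Dataset.from_list(test_data),
    })

# Hypothetical usage (category name and repo id are placeholders):
# train_data, test_data = gen_xbrl("xbrl_tags")
# to_dataset_dict(train_data, test_data).push_to_hub("wangd12/xbrl_extract")
```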