eskayML committed (verified)
Commit a0f25c4 · 1 Parent(s): 4616bfd

eskayML/electra_interview_new

README.md CHANGED
@@ -1,4 +1,5 @@
  ---
+ library_name: transformers
  license: apache-2.0
  base_model: mrm8488/electra-small-finetuned-squadv2
  tags:
@@ -17,8 +18,8 @@ should probably proofread and complete it, then remove this comment. -->

  This model is a fine-tuned version of [mrm8488/electra-small-finetuned-squadv2](https://huggingface.co/mrm8488/electra-small-finetuned-squadv2) on the None dataset.
  It achieves the following results on the evaluation set:
- - Loss: 1.6205
- - Accuracy: 0.8981
+ - Loss: 2.3959
+ - Accuracy: 0.2675

  ## Model description

@@ -43,37 +44,27 @@ The following hyperparameters were used during training:
  - seed: 42
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
  - lr_scheduler_type: linear
- - num_epochs: 20
+ - num_epochs: 10

  ### Training results

  | Training Loss | Epoch | Step | Validation Loss | Accuracy |
  |:-------------:|:-----:|:----:|:---------------:|:--------:|
- | No log | 1.0 | 54 | 2.2879 | 0.1481 |
- | No log | 2.0 | 108 | 2.2626 | 0.2963 |
- | No log | 3.0 | 162 | 2.2317 | 0.3704 |
- | No log | 4.0 | 216 | 2.1939 | 0.3704 |
- | No log | 5.0 | 270 | 2.1577 | 0.3796 |
- | No log | 6.0 | 324 | 2.0902 | 0.5741 |
- | No log | 7.0 | 378 | 2.0270 | 0.7407 |
- | No log | 8.0 | 432 | 1.9740 | 0.7222 |
- | No log | 9.0 | 486 | 1.9228 | 0.7685 |
- | 2.1738 | 10.0 | 540 | 1.8774 | 0.7778 |
- | 2.1738 | 11.0 | 594 | 1.8201 | 0.8519 |
- | 2.1738 | 12.0 | 648 | 1.7920 | 0.8611 |
- | 2.1738 | 13.0 | 702 | 1.7574 | 0.8333 |
- | 2.1738 | 14.0 | 756 | 1.7125 | 0.8704 |
- | 2.1738 | 15.0 | 810 | 1.6907 | 0.8611 |
- | 2.1738 | 16.0 | 864 | 1.6571 | 0.8981 |
- | 2.1738 | 17.0 | 918 | 1.6424 | 0.8981 |
- | 2.1738 | 18.0 | 972 | 1.6284 | 0.8981 |
- | 1.7996 | 19.0 | 1026 | 1.6225 | 0.8981 |
- | 1.7996 | 20.0 | 1080 | 1.6205 | 0.8981 |
+ | No log | 1.0 | 380 | 2.6074 | 0.2266 |
+ | 2.7429 | 2.0 | 760 | 2.4872 | 0.2266 |
+ | 2.5203 | 3.0 | 1140 | 2.4483 | 0.2266 |
+ | 2.4479 | 4.0 | 1520 | 2.4349 | 0.2266 |
+ | 2.4479 | 5.0 | 1900 | 2.4114 | 0.2306 |
+ | 2.3919 | 6.0 | 2280 | 2.3933 | 0.2424 |
+ | 2.2714 | 7.0 | 2660 | 2.3914 | 0.2530 |
+ | 2.1536 | 8.0 | 3040 | 2.3968 | 0.2714 |
+ | 2.1536 | 9.0 | 3420 | 2.3913 | 0.2648 |
+ | 2.1058 | 10.0 | 3800 | 2.3959 | 0.2675 |


  ### Framework versions

- - Transformers 4.41.2
- - Pytorch 2.3.0+cu121
- - Datasets 2.20.0
+ - Transformers 4.44.2
+ - Pytorch 2.4.1+cu121
+ - Datasets 3.0.1
  - Tokenizers 0.19.1
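
For orientation, the sketch below shows one way the updated checkpoint could be loaded for inference. It assumes only what the card states: a sequence-classification checkpoint published as eskayML/electra_interview_new; pinning to this commit via `revision` is optional and shown for illustration, and the input sentence is a hypothetical example.

```python
# Minimal usage sketch (assumption: a standard ELECTRA sequence-classification
# checkpoint hosted as eskayML/electra_interview_new).
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="eskayML/electra_interview_new",
    revision="a0f25c4",  # optional: pin to this commit
)

# Hypothetical interview snippet; the output is a list of {"label", "score"}
# dicts, with labels drawn from the id2label mapping in config.json.
print(classifier("The EHR alerts interrupt us constantly during clinic."))
```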
config.json CHANGED
@@ -10,30 +10,50 @@
  "hidden_dropout_prob": 0.1,
  "hidden_size": 256,
  "id2label": {
- "0": "Provider Characteristics",
- "1": "Finanicial Impact",
- "2": "Imaging modalities in general",
- "3": "Clinical utility & efficiency-Provider perspective",
- "4": "Health System Characteristics",
- "5": "Training",
- "6": "Value equation",
- "7": "Workflow related problems",
- "8": "Credentialing / Quality Assurance Infrastructure",
- "9": "Patient/Physican interaction in LUS"
+ "0": "ALGORITHM",
+ "1": "COMANAGEMENT",
+ "2": "COMPETING PRIORITIES",
+ "3": "COST",
+ "4": "COVID",
+ "5": "EHR",
+ "6": "GUIDELINES",
+ "7": "HFREF COMFORT MANAGING",
+ "8": "INFO NEEDS",
+ "9": "INTERNAL & EXTERNAL SUPPORT BAR",
+ "10": "JUICY QUOTE",
+ "11": "MAGIC WAND",
+ "12": "MED MANAGEMENT",
+ "13": "OTHER",
+ "14": "PATIENT DIGITAL TOOLS",
+ "15": "PATIENTS",
+ "16": "PROVIDER EDUCATION",
+ "17": "ROLE OF OTHER STAFF",
+ "18": "TIME CONSTRAINTS",
+ "19": "WORKFLOW"
  },
  "initializer_range": 0.02,
  "intermediate_size": 1024,
  "label2id": {
- "Clinical utility & efficiency-Provider perspective": 3,
- "Credentialing / Quality Assurance Infrastructure": 8,
- "Finanicial Impact": 1,
- "Health System Characteristics": 4,
- "Imaging modalities in general": 2,
- "Patient/Physican interaction in LUS": 9,
- "Provider Characteristics": 0,
- "Training": 5,
- "Value equation": 6,
- "Workflow related problems": 7
+ "ALGORITHM": 0,
+ "COMANAGEMENT": 1,
+ "COMPETING PRIORITIES": 2,
+ "COST": 3,
+ "COVID": 4,
+ "EHR": 5,
+ "GUIDELINES": 6,
+ "HFREF COMFORT MANAGING": 7,
+ "INFO NEEDS": 8,
+ "INTERNAL & EXTERNAL SUPPORT BAR": 9,
+ "JUICY QUOTE": 10,
+ "MAGIC WAND": 11,
+ "MED MANAGEMENT": 12,
+ "OTHER": 13,
+ "PATIENT DIGITAL TOOLS": 14,
+ "PATIENTS": 15,
+ "PROVIDER EDUCATION": 16,
+ "ROLE OF OTHER STAFF": 17,
+ "TIME CONSTRAINTS": 18,
+ "WORKFLOW": 19
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
@@ -48,7 +68,7 @@
  "summary_type": "first",
  "summary_use_proj": true,
  "torch_dtype": "float32",
- "transformers_version": "4.41.2",
+ "transformers_version": "4.44.2",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 30522
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6955f6b521a9cdc5a5c9c28c4e89deb76d60850c8835d285addadec9097204d0
- size 54229432
+ oid sha256:32a787c62a68c4edf191c25b81828f9bfa6b04143e2c06ea1ca0a61109ec1912
+ size 54239712
runs/Oct11_16-43-21_767204f4c226/events.out.tfevents.1728665001.767204f4c226.710.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:473c4cfa33e820a5d7f4706615e48795081d3f40692fc122361638c320f9f537
+ size 11159
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:81f54b2cd5340bed4ef76683aa2951f979fc12e7b1fe816fb1479b166b9e15e7
- size 5112
+ oid sha256:5dc7fecae041a354b7d4d039a2b4259515d9a268bcecee94453f6c65468f9b80
+ size 5176