AbrahamSanders committed on
Commit daa8956
1 Parent(s): 0511d87

First model version

README.md CHANGED
@@ -1,3 +1,35 @@
  ---
  license: mit
  ---
+
+ Base model: [roberta-large](https://huggingface.co/roberta-large)
+
+ Fine-tuned for persuadee donation detection on the [Persuasion For Good Dataset](https://gitlab.com/ucdavisnlp/persuasionforgood) (Wang et al., 2019):
+
+ Given a complete dialogue from Persuasion For Good, the task is to predict the binary label:
+ - 0: the persuadee does not intend to donate
+ - 1: the persuadee intends to donate
+
+ Only persuadee utterances are input to the model for this task; persuader utterances are discarded. Each training example is the concatenation of all persuadee utterances in a single dialogue, separated by the `</s>` token.
+
+ For example:
+
+ **Input**: `<s>How are you?</s>Can you tell me more about the charity?</s>...</s>Sure, I'll donate a dollar.</s>...</s>`
+
+ **Label**: 1
+
+ **Input**: `<s>How are you?</s>Can you tell me more about the charity?</s>...</s>I am not interested.</s>...</s>`
+
+ **Label**: 0
+
+ The following dialogues were excluded:
+ - 146 dialogues where a donation of 0 was made at the end of the task but a non-zero amount was pledged by the persuadee in the dialogue, per the following regular expression (see the sketch after this list): `(?:\$(?:0\.)?[1-9]|[1-9][.0-9]*?(?: ?\$| dollars?| cents?))`
+
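As a hedged illustration of how that pledge filter behaves (the filtering script itself is not part of this commit), the pattern can be exercised like this:

```python
import re

# The pledge-detection pattern quoted above; matches non-zero amounts such as
# "$1", "$0.5", "2 dollars", or "50 cents", but not "$0" or zero amounts.
pledge_re = re.compile(r"(?:\$(?:0\.)?[1-9]|[1-9][.0-9]*?(?: ?\$| dollars?| cents?))")

for utterance in ["I'll donate $1.", "Maybe 50 cents.", "I will donate $0."]:
    print(bool(pledge_re.search(utterance)), "-", utterance)
# True - I'll donate $1.
# True - Maybe 50 cents.
# False - I will donate $0.
```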
+ Stats:
+ - **Training set**: 587 dialogues, using actual end-task donations as labels
+ - **Validation set**: 141 dialogues, using manual donation intention labels from the Persuasion For Good 'AnnSet'
+ - **Test set**: 143 dialogues, using manual donation intention labels from the Persuasion For Good 'AnnSet'
+
+ **Test Macro F1**: 0.88
+
+ **Test Accuracy**: 0.90
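For convenience, here is a minimal inference sketch built from the input format described above. It is an illustration under stated assumptions, not code from this commit: the checkpoint path is a placeholder for a local clone of this repository, and the label mapping follows the README (0 = no donation intent, 1 = intends to donate).

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Placeholder path: point this at a local clone of this repository.
model_path = "path/to/this/checkpoint"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSequenceClassification.from_pretrained(model_path)
model.eval()

# Concatenate all persuadee utterances from one dialogue with the </s>
# separator; the tokenizer then adds the surrounding <s> ... </s> itself.
persuadee_utterances = [
    "How are you?",
    "Can you tell me more about the charity?",
    "Sure, I'll donate a dollar.",
]
text = tokenizer.sep_token.join(persuadee_utterances)

inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=512)
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item())  # 1 = intends to donate, 0 = does not
```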
config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "_name_or_path": "roberta-large",
+   "architectures": [
+     "RobertaForSequenceClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 4096,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.11.3",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 50265
+ }
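One detail worth noting in this config (an observation, not something documented in the commit): no `num_labels` or `id2label` is set, so `transformers` falls back to its default two-label classification head, which matches the binary donate/not-donate task. A quick check, with the placeholder path standing in for a local clone of this repository:

```python
from transformers import AutoConfig

# Placeholder path: a local clone of this repository.
config = AutoConfig.from_pretrained("path/to/this/checkpoint")
print(config.num_labels)  # 2 -- the transformers default when id2label is absent
```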
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a8a1a99878e330a2b226c429280e2c3d6800342792bf7ee41bae1c3a8cbbc90
+ size 1421605805
runs/version_0/events.out.tfevents.1649251721.panacea.34025.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8f6d9f468458d8f0f946f1bb8048f2e0a406d005c6cc426a81fb0ebba953138
+ size 21348
runs/version_0/events.out.tfevents.1649252717.panacea.34025.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ebef0b5f87d987f5bf4a425df175d60b63f6ef6f623e17d13f94b29dc1d0b1ba
+ size 4335
runs/version_0/hparams.yaml ADDED
@@ -0,0 +1,74 @@
+ accelerator: null
+ accumulate_grad_batches: 2
+ adam_epsilon: 1.0e-08
+ amp_backend: native
+ amp_level: null
+ auto_lr_find: false
+ auto_scale_batch_size: false
+ auto_select_gpus: false
+ base_modelpath: roberta-large
+ batch_size: 16
+ benchmark: false
+ check_val_every_n_epoch: 1
+ checkpoint_callback: null
+ checkpoint_save_top_k: 1
+ checkpoint_save_weights_only: true
+ data_random_state: 42
+ datasetpath: ../data/persuasion_for_good/AnnotatedData
+ default_root_dir: null
+ detect_anomaly: false
+ deterministic: false
+ devices: null
+ early_stopping_patience: 40
+ enable_checkpointing: true
+ enable_model_summary: true
+ enable_progress_bar: true
+ fast_dev_run: false
+ flush_logs_every_n_steps: null
+ gpus: 1
+ gradient_clip_algorithm: null
+ gradient_clip_val: 1.0
+ ipus: null
+ learning_rate: 3.0e-05
+ limit_predict_batches: 1.0
+ limit_test_batches: 1.0
+ limit_train_batches: 1.0
+ limit_val_batches: 1.0
+ log_every_n_steps: 5
+ log_gpu_memory: null
+ logger: true
+ max_epochs: 30
+ max_steps: -1
+ max_time: null
+ min_epochs: null
+ min_steps: null
+ model_random_state: 42
+ move_metrics_to_cpu: false
+ multiple_trainloader_mode: max_size_cycle
+ num_nodes: 1
+ num_processes: 1
+ num_sanity_val_steps: 2
+ overfit_batches: 0.0
+ plugins: null
+ precision: 32
+ prepare_data_per_node: null
+ process_position: 0
+ profiler: null
+ progress_bar_refresh_rate: null
+ reload_dataloaders_every_epoch: false
+ reload_dataloaders_every_n_epochs: 0
+ replace_sampler_ddp: true
+ resume_from_checkpoint: null
+ stochastic_weight_avg: false
+ strategy: null
+ sync_batchnorm: false
+ terminate_on_nan: null
+ test_split: 0.2
+ tpu_cores: null
+ track_grad_norm: -1
+ val_check_interval: 0.25
+ val_split: 0.1
+ warmup_steps: 0
+ weight_decay: 0.0
+ weights_save_path: null
+ weights_summary: top
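These are the standard PyTorch Lightning Trainer flags of that era plus a few task-specific entries (learning rate, splits, paths). As a sketch of how the key settings map onto a Trainer (the training script is not part of this commit, and `DonationModule`/`DonationDataModule` are invented names, not code from this repository):

```python
import pytorch_lightning as pl

# Hypothetical reconstruction from hparams.yaml above.
trainer = pl.Trainer(
    gpus=1,
    max_epochs=30,
    accumulate_grad_batches=2,   # with batch_size 16, effective batch size is 32
    gradient_clip_val=1.0,
    val_check_interval=0.25,     # validate four times per training epoch
    precision=32,
)
# trainer.fit(DonationModule(learning_rate=3.0e-05, weight_decay=0.0),
#             DonationDataModule(batch_size=16, val_split=0.1, test_split=0.2))
```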
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "model_max_length": 512, "special_tokens_map_file": null, "name_or_path": "roberta-large", "tokenizer_class": "RobertaTokenizer"}
vocab.json ADDED
The diff for this file is too large to render. See raw diff