pszemraj committed
Commit d4ed403 (parent 0d65f29)

add additional 2-epoch checkpoint, better regularization

config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "_name_or_path": "pszemraj/long-t5-tglobal-base-16384-booksum-V6-partial",
+   "architectures": [
+     "LongT5ForConditionalGeneration"
+   ],
+   "d_ff": 2048,
+   "d_kv": 64,
+   "d_model": 768,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "early_stopping": true,
+   "encoder_attention_type": "transient-global",
+   "encoder_no_repeat_ngram_size": 4,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "global_block_size": 16,
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "length_penalty": 0.8,
+   "local_radius": 127,
+   "max_length": 512,
+   "min_length": 8,
+   "model_type": "longt5",
+   "n_positions": 4096,
+   "no_repeat_ngram_size": 3,
+   "num_beams": 2,
+   "num_decoder_layers": 12,
+   "num_heads": 12,
+   "num_layers": 12,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "repetition_penalty": 3.5,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.20.1",
+   "use_cache": false,
+   "vocab_size": 32128
+ }
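
These config values double as generation defaults: transformers' generate() reads num_beams, no_repeat_ngram_size, repetition_penalty, length_penalty, min_length, max_length, and early_stopping straight from config.json when no overrides are passed. A minimal usage sketch, not part of the commit, assuming the checkpoint is reachable under the hub id recorded in the training metadata below (or any local path):

# Minimal usage sketch; the repo id is an assumption based on hub_model_id
# in the training metadata in this commit.
from transformers import AutoTokenizer, LongT5ForConditionalGeneration

repo = "pszemraj/long-t5-tglobal-base-16384-booksum-V6-partial-ft3-booksum"  # assumed id
tokenizer = AutoTokenizer.from_pretrained(repo)
model = LongT5ForConditionalGeneration.from_pretrained(repo)

text = "long document to summarize ..."
inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=16384)
# generate() picks up the defaults baked into config.json above:
# num_beams=2, no_repeat_ngram_size=3, repetition_penalty=3.5,
# length_penalty=0.8, min_length=8, max_length=512, early_stopping=True.
summary_ids = model.generate(**inputs)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))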
long-t5-tglobal-base-16384-booksum-V6-partial-ft3-booksum_training_metadata.json ADDED
@@ -0,0 +1 @@
+ {"output_dir": "/content/drive/MyDrive/Programming/hf-trainer/long-t5-tglobal-base-16384-booksum-V6-partial-ft3-booksum", "overwrite_output_dir": true, "do_train": false, "do_eval": false, "do_predict": false, "evaluation_strategy": "no", "prediction_loss_only": false, "per_device_train_batch_size": 2, "per_device_eval_batch_size": 1, "per_gpu_train_batch_size": "None", "per_gpu_eval_batch_size": "None", "gradient_accumulation_steps": 64, "eval_accumulation_steps": "None", "eval_delay": 0, "learning_rate": 0.0004, "weight_decay": 0.05, "adam_beta1": 0.9, "adam_beta2": 0.999, "adam_epsilon": 1e-08, "max_grad_norm": 0.2, "num_train_epochs": 2, "max_steps": -1, "lr_scheduler_type": "cosine_with_restarts", "warmup_ratio": 0.02, "warmup_steps": 0, "log_level": -1, "log_level_replica": -1, "log_on_each_node": true, "logging_dir": "/content/drive/MyDrive/Programming/hf-trainer/long-t5-tglobal-base-16384-booksum-V6-partial-ft3-booksum/logs", "logging_strategy": "steps", "logging_first_step": false, "logging_steps": 3, "logging_nan_inf_filter": true, "save_strategy": "epoch", "save_steps": 500, "save_total_limit": 1, "save_on_each_node": false, "no_cuda": false, "seed": 42, "data_seed": "None", "jit_mode_eval": false, "use_ipex": false, "bf16": false, "fp16": false, "fp16_opt_level": "O1", "half_precision_backend": "auto", "bf16_full_eval": false, "fp16_full_eval": false, "tf32": "None", "local_rank": 0, "xpu_backend": "None", "tpu_num_cores": "None", "tpu_metrics_debug": false, "debug": "[]", "dataloader_drop_last": false, "eval_steps": "None", "dataloader_num_workers": 0, "past_index": -1, "run_name": "/content/drive/MyDrive/Programming/hf-trainer/long-t5-tglobal-base-16384-booksum-V6-partial-ft3-booksum", "disable_tqdm": false, "remove_unused_columns": true, "label_names": "None", "load_best_model_at_end": false, "metric_for_best_model": "None", "greater_is_better": "None", "ignore_data_skip": false, "sharded_ddp": "[]", "fsdp": "[]", "fsdp_min_num_params": 0, "deepspeed": "/content/ds_config_zero2_bf16.json", "label_smoothing_factor": 0.0, "optim": "adamw_hf", "adafactor": false, "group_by_length": false, "length_column_name": "length", "report_to": "['tensorboard']", "ddp_find_unused_parameters": "None", "ddp_bucket_cap_mb": "None", "dataloader_pin_memory": true, "skip_memory_metrics": true, "use_legacy_prediction_loop": false, "push_to_hub": true, "resume_from_checkpoint": "None", "hub_model_id": "long-t5-tglobal-base-16384-booksum-V6-partial-ft3-booksum", "hub_strategy": "end", "hub_token": "<HUB_TOKEN>", "hub_private_repo": true, "gradient_checkpointing": true, "include_inputs_for_metrics": false, "fp16_backend": "auto", "push_to_hub_model_id": "None", "push_to_hub_organization": "None", "push_to_hub_token": "<PUSH_TO_HUB_TOKEN>", "_n_gpu": 1, "mp_parameters": "", "auto_find_batch_size": false, "full_determinism": false, "torchdynamo": "None", "ray_scope": "last", "sortish_sampler": false, "predict_with_generate": false, "generation_max_length": "None", "generation_num_beams": "None", "train_batch_size": 2, "eval_batch_size": 1, "configs_src": "long-t5-tglobal-base-16384-booksum-V6-partial-ft3-booksum"}
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4adb405b88e1acd599988a723bb03f622f2bc5b7b441d9a480ed5b5ed75fa190
+ size 990388907
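
Only a Git LFS pointer is committed: the spec version, a sha256 object id, and the byte size stand in for the ~990 MB weight file, which LFS fetches on checkout. A sketch, assuming a local download named pytorch_model.bin, that checks the file against the pointer:

# Verify a downloaded pytorch_model.bin against the LFS pointer above.
# The local filename is an assumption.
import hashlib, os

expected_oid = "4adb405b88e1acd599988a723bb03f622f2bc5b7b441d9a480ed5b5ed75fa190"
expected_size = 990388907
path = "pytorch_model.bin"  # assumed local path

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert h.hexdigest() == expected_oid, "sha256 mismatch"
print("pointer matches file")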
special_tokens_map.json ADDED
@@ -0,0 +1,107 @@
+ {
+   "additional_special_tokens": [
+     "<extra_id_0>",
+     "<extra_id_1>",
+     "<extra_id_2>",
+     "<extra_id_3>",
+     "<extra_id_4>",
+     "<extra_id_5>",
+     "<extra_id_6>",
+     "<extra_id_7>",
+     "<extra_id_8>",
+     "<extra_id_9>",
+     "<extra_id_10>",
+     "<extra_id_11>",
+     "<extra_id_12>",
+     "<extra_id_13>",
+     "<extra_id_14>",
+     "<extra_id_15>",
+     "<extra_id_16>",
+     "<extra_id_17>",
+     "<extra_id_18>",
+     "<extra_id_19>",
+     "<extra_id_20>",
+     "<extra_id_21>",
+     "<extra_id_22>",
+     "<extra_id_23>",
+     "<extra_id_24>",
+     "<extra_id_25>",
+     "<extra_id_26>",
+     "<extra_id_27>",
+     "<extra_id_28>",
+     "<extra_id_29>",
+     "<extra_id_30>",
+     "<extra_id_31>",
+     "<extra_id_32>",
+     "<extra_id_33>",
+     "<extra_id_34>",
+     "<extra_id_35>",
+     "<extra_id_36>",
+     "<extra_id_37>",
+     "<extra_id_38>",
+     "<extra_id_39>",
+     "<extra_id_40>",
+     "<extra_id_41>",
+     "<extra_id_42>",
+     "<extra_id_43>",
+     "<extra_id_44>",
+     "<extra_id_45>",
+     "<extra_id_46>",
+     "<extra_id_47>",
+     "<extra_id_48>",
+     "<extra_id_49>",
+     "<extra_id_50>",
+     "<extra_id_51>",
+     "<extra_id_52>",
+     "<extra_id_53>",
+     "<extra_id_54>",
+     "<extra_id_55>",
+     "<extra_id_56>",
+     "<extra_id_57>",
+     "<extra_id_58>",
+     "<extra_id_59>",
+     "<extra_id_60>",
+     "<extra_id_61>",
+     "<extra_id_62>",
+     "<extra_id_63>",
+     "<extra_id_64>",
+     "<extra_id_65>",
+     "<extra_id_66>",
+     "<extra_id_67>",
+     "<extra_id_68>",
+     "<extra_id_69>",
+     "<extra_id_70>",
+     "<extra_id_71>",
+     "<extra_id_72>",
+     "<extra_id_73>",
+     "<extra_id_74>",
+     "<extra_id_75>",
+     "<extra_id_76>",
+     "<extra_id_77>",
+     "<extra_id_78>",
+     "<extra_id_79>",
+     "<extra_id_80>",
+     "<extra_id_81>",
+     "<extra_id_82>",
+     "<extra_id_83>",
+     "<extra_id_84>",
+     "<extra_id_85>",
+     "<extra_id_86>",
+     "<extra_id_87>",
+     "<extra_id_88>",
+     "<extra_id_89>",
+     "<extra_id_90>",
+     "<extra_id_91>",
+     "<extra_id_92>",
+     "<extra_id_93>",
+     "<extra_id_94>",
+     "<extra_id_95>",
+     "<extra_id_96>",
+     "<extra_id_97>",
+     "<extra_id_98>",
+     "<extra_id_99>"
+   ],
+   "eos_token": "</s>",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>"
+ }
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
+ size 791656
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,111 @@
+ {
+   "additional_special_tokens": [
+     "<extra_id_0>",
+     "<extra_id_1>",
+     "<extra_id_2>",
+     "<extra_id_3>",
+     "<extra_id_4>",
+     "<extra_id_5>",
+     "<extra_id_6>",
+     "<extra_id_7>",
+     "<extra_id_8>",
+     "<extra_id_9>",
+     "<extra_id_10>",
+     "<extra_id_11>",
+     "<extra_id_12>",
+     "<extra_id_13>",
+     "<extra_id_14>",
+     "<extra_id_15>",
+     "<extra_id_16>",
+     "<extra_id_17>",
+     "<extra_id_18>",
+     "<extra_id_19>",
+     "<extra_id_20>",
+     "<extra_id_21>",
+     "<extra_id_22>",
+     "<extra_id_23>",
+     "<extra_id_24>",
+     "<extra_id_25>",
+     "<extra_id_26>",
+     "<extra_id_27>",
+     "<extra_id_28>",
+     "<extra_id_29>",
+     "<extra_id_30>",
+     "<extra_id_31>",
+     "<extra_id_32>",
+     "<extra_id_33>",
+     "<extra_id_34>",
+     "<extra_id_35>",
+     "<extra_id_36>",
+     "<extra_id_37>",
+     "<extra_id_38>",
+     "<extra_id_39>",
+     "<extra_id_40>",
+     "<extra_id_41>",
+     "<extra_id_42>",
+     "<extra_id_43>",
+     "<extra_id_44>",
+     "<extra_id_45>",
+     "<extra_id_46>",
+     "<extra_id_47>",
+     "<extra_id_48>",
+     "<extra_id_49>",
+     "<extra_id_50>",
+     "<extra_id_51>",
+     "<extra_id_52>",
+     "<extra_id_53>",
+     "<extra_id_54>",
+     "<extra_id_55>",
+     "<extra_id_56>",
+     "<extra_id_57>",
+     "<extra_id_58>",
+     "<extra_id_59>",
+     "<extra_id_60>",
+     "<extra_id_61>",
+     "<extra_id_62>",
+     "<extra_id_63>",
+     "<extra_id_64>",
+     "<extra_id_65>",
+     "<extra_id_66>",
+     "<extra_id_67>",
+     "<extra_id_68>",
+     "<extra_id_69>",
+     "<extra_id_70>",
+     "<extra_id_71>",
+     "<extra_id_72>",
+     "<extra_id_73>",
+     "<extra_id_74>",
+     "<extra_id_75>",
+     "<extra_id_76>",
+     "<extra_id_77>",
+     "<extra_id_78>",
+     "<extra_id_79>",
+     "<extra_id_80>",
+     "<extra_id_81>",
+     "<extra_id_82>",
+     "<extra_id_83>",
+     "<extra_id_84>",
+     "<extra_id_85>",
+     "<extra_id_86>",
+     "<extra_id_87>",
+     "<extra_id_88>",
+     "<extra_id_89>",
+     "<extra_id_90>",
+     "<extra_id_91>",
+     "<extra_id_92>",
+     "<extra_id_93>",
+     "<extra_id_94>",
+     "<extra_id_95>",
+     "<extra_id_96>",
+     "<extra_id_97>",
+     "<extra_id_98>",
+     "<extra_id_99>"
+   ],
+   "eos_token": "</s>",
+   "extra_ids": 100,
+   "name_or_path": "pszemraj/long-t5-tglobal-base-16384-booksum-V6-partial",
+   "pad_token": "<pad>",
+   "special_tokens_map_file": null,
+   "tokenizer_class": "T5Tokenizer",
+   "unk_token": "<unk>"
+ }
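
"extra_ids": 100 registers the T5 sentinel tokens <extra_id_0> through <extra_id_99> used by the span-corruption pretraining objective; with the stock T5 SentencePiece model they sit at the top of the tokenizer's id range, just under the model's padded vocab_size of 32128. A quick inspection sketch (the hub id is an assumption, and the exact ids shown hold for the standard 32k T5 vocabulary):

# Inspect the sentinel tokens; repo id is assumed.
from transformers import AutoTokenizer

repo = "pszemraj/long-t5-tglobal-base-16384-booksum-V6-partial-ft3-booksum"  # assumed id
tok = AutoTokenizer.from_pretrained(repo)

# 32000 SentencePiece pieces + 100 sentinels = 32100 tokenizer ids;
# sentinels count down from the top, so <extra_id_0> takes the highest id.
print(tok.vocab_size)                              # expected: 32100
print(tok.convert_tokens_to_ids("<extra_id_0>"))   # expected: 32099
print(tok.convert_tokens_to_ids("<extra_id_99>"))  # expected: 32000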
trainer_state.json ADDED
@@ -0,0 +1,349 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 1.991123701605288,
+   "global_step": 164,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.04,
+       "learning_rate": 0.00030000000000000003,
+       "loss": 2.5934,
+       "step": 3
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 0.0003998458072481446,
+       "loss": 2.5887,
+       "step": 6
+     },
+     {
+       "epoch": 0.11,
+       "learning_rate": 0.0003990369453344394,
+       "loss": 2.5383,
+       "step": 9
+     },
+     {
+       "epoch": 0.15,
+       "learning_rate": 0.00039753766811902755,
+       "loss": 2.5382,
+       "step": 12
+     },
+     {
+       "epoch": 0.18,
+       "learning_rate": 0.0003953531762641745,
+       "loss": 2.5684,
+       "step": 15
+     },
+     {
+       "epoch": 0.22,
+       "learning_rate": 0.00039249104729072946,
+       "loss": 2.5694,
+       "step": 18
+     },
+     {
+       "epoch": 0.25,
+       "learning_rate": 0.00038896120929337566,
+       "loss": 2.5685,
+       "step": 21
+     },
+     {
+       "epoch": 0.29,
+       "learning_rate": 0.0003847759065022574,
+       "loss": 2.5249,
+       "step": 24
+     },
+     {
+       "epoch": 0.33,
+       "learning_rate": 0.00037994965681044433,
+       "loss": 2.5835,
+       "step": 27
+     },
+     {
+       "epoch": 0.36,
+       "learning_rate": 0.00037449920141455944,
+       "loss": 2.5326,
+       "step": 30
+     },
+     {
+       "epoch": 0.4,
+       "learning_rate": 0.00036844344674325733,
+       "loss": 2.564,
+       "step": 33
+     },
+     {
+       "epoch": 0.44,
+       "learning_rate": 0.0003618033988749895,
+       "loss": 2.5717,
+       "step": 36
+     },
+     {
+       "epoch": 0.47,
+       "learning_rate": 0.0003546020906725474,
+       "loss": 2.5149,
+       "step": 39
+     },
+     {
+       "epoch": 0.51,
+       "learning_rate": 0.0003468645018871371,
+       "loss": 2.5695,
+       "step": 42
+     },
+     {
+       "epoch": 0.54,
+       "learning_rate": 0.0003386174725091272,
+       "loss": 2.5374,
+       "step": 45
+     },
+     {
+       "epoch": 0.58,
+       "learning_rate": 0.0003298896096660367,
+       "loss": 2.5413,
+       "step": 48
+     },
+     {
+       "epoch": 0.62,
+       "learning_rate": 0.0003207111883907143,
+       "loss": 2.509,
+       "step": 51
+     },
+     {
+       "epoch": 0.65,
+       "learning_rate": 0.00031111404660392046,
+       "loss": 2.5628,
+       "step": 54
+     },
+     {
+       "epoch": 0.69,
+       "learning_rate": 0.00030113147467559695,
+       "loss": 2.5069,
+       "step": 57
+     },
+     {
+       "epoch": 0.73,
+       "learning_rate": 0.00029079809994790937,
+       "loss": 2.5331,
+       "step": 60
+     },
+     {
+       "epoch": 0.76,
+       "learning_rate": 0.0002801497666206282,
+       "loss": 2.5527,
+       "step": 63
+     },
+     {
+       "epoch": 0.8,
+       "learning_rate": 0.0002692234114154986,
+       "loss": 2.6179,
+       "step": 66
+     },
+     {
+       "epoch": 0.83,
+       "learning_rate": 0.00025805693545089247,
+       "loss": 2.5411,
+       "step": 69
+     },
+     {
+       "epoch": 0.87,
+       "learning_rate": 0.00024668907277118114,
+       "loss": 2.5583,
+       "step": 72
+     },
+     {
+       "epoch": 0.91,
+       "learning_rate": 0.00023515925598687094,
+       "loss": 2.534,
+       "step": 75
+     },
+     {
+       "epoch": 0.94,
+       "learning_rate": 0.00022350747949156756,
+       "loss": 2.5433,
+       "step": 78
+     },
+     {
+       "epoch": 0.98,
+       "learning_rate": 0.0002117741607302378,
+       "loss": 2.5487,
+       "step": 81
+     },
+     {
+       "epoch": 1.02,
+       "learning_rate": 0.0002,
+       "loss": 3.1324,
+       "step": 84
+     },
+     {
+       "epoch": 1.06,
+       "learning_rate": 0.00018822583926976218,
+       "loss": 2.4641,
+       "step": 87
+     },
+     {
+       "epoch": 1.1,
+       "learning_rate": 0.00017649252050843252,
+       "loss": 2.4806,
+       "step": 90
+     },
+     {
+       "epoch": 1.13,
+       "learning_rate": 0.0001648407440131291,
+       "loss": 2.4693,
+       "step": 93
+     },
+     {
+       "epoch": 1.17,
+       "learning_rate": 0.000153310927228819,
+       "loss": 2.5011,
+       "step": 96
+     },
+     {
+       "epoch": 1.21,
+       "learning_rate": 0.00014194306454910757,
+       "loss": 2.4595,
+       "step": 99
+     },
+     {
+       "epoch": 1.24,
+       "learning_rate": 0.00013077658858450138,
+       "loss": 2.4893,
+       "step": 102
+     },
+     {
+       "epoch": 1.28,
+       "learning_rate": 0.00011985023337937184,
+       "loss": 2.4932,
+       "step": 105
+     },
+     {
+       "epoch": 1.31,
+       "learning_rate": 0.00010920190005209065,
+       "loss": 2.4871,
+       "step": 108
+     },
+     {
+       "epoch": 1.35,
+       "learning_rate": 9.886852532440312e-05,
+       "loss": 2.4672,
+       "step": 111
+     },
+     {
+       "epoch": 1.39,
+       "learning_rate": 8.888595339607961e-05,
+       "loss": 2.4597,
+       "step": 114
+     },
+     {
+       "epoch": 1.42,
+       "learning_rate": 7.928881160928572e-05,
+       "loss": 2.4531,
+       "step": 117
+     },
+     {
+       "epoch": 1.46,
+       "learning_rate": 7.011039033396329e-05,
+       "loss": 2.4749,
+       "step": 120
+     },
+     {
+       "epoch": 1.5,
+       "learning_rate": 6.138252749087286e-05,
+       "loss": 2.4551,
+       "step": 123
+     },
+     {
+       "epoch": 1.53,
+       "learning_rate": 5.313549811286293e-05,
+       "loss": 2.4796,
+       "step": 126
+     },
+     {
+       "epoch": 1.57,
+       "learning_rate": 4.53979093274526e-05,
+       "loss": 2.4705,
+       "step": 129
+     },
+     {
+       "epoch": 1.6,
+       "learning_rate": 3.819660112501053e-05,
+       "loss": 2.4915,
+       "step": 132
+     },
+     {
+       "epoch": 1.64,
+       "learning_rate": 3.1556553256742713e-05,
+       "loss": 2.4958,
+       "step": 135
+     },
+     {
+       "epoch": 1.68,
+       "learning_rate": 2.5500798585440567e-05,
+       "loss": 2.4814,
+       "step": 138
+     },
+     {
+       "epoch": 1.71,
+       "learning_rate": 2.0050343189555743e-05,
+       "loss": 2.5034,
+       "step": 141
+     },
+     {
+       "epoch": 1.75,
+       "learning_rate": 1.5224093497742653e-05,
+       "loss": 2.4671,
+       "step": 144
+     },
+     {
+       "epoch": 1.79,
+       "learning_rate": 1.1038790706624391e-05,
+       "loss": 2.4987,
+       "step": 147
+     },
+     {
+       "epoch": 1.82,
+       "learning_rate": 7.508952709270567e-06,
+       "loss": 2.4618,
+       "step": 150
+     },
+     {
+       "epoch": 1.86,
+       "learning_rate": 4.646823735825523e-06,
+       "loss": 2.4815,
+       "step": 153
+     },
+     {
+       "epoch": 1.89,
+       "learning_rate": 2.462331880972468e-06,
+       "loss": 2.4595,
+       "step": 156
+     },
+     {
+       "epoch": 1.93,
+       "learning_rate": 9.630546655606364e-07,
+       "loss": 2.4925,
+       "step": 159
+     },
+     {
+       "epoch": 1.97,
+       "learning_rate": 1.5419275185541982e-07,
+       "loss": 2.4333,
+       "step": 162
+     },
+     {
+       "epoch": 1.99,
+       "step": 164,
+       "total_flos": 4.620604962546647e+17,
+       "train_loss": 2.525294606278582,
+       "train_runtime": 28715.0205,
+       "train_samples_per_second": 0.738,
+       "train_steps_per_second": 0.006
+     }
+   ],
+   "max_steps": 164,
+   "num_train_epochs": 2,
+   "total_flos": 4.620604962546647e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
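
trainer_state.json is machine-readable, so the loss curve can be pulled straight out of log_history: entries carrying a "loss" key are the per-log-step records, while the final summary entry reports train_loss and runtime instead. A small extraction sketch, assuming the file from this commit sits in the working directory:

# Extract (step, loss) pairs from the trainer state above.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# The summary entry has "train_loss" rather than "loss", so it is skipped.
points = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
for step, loss in points:
    print(f"step {step:4d}  loss {loss:.4f}")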
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:673e02f4e38479f806c1a20434ea2efa4f9cf7a6aa07067907fde9dc0160405c
+ size 4527