Commit 916050f by w11wo
1 Parent(s): ebbddfa

Saving weights and logs of epoch 1

events.out.tfevents.1625649517.t1v-n-b95d739e-w-0.10251.3.v2 DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1049dd09f81c1c4446d4258b96c037dc61968c1e94aa392f0e17259daeea6bff
- size 19919742
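All of the binaries in this commit are tracked with Git LFS, so each diff shows only the three-line pointer file (spec version, sha256 oid, payload size in bytes) rather than the binary contents. As an illustration (not code from this repo), a downloaded payload can be checked against its pointer like this:

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, payload_path: str) -> bool:
    """Check a local file against the oid/size recorded in a Git LFS pointer."""
    # Pointer files are "key value" lines: version, oid, size.
    fields = dict(
        line.split(" ", 1)
        for line in Path(pointer_path).read_text().splitlines()
        if line.strip()
    )
    expected_oid = fields["oid"].removeprefix("sha256:")  # Python 3.9+
    payload = Path(payload_path).read_bytes()
    return (
        hashlib.sha256(payload).hexdigest() == expected_oid
        and len(payload) == int(fields["size"])
    )
```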
 
 
 
 
events.out.tfevents.1625719131.t1v-n-b95d739e-w-0.74969.3.v2 → events.out.tfevents.1625741152.t1v-n-b95d739e-w-0.95396.3.v2 RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a5ed7d976c899e522aa6461159606c91b4403064284cd5e826e2794ea03afee8
- size 6822986
+ oid sha256:a393b11affb3db2764adb97b61578f82378808344c246f16ecff4a67ebc47e77
+ size 3581817
flax_model.msgpack CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bfd8f69603b008f0f11fbab5affecb148a8a56123d4730e4963cc3ec32a4c9c1
+ oid sha256:8a309f76b746c4515f4ef732ac73e29dca90a8c6e074f7b46936375e1980fa30
  size 498796983
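flax_model.msgpack is the serialized Flax weight file that transformers reads back in; the byte size is identical before and after (498,796,983), which is consistent with only the weights changing between checkpoints, not the architecture. A minimal loading sketch (the repo id below is a placeholder, not this repository's actual Hub name):

```python
from transformers import AutoTokenizer, FlaxAutoModelForMaskedLM

# Placeholder repo id; substitute the actual Hub id of this model.
repo_id = "w11wo/some-indonesian-mlm"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = FlaxAutoModelForMaskedLM.from_pretrained(repo_id)  # loads flax_model.msgpack
```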
nohup.out CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:921b6699c57a4d28372b4f6e58fc2181661621faec5dfae908d2affac3d4ea70
- size 15574040
+ oid sha256:2a8b1dee60938c59f43e6e76b676ef82307e1b911c45cb3a172607d950505880
+ size 2613881
run.sh CHANGED
@@ -8,15 +8,12 @@ python3 run_mlm_flax.py \
  --dataset_name="oscar" \
  --dataset_config_name="unshuffled_deduplicated_id" \
  --max_seq_length="128" \
- --weight_decay="0.01" \
+ --preprocessing_num_workers="64" \
  --per_device_train_batch_size="128" \
  --per_device_eval_batch_size="128" \
- --learning_rate="3e-4" \
+ --learning_rate="2e-4" \
  --warmup_steps="1000" \
  --overwrite_output_dir \
- --pad_to_max_length \
- --num_train_epochs="10" \
- --adam_beta1="0.9" \
- --adam_beta2="0.98" \
+ --num_train_epochs="8" \
  --push_to_hub
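Net effect of the run.sh change: the learning rate drops from 3e-4 to 2e-4, training shortens from 10 to 8 epochs, 64 preprocessing workers are added, and the explicit --weight_decay, --adam_beta1/--adam_beta2, and --pad_to_max_length flags are removed, presumably falling back to the script's defaults. The Flax MLM example script builds its learning-rate schedule as linear warmup followed by linear decay; a sketch of what the new flags imply (total_steps is illustrative, since it depends on dataset size, batch size, and epoch count):

```python
import optax

total_steps = 100_000  # illustrative; derived from dataset size, batch size, epochs
warmup_steps = 1_000   # --warmup_steps
peak_lr = 2e-4         # --learning_rate

# Linear warmup to peak_lr, then linear decay to zero.
schedule = optax.join_schedules(
    schedules=[
        optax.linear_schedule(0.0, peak_lr, transition_steps=warmup_steps),
        optax.linear_schedule(peak_lr, 0.0, transition_steps=total_steps - warmup_steps),
    ],
    boundaries=[warmup_steps],
)
optimizer = optax.adamw(learning_rate=schedule)  # betas/weight decay now at defaults
```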