finiteautomata committed on
Commit
22986cf
1 Parent(s): e92219d

Upload robertuito-base-uncased

README.md ADDED
@@ -0,0 +1,19 @@
+ # robertuito-base-uncased
+
+ **WORK IN PROGRESS**
+
+ RoBERTa model trained on tweets.
+
+ For the time being, please use [this function](https://github.com/finiteautomata/finetune_vs_scratch/blob/main/finetune_vs_scratch/preprocessing.py#L13) to preprocess text before feeding it to the model. We still need to create a proper tokenizer for this model.
+
+
+
+ ## Masked LM
+
+ To test the masked LM, take into account that the space is encoded inside SentencePiece tokens. So, if you want to test
+
+ ```
+ Este es un día<mask>
+ ```
+
+ don't put a space between `día` and `<mask>`.
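
To make the preprocessing note above concrete, here is a minimal sketch. The import path mirrors the linked repository, but the function name `preprocess` and the Hub id `finiteautomata/robertuito-base-uncased` are assumptions, not confirmed by this commit:

```python
# Minimal sketch of the recommended preprocessing step.
# Assumptions: the linked module exposes a `preprocess` function and the
# checkpoint is published as "finiteautomata/robertuito-base-uncased".
from transformers import AutoModelForMaskedLM, AutoTokenizer

from finetune_vs_scratch.preprocessing import preprocess  # hypothetical name

model_name = "finiteautomata/robertuito-base-uncased"  # assumed Hub id
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForMaskedLM.from_pretrained(model_name)

# Normalize the raw tweet before tokenizing, as the README asks.
text = preprocess("@usuario qué buen día https://t.co/abc123")
inputs = tokenizer(text, return_tensors="pt")
outputs = model(**inputs)
```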
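And a sketch of the masked-LM test itself, using the `fill-mask` pipeline (same assumed Hub id); the point is that the mask goes right after `día`, with no space:

```python
# Sketch: query the masked LM. Note there is no space between "día" and
# <mask>, because the space is encoded inside the SentencePiece token.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="finiteautomata/robertuito-base-uncased")  # assumed id
for prediction in fill_mask("Este es un día<mask>"):
    print(prediction["token_str"], prediction["score"])
```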
config.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "architectures": [
+     "RobertaForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "classifier_dropout": null,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 130,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.10.0.dev0",
+   "type_vocab_size": 1,
+   "use_cache": true,
+   "vocab_size": 30000
+ }
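
One practical consequence of this config is the small `max_position_embeddings` (130): RoBERTa reserves two positions for offsets, leaving roughly 128 usable tokens per input. A hedged sketch of guarding against that (Hub id assumed, as above):

```python
# Sketch: respect the 130 position embeddings when tokenizing.
# The Hub id is an assumption.
from transformers import AutoConfig, AutoTokenizer

model_name = "finiteautomata/robertuito-base-uncased"
config = AutoConfig.from_pretrained(model_name)
assert config.max_position_embeddings == 130  # per the config above

tokenizer = AutoTokenizer.from_pretrained(model_name)
inputs = tokenizer("un tweet potencialmente muy largo", truncation=True, max_length=128)
```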
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2a131ecf8d5f0eb9d5ce4f76bc42c2bf9bbfab01e1a424aca307b69731d6f77
+ size 435341035
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": "<mask>"}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "sep_token": "</s>", "cls_token": "<s>", "unk_token": "<unk>", "pad_token": "<pad>", "mask_token": "<mask>", "special_tokens_map_file": "models/twerto-base-uncased/special_tokens_map.json", "name_or_path": "models/twerto-base-uncased", "tokenizer_class": "PreTrainedTokenizerFast"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:818f209a9cc11c74ce7d236c4f6ab2c1854049aa96c77eb8d6d686080e404132
+ size 2607