patrickvonplaten committed on
Commit 0b362f5
1 Parent(s): 544b258

upload all files
README.md ADDED
@@ -0,0 +1,96 @@
# Wav2Vec2 Acoustic Model fine-tuned on LibriSpeech

The original model can be found at https://github.com/pytorch/fairseq/tree/master/examples/wav2vec#wav2vec-20.

Paper: https://arxiv.org/abs/2006.11477

## Usage

Make sure you are working on [this branch](https://github.com/huggingface/transformers/tree/add_wav2vec) of transformers (it will hopefully be merged into master soon):

```bash
$ git checkout add_wav2vec
```
In the following, we'll show a simple example of how the model can be used for automatic speech recognition.

First, let's load the model:

```python
from transformers import AutoModelForMaskedLM

model = AutoModelForMaskedLM.from_pretrained("patrickvonplaten/wav2vec2-base-960h")
```
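Since we only run inference here, it helps to switch the model into evaluation mode so that dropout is disabled. This is plain PyTorch and nothing specific to the wav2vec2 branch:

```python
# disable dropout etc. for deterministic inference
model.eval()
```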
Next, let's load a dummy LibriSpeech dataset:

```python
from datasets import load_dataset
import soundfile as sf

libri_speech_dummy = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")

def map_to_array(batch):
    # read the audio file into a float array
    speech_array, _ = sf.read(batch["file"])
    batch["speech"] = speech_array
    return batch

libri_speech_dummy = libri_speech_dummy.map(map_to_array, remove_columns=["file"])

# check out dataset
print(libri_speech_dummy)

input_speech_16kHz = libri_speech_dummy[2]["speech"]
expected_trans = libri_speech_dummy[2]["text"]
```
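Note that the model expects 16 kHz input; LibriSpeech is already recorded at 16 kHz, so no resampling is needed here. For audio at a different sampling rate you would resample first. A minimal sketch using librosa (an assumption on our side, any resampler works):

```python
import librosa

def load_as_16khz(path):
    # librosa resamples to the requested rate while loading
    speech, _ = librosa.load(path, sr=16_000)
    return speech
```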
Cool, now we can run an inference pass to retrieve the logits:

```python
import torch

# soundfile returns float64 arrays while the model weights are float32,
# so cast explicitly and add a batch dimension
input_values = torch.tensor(input_speech_16kHz, dtype=torch.float32)[None, :]

with torch.no_grad():
    logits = model(input_values)

# use highest probability logits
pred_ids = torch.argmax(logits[0], dim=-1)
```
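The argmax ids are frame-level CTC predictions: they still contain repeated symbols and blank tokens ("<s>" in the fairseq dictionary used below). CTC decoding first collapses repeats and then drops the blanks, which is exactly what the decoder in the next step implements. A toy illustration of the idea:

```python
from itertools import groupby

# frame-level symbols with repeats and "<s>" blanks
frames = ["H", "H", "<s>", "E", "<s>", "L", "L", "<s>", "L", "O"]
fused = [tok for tok, _ in groupby(frames)]           # collapse repeats -> H <s> E <s> L <s> L O
text = "".join(tok for tok in fused if tok != "<s>")  # drop blanks -> "HELLO"
print(text)
```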
Finally, let's decode the prediction. Let's create a simple CTC decoder:

```python
import numpy as np
from itertools import groupby

class Decoder:
    def __init__(self, json_dict):
        self.dict = json_dict
        self.look_up = np.asarray(list(self.dict.keys()))

    def decode(self, ids):
        # map ids to characters
        converted_tokens = self.look_up[ids]
        # collapse repeated characters (CTC)
        fused_tokens = [tok[0] for tok in groupby(converted_tokens)]
        # drop the blank token "<s>" and replace the word delimiter "|" with a space
        output = ' '.join(''.join(''.join(fused_tokens).split("<s>")).split("|"))
        return output
```
and instantiate it with the corresponding dict:

```python
# hard-coded json dict taken from: https://dl.fbaipublicfiles.com/fairseq/wav2vec/dict.ltr.txt
json_dict = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "|": 4, "E": 5, "T": 6, "A": 7, "O": 8, "N": 9, "I": 10, "H": 11, "S": 12, "R": 13, "D": 14, "L": 15, "U": 16, "M": 17, "W": 18, "C": 19, "F": 20, "G": 21, "Y": 22, "P": 23, "B": 24, "V": 25, "K": 26, "'": 27, "X": 28, "J": 29, "Q": 30, "Z": 31}

decoder = Decoder(json_dict=json_dict)
```
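As a quick sanity check of the decoder, here is a toy id sequence under the hard-coded dictionary above (11 = "H", 0 = "<s>", 10 = "I", 4 = "|"):

```python
# H, H, <s>, I, | -> collapse repeats, drop blanks, map "|" to a space
print(decoder.decode([11, 11, 0, 10, 4]))  # prints "HI "
```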
Now let's decode the actual prediction:

```python
pred_trans = decoder.decode(pred_ids)

print("Prediction:\n", pred_trans)
print("\n" + 50 * "=" + "\n")
print("Correct result:\n", expected_trans)
```

🎉
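To put a number on how close the prediction is to the reference transcription, you can compute the word error rate. A minimal sketch assuming the jiwer package is installed (it is not a dependency of this repo):

```python
from jiwer import wer

# word error rate between reference and hypothesis (0.0 = perfect match)
print(f"WER: {wer(expected_trans, pred_trans):.2%}")
```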
config.json ADDED
@@ -0,0 +1,51 @@
{
  "architectures": [
    "Wav2Vec2ForMaskedLM"
  ],
  "conv_bias": false,
  "conv_dim": [
    512,
    512,
    512,
    512,
    512,
    512,
    512
  ],
  "conv_kernel": [
    10,
    3,
    3,
    3,
    3,
    2,
    2
  ],
  "conv_stride": [
    5,
    2,
    2,
    2,
    2,
    2,
    2
  ],
  "do_stable_layer_norm": false,
  "feat_extract_activation": "gelu",
  "feat_extract_dropout": 0.0,
  "feat_extract_norm": "group",
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-05,
  "model_type": "wav2vec2",
  "num_attention_heads": 12,
  "num_conv_pos_embedding_groups": 16,
  "num_conv_pos_embeddings": 128,
  "num_feat_extract_layers": 7,
  "num_hidden_layers": 12,
  "transformers_version": "4.3.0.dev0",
  "vocab_size": 32
}
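The conv_stride values multiply to 5·2·2·2·2·2·2 = 320, so the feature extractor emits one output frame per 320 input samples (20 ms at 16 kHz). The config can also be inspected programmatically; a short sketch, assuming AutoConfig resolves this model type on the branch above:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("patrickvonplaten/wav2vec2-base-960h")
# 7 conv layers with total stride 320 -> ~20 ms per output frame at 16 kHz
print(config.num_feat_extract_layers, config.conv_stride, config.vocab_size)
```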
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c34f9827b034a1b9141dbf6f652f8a60eda61cdf5771c9e05bfa99033c92cd96
size 377667514
special_tokens_map.json ADDED
@@ -0,0 +1 @@
{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
{"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "<pad>", "do_lower_case": false}
vocab.json ADDED
@@ -0,0 +1 @@
{"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "|": 4, "E": 5, "T": 6, "A": 7, "O": 8, "N": 9, "I": 10, "H": 11, "S": 12, "R": 13, "D": 14, "L": 15, "U": 16, "M": 17, "W": 18, "C": 19, "F": 20, "G": 21, "Y": 22, "P": 23, "B": 24, "V": 25, "K": 26, "'": 27, "X": 28, "J": 29, "Q": 30, "Z": 31}