preprocessor.py 8.69 KB
Newer Older
Anthony Larcher's avatar
Anthony Larcher committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
# -*- coding: utf-8 -*-
#
# This file is part of SIDEKIT.
#
# SIDEKIT is a python package for speaker verification.
# Home page: http://www-lium.univ-lemans.fr/sidekit/
#
# SIDEKIT is a python package for speaker verification.
# Home page: http://www-lium.univ-lemans.fr/sidekit/
#
# SIDEKIT is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# SIDEKIT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with SIDEKIT.  If not, see <http://www.gnu.org/licenses/>.

"""
Copyright 2014-2021 Anthony Larcher, Yevhenii Prokopalo
"""


import logging
import math
import os
import numpy
import pandas
import pickle
import shutil
import time
import torch
import torchaudio
import tqdm
import yaml

from collections import OrderedDict
from torch.utils.data import DataLoader
from sklearn.model_selection import train_test_split
Anthony Larcher's avatar
debug    
Anthony Larcher committed
45
from .augmentation import PreEmphasis
Anthony Larcher's avatar
Anthony Larcher committed
46
47
from .xsets import SideSet
from .xsets import IdMapSet
Anthony Larcher's avatar
debug    
Anthony Larcher committed
48
from .xsets import IdMapSetPerSpeaker
Anthony Larcher's avatar
Anthony Larcher committed
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
from .xsets import SideSampler
from .res_net import ResBlockWFMS
from .res_net import ResBlock
from .res_net import PreResNet34
from .res_net import PreFastResNet34
from ..bosaris import IdMap
from ..bosaris import Key
from ..bosaris import Ndx
from ..statserver import StatServer
from ..iv_scoring import cosine_scoring
from .sincnet import SincNet
from .loss import ArcLinear
from .loss import l2_norm
from .loss import ArcMarginProduct


# Work around MKL/OpenMP threading-layer conflicts when mixing numpy and torch.
os.environ['MKL_THREADING_LAYER'] = 'GNU'

__license__ = "LGPL"
__author__ = "Anthony Larcher"
__copyright__ = "Copyright 2015-2021 Anthony Larcher"
__maintainer__ = "Anthony Larcher"
__email__ = "anthony.larcher@univ-lemans.fr"
__status__ = "Production"
__docformat__ = 'reS'


# Module-wide log format: timestamp followed by the message.
logging.basicConfig(format='%(asctime)s %(message)s')


# Make PyTorch Deterministic
# Fixed seeds + deterministic cuDNN make runs reproducible at the cost of
# disabling cuDNN autotuning (benchmark=False).
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
numpy.random.seed(0)



class MfccFrontEnd(torch.nn.Module):
    """MFCC front-end for raw waveform input.

    Pipeline: pre-emphasis -> torchaudio MFCC (log-mel + DCT-II) ->
    per-utterance mean/variance normalization (InstanceNorm1d acting as CMVN).
    Runs under ``torch.no_grad()``: the front-end is not trained.
    """

    def __init__(self,
                 pre_emphasis=0.97,
                 sample_rate=16000,
                 n_fft=2048,
                 f_min=133.333,
                 f_max=6855.4976,
                 win_length=1024,
                 window_fn=torch.hann_window,
                 hop_length=512,
                 power=2.0,
                 n_mels=100,
                 n_mfcc=80):
        """
        :param pre_emphasis: pre-emphasis coefficient applied to the waveform
        :param sample_rate: sampling rate of the input signal in Hz
        :param n_fft: FFT size
        :param f_min: lowest mel-filterbank frequency in Hz
        :param f_max: highest mel-filterbank frequency in Hz
        :param win_length: analysis window length in samples
        :param window_fn: window function used by the spectrogram
        :param hop_length: hop between successive frames in samples
        :param power: exponent of the magnitude spectrogram
        :param n_mels: number of mel filters
        :param n_mfcc: number of cepstral coefficients kept
        """
        super(MfccFrontEnd, self).__init__()

        self.pre_emphasis = pre_emphasis
        self.sample_rate = sample_rate
        self.n_fft = n_fft
        self.f_min = f_min
        self.f_max = f_max
        self.win_length = win_length
        # Fixed: the original assigned self.window_fn twice (dead duplicate).
        self.window_fn = window_fn
        self.hop_length = hop_length
        self.power = power
        self.n_mels = n_mels
        self.n_mfcc = n_mfcc

        self.PreEmphasis = PreEmphasis(self.pre_emphasis)

        self.melkwargs = {"n_fft": self.n_fft,
                          "f_min": self.f_min,
                          "f_max": self.f_max,
                          "win_length": self.win_length,
                          "window_fn": self.window_fn,
                          "hop_length": self.hop_length,
                          "power": self.power,
                          "n_mels": self.n_mels}

        self.MFCC = torchaudio.transforms.MFCC(
            sample_rate=self.sample_rate,
            n_mfcc=self.n_mfcc,
            dct_type=2,
            log_mels=True,
            melkwargs=self.melkwargs)

        # InstanceNorm1d normalizes each coefficient over time, i.e. CMVN.
        self.CMVN = torch.nn.InstanceNorm1d(self.n_mfcc)

    def forward(self, x):
        """Extract normalized MFCC features from a batch of waveforms.

        :param x: input waveform tensor
        :return: normalized MFCC tensor
        """
        # Feature extraction carries no gradient and is forced to fp32
        # even when the surrounding training loop uses AMP.
        with torch.no_grad():
            with torch.cuda.amp.autocast(enabled=False):
                mfcc = self.PreEmphasis(x)
                mfcc = self.MFCC(mfcc)
                mfcc = self.CMVN(mfcc)
        return mfcc


class MelSpecFrontEnd(torch.nn.Module):
    """Log mel-spectrogram front-end for raw waveform input.

    Pipeline: pre-emphasis -> MelSpectrogram -> log -> per-utterance
    mean/variance normalization (InstanceNorm1d acting as CMVN).
    During training (``is_eval=False``) SpecAugment-style frequency and
    time masking is applied. Runs under ``torch.no_grad()``.
    """

    def __init__(self,
                 pre_emphasis=0.97,
                 sample_rate=16000,
                 n_fft=1024,
                 f_min=90,
                 f_max=7600,
                 win_length=400,
                 window_fn=torch.hann_window,
                 hop_length=160,
                 power=2.0,
                 n_mels=80):
        """
        :param pre_emphasis: pre-emphasis coefficient applied to the waveform
        :param sample_rate: sampling rate of the input signal in Hz
        :param n_fft: FFT size
        :param f_min: lowest mel-filterbank frequency in Hz
        :param f_max: highest mel-filterbank frequency in Hz
        :param win_length: analysis window length in samples (25 ms at 16 kHz)
        :param window_fn: window function used by the spectrogram
        :param hop_length: hop between successive frames (10 ms at 16 kHz)
        :param power: exponent of the magnitude spectrogram
        :param n_mels: number of mel filters
        """
        super(MelSpecFrontEnd, self).__init__()

        self.pre_emphasis = pre_emphasis
        self.sample_rate = sample_rate
        self.n_fft = n_fft
        self.f_min = f_min
        self.f_max = f_max
        self.win_length = win_length
        # Fixed: the original assigned self.window_fn twice (dead duplicate).
        self.window_fn = window_fn
        self.hop_length = hop_length
        self.power = power
        self.n_mels = n_mels

        self.PreEmphasis = PreEmphasis(self.pre_emphasis)

        self.melkwargs = {"n_fft": self.n_fft,
                          "f_min": self.f_min,
                          "f_max": self.f_max,
                          "win_length": self.win_length,
                          "window_fn": self.window_fn,
                          "hop_length": self.hop_length,
                          "power": self.power,
                          "n_mels": self.n_mels}

        self.MelSpec = torchaudio.transforms.MelSpectrogram(sample_rate=self.sample_rate,
                                                            n_fft=self.melkwargs['n_fft'],
                                                            f_min=self.melkwargs['f_min'],
                                                            f_max=self.melkwargs['f_max'],
                                                            win_length=self.melkwargs['win_length'],
                                                            hop_length=self.melkwargs['hop_length'],
                                                            window_fn=self.melkwargs['window_fn'],
                                                            power=self.melkwargs['power'],
                                                            n_mels=self.melkwargs['n_mels'])

        # InstanceNorm1d normalizes each mel band over time, i.e. CMVN.
        self.CMVN = torch.nn.InstanceNorm1d(self.n_mels)

        # SpecAugment-style masking, applied only in training mode (see forward).
        self.time_masking = torchaudio.transforms.TimeMasking(time_mask_param=5)
        self.freq_masking = torchaudio.transforms.FrequencyMasking(freq_mask_param=10)

    def forward(self, x, is_eval=False):
        """Extract normalized log mel-spectrogram features.

        :param x: input waveform tensor; a 1D tensor is treated as a
            single utterance and given a batch dimension
        :param is_eval: when True, skip frequency/time masking augmentation
        :return: normalized log mel-spectrogram tensor
        """
        # Feature extraction carries no gradient and is forced to fp32
        # even when the surrounding training loop uses AMP.
        with torch.no_grad():
            with torch.cuda.amp.autocast(enabled=False):
                if x.dim() == 1:
                    x = x.unsqueeze(0)
                out = self.PreEmphasis(x)
                # Small epsilon avoids log(0) on silent frames.
                out = self.MelSpec(out) + 1e-6
                out = torch.log(out)
                out = self.CMVN(out)
                if not is_eval:
                    out = self.freq_masking(out)
                    out = self.time_masking(out)
        return out


class RawPreprocessor(torch.nn.Module):
    """Raw-waveform front-end: layer normalization followed by a sinc
    convolution filterbank, max-pooling over the rectified responses,
    batch normalization and a Keras-style LeakyReLU (slope 0.3).
    """
    def __init__(self, nb_samp, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bias=False, groups=1, min_low_hz=50, min_band_hz=50, sample_rate=16000):
        """
        :param nb_samp: number of samples per input waveform (layer-norm size)
        :param in_channels: number of input channels
        :param out_channels: number of sinc filters
        :param kernel_size: length of each sinc filter in samples
        :param stride: convolution stride
        :param padding: convolution padding
        :param dilation: convolution dilation
        :param bias: whether the sinc convolution uses a bias term
        :param groups: convolution groups
        :param min_low_hz: lower bound on filter low cut-off frequency in Hz
        :param min_band_hz: lower bound on filter bandwidth in Hz
        :param sample_rate: sampling rate of the input signal in Hz
        """
        super(RawPreprocessor, self).__init__()
        # NOTE(review): LayerNorm and SincConv1d are not in this file's
        # visible import block — presumably provided elsewhere; confirm.
        self.ln = LayerNorm(nb_samp)
        self.first_conv = SincConv1d(in_channels=in_channels,
                                     out_channels=out_channels,
                                     kernel_size=kernel_size,
                                     sample_rate=sample_rate,
                                     stride=stride,
                                     padding=padding,
                                     dilation=dilation,
                                     bias=bias,
                                     groups=groups,
                                     min_low_hz=min_low_hz,
                                     min_band_hz=min_band_hz)
        self.first_bn = torch.nn.BatchNorm1d(num_features=out_channels)
        self.lrelu = torch.nn.LeakyReLU()
        self.lrelu_keras = torch.nn.LeakyReLU(negative_slope=0.3)

    def forward(self, x):
        """Run the raw-waveform front-end.

        :param x: 2D waveform tensor of shape (batch, samples)
        :return: filterbank output tensor
        """
        batch_size, seq_len = x.shape[0], x.shape[1]
        # Normalize, then add a singleton channel axis for the convolution.
        normed = self.ln(x).view(batch_size, 1, seq_len)
        # Rectify the sinc responses and pool by a factor of 3 in time.
        pooled = torch.nn.functional.max_pool1d(torch.abs(self.first_conv(normed)), 3)
        return self.lrelu_keras(self.first_bn(pooled))