Commit 99e43094 authored by Florent Desnous
parents 28cedd13 33800b29
@@ -3,4 +3,4 @@ s4d.egg-info/
 */__pycache__/
 *.pyc
 dist/
+tutorials/
@@ -221,9 +221,10 @@ def automatonAssignment(diarHyp,diarRef,diarUem=None,tolerance=0,diarFinal__clus
 ## tolerance: In centiseconds
 ## diarFinal__clusterToDeleteAccordingToDiarRef: List of clusters to delete in the diarFinal only
 ## modeNoGap: Drops or not the segment actions (i.e. createSegment & deleteSegment)
-## mergeStrat_BiggestCluster: Whether we merge in the temporal order or first the biggest cluster for a given reference segment
-def automatonSegmentation(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=False,mergeStrat_BiggestCluster=False,diarFinal__clusterToDeleteAccordingToDiarRef=list()):
-    assert isinstance(diarHyp,Diar) and isinstance(diarRef,Diar) and isinstance(mergeStrat_BiggestCluster,bool) and isinstance(modeNoGap,bool) and (diarUem is None or isinstance(diarUem,Diar)) and isinstance(tolerance,numbers.Number) and isinstance(diarFinal__clusterToDeleteAccordingToDiarRef,list)
+## modeNoGap__mergeStrat_BiggestCluster: Whether we merge in the temporal order or first the biggest cluster for a given reference segment (only useful when modeNoGap is False)
+## deleteBoundarySameConsecutiveSpk: Whether we delete a boundary for two consecutive segments with the same speaker
+def automatonSegmentation(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=False,modeNoGap__mergeStrat_BiggestCluster=False,diarFinal__clusterToDeleteAccordingToDiarRef=list(),deleteBoundarySameConsecutiveSpk=False):
+    assert isinstance(diarHyp,Diar) and isinstance(diarRef,Diar) and isinstance(modeNoGap__mergeStrat_BiggestCluster,bool) and isinstance(modeNoGap,bool) and (diarUem is None or isinstance(diarUem,Diar)) and isinstance(tolerance,numbers.Number) and isinstance(diarFinal__clusterToDeleteAccordingToDiarRef,list) and isinstance(deleteBoundarySameConsecutiveSpk,bool)
     for u in diarFinal__clusterToDeleteAccordingToDiarRef:
         assert isinstance(u,str)
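For orientation, a minimal sketch of how the updated signature might be called, assuming diarRef and diarHyp are s4d Diar objects; the file names, the read_mdtm loader used here, and the result variable are illustrative, not part of this commit:

    from s4d.diar import Diar

    # Hypothetical inputs: reference and hypothesis diarizations
    diarRef = Diar.read_mdtm('ref.mdtm')
    diarHyp = Diar.read_mdtm('hyp.mdtm')

    # tolerance is in centiseconds (25 -> 0.25 s); merge the biggest cluster
    # first, and emit boundary-delete actions even between same-speaker segments
    result = automatonSegmentation(diarHyp, diarRef, tolerance=25,
                                   modeNoGap=False,
                                   modeNoGap__mergeStrat_BiggestCluster=True,
                                   deleteBoundarySameConsecutiveSpk=True)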
@@ -288,7 +289,10 @@ def automatonSegmentation(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=Fal
     showname=diarRef.unique('show')[0]
     diarRef.sort()
     diarHyp.sort()
     tolerance=abs(tolerance)
+    if not strictBoundary:
+        diarRef.pack()
+        diarHyp.pack()
     assert len(diarOverlapArea(diarRef))==0, "Error: diarRef parameter has some overlapped segments.\nReason: No overlap segment allowed.\nSolution: Please put them apart.\n"
     assert len(diarOverlapArea(diarHyp))==0, "Error: diarHyp parameter has some overlapped segments.\nReason: No overlap segment allowed.\nSolution: Please put them apart.\n"
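Both asserts depend on diarOverlapArea returning the overlapping regions of a diarization (empty when no segments overlap). A standalone sketch of that kind of check on plain (start, stop) pairs; the helper name and tuple layout are assumptions for illustration:

    # Illustrative overlap detection on (start, stop) pairs
    def overlap_regions(segments):
        regions, last_stop = [], None
        for start, stop in sorted(segments):
            if last_stop is not None and start < last_stop:
                # current segment begins before the previous ones ended
                regions.append((start, min(stop, last_stop)))
            last_stop = stop if last_stop is None else max(last_stop, stop)
        return regions

    overlap_regions([(0, 100), (90, 200)])   # -> [(90, 100)]
    overlap_regions([(0, 100), (100, 200)])  # -> [] (touching is not overlapping)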
@@ -527,7 +531,7 @@ def automatonSegmentation(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=Fal
     valueBoundaryStart=copy.deepcopy(y['stop'])
     if valueBoundaryStart is None:
         valueBoundaryStart=valueRef['start']
-    if mergeStrat_BiggestCluster == True:
+    if modeNoGap__mergeStrat_BiggestCluster == True:
         # Gets the cluster which has the most present frames
         dictHypRefSegmentDuration=dict()
         for y in listHypRefSegment:
@@ -542,10 +546,10 @@ def automatonSegmentation(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=Fal
             if cls['start']>y['start']:
                 cls=y
         clusterName=cls['cluster']
-        # Moves the boundaries
-        # Prerequisite for correct operation: listHypRefSegment sorted in ascending order on start; do not go past valueRef['stop'] and valueRef['start']
         if modeNoGap == False:
             for idx,z in enumerate(listHypRefSegment):
+                # Moves the boundaries
+                # Prerequisite for correct operation: listHypRefSegment sorted in ascending order on start; do not go past valueRef['stop'] and valueRef['start']
                 nearStop=valueRef['stop']
                 if idx==0:
                     boundStop=z['stop']
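The biggest-cluster strategy gated above accumulates, per cluster, how many hypothesis frames fall inside the current reference segment and keeps the cluster with the largest total. A self-contained sketch of that idea on plain tuples; all names are illustrative:

    # Minimal sketch of the biggest-cluster choice on (start, stop, cluster)
    # tuples; frame counts stand in for the per-segment durations used above.
    def biggest_cluster(ref, hyp_segments):
        ref_start, ref_stop = ref
        durations = {}
        for start, stop, cluster in hyp_segments:
            overlap = min(stop, ref_stop) - max(start, ref_start)
            if overlap > 0:
                durations[cluster] = durations.get(cluster, 0) + overlap
        # Ties broken arbitrarily here; the real code prefers the earliest segment
        return max(durations, key=durations.get) if durations else None

    biggest_cluster((100, 400), [(100, 250, 'spk1'), (250, 400, 'spk2'), (380, 520, 'spk1')])
    # -> 'spk1' (150 + 20 overlapping frames vs. 150 for spk2)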
@@ -592,22 +596,21 @@ def automatonSegmentation(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=Fal
         elif tolerance!=0 and y['start']>=(valueRef['start']-tolerance):
             listHypRefSegment.append(y)
     # Replaces the segments which are not in the correct cluster
-    replaced=False
-    for y in listHypRefSegment:
-        if y['cluster']!=clusterName:
-            replaced=True
-            yTmp=copy.deepcopy(y)
-            yTmp['cluster']=clusterName
-            if modeNoGap == False:
-                actionsSegmentationSegmentDelete.append(copy.deepcopy(y))
-                actionsIncrementalSegmentationSegmentDeleteTurn.append(copy.deepcopy(y))
-            valueTmp=dropSegment(y,valueTmp)
-            if modeNoGap == False:
-                actionsSegmentationSegmentCreate.append(copy.deepcopy(Segment([valueRef['show'],yTmp['cluster'],yTmp['cluster_type'],yTmp['start'],yTmp['stop']],['show','cluster','cluster_type','start','stop'])))
-                actionsIncrementalSegmentationSegmentCreateTurn.append(copy.deepcopy(Segment([valueRef['show'],yTmp['cluster'],yTmp['cluster_type'],yTmp['start'],yTmp['stop']],['show','cluster','cluster_type','start','stop'])))
-            valueTmp.append_seg(yTmp)
-    if replaced:
-        valueTmp.sort()
+    if modeNoGap == False:
+        replaced=False
+        for y in listHypRefSegment:
+            if y['cluster']!=clusterName:
+                replaced=True
+                yTmp=copy.deepcopy(y)
+                yTmp['cluster']=clusterName
+                actionsSegmentationSegmentDelete.append(copy.deepcopy(y))
+                actionsIncrementalSegmentationSegmentDeleteTurn.append(copy.deepcopy(y))
+                valueTmp=dropSegment(y,valueTmp)
+                actionsSegmentationSegmentCreate.append(copy.deepcopy(Segment([valueRef['show'],yTmp['cluster'],yTmp['cluster_type'],yTmp['start'],yTmp['stop']],['show','cluster','cluster_type','start','stop'])))
+                actionsIncrementalSegmentationSegmentCreateTurn.append(copy.deepcopy(Segment([valueRef['show'],yTmp['cluster'],yTmp['cluster_type'],yTmp['start'],yTmp['stop']],['show','cluster','cluster_type','start','stop'])))
+                valueTmp.append_seg(yTmp)
+        if replaced:
+            valueTmp.sort()
     # Merges among them if > 1
     if len(listHypRefSegment)>1:
         # Gets the new segments, modified by the previous steps
@@ -618,17 +621,27 @@ def automatonSegmentation(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=Fal
                 listTmp.append(y)
             elif tolerance!=0 and y['start']>=(valueRef['start']-tolerance):
                 listTmp.append(y)
-        actionsSegmentationBoundaryMerge.append(copy.deepcopy([listTmp[0],listTmp[1]]))
-        actionsIncrementalSegmentationBoundaryMergeTurn.append(copy.deepcopy([listTmp[0],listTmp[1]]))
-        newSegment,valueTmp=mergeSegment(listTmp[0],listTmp[1],valueTmp)
+        if not (not deleteBoundarySameConsecutiveSpk and listTmp[0]['cluster']==listTmp[1]['cluster']):
+            actionsSegmentationBoundaryMerge.append(copy.deepcopy([listTmp[0],listTmp[1]]))
+            actionsIncrementalSegmentationBoundaryMergeTurn.append(copy.deepcopy([listTmp[0],listTmp[1]]))
+            if modeNoGap == True and listTmp[0]['cluster']!=listTmp[1]['cluster']:
+                listTmp[1]['cluster']=listTmp[0]['cluster']
+            newSegment,valueTmp=mergeSegment(listTmp[0],listTmp[1],valueTmp)
+        else:
+            newSegment=listTmp[1]
         for y in range(2,len(listTmp)):
             if modeNoGap == True:
                 if not (Segment.intersection(newSegment,listTmp[y]) is not None or newSegment["stop"]==listTmp[y]["start"] or newSegment["start"]==listTmp[y]["stop"]):
                     logging.error("Cannot have absence of a segment in Transcriber mode.")
                     raise Exception("Absence of a segment.")
-            actionsSegmentationBoundaryMerge.append(copy.deepcopy([newSegment,listTmp[y]]))
-            actionsIncrementalSegmentationBoundaryMergeTurn.append(copy.deepcopy([newSegment,listTmp[y]]))
-            newSegment,valueTmp=mergeSegment(newSegment,listTmp[y],valueTmp)
+            if not (not deleteBoundarySameConsecutiveSpk and newSegment['cluster']==listTmp[y]['cluster']):
+                actionsSegmentationBoundaryMerge.append(copy.deepcopy([newSegment,listTmp[y]]))
+                actionsIncrementalSegmentationBoundaryMergeTurn.append(copy.deepcopy([newSegment,listTmp[y]]))
+                if modeNoGap == True and newSegment['cluster']!=listTmp[y]['cluster']:
+                    listTmp[y]['cluster']=newSegment['cluster']
+                newSegment,valueTmp=mergeSegment(newSegment,listTmp[y],valueTmp)
+            else:
+                newSegment=listTmp[y]
     # Updates diarHyp
     diarHyp=valueTmp
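The new guard is a double negation: the boundary-merge action fires whenever deleteBoundarySameConsecutiveSpk is set or the two segments belong to different clusters; otherwise the boundary is kept and newSegment simply advances. The equivalent simplified predicate, with hypothetical names:

    # De-Morgan-simplified form of the merge guard used above
    def should_merge(left_cluster, right_cluster, deleteBoundarySameConsecutiveSpk):
        # merge unless the flag is off AND both segments share the same speaker
        return deleteBoundarySameConsecutiveSpk or left_cluster != right_cluster

    should_merge('spk1', 'spk1', False)  # False: boundary kept, newSegment advances
    should_merge('spk1', 'spk1', True)   # True: same-speaker boundary deleted
    should_merge('spk1', 'spk2', False)  # True: differing clusters always merge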
@@ -709,8 +722,10 @@ def automatonSegmentation(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=Fal
 ## tolerance: In centiseconds
 ## diarFinal__clusterToDeleteAccordingToDiarRef: List of clusters to delete in the diarFinal only
 ## modeNoGap: Drops or not the segment actions (i.e. createSegment & deleteSegment)
-def automatonSegmentationAssignment(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=False,diarFinal__clusterToDeleteAccordingToDiarRef=list()):
-    assert isinstance(diarHyp,Diar) and isinstance(diarRef,Diar) and isinstance(modeNoGap,bool) and (diarUem is None or isinstance(diarUem,Diar)) and isinstance(tolerance,numbers.Number) and isinstance(diarFinal__clusterToDeleteAccordingToDiarRef,list)
+## deleteBoundarySameConsecutiveSpk: Whether we delete a boundary for two consecutive segments with the same speaker
+## deleteBoundaryMergeCluster: The action "delete a boundary" can merge two consecutive segments with different cluster names (it takes the name of the left/first segment)
+def automatonSegmentationAssignment(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=False,diarFinal__clusterToDeleteAccordingToDiarRef=list(),deleteBoundarySameConsecutiveSpk=False,deleteBoundaryMergeCluster=False):
+    assert isinstance(diarHyp,Diar) and isinstance(diarRef,Diar) and isinstance(modeNoGap,bool) and (diarUem is None or isinstance(diarUem,Diar)) and isinstance(tolerance,numbers.Number) and isinstance(diarFinal__clusterToDeleteAccordingToDiarRef,list) and isinstance(deleteBoundarySameConsecutiveSpk,bool) and isinstance(deleteBoundaryMergeCluster,bool)
     for u in diarFinal__clusterToDeleteAccordingToDiarRef:
         assert isinstance(u,str)
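Correspondingly, a hedged sketch of a call with both new flags enabled (the result variable is illustrative; the return value is unchanged by this commit):

    result = automatonSegmentationAssignment(diarHyp, diarRef, tolerance=25,
                                             deleteBoundarySameConsecutiveSpk=True,
                                             deleteBoundaryMergeCluster=True)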
@@ -1056,13 +1071,11 @@ def automatonSegmentationAssignment(diarHyp,diarRef,diarUem=None,tolerance=0,mod
                     actionsIncrementalAssignmentCreateTurn.append(copy.deepcopy([valueRef['cluster'],z['cluster'],copy.deepcopy(z)]))
                 else:
                     if z['cluster'] == dictionary[valueRef['cluster']]:
-                        if (modeNoGap == True and idx==0) or (modeNoGap == False):
-                            actionsAssignmentNothing.append(copy.deepcopy(z))
-                            actionsIncrementalAssignmentNothingTurn.append(copy.deepcopy(z))
+                        actionsAssignmentNothing.append(copy.deepcopy(z))
+                        actionsIncrementalAssignmentNothingTurn.append(copy.deepcopy(z))
                     else:
-                        if (modeNoGap == True and idx==0) or (modeNoGap == False):
-                            actionsAssignmentChange.append(copy.deepcopy([dictionary[valueRef['cluster']],z]))
-                            actionsIncrementalAssignmentChangeTurn.append(copy.deepcopy([dictionary[valueRef['cluster']],z]))
+                        actionsAssignmentChange.append(copy.deepcopy([dictionary[valueRef['cluster']],z]))
+                        actionsIncrementalAssignmentChangeTurn.append(copy.deepcopy([dictionary[valueRef['cluster']],z]))
                         applyChange=True
             if applyChange:
                 # Updates the diar for the merges afterward
@@ -1071,6 +1084,8 @@ def automatonSegmentationAssignment(diarHyp,diarRef,diarUem=None,tolerance=0,mod
                 valueTmp=dropSegment(z,valueTmp)
                 valueTmp.append_seg(segmentTmp)
                 valueTmp.sort()
+                if deleteBoundaryMergeCluster:
+                    break
         if not perfectBoundary:
             # Gets the new segments, modified by the previous steps
             listHypRefSegment=list()
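Per the updated docstring, deleteBoundaryMergeCluster lets a boundary deletion merge two consecutive segments with different cluster names, the result taking the left/first segment's name. A small sketch of that merge semantics on plain tuples; the helper is illustrative only:

    # Illustrative merge of two consecutive segments (start, stop, cluster);
    # the merged segment keeps the left/first segment's cluster name.
    def delete_boundary(left, right):
        assert left[1] == right[0], "segments must be consecutive"
        return (left[0], right[1], left[2])

    delete_boundary((0, 150, 'spk1'), (150, 300, 'spk2'))
    # -> (0, 300, 'spk1')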
@@ -1145,22 +1160,30 @@ def automatonSegmentationAssignment(diarHyp,diarRef,diarUem=None,tolerance=0,mod
                 listTmp.append(y)
             elif tolerance!=0 and y['start']>=(valueRef['start']-tolerance):
                 listTmp.append(y)
-        if modeNoGap == True:
-            clusterSelected=listTmp[0]['cluster']
-        actionsSegmentationBoundaryMerge.append(copy.deepcopy([listTmp[0],listTmp[1]]))
-        actionsIncrementalSegmentationBoundaryMergeTurn.append(copy.deepcopy([listTmp[0],listTmp[1]]))
-        if modeNoGap == True:
-            listTmp[1]['cluster']=clusterSelected
-        newSegment,valueTmp=mergeSegment(listTmp[0],listTmp[1],valueTmp)
+        if not (not deleteBoundarySameConsecutiveSpk and listTmp[0]['cluster']==listTmp[1]['cluster']):
+            actionsSegmentationBoundaryMerge.append(copy.deepcopy([listTmp[0],listTmp[1]]))
+            actionsIncrementalSegmentationBoundaryMergeTurn.append(copy.deepcopy([listTmp[0],listTmp[1]]))
+            if modeNoGap == True and listTmp[0]['cluster']!=listTmp[1]['cluster']:
+                listTmp[1]['cluster']=listTmp[0]['cluster']
+            newSegment,valueTmp=mergeSegment(listTmp[0],listTmp[1],valueTmp)
+        else:
+            newSegment=listTmp[1]
         for y in range(2,len(listTmp)):
             if modeNoGap == True:
-                listTmp[y]['cluster']=clusterSelected
                 if not (Segment.intersection(newSegment,listTmp[y]) is not None or newSegment["stop"]==listTmp[y]["start"] or newSegment["start"]==listTmp[y]["stop"]):
                     logging.error("Cannot have absence of a segment in Transcriber mode.")
                     raise Exception("Absence of a segment.")
-            actionsSegmentationBoundaryMerge.append(copy.deepcopy([newSegment,listTmp[y]]))
-            actionsIncrementalSegmentationBoundaryMergeTurn.append(copy.deepcopy([newSegment,listTmp[y]]))
-            newSegment,valueTmp=mergeSegment(newSegment,listTmp[y],valueTmp)
+            if not (not deleteBoundarySameConsecutiveSpk and newSegment['cluster']==listTmp[y]['cluster']):
+                actionsSegmentationBoundaryMerge.append(copy.deepcopy([newSegment,listTmp[y]]))
+                actionsIncrementalSegmentationBoundaryMergeTurn.append(copy.deepcopy([newSegment,listTmp[y]]))
+                if modeNoGap == True and newSegment['cluster']!=listTmp[y]['cluster']:
+                    valueTmp=dropSegment(listTmp[y],valueTmp)
+                    listTmp[y]['cluster']=newSegment['cluster']
+                    valueTmp.append_seg(listTmp[y])
+                    valueTmp.sort()
+                newSegment,valueTmp=mergeSegment(newSegment,listTmp[y],valueTmp)
+            else:
+                newSegment=listTmp[y]
     # Updates diarHyp
     diarHyp=valueTmp
...
S4D tutorials
===
Here you will find short tutorials on how to use different components of S4D to train and run a complete speaker diarization system.
1. [Train a PLDA model for i-vector clustering](tuto_1_iv_model.ipynb)
2. [Perform a BIC diarization](tuto_2_diar_bic.ipynb)
3. [Use i-vectors for speaker clustering](tuto_3_iv_plda_clustering.ipynb)
\ No newline at end of file
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Train model for Diarization\n",
"====\n",
"\n",
"This script trains UBM, TV and PLDA models for a diarization system.\n",
"\n",
"Initialization\n",
"---"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"from s4d.diar import Diar\n",
"from s4d.utils import *\n",
"\n",
"from sidekit import Mixture, FactorAnalyser, StatServer, IdMap\n",
"import numpy\n",
"import logging\n",
"import re\n",
"import sidekit\n",
"from sidekit.sidekit_io import *\n",
"try:\n",
" from sortedcontainers import SortedDict as dict\n",
"except ImportError:\n",
" pass"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"init_logging(level=logging.INFO)\n",
"num_thread = 4\n",
"audio_dir = '../data/train/{}.wav'\n",
"\n",
"\n",
"ubm_seg_fn = './data/seg/ubm_ester.seg'\n",
"nb_gauss = 1024\n",
"mfcc_ubm_fn = './data/mfcc/ubm.h5'\n",
"ubm_idmap_fn = './data/mfcc/ubm_idmap.txt'\n",
"ubm_fn = './data/model/ester_ubm_'+str(nb_gauss)+'.h5'\n",
"\n",
"\n",
"tv_seg_fn = './data/seg/train.tv.seg'\n",
"rank_tv = 300\n",
"it_max_tv = 10\n",
"mfcc_tv_fn = './data/mfcc/tv.h5'\n",
"tv_idmap_fn = './data/mfcc/tv_idmap.h5'\n",
"tv_stat_fn = './data/model/tv.stat.h5'\n",
"tv_fn = './data/model/tv_'+str(rank_tv)+'.h5'\n",
"\n",
"\n",
"plda_seg_fn = './data/seg/train.plda.seg'\n",
"rank_plda = 150\n",
"it_max_plda = 10\n",
"mfcc_plda_fn = './data/mfcc/norm_plda.h5'\n",
"plda_idmap_fn = './data/mfcc/plda_idmap.h5'\n",
"plda_fn = './data/model/plda_'+str(rank_tv)+'_'+str(rank_plda)+'.h5'\n",
"norm_stat_fn = './data/model/norm.stat.h5'\n",
"norm_fn = './data/model/norm.h5'\n",
"norm_iv_fn = './data/model/norm.iv.h5'\n",
"\n",
"\n",
"matrices_fn = './data/model/matrices.h5'\n",
"model_fn = './data/model/ester_model_{}_{}_{}.h5'.format(nb_gauss, rank_tv, rank_plda)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Step 1: UBM\n",
"---\n",
"Extract MFCC for the UBM"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"logging.info('Computing MFCC for UBM')\n",
"diar_ubm = Diar.read_seg(ubm_seg_fn, normalize_cluster=True)\n",
"fe = get_feature_extractor(audio_dir, 'sid')\n",
"ubm_idmap = fe.save_multispeakers(diar_ubm.id_map(), output_feature_filename=mfcc_ubm_fn, keep_all=False)\n",
"ubm_idmap.write_txt(ubm_idmap_fn)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Train the UBM by EM"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ubm_idmap = IdMap.read_txt(ubm_idmap_fn)\n",
"\n",
"fs = get_feature_server(mfcc_ubm_fn, 'sid')\n",
"\n",
"spk_lst = ubm_idmap.rightids\n",
"ubm = Mixture()\n",
"ubm.EM_split(fs, spk_lst, nb_gauss,\n",
" iterations=(1, 2, 2, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8), num_thread=num_thread,\n",
" llk_gain=0.01)\n",
"ubm.write(ubm_fn, prefix='ubm/')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Step 2: TV\n",
"---\n",
"Extract MFCC for TV"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"logging.info('Computing MFCC for TV')\n",
"diar_tv = Diar.read_seg(tv_seg_fn, normalize_cluster=True)\n",
"fe = get_feature_extractor(audio_dir, 'sid')\n",
"tv_idmap = fe.save_multispeakers(diar_tv.id_map(), output_feature_filename=mfcc_tv_fn, keep_all=False)\n",
"tv_idmap.write(tv_idmap_fn)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Train a Total Variability model using the FactorAnalyser class"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tv_idmap = IdMap.read(tv_idmap_fn)\n",
"\n",
"ubm = Mixture()\n",
"ubm.read(ubm_fn, prefix='ubm/')\n",
"\n",
"fs = get_feature_server(mfcc_tv_fn, 'sid')\n",
"\n",
"tv_idmap.leftids = numpy.copy(tv_idmap.rightids)\n",
"\n",
"tv_stat = StatServer(tv_idmap, ubm.get_distrib_nb(), ubm.dim())\n",
"tv_stat.accumulate_stat(ubm=ubm, feature_server=fs, seg_indices=range(tv_stat.segset.shape[0]), num_thread=num_thread)\n",
"tv_stat.write(tv_stat_fn)\n",
"fa = FactorAnalyser()\n",
"fa.total_variability(tv_stat_fn, ubm, rank_tv, nb_iter=it_max_tv, batch_size=1000, num_thread=num_thread)\n",
"\n",
"write_tv_hdf5([fa.F, fa.mean, fa.Sigma], tv_fn)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Step 3: PLDA\n",
"---\n",
"Extract the MFCC for the PLDA"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"logging.info('Computing MFCC for PLDA')\n",
"diar_plda = Diar.read_seg(plda_seg_fn, normalize_cluster=True)\n",
"fe = get_feature_extractor(audio_dir, 'sid')\n",
"plda_idmap = fe.save_multispeakers(diar_plda.id_map(), output_feature_filename=mfcc_plda_fn, keep_all=False)\n",
"plda_idmap.write(plda_idmap_fn)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Accumulate statistics"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"plda_idmap = IdMap.read(plda_idmap_fn)\n",
"\n",
"ubm = Mixture()\n",
"ubm.read(ubm_fn, prefix='ubm/')\n",
"tv, tv_mean, tv_sigma = read_tv_hdf5(tv_fn)\n",
"\n",
"fs = get_feature_server(mfcc_plda_fn, 'sid')\n",
"\n",
"plda_norm_stat = StatServer(plda_idmap, ubm.get_distrib_nb(), ubm.dim())\n",
"plda_norm_stat.accumulate_stat(ubm=ubm, feature_server=fs, \n",
" seg_indices=range(plda_norm_stat.segset.shape[0]), num_thread=num_thread)\n",
"plda_norm_stat.write(norm_stat_fn)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Extract i-vectors and compute norm"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fa = FactorAnalyser(F=tv, mean=tv_mean, Sigma=tv_sigma)\n",
"norm_iv = fa.extract_ivectors(ubm, norm_stat_fn, num_thread=num_thread)\n",
"norm_iv.write(norm_iv_fn)\n",
"\n",
"norm_mean, norm_cov = norm_iv.estimate_spectral_norm_stat1(1, 'sphNorm')\n",
"\n",
"write_norm_hdf5([norm_mean, norm_cov], norm_fn)\n",
"\n",
"norm_iv.spectral_norm_stat1(norm_mean[:1], norm_cov[:1])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Train the PLDA model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fa = FactorAnalyser()\n",
"fa.plda(norm_iv, rank_plda, nb_iter=it_max_plda)\n",
"write_plda_hdf5([fa.mean, fa.F, numpy.zeros((rank_tv, 0)), fa.Sigma], plda_fn)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Step 4: Compute additional data (optional)\n",
"---\n",
"Adding matrices for additional scoring methods: \n",
"* Mahalonobis matrix\n",
"* Lower Choleski decomposition of the WCCN matrix\n",
"* Within- and Between-class Covariance matrices"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"iv = StatServer(norm_iv_fn)\n",
"matrix_dict = {}\n",
"\n",
"logging.info('compute mahalanobis_matrix')\n",
"mahalanobis_matrix = iv.get_mahalanobis_matrix_stat1()\n",
"matrix_dict['mahalanobis_matrix'] = mahalanobis_matrix\n",
"\n",
"logging.info('compute wccn_choleski')\n",
"wccn_choleski = iv.get_wccn_choleski_stat1()\n",
"matrix_dict['wccn_choleski'] = wccn_choleski\n",
"\n",
"logging.info('compute two_covariance')\n",
"within_covariance = iv.get_within_covariance_stat1()\n",
"matrix_dict['two_covariance/within_covariance'] = within_covariance\n",
"between_covariance = iv.get_between_covariance_stat1()\n",
"matrix_dict['two_covariance/between_covariance'] = between_covariance\n",
"\n",
"write_dict_hdf5(matrix_dict, matrices_fn)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Step 5: Merge in one model\n",
"---"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"with h5py.File(model_fn, 'w') as model:\n",
" for fn in [ubm_fn, tv_fn, norm_fn, plda_fn, matrices_fn]:\n",
" if not os.path.exists(fn):\n",
" continue\n",
" with h5py.File(fn, 'r') as fh:\n",
" for group in fh:\n",
" logging.info(group)\n",