Commit 99e43094 authored by Florent Desnous
parents 28cedd13 33800b29
@@ -3,4 +3,4 @@ s4d.egg-info/
*/__pycache__/
*.pyc
dist/
tutorials/
@@ -221,9 +221,10 @@ def automatonAssignment(diarHyp,diarRef,diarUem=None,tolerance=0,diarFinal__clus
## tolerance: In centiseconds
## diarFinal__clusterToDeleteAccordingToDiarRef: List of clusters to delete in the diarFinal only
## modeNoGap: Whether to drop the segment actions (i.e. createSegment & deleteSegment)
## mergeStrat_BiggestCluster: Whether we merge in temporal order or merge the biggest cluster first for a given reference segment
def automatonSegmentation(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=False,mergeStrat_BiggestCluster=False,diarFinal__clusterToDeleteAccordingToDiarRef=list()):
assert isinstance(diarHyp,Diar) and isinstance(diarRef,Diar) and isinstance(mergeStrat_BiggestCluster,bool) and isinstance(modeNoGap,bool) and (diarUem is None or isinstance(diarUem,Diar)) and isinstance(tolerance,numbers.Number) and isinstance(diarFinal__clusterToDeleteAccordingToDiarRef,list)
## modeNoGap__mergeStrat_BiggestCluster: Whether we merge in temporal order or merge the biggest cluster first for a given reference segment (only relevant when modeNoGap is False)
## deleteBoundarySameConsecutiveSpk: Whether we delete the boundary between two consecutive segments with the same speaker
def automatonSegmentation(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=False,modeNoGap__mergeStrat_BiggestCluster=False,diarFinal__clusterToDeleteAccordingToDiarRef=list(),deleteBoundarySameConsecutiveSpk=False):
assert isinstance(diarHyp,Diar) and isinstance(diarRef,Diar) and isinstance(modeNoGap__mergeStrat_BiggestCluster,bool) and isinstance(modeNoGap,bool) and (diarUem is None or isinstance(diarUem,Diar)) and isinstance(tolerance,numbers.Number) and isinstance(diarFinal__clusterToDeleteAccordingToDiarRef,list) and isinstance(deleteBoundarySameConsecutiveSpk,bool)
for u in diarFinal__clusterToDeleteAccordingToDiarRef:
assert isinstance(u,str)
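## Usage sketch (hypothetical values): diarRef and diarHyp are Diar objects
## loaded elsewhere, e.g. via Diar.read_seg(...); tolerance is in centiseconds.
# automatonSegmentation(diarHyp, diarRef, tolerance=25, modeNoGap=False,
#                       modeNoGap__mergeStrat_BiggestCluster=True,
#                       deleteBoundarySameConsecutiveSpk=False)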
@@ -288,7 +289,10 @@ def automatonSegmentation(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=Fal
showname=diarRef.unique('show')[0]
diarRef.sort()
diarHyp.sort()
tolerance=abs(tolerance)
tolerance=abs(tolerance)
if not strictBoundary:
diarRef.pack()
diarHyp.pack()
assert len(diarOverlapArea(diarRef))==0, "Error: the diarRef parameter has some overlapping segments.\nReason: No overlapping segments are allowed.\nSolution: Please put them apart.\n"
assert len(diarOverlapArea(diarHyp))==0, "Error: the diarHyp parameter has some overlapping segments.\nReason: No overlapping segments are allowed.\nSolution: Please put them apart.\n"
@@ -527,7 +531,7 @@ def automatonSegmentation(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=Fal
valueBoundaryStart=copy.deepcopy(y['stop'])
if valueBoundaryStart is None:
valueBoundaryStart=valueRef['start']
if mergeStrat_BiggestCluster == True:
if modeNoGap__mergeStrat_BiggestCluster == True:
# Gets the cluster which has the most present frames
dictHypRefSegmentDuration=dict()
for y in listHypRefSegment:
@@ -542,10 +546,10 @@ def automatonSegmentation(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=Fal
if cls['start']>y['start']:
cls=y
clusterName=cls['cluster']
# Moves the boundaries
# Precondition: listHypRefSegment must be sorted in ascending order on start, and must not go beyond valueRef['start'] and valueRef['stop']
if modeNoGap == False:
for idx,z in enumerate(listHypRefSegment):
# Moves the boundaries
# Precondition: listHypRefSegment must be sorted in ascending order on start, and must not go beyond valueRef['start'] and valueRef['stop']
nearStop=valueRef['stop']
if idx==0:
boundStop=z['stop']
@@ -592,22 +596,21 @@ def automatonSegmentation(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=Fal
elif tolerance!=0 and y['start']>=(valueRef['start']-tolerance):
listHypRefSegment.append(y)
# Replaces the segments which are not in the correct cluster
replaced=False
for y in listHypRefSegment:
if y['cluster']!=clusterName:
replaced=True
yTmp=copy.deepcopy(y)
yTmp['cluster']=clusterName
if modeNoGap == False:
if modeNoGap == False:
replaced=False
for y in listHypRefSegment:
if y['cluster']!=clusterName:
replaced=True
yTmp=copy.deepcopy(y)
yTmp['cluster']=clusterName
actionsSegmentationSegmentDelete.append(copy.deepcopy(y))
actionsIncrementalSegmentationSegmentDeleteTurn.append(copy.deepcopy(y))
valueTmp=dropSegment(y,valueTmp)
if modeNoGap == False:
valueTmp=dropSegment(y,valueTmp)
actionsSegmentationSegmentCreate.append(copy.deepcopy(Segment([valueRef['show'],yTmp['cluster'],yTmp['cluster_type'],yTmp['start'],yTmp['stop']],['show','cluster','cluster_type','start','stop'])))
actionsIncrementalSegmentationSegmentCreateTurn.append(copy.deepcopy(Segment([valueRef['show'],yTmp['cluster'],yTmp['cluster_type'],yTmp['start'],yTmp['stop']],['show','cluster','cluster_type','start','stop'])))
valueTmp.append_seg(yTmp)
if replaced:
valueTmp.sort()
valueTmp.append_seg(yTmp)
if replaced:
valueTmp.sort()
# Merges among them if > 1
if len(listHypRefSegment)>1:
# Gets the new segments, modified by the previous steps
@@ -618,17 +621,27 @@ def automatonSegmentation(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=Fal
listTmp.append(y)
elif tolerance!=0 and y['start']>=(valueRef['start']-tolerance):
listTmp.append(y)
actionsSegmentationBoundaryMerge.append(copy.deepcopy([listTmp[0],listTmp[1]]))
actionsIncrementalSegmentationBoundaryMergeTurn.append(copy.deepcopy([listTmp[0],listTmp[1]]))
newSegment,valueTmp=mergeSegment(listTmp[0],listTmp[1],valueTmp)
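# Merge (and log the action) unless both segments share the same cluster and deleteBoundarySameConsecutiveSpk is False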
if not (not deleteBoundarySameConsecutiveSpk and listTmp[0]['cluster']==listTmp[1]['cluster']):
actionsSegmentationBoundaryMerge.append(copy.deepcopy([listTmp[0],listTmp[1]]))
actionsIncrementalSegmentationBoundaryMergeTurn.append(copy.deepcopy([listTmp[0],listTmp[1]]))
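# In no-gap mode, align the right segment's cluster with the left one before merging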
if modeNoGap == True and listTmp[0]['cluster']!=listTmp[1]['cluster']:
listTmp[1]['cluster']=listTmp[0]['cluster']
newSegment,valueTmp=mergeSegment(listTmp[0],listTmp[1],valueTmp)
else:
newSegment=listTmp[1]
for y in range(2,len(listTmp)):
if modeNoGap == True:
if not (Segment.intersection(newSegment,listTmp[y]) is not None or newSegment["stop"]==listTmp[y]["start"] or newSegment["start"]==listTmp[y]["stop"]):
logging.error("Cannot have absence of a segment in Transcriber mode.")
raise Exception("Absence of a segment.")
actionsSegmentationBoundaryMerge.append(copy.deepcopy([newSegment,listTmp[y]]))
actionsIncrementalSegmentationBoundaryMergeTurn.append(copy.deepcopy([newSegment,listTmp[y]]))
newSegment,valueTmp=mergeSegment(newSegment,listTmp[y],valueTmp)
if not (not deleteBoundarySameConsecutiveSpk and newSegment['cluster']==listTmp[y]['cluster']):
actionsSegmentationBoundaryMerge.append(copy.deepcopy([newSegment,listTmp[y]]))
actionsIncrementalSegmentationBoundaryMergeTurn.append(copy.deepcopy([newSegment,listTmp[y]]))
if modeNoGap == True and newSegment['cluster']!=listTmp[y]['cluster']:
listTmp[y]['cluster']=newSegment['cluster']
newSegment,valueTmp=mergeSegment(newSegment,listTmp[y],valueTmp)
else:
newSegment=listTmp[y]
# Updates diarHyp
diarHyp=valueTmp
@@ -709,8 +722,10 @@ def automatonSegmentation(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=Fal
## tolerance: In centiseconds
## diarFinal__clusterToDeleteAccordingToDiarRef: List of clusters to delete in the diarFinal only
## modeNoGap: Whether to drop the segment actions (i.e. createSegment & deleteSegment)
def automatonSegmentationAssignment(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=False,diarFinal__clusterToDeleteAccordingToDiarRef=list()):
assert isinstance(diarHyp,Diar) and isinstance(diarRef,Diar) and isinstance(modeNoGap,bool) and (diarUem is None or isinstance(diarUem,Diar)) and isinstance(tolerance,numbers.Number) and isinstance(diarFinal__clusterToDeleteAccordingToDiarRef,list)
## deleteBoundarySameConsecutiveSpk: Whether we delete the boundary between two consecutive segments with the same speaker
## deleteBoundaryMergeCluster: Whether the "delete a boundary" action may merge two consecutive segments with different cluster names (the result takes the name of the left/first segment)
def automatonSegmentationAssignment(diarHyp,diarRef,diarUem=None,tolerance=0,modeNoGap=False,diarFinal__clusterToDeleteAccordingToDiarRef=list(),deleteBoundarySameConsecutiveSpk=False,deleteBoundaryMergeCluster=False):
assert isinstance(diarHyp,Diar) and isinstance(diarRef,Diar) and isinstance(modeNoGap,bool) and (diarUem is None or isinstance(diarUem,Diar)) and isinstance(tolerance,numbers.Number) and isinstance(diarFinal__clusterToDeleteAccordingToDiarRef,list) and isinstance(deleteBoundarySameConsecutiveSpk,bool) and isinstance(deleteBoundaryMergeCluster,bool)
for u in diarFinal__clusterToDeleteAccordingToDiarRef:
assert isinstance(u,str)
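## Usage sketch (hypothetical values): same calling convention as
## automatonSegmentation, plus the two boundary-deletion options added here.
# automatonSegmentationAssignment(diarHyp, diarRef, tolerance=25, modeNoGap=False,
#                                 deleteBoundarySameConsecutiveSpk=False,
#                                 deleteBoundaryMergeCluster=True)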
@@ -1056,13 +1071,11 @@ def automatonSegmentationAssignment(diarHyp,diarRef,diarUem=None,tolerance=0,mod
actionsIncrementalAssignmentCreateTurn.append(copy.deepcopy([valueRef['cluster'],z['cluster'],copy.deepcopy(z)]))
else:
if z['cluster'] == dictionary[valueRef['cluster']]:
if (modeNoGap == True and idx==0) or (modeNoGap == False):
actionsAssignmentNothing.append(copy.deepcopy(z))
actionsIncrementalAssignmentNothingTurn.append(copy.deepcopy(z))
actionsAssignmentNothing.append(copy.deepcopy(z))
actionsIncrementalAssignmentNothingTurn.append(copy.deepcopy(z))
else:
if (modeNoGap == True and idx==0) or (modeNoGap == False):
actionsAssignmentChange.append(copy.deepcopy([dictionary[valueRef['cluster']],z]))
actionsIncrementalAssignmentChangeTurn.append(copy.deepcopy([dictionary[valueRef['cluster']],z]))
actionsAssignmentChange.append(copy.deepcopy([dictionary[valueRef['cluster']],z]))
actionsIncrementalAssignmentChangeTurn.append(copy.deepcopy([dictionary[valueRef['cluster']],z]))
applyChange=True
if applyChange:
# Updates the diar for the merges afterward
@@ -1071,6 +1084,8 @@ def automatonSegmentationAssignment(diarHyp,diarRef,diarUem=None,tolerance=0,mod
valueTmp=dropSegment(z,valueTmp)
valueTmp.append_seg(segmentTmp)
valueTmp.sort()
if deleteBoundaryMergeCluster:
break
if not perfectBoundary:
# Gets the new segments, modified by the previous steps
listHypRefSegment=list()
@@ -1145,22 +1160,30 @@ def automatonSegmentationAssignment(diarHyp,diarRef,diarUem=None,tolerance=0,mod
listTmp.append(y)
elif tolerance!=0 and y['start']>=(valueRef['start']-tolerance):
listTmp.append(y)
if modeNoGap == True:
clusterSelected=listTmp[0]['cluster']
actionsSegmentationBoundaryMerge.append(copy.deepcopy([listTmp[0],listTmp[1]]))
actionsIncrementalSegmentationBoundaryMergeTurn.append(copy.deepcopy([listTmp[0],listTmp[1]]))
if modeNoGap == True:
listTmp[1]['cluster']=clusterSelected
newSegment,valueTmp=mergeSegment(listTmp[0],listTmp[1],valueTmp)
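# As in automatonSegmentation: merge (and log the action) unless both segments share a cluster and deleteBoundarySameConsecutiveSpk is False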
if not (not deleteBoundarySameConsecutiveSpk and listTmp[0]['cluster']==listTmp[1]['cluster']):
actionsSegmentationBoundaryMerge.append(copy.deepcopy([listTmp[0],listTmp[1]]))
actionsIncrementalSegmentationBoundaryMergeTurn.append(copy.deepcopy([listTmp[0],listTmp[1]]))
if modeNoGap == True and listTmp[0]['cluster']!=listTmp[1]['cluster']:
listTmp[1]['cluster']=listTmp[0]['cluster']
newSegment,valueTmp=mergeSegment(listTmp[0],listTmp[1],valueTmp)
else:
newSegment=listTmp[1]
for y in range(2,len(listTmp)):
if modeNoGap == True:
listTmp[y]['cluster']=clusterSelected
if not (Segment.intersection(newSegment,listTmp[y]) is not None or newSegment["stop"]==listTmp[y]["start"] or newSegment["start"]==listTmp[y]["stop"]):
logging.error("Cannot have absence of a segment in Transcriber mode.")
raise Exception("Absence of a segment.")
actionsSegmentationBoundaryMerge.append(copy.deepcopy([newSegment,listTmp[y]]))
actionsIncrementalSegmentationBoundaryMergeTurn.append(copy.deepcopy([newSegment,listTmp[y]]))
newSegment,valueTmp=mergeSegment(newSegment,listTmp[y],valueTmp)
if not (not deleteBoundarySameConsecutiveSpk and newSegment['cluster']==listTmp[y]['cluster']):
actionsSegmentationBoundaryMerge.append(copy.deepcopy([newSegment,listTmp[y]]))
actionsIncrementalSegmentationBoundaryMergeTurn.append(copy.deepcopy([newSegment,listTmp[y]]))
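# In no-gap mode, relabel listTmp[y] with newSegment's cluster (drop, rename, re-insert, then sort) before merging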
if modeNoGap == True and newSegment['cluster']!=listTmp[y]['cluster']:
valueTmp=dropSegment(listTmp[y],valueTmp)
listTmp[y]['cluster']=newSegment['cluster']
valueTmp.append_seg(listTmp[y])
valueTmp.sort()
newSegment,valueTmp=mergeSegment(newSegment,listTmp[y],valueTmp)
else:
newSegment=listTmp[y]
# Updates diarHyp
diarHyp=valueTmp
S4D tutorials
===
Here you will find short tutorials on how to use different components of S4D to train and run a complete speaker diarization system.
1. [Train a PLDA model for i-vector clustering](tuto_1_iv_model.ipynb)
2. [Perform a BIC diarization](tuto_2_diar_bic.ipynb)
3. [Use i-vectors for speaker clustering](tuto_3_iv_plda_clustering.ipynb)
\ No newline at end of file
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Train model for Diarization\n",
"====\n",
"\n",
"This script trains UBM, TV and PLDA models for a diarization system.\n",
"\n",
"Initialization\n",
"---"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import logging\n",
"from s4d.diar import Diar\n",
"from s4d.utils import *\n",
"\n",
"from sidekit import Mixture, FactorAnalyser, StatServer, IdMap\n",
"import numpy\n",
"import logging\n",
"import re\n",
"import sidekit\n",
"from sidekit.sidekit_io import *\n",
"try:\n",
" from sortedcontainers import SortedDict as dict\n",
"except ImportError:\n",
" pass"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"init_logging(level=logging.INFO)\n",
"num_thread = 4\n",
"audio_dir = '../data/train/{}.wav'\n",
"\n",
"\n",
"ubm_seg_fn = './data/seg/ubm_ester.seg'\n",
"nb_gauss = 1024\n",
"mfcc_ubm_fn = './data/mfcc/ubm.h5'\n",
"ubm_idmap_fn = './data/mfcc/ubm_idmap.txt'\n",
"ubm_fn = './data/model/ester_ubm_'+str(nb_gauss)+'.h5'\n",
"\n",
"\n",
"tv_seg_fn = './data/seg/train.tv.seg'\n",
"rank_tv = 300\n",
"it_max_tv = 10\n",
"mfcc_tv_fn = './data/mfcc/tv.h5'\n",
"tv_idmap_fn = './data/mfcc/tv_idmap.h5'\n",
"tv_stat_fn = './data/model/tv.stat.h5'\n",
"tv_fn = './data/model/tv_'+str(rank_tv)+'.h5'\n",
"\n",
"\n",
"plda_seg_fn = './data/seg/train.plda.seg'\n",
"rank_plda = 150\n",
"it_max_plda = 10\n",
"mfcc_plda_fn = './data/mfcc/norm_plda.h5'\n",
"plda_idmap_fn = './data/mfcc/plda_idmap.h5'\n",
"plda_fn = './data/model/plda_'+str(rank_tv)+'_'+str(rank_plda)+'.h5'\n",
"norm_stat_fn = './data/model/norm.stat.h5'\n",
"norm_fn = './data/model/norm.h5'\n",
"norm_iv_fn = './data/model/norm.iv.h5'\n",
"\n",
"\n",
"matrices_fn = './data/model/matrices.h5'\n",
"model_fn = './data/model/ester_model_{}_{}_{}.h5'.format(nb_gauss, rank_tv, rank_plda)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Step 1: UBM\n",
"---\n",
"Extract MFCC for the UBM"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"logging.info('Computing MFCC for UBM')\n",
"diar_ubm = Diar.read_seg(ubm_seg_fn, normalize_cluster=True)\n",
"fe = get_feature_extractor(audio_dir, 'sid')\n",
"ubm_idmap = fe.save_multispeakers(diar_ubm.id_map(), output_feature_filename=mfcc_ubm_fn, keep_all=False)\n",
"ubm_idmap.write_txt(ubm_idmap_fn)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Train the UBM by EM"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"ubm_idmap = IdMap.read_txt(ubm_idmap_fn)\n",
"\n",
"fs = get_feature_server(mfcc_ubm_fn, 'sid')\n",
"\n",
"spk_lst = ubm_idmap.rightids\n",
"ubm = Mixture()\n",
"ubm.EM_split(fs, spk_lst, nb_gauss,\n",
" iterations=(1, 2, 2, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8), num_thread=num_thread,\n",
" llk_gain=0.01)\n",
"ubm.write(ubm_fn, prefix='ubm/')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Step 2: TV\n",
"---\n",
"Extract MFCC for TV"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"logging.info('Computing MFCC for TV')\n",
"diar_tv = Diar.read_seg(tv_seg_fn, normalize_cluster=True)\n",
"fe = get_feature_extractor(audio_dir, 'sid')\n",
"tv_idmap = fe.save_multispeakers(diar_tv.id_map(), output_feature_filename=mfcc_tv_fn, keep_all=False)\n",
"tv_idmap.write(tv_idmap_fn)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Train a Total Variability model using the FactorAnalyser class"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"tv_idmap = IdMap.read(tv_idmap_fn)\n",
"\n",
"ubm = Mixture()\n",
"ubm.read(ubm_fn, prefix='ubm/')\n",
"\n",
"fs = get_feature_server(mfcc_tv_fn, 'sid')\n",
"\n",
"tv_idmap.leftids = numpy.copy(tv_idmap.rightids)\n",
"\n",
"tv_stat = StatServer(tv_idmap, ubm.get_distrib_nb(), ubm.dim())\n",
"tv_stat.accumulate_stat(ubm=ubm, feature_server=fs, seg_indices=range(tv_stat.segset.shape[0]), num_thread=num_thread)\n",
"tv_stat.write(tv_stat_fn)\n",
"fa = FactorAnalyser()\n",
"fa.total_variability(tv_stat_fn, ubm, rank_tv, nb_iter=it_max_tv, batch_size=1000, num_thread=num_thread)\n",
"\n",
"write_tv_hdf5([fa.F, fa.mean, fa.Sigma], tv_fn)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Step 3: PLDA\n",
"---\n",
"Extract the MFCC for the PLDA"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"logging.info('Computing MFCC for PLDA')\n",
"diar_plda = Diar.read_seg(plda_seg_fn, normalize_cluster=True)\n",
"fe = get_feature_extractor(audio_dir, 'sid')\n",
"plda_idmap = fe.save_multispeakers(diar_plda.id_map(), output_feature_filename=mfcc_plda_fn, keep_all=False)\n",
"plda_idmap.write(plda_idmap_fn)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Accumulate statistics"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"plda_idmap = IdMap.read(plda_idmap_fn)\n",
"\n",
"ubm = Mixture()\n",
"ubm.read(ubm_fn, prefix='ubm/')\n",
"tv, tv_mean, tv_sigma = read_tv_hdf5(tv_fn)\n",
"\n",
"fs = get_feature_server(mfcc_plda_fn, 'sid')\n",
"\n",
"plda_norm_stat = StatServer(plda_idmap, ubm.get_distrib_nb(), ubm.dim())\n",
"plda_norm_stat.accumulate_stat(ubm=ubm, feature_server=fs, \n",
" seg_indices=range(plda_norm_stat.segset.shape[0]), num_thread=num_thread)\n",
"plda_norm_stat.write(norm_stat_fn)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Extract i-vectors and compute norm"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fa = FactorAnalyser(F=tv, mean=tv_mean, Sigma=tv_sigma)\n",
"norm_iv = fa.extract_ivectors(ubm, norm_stat_fn, num_thread=num_thread)\n",
"norm_iv.write(norm_iv_fn)\n",
"\n",
"norm_mean, norm_cov = norm_iv.estimate_spectral_norm_stat1(1, 'sphNorm')\n",
"\n",
"write_norm_hdf5([norm_mean, norm_cov], norm_fn)\n",
"\n",
"norm_iv.spectral_norm_stat1(norm_mean[:1], norm_cov[:1])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Train the PLDA model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"fa = FactorAnalyser()\n",
"fa.plda(norm_iv, rank_plda, nb_iter=it_max_plda)\n",
"write_plda_hdf5([fa.mean, fa.F, numpy.zeros((rank_tv, 0)), fa.Sigma], plda_fn)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Step 4: Compute additional data (optional)\n",
"---\n",
"Adding matrices for additional scoring methods: \n",
"* Mahalonobis matrix\n",
"* Lower Choleski decomposition of the WCCN matrix\n",
"* Within- and Between-class Covariance matrices"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"iv = StatServer(norm_iv_fn)\n",
"matrix_dict = {}\n",
"\n",
"logging.info('compute mahalanobis_matrix')\n",
"mahalanobis_matrix = iv.get_mahalanobis_matrix_stat1()\n",
"matrix_dict['mahalanobis_matrix'] = mahalanobis_matrix\n",
"\n",
"logging.info('compute wccn_choleski')\n",
"wccn_choleski = iv.get_wccn_choleski_stat1()\n",
"matrix_dict['wccn_choleski'] = wccn_choleski\n",
"\n",
"logging.info('compute two_covariance')\n",
"within_covariance = iv.get_within_covariance_stat1()\n",
"matrix_dict['two_covariance/within_covariance'] = within_covariance\n",
"between_covariance = iv.get_between_covariance_stat1()\n",
"matrix_dict['two_covariance/between_covariance'] = between_covariance\n",
"\n",
"write_dict_hdf5(matrix_dict, matrices_fn)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Step 5: Merge in one model\n",
"---"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"with h5py.File(model_fn, 'w') as model:\n",
" for fn in [ubm_fn, tv_fn, norm_fn, plda_fn, matrices_fn]:\n",
" if not os.path.exists(fn):\n",
" continue\n",
" with h5py.File(fn, 'r') as fh:\n",
" for group in fh:\n",
" logging.info(group)\n",
" fh.copy(group, model)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"i-vector clustering with PLDA scoring\n",
"===\n",
"This script demonstrates the use of several clustering algorithms using PLDA scoring and i-vectors. The algorithms proposed are:\n",
" - Integer Linear Programming (ILP) IV\n",
" - HAC IV\n",
" - Connected Components (CC) IV\n",
" - Combination of CC and HAC, and CC and ILP\n",
"\n",
"It takes as input the segments generated by the second tutorial (BIC-HAC) and uses the model learned in the first."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Import theano\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Can not use cuDNN on context None: Disabled by dnn.enabled flag\n",
"Mapped name None to device cuda: GeForce GTX TI