Commit e1d3d538 authored by Anthony Larcher's avatar Anthony Larcher
Browse files

dnn, environment variables

parent 1e932d87
......@@ -39,17 +39,17 @@ SIDEKIT_CONFIG={"theano":True,
"theano_config":'gpu', # Can be 'cpu' or 'gpu'
"libsvm":True
}
for cfg in os.environ['SIDEKIT'].split(","):
k, val = cfg.split("=")
if k == "theano":
if val == "false":
SIDEKIT_CONFIG["theano"] = False
elif k == "theano_config":
SIDEKIT_CONFIG["theano_config"] = val
elif k == "libsvm":
if val == "false":
SIDEKIT_CONFIG["libsvm"] = False
if 'SIDEKIT' in os.environ:
for cfg in os.environ['SIDEKIT'].split(","):
k, val = cfg.split("=")
if k == "theano":
if val == "false":
SIDEKIT_CONFIG["theano"] = False
elif k == "theano_config":
SIDEKIT_CONFIG["theano_config"] = val
elif k == "libsvm":
if val == "false":
SIDEKIT_CONFIG["libsvm"] = False
PARALLEL_MODULE = 'multiprocessing' # can be , threading, multiprocessing MPI is planned in the future
......
......@@ -379,14 +379,16 @@ class Mixture(object):
if self.invcov.ndim == 2: # for Diagonal covariance only
self.det = 1.0 / numpy.prod(self.invcov, axis=1)
elif self.invcov.ndim == 3: # For full covariance dstributions
for gg in range(self.mu.shape[1]):
logging.critical("size of det: {}".format(self.det.shape))
logging.critical("size of mu: {}".format(self.mu.shape))
for gg in range(self.mu.shape[0]):
self.det[gg] = 1./numpy.linalg.det(self.invcov[gg])
self.cst = 1.0 / (numpy.sqrt(self.det) * (2.0 * numpy.pi) ** (self.dim() / 2.0))
if self.invcov.ndim == 2:
self.A = (numpy.square(self.mu) * self.invcov).sum(1) - 2.0 * (numpy.log(self.w) + numpy.log(self.cst))
elif self.invcov.ndim == 3:
self.A = 0
self.A = numpy.zeros(self.cst.shape)
def validate(self):
"""Verify the format of the Mixture
......@@ -861,7 +863,7 @@ class Mixture(object):
# M step
logging.debug('Maximisation')
self._maximization(accum)
if i > 0:
if it > 0:
# gain = llk[-1] - llk[-2]
# if gain < llk_gain:
# logging.debug(
......
......@@ -461,7 +461,7 @@ class FForwardNetwork(object):
nfiles = 0
# Iterate on the mini-batches
for ii, training_segment_set in enumerate(training_segment_sets):
for ii, training_segment_set in enumerate(training_segment_sets[:3]):
start_time = time.time()
l = []
f = []
......@@ -525,20 +525,18 @@ class FForwardNetwork(object):
feat, _ = features_server.load(show,
start=s - features_server.context[0],
stop=e + features_server.context[1])
print("taille de feat = {}".format(feat.shape))
if traps:
# Get features in context
X = features_server.get_traps(feat=feat,
label=None,
start=features_server.context[0],
stop=feat.shape[0] - features_server.context[1])
[0].astype(numpy.float32)
stop=feat.shape[0] - features_server.context[1])[0].astype(numpy.float32)
else:
# Get features in context
X = features_server.get_context(feat=feat,
label=None,
start=features_server.context[0],
stop=feat.shape[0] - features_server.context[1])
[0].astype(numpy.float32)
stop=feat.shape[0] - features_server.context[1])[0].astype(numpy.float32)
assert len(X) == len(t)
err, acc = xentropy(X, t)
......@@ -626,7 +624,7 @@ class FForwardNetwork(object):
# If not done yet, compute mean and standard deviation on all training data
if 0 in [len(self.params["input_mean"]), len(self.params["input_std"])]:
if True:
if False:
self.log.info("Compute mean and standard deviation from the training features")
feature_nb, self.params["input_mean"], self.params["input_std"] = mean_std_many(features_server,
feature_size,
......
......@@ -583,7 +583,10 @@ class StatServer:
if not ubm.dim() == data.shape[1]:
raise Exception('dimension of ubm and features differ: {:d} / {:d}'.format(ubm.dim(), data.shape[1]))
else:
lp = ubm.compute_log_posterior_probabilities(data)
if ubm.invcov.ndim == 2:
lp = ubm.compute_log_posterior_probabilities(data)
else:
lp = ubm.compute_log_posterior_probabilities_full(data)
pp, foo = sum_log_probabilities(lp)
# Compute 0th-order statistics
self.stat0[idx, :] = pp.sum(0)
......
Supports Markdown
0% uploaded — attach a file by drag & drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment