Abdelwahab HEBA / kaldi_2015 / Commits / 36cad857

Commit 36cad857, authored Jul 22, 2015 by Jan "yenda" Trmal

Merge pull request #8 from jtrmal/windows-test-fixes

Windows test fixes

Parents: db63ae29, b0439a2e
Showing 20 changed files with 76 additions and 61 deletions (+76, -61)
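The pattern running through all 20 files is the same: unqualified calls to the C library's log/exp are replaced by Kaldi's Log/Exp wrappers, and "base/kaldi-math.h" is included where it was missing. The motivation is evidently portability of the tests to Windows: with BaseFloat (float) arguments, unqualified log/exp can resolve to the double-precision C functions, or fail to resolve cleanly, under MSVC. A minimal sketch of what such overloaded wrappers look like; the bodies below are an assumption for illustration, not copied from base/kaldi-math.h:

// Hypothetical sketch of kaldi::Exp / kaldi::Log wrappers. Providing explicit
// float and double overloads makes every call unambiguous on every compiler.
#include <cmath>

namespace kaldi {
inline double Exp(double x) { return std::exp(x); }
inline float  Exp(float x)  { return std::exp(x); }  // float overload of std::exp
inline double Log(double x) { return std::log(x); }
inline float  Log(float x)  { return std::log(x); }
}  // namespace kaldi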
src/featbin/interpolate-pitch.cc      +2  -2
src/featbin/process-pitch-feats.cc    +2  -2
src/fstext/determinize-lattice-inl.h  +4  -2
src/fstext/fstext-utils-test.cc       +3  -0
src/fstext/push-special.cc            +7  -6
src/fstext/rescale-inl.h              +7  -1
src/fstext/rescale-test.cc            +5  -2
src/gmm/diag-gmm-test.cc              +7  -7
src/gmm/diag-gmm.cc                   +5  -5
src/gmm/ebw-diag-gmm-test.cc          +2  -2
src/gmm/ebw-diag-gmm.cc               +2  -2
src/gmm/full-gmm-test.cc              +2  -2
src/gmm/full-gmm.cc                   +3  -3
src/gmm/indirect-diff-diag-gmm.cc     +1  -1
src/gmm/mle-am-diag-gmm-test.cc       +1  -1
src/gmm/mle-diag-gmm-test.cc          +2  -2
src/gmm/mle-full-gmm-test.cc          +1  -1
src/gmm/model-test-common.cc          +2  -2
src/hmm/hmm-utils.cc                  +2  -2
src/hmm/transition-model.cc           +16 -16
src/featbin/interpolate-pitch.cc

@@ -139,8 +139,8 @@ class PitchInterpolator {
     BaseFloat constant_prob = (1.0 - p_voicing_[t]) * opts_.interpolator_factor,
         specified_prob = p_voicing_[t] + constant_prob;
     // specified_prob adds in the extra probability mass at the observed pitch value.
-    BaseFloat log_constant_prob = log(constant_prob),
-        log_ratio = log(specified_prob / constant_prob);
+    BaseFloat log_constant_prob = Log(constant_prob),
+        log_ratio = Log(specified_prob / constant_prob);
     log_alpha_.Add(log_constant_prob);  // add log_constant_prob to all pitches at this time.
     log_alpha_(pitch_[t]) += log_ratio;  // corrects this to be like adding
src/featbin/process-pitch-feats.cc

@@ -32,7 +32,7 @@ void ProcessPovFeatures(Matrix<BaseFloat> *mat) {
   for (int32 i = 0; i < num_frames; i++) {
     BaseFloat p = (*mat)(i, 0);
     KALDI_ASSERT(p >= 0.0 && p <= 1.0);
-    (*mat)(i, 0) = log((p + 0.0001) / (1.0001 - p));
+    (*mat)(i, 0) = Log((p + 0.0001) / (1.0001 - p));
   }
 }

@@ -40,7 +40,7 @@ void TakeLogOfPitch(Matrix<BaseFloat> *mat) {
   int32 num_frames = mat->NumRows();
   for (int32 i = 0; i < num_frames; i++) {
     KALDI_ASSERT((*mat)(i, 1) > 0.0);
-    (*mat)(i, 1) = log((*mat)(i, 1));
+    (*mat)(i, 1) = Log((*mat)(i, 1));
   }
 }
src/fstext/determinize-lattice-inl.h

@@ -149,13 +149,15 @@ template<class IntType> class LatticeStringRepository {
   void ConvertToVector(const Entry *entry, vector<IntType> *out) const {
     size_t length = Size(entry);
     out->resize(length);
-    typename vector<IntType>::iterator iter = out->end() - 1;
+    if (entry != NULL) {
+      typename vector<IntType>::reverse_iterator iter = out->rbegin();
       while (entry != NULL) {
         *iter = entry->i;
         entry = entry->parent;
-        --iter;
+        ++iter;
+      }
     }
   }

   const Entry *ConvertFromVector(const vector<IntType> &vec) {
     const Entry *e = NULL;
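The ConvertToVector() change is a correctness fix, not just style: when entry == NULL the vector is resized to zero, and the old out->end() - 1 formed an iterator before begin() of an empty vector, which is undefined behavior and trips MSVC's checked-iterator assertions in debug builds, a plausible reason it surfaced as a Windows test failure. A standalone sketch of the fixed pattern, using a hypothetical Node chain rather than the Entry type above:

#include <cstddef>
#include <vector>

struct Node { int i; Node *parent; };  // hypothetical parent-linked chain

// Fill 'out' back-to-front from a parent-linked chain of known length.
void ChainToVector(const Node *node, std::size_t length,
                   std::vector<int> *out) {
  out->resize(length);
  if (node != NULL) {  // never form or write an iterator into an empty vector
    std::vector<int>::reverse_iterator iter = out->rbegin();
    while (node != NULL) {
      *iter = node->i;    // write the last element first
      node = node->parent;
      ++iter;             // a reverse iterator advances toward the front
    }
  }
}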
src/fstext/fstext-utils-test.cc

@@ -103,6 +103,9 @@ template<class Arc> void TestSafeDeterminizeWrapper() { // also tests SafeDete
   int n_syms = 2 + kaldi::Rand() % 5, n_states = 3 + kaldi::Rand() % 10,
       n_arcs = 5 + kaldi::Rand() % 30, n_final = 1 + kaldi::Rand() % 3;  // Up to 2 unique symbols.
   cout << "Testing pre-determinize with " << n_syms << " symbols, " << n_states << " states and " << n_arcs << " arcs and " << n_final << " final states.\n";
   SymbolTable *sptr = new SymbolTable("my-symbol-table");
+  sptr->AddSymbol("<eps>");
+  delete sptr;
+  sptr = new SymbolTable("my-symbol-table");
   vector<Label> all_syms;  // including epsilon.
   // Put symbols in the symbol table from 1..n_syms-1.
src/fstext/push-special.cc

@@ -20,6 +20,7 @@
 #include "fstext/push-special.h"
 #include "base/kaldi-error.h"
+#include "base/kaldi-math.h"

 namespace fst {

@@ -101,10 +102,10 @@ class PushSpecialClass {
          !aiter.Done(); aiter.Next()) {
       const Arc &arc = aiter.Value();
       StateId t = arc.nextstate;
-      double weight = exp(-arc.weight.Value());
+      double weight = kaldi::Exp(-arc.weight.Value());
       pred_[t].push_back(std::make_pair(s, weight));
     }
-    double final = exp(-fst_->Final(s).Value());
+    double final = kaldi::Exp(-fst_->Final(s).Value());
     if (final != 0.0)
       pred_[initial_state_].push_back(std::make_pair(s, final));
   }

@@ -121,9 +122,9 @@ class PushSpecialClass {
          !aiter.Done(); aiter.Next()) {
       const Arc &arc = aiter.Value();
       StateId t = arc.nextstate;
-      sum += exp(-arc.weight.Value()) * occ_[t] / occ_[s];
+      sum += kaldi::Exp(-arc.weight.Value()) * occ_[t] / occ_[s];
     }
-    sum += exp(-(fst_->Final(s).Value())) * occ_[initial_state_] / occ_[s];
+    sum += kaldi::Exp(-(fst_->Final(s).Value())) * occ_[initial_state_] / occ_[s];
     if (s == 0) {
       min_sum = sum;
       max_sum = sum;

@@ -133,7 +134,7 @@ class PushSpecialClass {
       }
     }
     KALDI_VLOG(4) << "min,max is " << min_sum << " " << max_sum;
-    return log(max_sum / min_sum);  // In FST world we'll actually
+    return kaldi::Log(max_sum / min_sum);  // In FST world we'll actually
     // dealing with logs, so the log of the ratio is more suitable
     // to compare with delta (makes testing the algorithm easier).
   }

@@ -187,7 +188,7 @@ class PushSpecialClass {
     // First get the potentials as negative-logs, like the values
     // in the FST.
     for (StateId s = 0; s < num_states_; s++) {
-      occ_[s] = -log(occ_[s]);
+      occ_[s] = -kaldi::Log(occ_[s]);
       if (KALDI_ISNAN(occ_[s]) || KALDI_ISINF(occ_[s]))
         KALDI_WARN << "NaN or inf found: " << occ_[s];
     }
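Unlike the kaldi-namespace files below, push-special.cc lives in namespace fst, so the wrappers are spelled out as kaldi::Exp and kaldi::Log; the old unqualified exp/log compiled only because they found the C library functions. A small sketch of the lookup issue (Demo is an illustrative name, not from the commit):

#include <cmath>

namespace kaldi {
inline float Exp(float x) { return std::exp(x); }  // assumed wrapper, as sketched earlier
}

namespace fst {
inline float Demo(float w) {
  // return Exp(-w);      // would not compile: no 'Exp' is visible in namespace fst
  // return exp(-w);      // unqualified lookup; which overload wins varies by compiler
  return kaldi::Exp(-w);  // explicit qualification always hits the wrapper
}
}  // namespace fst

rescale-test.cc (below) takes the other route and imports the names once, with using kaldi::Exp; using kaldi::Log; at the top of namespace fst.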
src/fstext/rescale-inl.h

@@ -21,6 +21,7 @@
 #define KALDI_FSTEXT_RESCALE_INL_H_
 #include <cstring>
 #include "base/kaldi-common.h"
+#include "base/kaldi-math.h"
 #include "util/stl-utils.h"
 #include "fstext/fstext-utils.h"

@@ -119,7 +120,7 @@ inline LogWeight RescaleToStochastic(MutableFst<LogArc> *fst,
     return Weight::One();  // can't rescale empty FST.
   // total weight).
-  Weight max = Weight(-log(2.0));
+  Weight max = Weight(-kaldi::Log(2.0));
   // upper_bound and lower_bound are in terms of weight.Value(),
   // in terms of weight they would have the reversed names.

@@ -132,6 +133,11 @@ inline LogWeight RescaleToStochastic(MutableFst<LogArc> *fst,
   Weight cur_rescale = Weight::One();
   Weight cur_tot;
   while (1) {
+    {
+      FstPrinter<LogArc> fstprinter(*fst, NULL, NULL, NULL,
+                                    false, true);
+      fstprinter.Print(&std::cout, "standard output");
+    }
     cur_tot = ComputeTotalWeight(*fst, max, delta);
     std::cerr << "Current rescaling factor is " << cur_rescale << ", total is: " << cur_tot << '\n';
     if (cur_tot.Value() < Weight::One().Value()) {  // read as: cur_tot > 1.
src/fstext/rescale-test.cc

@@ -20,11 +20,14 @@
 #include "fstext/rescale.h"
 #include "fstext/fstext-utils.h"
 #include "fstext/fst-test-utils.h"
+#include "base/kaldi-math.h"

 // Just check that it compiles, for now.

 namespace fst {
+using kaldi::Exp;
+using kaldi::Log;

 template<class Arc> void TestComputeTotalWeight() {
   typedef typename Arc::Weight Weight;

@@ -40,7 +43,7 @@ template<class Arc> void TestComputeTotalWeight() {
     fstprinter.Print(&std::cout, "standard output");
   }
-  Weight max(-log(2.0));
+  Weight max(-Log(2.0));
   Weight tot = ComputeTotalWeight(*fst, max);
   std::cout << "Total weight is: " << tot.Value() << '\n';

@@ -80,7 +83,7 @@ void TestRescaleToStochastic() {
   RescaleToStochastic(fst, diff);
   Weight tot = ShortestDistance(*fst),
-      tot2 = ComputeTotalWeight(*fst, Weight(-log(2.0)));
+      tot2 = ComputeTotalWeight(*fst, Weight(-Log(2.0)));
   std::cerr << " tot is " << tot << ", tot2 = " << tot2 << '\n';
   assert(ApproxEqual(tot2, Weight::One(), diff));
src/gmm/diag-gmm-test.cc

@@ -34,10 +34,10 @@ void InitRandomGmm(DiagGmm *gmm_in) {
   Vector<BaseFloat> weights(num_gauss);
   for (int32 i = 0; i < num_gauss; i++) {
     for (int32 j = 0; j < dim; j++) {
-      inv_vars(i, j) = exp(RandGauss() * (1.0 / (1 + j)));
+      inv_vars(i, j) = Exp(RandGauss() * (1.0 / (1 + j)));
       means(i, j) = RandGauss() * (1.0 / (1 + j));
     }
-    weights(i) = exp(RandGauss());
+    weights(i) = Exp(RandGauss());
   }
   weights.Scale(1.0 / weights.Sum());
   gmm.SetWeights(weights);

@@ -107,7 +107,7 @@ void UnitTestDiagGmm() {
     weights(m) = kaldi::RandUniform();
     for (size_t d = 0; d < dim; d++) {
       means(m, d) = kaldi::RandGauss();
-      vars(m, d) = exp(kaldi::RandGauss()) + 1e-5;
+      vars(m, d) = Exp(kaldi::RandGauss()) + 1e-5;
     }
     tot_weight += weights(m);
   }

@@ -116,10 +116,10 @@ void UnitTestDiagGmm() {
   for (size_t m = 0; m < nMix; m++) {
     weights(m) /= tot_weight;
     for (size_t d = 0; d < dim; d++) {
-      loglikes(m) += -0.5 * (M_LOG_2PI + log(vars(m, d)) + (feat(d) -
+      loglikes(m) += -0.5 * (M_LOG_2PI + Log(vars(m, d)) + (feat(d) -
           means(m, d)) * (feat(d) - means(m, d)) / vars(m, d));
     }
-    loglikes(m) += log(weights(m));
+    loglikes(m) += Log(weights(m));
   }
   loglike = loglikes.LogSumExp();

@@ -282,8 +282,8 @@ void UnitTestDiagGmm() {
   gmm1.ComputeGconsts();
   std::vector<std::pair<BaseFloat, const DiagGmm*> > vec;
-  vec.push_back(std::make_pair(0.4, (const DiagGmm*)(&gmm1)));
-  vec.push_back(std::make_pair(0.6, (const DiagGmm*)(&gmm1)));
+  vec.push_back(std::make_pair(static_cast<BaseFloat>(0.4), (const DiagGmm*)(&gmm1)));
+  vec.push_back(std::make_pair(static_cast<BaseFloat>(0.6), (const DiagGmm*)(&gmm1)));
   DiagGmm gmm2(vec);
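The static_cast<BaseFloat> in the last hunk addresses template argument deduction: make_pair(0.4, ...) deduces std::pair<double, const DiagGmm*>, which must then convert to the vector's std::pair<BaseFloat, const DiagGmm*> element type; when BaseFloat is float that conversion narrows, and stricter compilers object, which fits the Windows-fix theme of this commit. A self-contained sketch with a stub standing in for DiagGmm:

#include <utility>
#include <vector>

typedef float BaseFloat;  // Kaldi's default single-precision build
struct DiagGmmStub {};    // stand-in for DiagGmm, illustration only

int main() {
  std::vector<std::pair<BaseFloat, const DiagGmmStub*> > vec;
  DiagGmmStub gmm1;
  // Deduces pair<double, ...>: needs a narrowing pair-to-pair conversion.
  vec.push_back(std::make_pair(0.4, (const DiagGmmStub*)(&gmm1)));
  // Deduces pair<float, ...> directly: no conversion at all.
  vec.push_back(std::make_pair(static_cast<BaseFloat>(0.4),
                               (const DiagGmmStub*)(&gmm1)));
  return 0;
}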
src/gmm/diag-gmm.cc

@@ -123,9 +123,9 @@ int32 DiagGmm::ComputeGconsts() {
   for (int32 mix = 0; mix < num_mix; mix++) {
     KALDI_ASSERT(weights_(mix) >= 0);  // Cannot have negative weights.
-    BaseFloat gc = log(weights_(mix)) + offset;  // May be -inf if weights == 0
+    BaseFloat gc = Log(weights_(mix)) + offset;  // May be -inf if weights == 0
     for (int32 d = 0; d < dim; d++) {
-      gc += 0.5 * log(inv_vars_(mix, d)) - 0.5 * means_invvars_(mix, d)
+      gc += 0.5 * Log(inv_vars_(mix, d)) - 0.5 * means_invvars_(mix, d)
           * means_invvars_(mix, d) / inv_vars_(mix, d);
     }
     // Change sign for logdet because var is inverted. Also, note that

@@ -348,7 +348,7 @@ void DiagGmm::Merge(int32 target_components, std::vector<int32> *history) {
   for (int32 i = 0; i < num_comp; i++) {
     discarded_component[i] = false;
     for (int32 d = 0; d < dim; d++) {
-      logdet(i) += 0.5 * log(inv_vars_(i, d));  // +0.5 because var is inverted
+      logdet(i) += 0.5 * Log(inv_vars_(i, d));  // +0.5 because var is inverted
     }
   }

@@ -431,7 +431,7 @@ void DiagGmm::Merge(int32 target_components, std::vector<int32> *history) {
   // Update logdet for merged component
   logdet(max_i) = 0.0;
   for (int32 d = 0; d < dim; d++) {
-    logdet(max_i) += 0.5 * log(inv_vars_(max_i, d));  // +0.5 because var is inverted
+    logdet(max_i) += 0.5 * Log(inv_vars_(max_i, d));  // +0.5 because var is inverted
   }

@@ -488,7 +488,7 @@ BaseFloat DiagGmm::merged_components_logdet(BaseFloat w1, BaseFloat w2,
   tmp_var.AddVec2(-1.0, tmp_mean);
   BaseFloat merged_logdet = 0.0;
   for (int32 d = 0; d < dim; d++) {
-    merged_logdet -= 0.5 * log(tmp_var(d));  // -0.5 because var is not inverted
+    merged_logdet -= 0.5 * Log(tmp_var(d));  // -0.5 because var is not inverted
   }
   return merged_logdet;
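For orientation, the constant recomputed in ComputeGconsts() gathers every term of the diagonal-Gaussian log-likelihood that does not depend on the feature vector. With weight $w_m$, mean $\mu_{md}$ and variance $\sigma^2_{md}$ over $D$ dimensions:

\mathrm{gconst}_m \;=\; \log w_m \;-\; \tfrac{D}{2}\log(2\pi) \;+\; \tfrac{1}{2}\sum_{d=1}^{D}\Bigl(\log \sigma_{md}^{-2} \;-\; \frac{\mu_{md}^2}{\sigma_{md}^2}\Bigr)

This matches the loop above once you note that offset holds $-\tfrac{D}{2}\log(2\pi)$ and means_invvars_ stores $\mu_{md}/\sigma^2_{md}$, so means_invvars_ * means_invvars_ / inv_vars_ reduces to $\mu_{md}^2/\sigma_{md}^2$.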
src/gmm/ebw-diag-gmm-test.cc

@@ -43,7 +43,7 @@ void UnitTestEstimateMmieDiagGmm() {
   for (size_t m = 0; m < nMix; m++) {
     for (size_t d = 0; d < dim; d++) {
       means_f(m, d) = kaldi::RandGauss() * 100.0F;
-      vars_f(m, d) = exp(kaldi::RandGauss()) * 1000.0F + 1.0F;
+      vars_f(m, d) = Exp(kaldi::RandGauss()) * 1000.0F + 1.0F;
     }
     // std::cout << "Gauss " << m << ": Mean = " << means_f.Row(m) << '\n'
     //     << "Vars = " << vars_f.Row(m) << '\n';

@@ -104,7 +104,7 @@ void UnitTestEstimateMmieDiagGmm() {
   Matrix<BaseFloat> means(1, dim), vars(1, dim), invvars(1, dim);
   for (size_t d = 0; d < dim; d++) {
     means(0, d) = kaldi::RandGauss() * 100.0F;
-    vars(0, d) = exp(kaldi::RandGauss()) * 10.0F + 1e-5F;
+    vars(0, d) = Exp(kaldi::RandGauss()) * 10.0F + 1e-5F;
   }
   weights(0) = 1.0F;
   invvars.CopyFromMat(vars);
src/gmm/ebw-diag-gmm.cc

@@ -78,10 +78,10 @@ static bool EBWUpdateGaussian(
   int32 dim = orig_mean.Dim();
   for (int32 i = 0; i < dim; i++) {
     BaseFloat mean_diff = (*mean)(i) - orig_mean(i);
-    old_auxf += (occ + D) * -0.5 * (log(orig_var(i)) +
+    old_auxf += (occ + D) * -0.5 * (Log(orig_var(i)) +
         ((*var)(i) + mean_diff * mean_diff) / orig_var(i));
-    new_auxf += (occ + D) * -0.5 * (log((*var)(i)) + 1.0);
+    new_auxf += (occ + D) * -0.5 * (Log((*var)(i)) + 1.0);
   }
   *auxf_impr = new_auxf - old_auxf;
src/gmm/full-gmm-test.cc

@@ -61,7 +61,7 @@ void init_rand_diag_gmm(DiagGmm *gmm) {
     weights(m) = kaldi::RandUniform();
     for (size_t d = 0; d < dim; d++) {
       means(m, d) = kaldi::RandGauss();
-      vars(m, d) = exp(kaldi::RandGauss()) + 1e-5;
+      vars(m, d) = Exp(kaldi::RandGauss()) + 1e-5;
     }
     tot_weight += weights(m);
   }

@@ -156,7 +156,7 @@ UnitTestFullGmm() {
         + VecSpVec(means.Row(m), invcovars[m], means.Row(m))
         + VecSpVec(feat, invcovars[m], feat))
         + VecSpVec(means.Row(m), invcovars[m], feat);
-    loglikes(m) += log(weights(m));
+    loglikes(m) += Log(weights(m));
   }
   loglike = loglikes.LogSumExp();
src/gmm/full-gmm.cc

@@ -101,7 +101,7 @@ int32 FullGmm::ComputeGconsts() {
   for (int32 mix = 0; mix < num_mix; mix++) {
     KALDI_ASSERT(weights_(mix) >= 0);  // Cannot have negative weights.
-    BaseFloat gc = log(weights_(mix)) + offset;  // May be -inf if weights == 0
+    BaseFloat gc = Log(weights_(mix)) + offset;  // May be -inf if weights == 0
     SpMatrix<BaseFloat> covar(inv_covars_[mix]);
     covar.InvertDouble();
     BaseFloat logdet = covar.LogPosDefDet();

@@ -449,8 +449,8 @@ BaseFloat FullGmm::MergePreselect(int32 target_components,
        removed < num_comp - target_components && !queue.empty(); ) {
     QueueElem qelem = queue.top();
     queue.pop();
-    BaseFloat delta_log_like_old = qelem.first,
-        idx1 = qelem.second.first, idx2 = qelem.second.second;
+    BaseFloat delta_log_like_old = qelem.first;
+    int32 idx1 = qelem.second.first, idx2 = qelem.second.second;
     // the next 3 lines are to handle when components got merged
     // and moved to different indices, but we still want to consider
     // merging their descendants. [descendant = current index where
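The MergePreselect() hunk is a plain type fix: idx1 and idx2 are component indices, but chaining them onto the BaseFloat declaration of delta_log_like_old made them floats, so every later use as an index went through float-to-int conversions, which compilers flag and which would lose exactness for very large indices. Splitting the declaration gives each variable its natural type. In miniature, with an assumed QueueElem shape:

#include <utility>

typedef float BaseFloat;
// Assumed shape of the priority-queue element: (score, (index, index)).
typedef std::pair<BaseFloat, std::pair<int, int> > QueueElem;

void Demo(const QueueElem &qelem) {
  // Old: BaseFloat delta = qelem.first, idx1 = ..., idx2 = ...;  (indices as floats)
  BaseFloat delta_log_like_old = qelem.first;  // the score keeps its float type
  int idx1 = qelem.second.first,               // the indices stay integral
      idx2 = qelem.second.second;
  (void)delta_log_like_old; (void)idx1; (void)idx2;
}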
src/gmm/indirect-diff-diag-gmm.cc

@@ -217,7 +217,7 @@ void DoRescalingUpdate(const AccumDiagGmm &old_ml_acc,
       double divergence = 0.5 *
           (((new_model_mean - old_model_mean) * (new_model_mean - old_model_mean)
             + new_model_var - old_model_var) / old_model_var +
-           log(old_model_var / new_model_var));
+           Log(old_model_var / new_model_var));
       if (divergence < 0.0)
         KALDI_WARN << "Negative divergence " << divergence;
       *tot_divergence += divergence * new_ml_count;
src/gmm/mle-am-diag-gmm-test.cc

@@ -115,7 +115,7 @@ void UnitTestMleAmDiagGmm() {
   for (int32 m = 0; m < num_feat_comp; m++) {
     for (int32 d = 0; d < dim; d++) {
       means(m, d) = kaldi::RandGauss();
-      vars(m, d) = exp(kaldi::RandGauss()) + 1e-2;
+      vars(m, d) = Exp(kaldi::RandGauss()) + 1e-2;
     }
   }
   // Now generate random features with those means and variances.
src/gmm/mle-diag-gmm-test.cc

@@ -215,7 +215,7 @@ UnitTestEstimateDiagGmm() {
   for (size_t m = 0; m < nMix; m++) {
     for (size_t d = 0; d < dim; d++) {
       means_f(m, d) = kaldi::RandGauss() * 100.0F;
-      vars_f(m, d) = exp(kaldi::RandGauss()) * 1000.0F + 1.0F;
+      vars_f(m, d) = Exp(kaldi::RandGauss()) * 1000.0F + 1.0F;
     }
     // std::cout << "Gauss " << m << ": Mean = " << means_f.Row(m) << '\n'
     //     << "Vars = " << vars_f.Row(m) << '\n';

@@ -259,7 +259,7 @@ UnitTestEstimateDiagGmm() {
   Matrix<BaseFloat> means(1, dim), vars(1, dim), invvars(1, dim);
   for (size_t d = 0; d < dim; d++) {
     means(0, d) = kaldi::RandGauss() * 100.0F;
-    vars(0, d) = exp(kaldi::RandGauss()) * 10.0F + 1e-5F;
+    vars(0, d) = Exp(kaldi::RandGauss()) * 10.0F + 1e-5F;
   }
   weights(0) = 1.0F;
   invvars.CopyFromMat(vars);
src/gmm/mle-full-gmm-test.cc

@@ -132,7 +132,7 @@ BaseFloat GetLogLikeTest(const FullGmm &gmm,
   for (int32 i = 0; i < gmm.NumGauss(); i++) {
     BaseFloat logdet = -(inv_covars[i].LogPosDefDet());
-    BaseFloat log_like = log(gmm.weights()(i))
+    BaseFloat log_like = Log(gmm.weights()(i))
         - 0.5 * (gmm.Dim() * M_LOG_2PI + logdet);
     Vector<BaseFloat> offset(feats);
     offset.AddVec(-1.0, means.Row(i));
src/gmm/model-test-common.cc

@@ -93,10 +93,10 @@ void InitRandDiagGmm(int32 dim, int32 num_comp, DiagGmm *gmm) {
   Matrix<BaseFloat> means(num_comp, dim), inv_vars(num_comp, dim);
   for (int32 m = 0; m < num_comp; m++) {
-    weights(m) = exp(RandGauss());
+    weights(m) = Exp(RandGauss());
     for (int32 d = 0; d < dim; d++) {
       means(m, d) = RandGauss() / (1 + d);
-      inv_vars(m, d) = exp(RandGauss() / (1 + d)) + 1e-2;
+      inv_vars(m, d) = Exp(RandGauss() / (1 + d)) + 1e-2;
     }
   }
   weights.Scale(1.0 / weights.Sum());
src/hmm/hmm-utils.cc

@@ -125,7 +125,7 @@ fst::VectorFst<fst::StdArc> *GetHmmAsFst(
         // no pdf, hence non-estimated probability.
         // [would not happen with normal topology] . There is no transition-state
         // involved in this case.
-        log_prob = log(entry[hmm_state].transitions[trans_idx].second);
+        log_prob = Log(entry[hmm_state].transitions[trans_idx].second);
         label = 0;
       } else {  // normal probability.
         int32 trans_state =

@@ -225,7 +225,7 @@ GetHmmAsFstSimple(std::vector<int32> phone_window,
        // [would not happen with normal topology] . There is no transition-state
        // involved in this case.
        KALDI_ASSERT(!is_self_loop);
-       log_prob = log(entry[hmm_state].transitions[trans_idx].second);
+       log_prob = Log(entry[hmm_state].transitions[trans_idx].second);
        label = 0;
      } else {  // normal probability.
        int32 trans_state =
src/hmm/transition-model.cc

@@ -110,7 +110,7 @@ void TransitionModel::InitializeProbs() {
           "probability [should remove that entry in the topology]";
     if (prob > 1.0)
       KALDI_WARN << "TransitionModel::InitializeProbs, prob greater than one.";
-    log_probs_(trans_id) = log(prob);
+    log_probs_(trans_id) = Log(prob);
   }
   ComputeDerivedOfProbs();
 }

@@ -260,13 +260,13 @@ void TransitionModel::ComputeDerivedOfProbs() {
     if (tid == 0) {  // no self-loop
       non_self_loop_log_probs_(tstate) = 0.0;  // log(1.0)
     } else {
-      BaseFloat self_loop_prob = exp(GetTransitionLogProb(tid)),
+      BaseFloat self_loop_prob = Exp(GetTransitionLogProb(tid)),
           non_self_loop_prob = 1.0 - self_loop_prob;
       if (non_self_loop_prob <= 0.0) {
         KALDI_WARN << "ComputeDerivedOfProbs(): non-self-loop prob is "
                    << non_self_loop_prob;
         non_self_loop_prob = 1.0e-10;  // just so we can continue...
       }
-      non_self_loop_log_probs_(tstate) = log(non_self_loop_prob);  // will be negative.
+      non_self_loop_log_probs_(tstate) = Log(non_self_loop_prob);  // will be negative.
     }
   }
 }

@@ -318,7 +318,7 @@ void TransitionModel::Write(std::ostream &os, bool binary) const {
 }

 BaseFloat TransitionModel::GetTransitionProb(int32 trans_id) const {
-  return exp(log_probs_(trans_id));
+  return Exp(log_probs_(trans_id));
 }

 BaseFloat TransitionModel::GetTransitionLogProb(int32 trans_id) const {

@@ -376,14 +376,14 @@ void TransitionModel::MleUpdate(const Vector<double> &stats,
     // Compute objf change
     for (int32 tidx = 0; tidx < n; tidx++) {
       if (new_probs(tidx) == cfg.floor) num_floored++;
-      double objf_change = counts(tidx) * (log(new_probs(tidx)) - log(old_probs(tidx)));
+      double objf_change = counts(tidx) * (Log(new_probs(tidx)) - Log(old_probs(tidx)));
       objf_impr_sum += objf_change;
     }
     // Commit updated values.
     for (int32 tidx = 0; tidx < n; tidx++) {
       int32 tid = PairToTransitionId(tstate, tidx);
-      log_probs_(tid) = log(new_probs(tidx));
+      log_probs_(tid) = Log(new_probs(tidx));
       if (log_probs_(tid) - log_probs_(tid) != 0.0)
         KALDI_ERR << "Log probs is inf or NaN: error in update or bad stats?";
     }
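The guard if (log_probs_(tid) - log_probs_(tid) != 0.0), repeated after every commit loop in this file, is a portable NaN/infinity check: any finite x satisfies x - x == 0.0, whereas NaN - NaN and inf - inf are both NaN, and NaN compares unequal to everything. It serves the same purpose as the KALDI_ISNAN/KALDI_ISINF macros seen in push-special.cc above, without depending on isnan/isinf availability. A self-contained illustration; NotFinite is a hypothetical name:

#include <cassert>
#include <limits>

// True when x is NaN or +/-infinity; finite x gives x - x == 0 exactly.
// (Relies on IEEE semantics, so it breaks under -ffast-math style flags.)
static bool NotFinite(double x) { return x - x != 0.0; }

int main() {
  assert(!NotFinite(0.5));
  assert(NotFinite(std::numeric_limits<double>::infinity()));
  assert(NotFinite(-std::numeric_limits<double>::infinity()));
  assert(NotFinite(std::numeric_limits<double>::quiet_NaN()));
  return 0;
}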
@@ -435,14 +435,14 @@ void TransitionModel::MapUpdate(const Vector<double> &stats,
         (cfg.tau + tstate_tot);
     // Compute objf change
     for (int32 tidx = 0; tidx < n; tidx++) {
-      double objf_change = counts(tidx) * (log(new_probs(tidx)) - log(old_probs(tidx)));
+      double objf_change = counts(tidx) * (Log(new_probs(tidx)) - Log(old_probs(tidx)));
       objf_impr_sum += objf_change;
     }
     // Commit updated values.
     for (int32 tidx = 0; tidx < n; tidx++) {
       int32 tid = PairToTransitionId(tstate, tidx);
-      log_probs_(tid) = log(new_probs(tidx));
+      log_probs_(tid) = Log(new_probs(tidx));
       if (log_probs_(tid) - log_probs_(tid) != 0.0)
         KALDI_ERR << "Log probs is inf or NaN: error in update or bad stats?";
     }

@@ -524,8 +524,8 @@ void TransitionModel::MleUpdateShared(const Vector<double> &stats,
     // Compute objf change
     for (int32 tidx = 0; tidx < n; tidx++) {
       if (new_probs(tidx) == cfg.floor) num_floored++;
-      double objf_change = counts(tidx) * (log(new_probs(tidx)) - log(old_probs(tidx)));
+      double objf_change = counts(tidx) * (Log(new_probs(tidx)) - Log(old_probs(tidx)));
       objf_impr_sum += objf_change;
     }
     // Commit updated values.

@@ -535,7 +535,7 @@ void TransitionModel::MleUpdateShared(const Vector<double> &stats,
     int32 tstate = *iter;
     for (int32 tidx = 0; tidx < n; tidx++) {
       int32 tid = PairToTransitionId(tstate, tidx);
-      log_probs_(tid) = log(new_probs(tidx));
+      log_probs_(tid) = Log(new_probs(tidx));
       if (log_probs_(tid) - log_probs_(tid) != 0.0)
         KALDI_ERR << "Log probs is inf or NaN: error in update or bad stats?";
     }

@@ -612,8 +612,8 @@ void TransitionModel::MapUpdateShared(const Vector<double> &stats,
         (pdf_tot + cfg.tau);
     // Compute objf change
     for (int32 tidx = 0; tidx < n; tidx++) {
-      double objf_change = counts(tidx) * (log(new_probs(tidx)) - log(old_probs(tidx)));
+      double objf_change = counts(tidx) * (Log(new_probs(tidx)) - Log(old_probs(tidx)));
       objf_impr_sum += objf_change;
     }
     // Commit updated values.

@@ -623,7 +623,7 @@ void TransitionModel::MapUpdateShared(const Vector<double> &stats,
     int32 tstate = *iter;
     for (int32 tidx = 0; tidx < n; tidx++) {
       int32 tid = PairToTransitionId(tstate, tidx);
-      log_probs_(tid) = log(new_probs(tidx));
+      log_probs_(tid) = Log(new_probs(tidx));
       if (log_probs_(tid) - log_probs_(tid) != 0.0)
         KALDI_ERR << "Log probs is inf or NaN: error in update or bad stats?";
     }