Abdelwahab HEBA / kaldi_2015 / Commits / 36cad857

Commit 36cad857, authored Jul 22, 2015 by Jan "yenda" Trmal

Merge pull request #8 from jtrmal/windows-test-fixes

Windows test fixes

Parents: db63ae29, b0439a2e
Changes: 81 files in the full merge; this page shows 20 changed files with 40 additions and 38 deletions (+40 -38).
src/ivector/logistic-regression-test.cc      +1  -1
src/ivector/logistic-regression.cc           +1  -1
src/kws/kws-scoring.cc                       +2  -1
src/lat/lattice-functions.cc                 +5  -5
src/lat/sausages.cc                          +3  -3
src/latbin/lattice-combine.cc                +1  -1
src/latbin/lattice-confidence.cc             +3  -3
src/lm/const-arpa-lm.cc                      +3  -2
src/matrix/matrix-lib-test.cc                +10 -10
src/nnet/nnet-component-test.cc              +0  -1
src/nnet/nnet-rbm.h                          +1  -1
src/nnet2/mixup-nnet.cc                      +1  -1
src/nnet2/nnet-component.cc                  +1  -1
src/nnet2/nnet-compute-discriminative.cc     +1  -1
src/nnet2/nnet-compute.cc                    +1  -1
src/nnet2/nnet-precondition-online-test.cc   +1  -1
src/nnet2/nnet-precondition-online.cc        +1  -1
src/nnet2bin/nnet-adjust-priors.cc           +1  -1
src/nnet2bin/nnet-modify-learning-rates.cc   +2  -2
src/nnetbin/cuda-gpu-available.cc            +1  -0
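Nearly every hunk below makes the same mechanical substitution: bare exp() / log() calls (and std::log) become Kaldi's Exp() / Log() wrappers, and src/lm/const-arpa-lm.cc gains the #include "base/kaldi-math.h" that provides them. As the branch name suggests, the motivation is Windows/MSVC, where the unqualified C math functions provoke ambiguous-overload and implicit float/double conversion warnings. A minimal sketch of what such wrappers can look like, in the spirit of base/kaldi-math.h (the exact Kaldi definitions may differ):

    // Sketch only: type-specific overloads keep float math in float,
    // avoiding MSVC's double<->float conversion warnings at call sites.
    #include <cmath>

    namespace kaldi {

    inline double Exp(double x) { return exp(x); }
    inline float  Exp(float x)  { return expf(x); }

    inline double Log(double x) { return log(x); }
    inline float  Log(float x)  { return logf(x); }

    // Accurate log(1 + x) for small x; used by the SoftHinge test below.
    inline double Log1p(double x) { return log1p(x); }
    inline float  Log1p(float x)  { return log1pf(x); }

    }  // namespace kaldi

With overloads like these in scope, a call such as Log(10.0f) resolves to the float version, which is why the literal in const-arpa-lm.cc also changes from log(10) to Log(10.0f). The per-file diffs follow.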
src/ivector/logistic-regression-test.cc
@@ -117,7 +117,7 @@ void UnitTestTrain() {
   double objf_rand_w = classifier.GetObjfAndGrad(xs_with_prior, ys, xw_rand,
                                                  &grad, normalizer);
   KALDI_ASSERT(objf_trained > objf_rand_w);
-  KALDI_ASSERT(objf_trained > std::log(1.0 / n_xs));
+  KALDI_ASSERT(objf_trained > Log(1.0 / n_xs));
 }
 }
src/ivector/logistic-regression.cc
@@ -249,7 +249,7 @@ BaseFloat LogisticRegression::GetObjfAndGrad(
       class_sum += row(cols[j]);
     }
     if (class_sum < 1.0e-20) class_sum = 1.0e-20;
-    raw_objf += std::log(class_sum);
+    raw_objf += Log(class_sum);
     // Iterate over weights for each component. If there are no
     // mixtures each row corresponds to a class.
     for (int32 k = 0; k < weights_.NumRows(); k++) {
src/kws/kws-scoring.cc
@@ -310,7 +310,8 @@ void TwvMetricsOptions::Register(OptionsItf *po) {
   // a mandatory argument, not optional
 }

-struct TwvMetricsStats {
+class TwvMetricsStats {
+ public:
   kws_internal::KwScoreStats global_keyword_stats;
   kws_internal::KwStats keyword_stats;
   kws_internal::PerKwSweepStats otwv_sweep_cache;
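The struct-to-class change here (with an explicit public: to preserve member access) likely targets MSVC warning C4099, which fires when a type is forward-declared with one class-key and defined with the other; the two forms are otherwise equivalent. A reduced, hypothetical illustration (names invented, not from the commit):

    // Elsewhere a forward declaration says 'class'...
    class StatsDemo;

    // ...so a definition that said 'struct StatsDemo { int n; };'
    // would draw MSVC warning C4099. Fix: use the same class-key in
    // both places, restoring public access explicitly since 'class'
    // defaults to private.
    class StatsDemo {
     public:
      int n;
    };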
src/lat/lattice-functions.cc
@@ -326,18 +326,18 @@ BaseFloat LatticeForwardBackward(const Lattice &lat, Posterior *post,
         // The following "if" is an optimization to avoid un-needed exp().
         if (transition_id != 0 || acoustic_like_sum != NULL) {
-          double posterior = exp(alpha[s] + arc_beta - tot_forward_prob);
+          double posterior = Exp(alpha[s] + arc_beta - tot_forward_prob);
           if (transition_id != 0) // Arc has a transition-id on it [not epsilon]
             (*post)[state_times[s]].push_back(std::make_pair(transition_id,
-                                                             posterior));
+                static_cast<kaldi::BaseFloat>(posterior)));
           if (acoustic_like_sum != NULL)
             *acoustic_like_sum -= posterior * arc.weight.Value2();
         }
       }
       if (acoustic_like_sum != NULL && f != Weight::Zero()) {
         double final_logprob = -ConvertToCost(f),
-            posterior = exp(alpha[s] + final_logprob - tot_forward_prob);
+            posterior = Exp(alpha[s] + final_logprob - tot_forward_prob);
         *acoustic_like_sum -= posterior * f.Value2();
       }
       beta[s] = this_beta;

@@ -894,12 +894,12 @@ BaseFloat LatticeForwardBackwardMpeVariants(
       beta_smbr[s] += arc_scale * (beta_smbr[arc.nextstate] + frame_acc);
       if (transition_id != 0) { // Arc has a transition-id on it [not epsilon]
-        double posterior = exp(alpha[s] + arc_beta - tot_forward_prob);
+        double posterior = Exp(alpha[s] + arc_beta - tot_forward_prob);
         double acc_diff = alpha_smbr[s] + frame_acc + beta_smbr[arc.nextstate]
                           - tot_forward_score;
         double posterior_smbr = posterior * acc_diff;
         (*post)[state_times[s]].push_back(std::make_pair(transition_id,
-                                                         posterior_smbr));
+                static_cast<BaseFloat>(posterior_smbr)));
       }
     }
   }
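Besides the exp() to Exp() swaps, both hunks above wrap the value pushed into the posterior list in static_cast<BaseFloat>(...): posterior is computed as a double, while the pairs stored in a Posterior hold BaseFloat (float by default), and MSVC flags the implicit narrowing (warning C4244). A reduced sketch of the pattern, with the type alias and helper invented for illustration:

    #include <utility>
    #include <vector>

    typedef float BaseFloat;  // Kaldi's BaseFloat is float by default.
    typedef std::vector<std::vector<std::pair<int, BaseFloat> > > PosteriorLike;

    void PushPosterior(PosteriorLike &post, int t, int transition_id,
                       double posterior) {
      // Implicit double -> float narrowing would warn on MSVC (C4244);
      // the explicit cast documents that the precision loss is intended.
      post[t].push_back(std::make_pair(transition_id,
                                       static_cast<BaseFloat>(posterior)));
    }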
src/lat/sausages.cc
@@ -122,7 +122,7 @@ double MinimumBayesRisk::EditDistance(int32 N, int32 Q,
         alpha_dash_arc(q) = std::min(a1, std::min(a2, a3));
       }
       // line 19:
-      alpha_dash(n, q) += exp(alpha(s_a) + p_a - alpha(n)) * alpha_dash_arc(q);
+      alpha_dash(n, q) += Exp(alpha(s_a) + p_a - alpha(n)) * alpha_dash_arc(q);
     }
   }
 }

@@ -182,7 +182,7 @@ void MinimumBayesRisk::AccStats() {
       beta_dash_arc.SetZero();  // line 19.
       for (int32 q = Q; q >= 1; q--) {
         // line 21:
-        beta_dash_arc(q) += exp(alpha(s_a) + p_a - alpha(n)) * beta_dash(n, q);
+        beta_dash_arc(q) += Exp(alpha(s_a) + p_a - alpha(n)) * beta_dash(n, q);
         switch (static_cast<int>(b_arc[q])) {  // lines 22 and 23:
           case 1:
             beta_dash(s_a, q - 1) += beta_dash_arc(q);

@@ -210,7 +210,7 @@ void MinimumBayesRisk::AccStats() {
             KALDI_ERR << "Invalid b_arc value";  // error in code.
       }
     }
-      beta_dash_arc(0) += exp(alpha(s_a) + p_a - alpha(n)) * beta_dash(n, 0);
+      beta_dash_arc(0) += Exp(alpha(s_a) + p_a - alpha(n)) * beta_dash(n, 0);
       beta_dash(s_a, 0) += beta_dash_arc(0);  // line 26.
   }
 }
src/latbin/lattice-combine.cc
@@ -81,7 +81,7 @@ bool CompactLatticeNormalize(CompactLattice *clat, BaseFloat weight,
   // If exp_weights = false, add to the log AM & LM scores.
   if (!exp_weights)
-    total_backward_cost -= std::log(weight);
+    total_backward_cost -= Log(weight);
   for (fst::StateIterator<CompactLattice> sit(*clat); !sit.Done(); sit.Next()) {
     CompactLatticeWeight f = clat->Final(sit.Value());
src/latbin/lattice-confidence.cc
@@ -114,7 +114,7 @@ int main(int argc, char *argv[]) {
         }
         num_done++;
         confidence = std::min(max_output, confidence);  // disallow infinity.
-        sum_neg_exp += exp(-confidence);  // diagnostic.
+        sum_neg_exp += Exp(-confidence);  // diagnostic.
         confidence_writer.Write(key, confidence);
       }
     } else {

@@ -147,7 +147,7 @@ int main(int argc, char *argv[]) {
         }
         num_done++;
         confidence = std::min(max_output, confidence);  // disallow infinity.
-        sum_neg_exp += exp(-confidence);  // diagnostic.
+        sum_neg_exp += Exp(-confidence);  // diagnostic.
         confidence_writer.Write(key, confidence);
       }
     }

@@ -157,7 +157,7 @@ int main(int argc, char *argv[]) {
               << num_empty << " were equivalent to the empty lattice.";
     if (num_done != 0)
       KALDI_LOG << "Average confidence (averaged in negative-log space) is "
-                << -log(sum_neg_exp / num_done);
+                << -Log(sum_neg_exp / num_done);
     if (num_same_sentence != 0) {
       KALDI_WARN << num_same_sentence << " lattices had the same sentence on "
src/lm/const-arpa-lm.cc
@@ -22,6 +22,7 @@
 #include "lm/const-arpa-lm.h"
 #include "util/stl-utils.h"
 #include "util/text-utils.h"
+#include "base/kaldi-math.h"

 namespace kaldi {

@@ -396,8 +397,8 @@ void ConstArpaLmBuilder::Read(std::istream &is, bool binary) {
     KALDI_ASSERT(ConvertStringToReal(col[0], &logprob));
     KALDI_ASSERT(ConvertStringToReal(col[1 + cur_order], &backoff_logprob));
     if (natural_base_) {
-      logprob *= log(10);
-      backoff_logprob *= log(10);
+      logprob *= Log(10.0f);
+      backoff_logprob *= Log(10.0f);
     }
     // If <ngram_order_> is larger than 1, then we do not create LmState for
src/matrix/matrix-lib-test.cc
@@ -601,7 +601,7 @@ static void UnitTestSimpleForVec() { // testing some simple operaters on vector
   V1.CopyFromVec(V);
   V1.ApplyExp();
   Real a = V.LogSumExp();
-  V2.Set(exp(V.LogSumExp()));
+  V2.Set(Exp(V.LogSumExp()));
   V1.DivElements(V2);
   V2.CopyFromVec(V);
@@ -1144,7 +1144,7 @@ template<typename Real> static void UnitTestDeterminantSign() {
     // add in a scaling factor too.
     Real tmp = 1.0 + ((Rand() % 5) * 0.01);
-    Real logdet_factor = dimM * log(tmp);
+    Real logdet_factor = dimM * Log(tmp);
     N.Scale(tmp);
     S.Scale(tmp);
@@ -1422,7 +1422,7 @@ template<typename Real> static void UnitTestEig() {
     {  // Check that the eigenvalues match up with the determinant.
       BaseFloat logdet_check = 0.0, logdet = M.LogDet();
       for (MatrixIndexT i = 0; i < dimM; i++)
-        logdet_check += 0.5 * log(real_eigs(i)*real_eigs(i) + imag_eigs(i)*imag_eigs(i));
+        logdet_check += 0.5 * Log(real_eigs(i)*real_eigs(i) + imag_eigs(i)*imag_eigs(i));
       AssertEqual(logdet_check, logdet);
     }
     Matrix<Real> Pinv(P);
@@ -2305,9 +2305,9 @@ template<typename Real> static void UnitTestTanh() {
     for (int32 c = 0; c < dimN; c++) {
       Real x = N(r, c);
       if (x > 0.0) {
-        x = -1.0 + 2.0 / (1.0 + exp(-2.0 * x));
+        x = -1.0 + 2.0 / (1.0 + Exp(-2.0 * x));
       } else {
-        x = 1.0 - 2.0 / (1.0 + exp(2.0 * x));
+        x = 1.0 - 2.0 / (1.0 + Exp(2.0 * x));
       }
       N(r, c) = x;
       Real out_diff = P(r, c), in_diff = out_diff * (1.0 - x * x);
@@ -2331,7 +2331,7 @@ template<typename Real> static void UnitTestSigmoid() {
   for (int32 r = 0; r < dimM; r++) {
     for (int32 c = 0; c < dimN; c++) {
-      Real x = N(r, c), y = 1.0 / (1 + exp(-x));
+      Real x = N(r, c), y = 1.0 / (1 + Exp(-x));
       N(r, c) = y;
       Real out_diff = P(r, c), in_diff = out_diff * y * (1.0 - y);
       Q(r, c) = in_diff;
@@ -2356,7 +2356,7 @@ template<typename Real> static void UnitTestSoftHinge() {
       Real x = M(r, c);
       Real &y = N(r, c);
       if (x > 10.0) y = x;
-      else y = log(1.0 + exp(x));
+      else y = Log1p(Exp(x));
     }
   }
   O.SoftHinge(M);
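This is the one hunk in the file that changes the computation rather than just the spelling: log(1.0 + exp(x)) becomes Log1p(Exp(x)). For strongly negative x, exp(x) is so small that 1.0 + exp(x) rounds to exactly 1.0 in double precision and the plain form returns 0, while log1p preserves the tiny result. A standalone check of the difference (not from the commit):

    #include <cmath>
    #include <cstdio>

    int main() {
      double x = -40.0;
      // exp(-40) ~= 4.25e-18, far below the ~2.2e-16 spacing of doubles
      // around 1.0, so 1.0 + exp(x) rounds to 1.0 and the naive form
      // loses the answer entirely.
      double naive  = log(1.0 + exp(x));  // evaluates to 0.0
      double stable = log1p(exp(x));      // ~= 4.25e-18, the correct value
      printf("naive = %g, stable = %g\n", naive, stable);
      return 0;
    }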
@@ -2395,7 +2395,7 @@ template<typename Real> static void UnitTestSimple() {
   {
     Vector<Real> V2(V);
     for (MatrixIndexT i = 0; i < V2.Dim(); i++)
-      V2(i) = exp(V2(i));
+      V2(i) = Exp(V2(i));
     V.ApplyExp();
     AssertEqual(V, V2);
   }
@@ -2403,7 +2403,7 @@ template<typename Real> static void UnitTestSimple() {
   Matrix<Real> N2(N), N3(N);
   for (MatrixIndexT i = 0; i < N.NumRows(); i++)
     for (MatrixIndexT j = 0; j < N.NumCols(); j++)
-      N2(i, j) = exp(N2(i, j));
+      N2(i, j) = Exp(N2(i, j));
   N3.ApplyExp();
   AssertEqual(N2, N3);
@@ -3121,7 +3121,7 @@ template<typename Real> static void UnitTestLbfgs() {
     Vector<Real> dlogf_dx(v);  // derivative of log(f) w.r.t. x.
     dlogf_dx.AddSpVec(-1.0, S, x, 1.0);
     KALDI_VLOG(2) << "Gradient magnitude is " << dlogf_dx.Norm(2.0);
-    Real f = exp(c * logf);
+    Real f = Exp(c * logf);
     Vector<Real> df_dx(dlogf_dx);
     df_dx.Scale(f * c);  // comes from derivative of the exponential function.
     f *= sign;
src/nnet/nnet-component-test.cc
@@ -292,7 +292,6 @@ namespace nnet1 {
   AssertEqual(mat_in_diff, mat_in_diff_ref);
   delete c;
 }
 }  // namespace nnet1

(The hunk header records a one-line deletion; the deleted line is not recoverable from this page.)
src/nnet/nnet-rbm.h
@@ -166,7 +166,7 @@ class Rbm : public RbmBase {
     for (int32 d = 0; d < p.Dim(); d++) {
       if (p(d) < 0.0001) p(d) = 0.0001;
       if (p(d) > 0.9999) p(d) = 0.9999;
-      logit_p(d) = log(p(d)) - log(1.0 - p(d));
+      logit_p(d) = Log(p(d)) - Log(1.0 - p(d));
     }
     vis_bias_ = logit_p;
     KALDI_ASSERT(vis_bias_.Dim() == InputDim());
src/nnet2/mixup-nnet.cc
@@ -199,7 +199,7 @@ void SoftmaxComponent::MixUp(int32 num_mixtures,
       rand.SetRandn();
       cur_vec.AddVec(perturb_stddev, rand);
       new_vec.AddVec(-perturb_stddev, rand);
-      this_new_bias_term(max_index) += log(0.5);
+      this_new_bias_term(max_index) += Log(0.5);
       this_new_bias_term(new_index) = this_new_bias_term(max_index);
     }
     old_offset += this_old_dim;
src/nnet2/nnet-component.cc
@@ -1032,7 +1032,7 @@ void LogSoftmaxComponent::Propagate(const ChunkInfo &in_info,
   out->ApplyLogSoftMaxPerRow(in);
   // Just to be consistent with SoftmaxComponent::Propagate()
-  out->ApplyFloor(log(1.0e-20));
+  out->ApplyFloor(Log(1.0e-20));
 }

 void LogSoftmaxComponent::Backprop(const ChunkInfo &in_info,
src/nnet2/nnet-compute-discriminative.cc
@@ -251,7 +251,7 @@ void NnetDiscriminativeUpdater::LatticeComputations() {
       num_floored++;
     }
     int32 pdf_id = requested_indexes[index].second;
-    BaseFloat pseudo_loglike = log(post / priors(pdf_id)) * opts_.acoustic_scale;
+    BaseFloat pseudo_loglike = Log(post / priors(pdf_id)) * opts_.acoustic_scale;
     KALDI_ASSERT(!KALDI_ISINF(pseudo_loglike) && !KALDI_ISNAN(pseudo_loglike));
     answers[index] = pseudo_loglike;
   }
src/nnet2/nnet-compute.cc
@@ -125,7 +125,7 @@ BaseFloat NnetComputer::ComputeLastLayerDeriv(const Posterior &pdf_post,
     KALDI_ASSERT(label >= 0 && label < num_pdfs);
     BaseFloat this_prob = last_layer_output(i, label);
     KALDI_ASSERT(this_prob > 0.99e-20);  // We floored to 1.0e-20 in SoftmaxLayer.
-    tot_objf += weight * log(this_prob);
+    tot_objf += weight * Log(this_prob);
     tot_weight += weight;
     (*deriv)(i, label) += weight / this_prob;  // could be "=", assuming the
                                                // labels are all distinct.
src/nnet2/nnet-precondition-online-test.cc
@@ -115,7 +115,7 @@ void OnlinePreconditionerSimple::Init(const MatrixBase<double> &R0) {
 BaseFloat OnlinePreconditionerSimple::Eta(int32 N) const {
   KALDI_ASSERT(num_samples_history_ > 0.0);
-  return 1.0 - exp(-N / num_samples_history_);
+  return 1.0 - Exp(-N / num_samples_history_);
 }
src/nnet2/nnet-precondition-online.cc
@@ -389,7 +389,7 @@ void OnlinePreconditioner::PreconditionDirectionsInternal(
 BaseFloat OnlinePreconditioner::Eta(int32 N) const {
   KALDI_ASSERT(num_samples_history_ > 0.0);
-  return 1.0 - exp(-N / num_samples_history_);
+  return 1.0 - Exp(-N / num_samples_history_);
 }

 void OnlinePreconditioner::ComputeWt1(int32 N,
src/nnet2bin/nnet-adjust-priors.cc
@@ -41,7 +41,7 @@ BaseFloat KlDivergence(const Vector<BaseFloat> &p,
   for (int32 i = 0; i < p.Dim(); i++) {
     BaseFloat p_prob = p(i) / sum_p, q_prob = q(i) / sum_q;
-    ans += p_prob * log(p_prob / q_prob);
+    ans += p_prob * Log(p_prob / q_prob);
   }
   return ans;
 }
src/nnet2bin/nnet-modify-learning-rates.cc
@@ -161,7 +161,7 @@ int main(int argc, char *argv[]) {
     // Gets target geometric mean.
     BaseFloat target_geometric_mean = 0.0;
     if (average_learning_rate == 0.0) {
-      target_geometric_mean = exp(cur_nnet_learning_rates.SumLog()
+      target_geometric_mean = Exp(cur_nnet_learning_rates.SumLog()
                                   / static_cast<BaseFloat>(num_updatable));
     } else {
       target_geometric_mean = average_learning_rate;

@@ -177,7 +177,7 @@ int main(int argc, char *argv[]) {
     nnet_learning_rates(num_updatable - 1) *= last_layer_factor;
     KALDI_ASSERT(first_layer_factor > 0.0);
     nnet_learning_rates(0) *= first_layer_factor;
-    BaseFloat cur_geometric_mean = exp(nnet_learning_rates.SumLog()
+    BaseFloat cur_geometric_mean = Exp(nnet_learning_rates.SumLog()
                                        / static_cast<BaseFloat>(num_updatable));
     nnet_learning_rates.Scale(target_geometric_mean / cur_geometric_mean);
     KALDI_LOG << "New learning rates for current model per layer are "
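Both hunks compute a geometric mean through the identity geomean(r_1, ..., r_n) = exp((1/n) * sum_i log(r_i)), with SumLog() supplying the sum of logs; only the exp spelling changes. A standalone restatement of the identity (not from the commit):

    #include <cmath>
    #include <cstdio>

    int main() {
      const double rates[] = {0.008, 0.004, 0.002};
      const int n = 3;
      double sum_log = 0.0;
      for (int i = 0; i < n; i++) sum_log += log(rates[i]);
      // exp(mean of logs) equals the geometric mean:
      // cbrt(0.008 * 0.004 * 0.002) = 0.004.
      double geometric_mean = exp(sum_log / n);
      printf("geometric mean = %g\n", geometric_mean);  // prints 0.004
      return 0;
    }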
src/nnetbin/cuda-gpu-available.cc
@@ -62,6 +62,7 @@ int main(int argc, char *argv[]) try {
     << std::endl
     << "### - Check with NVidia web that your 'display driver' and 'CUDA toolkit' is not too old."
     << std::endl;
+  static_cast<void>(e);  // To avoid "unreferenced local variable"
   return 1;
 }
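The single added line is the cast-to-void idiom: the hunk header shows main is a function-try-block, and in builds where the caught exception object e is never otherwise referenced, MSVC warns about an unreferenced local variable; casting it to void marks it as deliberately unused without changing behavior. A minimal sketch, assuming the handler catches std::exception (the catch clause itself lies outside the visible hunk):

    #include <exception>

    int main() try {
      // ... device-probing code that may throw ...
      return 0;
    } catch (const std::exception &e) {
      // 'e' may go unreferenced in some configurations; the cast-to-void
      // suppresses the "unreferenced local variable" warning portably.
      static_cast<void>(e);
      return 1;
    }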