Commit 36cad857 authored by Jan "yenda" Trmal

Merge pull request #8 from jtrmal/windows-test-fixes

Windows test fixes
parents db63ae29 b0439a2e
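
The changes below replace direct calls to exp(), log(), and std::log() with Kaldi's Exp(), Log(), and Log1p() wrappers from base/kaldi-math.h (adding that include where it was missing), add explicit BaseFloat casts where a double posterior is pushed into a Posterior entry, change TwvMetricsStats from a struct to a class, and cast an unused exception variable to void to silence an "unreferenced local variable" warning, so the affected code and tests build cleanly on Windows. A minimal sketch of the substitution pattern follows; the helper NegHalfLogSum is hypothetical and only the Exp()/Log() wrappers are taken from base/kaldi-math.h.

// Minimal sketch of the pattern applied throughout this merge; not part of
// the patch itself. It assumes only the overloaded Exp()/Log() wrappers
// declared in base/kaldi-math.h; the helper NegHalfLogSum is hypothetical.
#include "base/kaldi-math.h"

namespace kaldi {

BaseFloat NegHalfLogSum(BaseFloat a, BaseFloat b) {
  // Before: -0.5 * log(exp(a) + exp(b));  plain exp()/log() calls can resolve
  // differently depending on compiler and argument type (e.g. log(10) on an
  // integer argument can be ambiguous under MSVC's overload set).
  // After: the Kaldi wrappers pick the float or double overload from the
  // argument type, which is what the hunks below switch to.
  return -0.5f * Log(Exp(a) + Exp(b));
}

}  // namespace kaldi
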
@@ -117,7 +117,7 @@ void UnitTestTrain() {
 double objf_rand_w = classifier.GetObjfAndGrad(xs_with_prior, ys,
 xw_rand, &grad, normalizer);
 KALDI_ASSERT(objf_trained > objf_rand_w);
-KALDI_ASSERT(objf_trained > std::log(1.0 / n_xs));
+KALDI_ASSERT(objf_trained > Log(1.0 / n_xs));
 }
 }
......
@@ -249,7 +249,7 @@ BaseFloat LogisticRegression::GetObjfAndGrad(
 class_sum += row(cols[j]);
 }
 if (class_sum < 1.0e-20) class_sum = 1.0e-20;
-raw_objf += std::log(class_sum);
+raw_objf += Log(class_sum);
 // Iterate over weights for each component. If there are no
 // mixtures each row corresponds to a class.
 for (int32 k = 0; k < weights_.NumRows(); k++) {
......
@@ -310,7 +310,8 @@ void TwvMetricsOptions::Register(OptionsItf *po) {
 // a mandatory argument, not optional
 }
-struct TwvMetricsStats {
+class TwvMetricsStats {
+ public:
 kws_internal::KwScoreStats global_keyword_stats;
 kws_internal::KwStats keyword_stats;
 kws_internal::PerKwSweepStats otwv_sweep_cache;
......
@@ -326,18 +326,18 @@ BaseFloat LatticeForwardBackward(const Lattice &lat, Posterior *post,
 // The following "if" is an optimization to avoid un-needed exp().
 if (transition_id != 0 || acoustic_like_sum != NULL) {
-double posterior = exp(alpha[s] + arc_beta - tot_forward_prob);
+double posterior = Exp(alpha[s] + arc_beta - tot_forward_prob);
 if (transition_id != 0) // Arc has a transition-id on it [not epsilon]
 (*post)[state_times[s]].push_back(std::make_pair(transition_id,
-posterior));
+static_cast<kaldi::BaseFloat>(posterior)));
 if (acoustic_like_sum != NULL)
 *acoustic_like_sum -= posterior * arc.weight.Value2();
 }
 }
 if (acoustic_like_sum != NULL && f != Weight::Zero()) {
 double final_logprob = - ConvertToCost(f),
-posterior = exp(alpha[s] + final_logprob - tot_forward_prob);
+posterior = Exp(alpha[s] + final_logprob - tot_forward_prob);
 *acoustic_like_sum -= posterior * f.Value2();
 }
 beta[s] = this_beta;
@@ -894,12 +894,12 @@ BaseFloat LatticeForwardBackwardMpeVariants(
 beta_smbr[s] += arc_scale * (beta_smbr[arc.nextstate] + frame_acc);
 if (transition_id != 0) { // Arc has a transition-id on it [not epsilon]
-double posterior = exp(alpha[s] + arc_beta - tot_forward_prob);
+double posterior = Exp(alpha[s] + arc_beta - tot_forward_prob);
 double acc_diff = alpha_smbr[s] + frame_acc + beta_smbr[arc.nextstate]
 - tot_forward_score;
 double posterior_smbr = posterior * acc_diff;
 (*post)[state_times[s]].push_back(std::make_pair(transition_id,
-posterior_smbr));
+static_cast<BaseFloat>(posterior_smbr)));
 }
 }
 }
......
@@ -122,7 +122,7 @@ double MinimumBayesRisk::EditDistance(int32 N, int32 Q,
 alpha_dash_arc(q) = std::min(a1, std::min(a2, a3));
 }
 // line 19:
-alpha_dash(n, q) += exp(alpha(s_a) + p_a - alpha(n)) * alpha_dash_arc(q);
+alpha_dash(n, q) += Exp(alpha(s_a) + p_a - alpha(n)) * alpha_dash_arc(q);
 }
 }
 }
@@ -182,7 +182,7 @@ void MinimumBayesRisk::AccStats() {
 beta_dash_arc.SetZero(); // line 19.
 for (int32 q = Q; q >= 1; q--) {
 // line 21:
-beta_dash_arc(q) += exp(alpha(s_a) + p_a - alpha(n)) * beta_dash(n, q);
+beta_dash_arc(q) += Exp(alpha(s_a) + p_a - alpha(n)) * beta_dash(n, q);
 switch (static_cast<int>(b_arc[q])) { // lines 22 and 23:
 case 1:
 beta_dash(s_a, q-1) += beta_dash_arc(q);
@@ -210,7 +210,7 @@ void MinimumBayesRisk::AccStats() {
 KALDI_ERR << "Invalid b_arc value"; // error in code.
 }
 }
-beta_dash_arc(0) += exp(alpha(s_a) + p_a - alpha(n)) * beta_dash(n, 0);
+beta_dash_arc(0) += Exp(alpha(s_a) + p_a - alpha(n)) * beta_dash(n, 0);
 beta_dash(s_a, 0) += beta_dash_arc(0); // line 26.
 }
 }
......
@@ -81,7 +81,7 @@ bool CompactLatticeNormalize(CompactLattice *clat, BaseFloat weight,
 // If exp_weights = false, add to the log AM & LM scores.
 if (!exp_weights)
-total_backward_cost -= std::log(weight);
+total_backward_cost -= Log(weight);
 for (fst::StateIterator<CompactLattice> sit(*clat); !sit.Done(); sit.Next()) {
 CompactLatticeWeight f = clat->Final(sit.Value());
......
@@ -114,7 +114,7 @@ int main(int argc, char *argv[]) {
 }
 num_done++;
 confidence = std::min(max_output, confidence); // disallow infinity.
-sum_neg_exp += exp(-confidence); // diagnostic.
+sum_neg_exp += Exp(-confidence); // diagnostic.
 confidence_writer.Write(key, confidence);
 }
 } else {
@@ -147,7 +147,7 @@ int main(int argc, char *argv[]) {
 }
 num_done++;
 confidence = std::min(max_output, confidence); // disallow infinity.
-sum_neg_exp += exp(-confidence); // diagnostic.
+sum_neg_exp += Exp(-confidence); // diagnostic.
 confidence_writer.Write(key, confidence);
 }
 }
@@ -157,7 +157,7 @@ int main(int argc, char *argv[]) {
 << num_empty << " were equivalent to the empty lattice.";
 if (num_done != 0)
 KALDI_LOG << "Average confidence (averaged in negative-log space) is "
-<< -log(sum_neg_exp / num_done);
+<< -Log(sum_neg_exp / num_done);
 if (num_same_sentence != 0) {
 KALDI_WARN << num_same_sentence << " lattices had the same sentence on "
......
@@ -22,6 +22,7 @@
 #include "lm/const-arpa-lm.h"
 #include "util/stl-utils.h"
 #include "util/text-utils.h"
+#include "base/kaldi-math.h"
 namespace kaldi {
@@ -396,8 +397,8 @@ void ConstArpaLmBuilder::Read(std::istream &is, bool binary) {
 KALDI_ASSERT(ConvertStringToReal(col[0], &logprob));
 KALDI_ASSERT(ConvertStringToReal(col[1 + cur_order], &backoff_logprob));
 if (natural_base_) {
-logprob *= log(10);
-backoff_logprob *= log(10);
+logprob *= Log(10.0f);
+backoff_logprob *= Log(10.0f);
 }
 // If <ngram_order_> is larger than 1, then we do not create LmState for
......
@@ -601,7 +601,7 @@ static void UnitTestSimpleForVec() { // testing some simple operaters on vector
 V1.CopyFromVec(V);
 V1.ApplyExp();
 Real a = V.LogSumExp();
-V2.Set(exp(V.LogSumExp()));
+V2.Set(Exp(V.LogSumExp()));
 V1.DivElements(V2);
 V2.CopyFromVec(V);
@@ -1144,7 +1144,7 @@ template<typename Real> static void UnitTestDeterminantSign() {
 // add in a scaling factor too.
 Real tmp = 1.0 + ((Rand() % 5) * 0.01);
-Real logdet_factor = dimM * log(tmp);
+Real logdet_factor = dimM * Log(tmp);
 N.Scale(tmp);
 S.Scale(tmp);
@@ -1422,7 +1422,7 @@ template<typename Real> static void UnitTestEig() {
 { // Check that the eigenvalues match up with the determinant.
 BaseFloat logdet_check = 0.0, logdet = M.LogDet();
 for (MatrixIndexT i = 0; i < dimM ; i++)
-logdet_check += 0.5 * log(real_eigs(i)*real_eigs(i) + imag_eigs(i)*imag_eigs(i));
+logdet_check += 0.5 * Log(real_eigs(i)*real_eigs(i) + imag_eigs(i)*imag_eigs(i));
 AssertEqual(logdet_check, logdet);
 }
 Matrix<Real> Pinv(P);
@@ -2305,9 +2305,9 @@ template<typename Real> static void UnitTestTanh() {
 for (int32 c = 0; c < dimN; c++) {
 Real x = N(r, c);
 if (x > 0.0) {
-x = -1.0 + 2.0 / (1.0 + exp(-2.0 * x));
+x = -1.0 + 2.0 / (1.0 + Exp(-2.0 * x));
 } else {
-x = 1.0 - 2.0 / (1.0 + exp(2.0 * x));
+x = 1.0 - 2.0 / (1.0 + Exp(2.0 * x));
 }
 N(r, c) = x;
 Real out_diff = P(r, c), in_diff = out_diff * (1.0 - x * x);
@@ -2331,7 +2331,7 @@ template<typename Real> static void UnitTestSigmoid() {
 for(int32 r = 0; r < dimM; r++) {
 for (int32 c = 0; c < dimN; c++) {
 Real x = N(r, c),
-y = 1.0 / (1 + exp(-x));
+y = 1.0 / (1 + Exp(-x));
 N(r, c) = y;
 Real out_diff = P(r, c), in_diff = out_diff * y * (1.0 - y);
 Q(r, c) = in_diff;
@@ -2356,7 +2356,7 @@ template<typename Real> static void UnitTestSoftHinge() {
 Real x = M(r, c);
 Real &y = N(r, c);
 if (x > 10.0) y = x;
-else y = log(1.0 + exp(x));
+else y = Log1p(Exp(x));
 }
 }
 O.SoftHinge(M);
@@ -2395,7 +2395,7 @@ template<typename Real> static void UnitTestSimple() {
 {
 Vector<Real> V2(V);
 for (MatrixIndexT i = 0; i < V2.Dim(); i++)
-V2(i) = exp(V2(i));
+V2(i) = Exp(V2(i));
 V.ApplyExp();
 AssertEqual(V, V2);
 }
@@ -2403,7 +2403,7 @@ template<typename Real> static void UnitTestSimple() {
 Matrix<Real> N2(N), N3(N);
 for (MatrixIndexT i = 0; i < N.NumRows(); i++)
 for (MatrixIndexT j = 0; j < N.NumCols(); j++)
-N2(i, j) = exp(N2(i, j));
+N2(i, j) = Exp(N2(i, j));
 N3.ApplyExp();
 AssertEqual(N2, N3);
 }
@@ -3121,7 +3121,7 @@ template<typename Real> static void UnitTestLbfgs() {
 Vector<Real> dlogf_dx(v); // derivative of log(f) w.r.t. x.
 dlogf_dx.AddSpVec(-1.0, S, x, 1.0);
 KALDI_VLOG(2) << "Gradient magnitude is " << dlogf_dx.Norm(2.0);
-Real f = exp(c * logf);
+Real f = Exp(c * logf);
 Vector<Real> df_dx(dlogf_dx);
 df_dx.Scale(f * c); // comes from derivative of the exponential function.
 f *= sign;
......
@@ -292,7 +292,6 @@ namespace nnet1 {
 AssertEqual(mat_in_diff, mat_in_diff_ref);
 delete c;
 }
 } // namespace nnet1
......
@@ -166,7 +166,7 @@ class Rbm : public RbmBase {
 for(int32 d = 0; d < p.Dim(); d++) {
 if(p(d) < 0.0001) p(d) = 0.0001;
 if(p(d) > 0.9999) p(d) = 0.9999;
-logit_p(d) = log(p(d)) - log(1.0 - p(d));
+logit_p(d) = Log(p(d)) - Log(1.0 - p(d));
 }
 vis_bias_ = logit_p;
 KALDI_ASSERT(vis_bias_.Dim() == InputDim());
......
@@ -199,7 +199,7 @@ void SoftmaxComponent::MixUp(int32 num_mixtures,
 rand.SetRandn();
 cur_vec.AddVec(perturb_stddev, rand);
 new_vec.AddVec(-perturb_stddev, rand);
-this_new_bias_term(max_index) += log(0.5);
+this_new_bias_term(max_index) += Log(0.5);
 this_new_bias_term(new_index) = this_new_bias_term(max_index);
 }
 old_offset += this_old_dim;
......
@@ -1032,7 +1032,7 @@ void LogSoftmaxComponent::Propagate(const ChunkInfo &in_info,
 out->ApplyLogSoftMaxPerRow(in);
 // Just to be consistent with SoftmaxComponent::Propagate()
-out->ApplyFloor(log(1.0e-20));
+out->ApplyFloor(Log(1.0e-20));
 }
 void LogSoftmaxComponent::Backprop(const ChunkInfo &in_info,
......
@@ -251,7 +251,7 @@ void NnetDiscriminativeUpdater::LatticeComputations() {
 num_floored++;
 }
 int32 pdf_id = requested_indexes[index].second;
-BaseFloat pseudo_loglike = log(post / priors(pdf_id)) * opts_.acoustic_scale;
+BaseFloat pseudo_loglike = Log(post / priors(pdf_id)) * opts_.acoustic_scale;
 KALDI_ASSERT(!KALDI_ISINF(pseudo_loglike) && !KALDI_ISNAN(pseudo_loglike));
 answers[index] = pseudo_loglike;
 }
......
@@ -125,7 +125,7 @@ BaseFloat NnetComputer::ComputeLastLayerDeriv(const Posterior &pdf_post,
 KALDI_ASSERT(label >= 0 && label < num_pdfs);
 BaseFloat this_prob = last_layer_output(i, label);
 KALDI_ASSERT(this_prob > 0.99e-20); // We floored to 1.0e-20 in SoftmaxLayer.
-tot_objf += weight * log(this_prob);
+tot_objf += weight * Log(this_prob);
 tot_weight += weight;
 (*deriv)(i, label) += weight / this_prob; // could be "=", assuming the
 // labels are all distinct.
......
@@ -115,7 +115,7 @@ void OnlinePreconditionerSimple::Init(const MatrixBase<double> &R0) {
 BaseFloat OnlinePreconditionerSimple::Eta(int32 N) const {
 KALDI_ASSERT(num_samples_history_ > 0.0);
-return 1.0 - exp(-N / num_samples_history_);
+return 1.0 - Exp(-N / num_samples_history_);
 }
......
@@ -389,7 +389,7 @@ void OnlinePreconditioner::PreconditionDirectionsInternal(
 BaseFloat OnlinePreconditioner::Eta(int32 N) const {
 KALDI_ASSERT(num_samples_history_ > 0.0);
-return 1.0 - exp(-N / num_samples_history_);
+return 1.0 - Exp(-N / num_samples_history_);
 }
 void OnlinePreconditioner::ComputeWt1(int32 N,
......
@@ -41,7 +41,7 @@ BaseFloat KlDivergence(const Vector<BaseFloat> &p,
 for (int32 i = 0; i < p.Dim(); i++) {
 BaseFloat p_prob = p(i) / sum_p, q_prob = q(i) / sum_q;
-ans += p_prob * log(p_prob / q_prob);
+ans += p_prob * Log(p_prob / q_prob);
 }
 return ans;
 }
......
@@ -161,7 +161,7 @@ int main(int argc, char *argv[]) {
 // Gets target geometric mean.
 BaseFloat target_geometric_mean = 0.0;
 if (average_learning_rate == 0.0) {
-target_geometric_mean = exp(cur_nnet_learning_rates.SumLog()
+target_geometric_mean = Exp(cur_nnet_learning_rates.SumLog()
 / static_cast<BaseFloat>(num_updatable));
 } else {
 target_geometric_mean = average_learning_rate;
@@ -177,7 +177,7 @@ int main(int argc, char *argv[]) {
 nnet_learning_rates(num_updatable - 1) *= last_layer_factor;
 KALDI_ASSERT(first_layer_factor > 0.0);
 nnet_learning_rates(0) *= first_layer_factor;
-BaseFloat cur_geometric_mean = exp(nnet_learning_rates.SumLog()
+BaseFloat cur_geometric_mean = Exp(nnet_learning_rates.SumLog()
 / static_cast<BaseFloat>(num_updatable));
 nnet_learning_rates.Scale(target_geometric_mean / cur_geometric_mean);
 KALDI_LOG << "New learning rates for current model per layer are "
......
@@ -62,6 +62,7 @@ int main(int argc, char *argv[]) try {
 << std::endl
 << "### - Check with NVidia web that your 'display driver' and 'CUDA toolkit' is not too old."
 << std::endl;
+static_cast<void>(e); //To avoid "unreferenced local variable"
 return 1;
 }