Commit e1f76237 authored by Jan Trmal

windows fixes -- resolving issues with incorrectly behaving expf/exp(float) under MSVC2013, plus some other improvements
parent db63ae29
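The changes below swap direct calls to log()/exp() for kaldi::Log()/kaldi::Exp() wrappers (note the new base/kaldi-math.h includes). The wrapper definitions themselves are not part of this diff; what follows is a minimal sketch of the idea, assuming the workaround routes float computations through the double-precision functions on the affected compiler:

```cpp
// A minimal sketch, NOT the actual Kaldi source: the real definitions live in
// base/kaldi-math.h, and the exact guards and overload set there may differ.
#include <cmath>

namespace kaldi {

#if defined(_MSC_VER) && _MSC_VER <= 1800  // MSVC2013 and earlier (assumed guard)
// Work around misbehaving expf()/exp(float): compute in double precision
// and narrow the result back to float.
inline float Exp(float x) { return static_cast<float>(std::exp(static_cast<double>(x))); }
inline float Log(float x) { return static_cast<float>(std::log(static_cast<double>(x))); }
#else
inline float Exp(float x) { return std::exp(x); }  // float overloads of std::exp/std::log
inline float Log(float x) { return std::log(x); }
#endif

inline double Exp(double x) { return std::exp(x); }
inline double Log(double x) { return std::log(x); }

}  // namespace kaldi
```

Centralizing the calls this way keeps the compiler-specific workaround in one header instead of at every call site.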
@@ -139,8 +139,8 @@ class PitchInterpolator {
     BaseFloat constant_prob = (1.0 - p_voicing_[t]) * opts_.interpolator_factor,
         specified_prob = p_voicing_[t] + constant_prob;
     // specified_prob adds in the extra probability mass at the observed pitch value.
-    BaseFloat log_constant_prob = log(constant_prob),
-        log_ratio = log(specified_prob / constant_prob);
+    BaseFloat log_constant_prob = Log(constant_prob),
+        log_ratio = Log(specified_prob / constant_prob);
     log_alpha_.Add(log_constant_prob);  // add log_constant_prob to all pitches at this time.
     log_alpha_(pitch_[t]) += log_ratio;  // corrects this to be like adding

@@ -32,7 +32,7 @@ void ProcessPovFeatures(Matrix<BaseFloat> *mat) {
   for (int32 i = 0; i < num_frames; i++) {
     BaseFloat p = (*mat)(i, 0);
     KALDI_ASSERT(p >= 0.0 && p <= 1.0);
-    (*mat)(i, 0) = log((p + 0.0001) / (1.0001 - p));
+    (*mat)(i, 0) = Log((p + 0.0001) / (1.0001 - p));
   }
 }
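For reference, the transform applied above is a padded log-odds (logit) mapping; the small constants keep it finite at the endpoints. This restates the code, it is not taken from the commit:

```latex
f(p) = \log \frac{p + 0.0001}{1.0001 - p}, \qquad p \in [0, 1],
\quad\text{so}\quad
f(0) = \log\tfrac{0.0001}{1.0001} \approx -9.2, \qquad
f(1) = \log\tfrac{1.0001}{0.0001} \approx +9.2 .
```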
@@ -40,7 +40,7 @@ void TakeLogOfPitch(Matrix<BaseFloat> *mat) {
   int32 num_frames = mat->NumRows();
   for (int32 i = 0; i < num_frames; i++) {
     KALDI_ASSERT((*mat)(i, 1) > 0.0);
-    (*mat)(i, 1) = log((*mat)(i, 1));
+    (*mat)(i, 1) = Log((*mat)(i, 1));
   }
 }

@@ -149,13 +149,15 @@ template<class IntType> class LatticeStringRepository {
   void ConvertToVector(const Entry *entry, vector<IntType> *out) const {
     size_t length = Size(entry);
     out->resize(length);
-    typename vector<IntType>::iterator iter = out->end() - 1;
+    if (entry != NULL) {
+      typename vector<IntType>::reverse_iterator iter = out->rbegin();
       while (entry != NULL) {
         *iter = entry->i;
         entry = entry->parent;
-        --iter;
+        ++iter;
       }
+    }
   }
   const Entry *ConvertFromVector(const vector<IntType> &vec) {
     const Entry *e = NULL;
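The ConvertToVector change fixes an invalid-iterator bug: for an empty string (entry == NULL), out has size zero, and the old out->end() - 1 formed an iterator preceding begin(), which checked-iterator builds (such as MSVC's debug STL) reject at runtime. A hypothetical standalone reduction of the fixed logic, with invented names:

```cpp
// Hypothetical reduction of the ConvertToVector fix (names invented).
#include <cstddef>
#include <vector>

struct Node { int i; const Node *parent; };

// Fills `out` back to front from a parent-linked list of known length.
void ToVector(const Node *entry, std::size_t length, std::vector<int> *out) {
  out->resize(length);
  if (entry != NULL) {  // rbegin() of an empty vector equals rend(); guard anyway
    std::vector<int>::reverse_iterator iter = out->rbegin();
    while (entry != NULL) {
      *iter = entry->i;
      entry = entry->parent;
      ++iter;  // walk toward the front of the vector
    }
  }
  // The old code initialized `iter = out->end() - 1` unconditionally; with
  // length == 0 that iterator precedes begin(), which MSVC's checked
  // iterators abort on even before any dereference.
}
```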
@@ -103,6 +103,9 @@ template<class Arc> void TestSafeDeterminizeWrapper() {  // also tests SafeDete
   int n_syms = 2 + kaldi::Rand() % 5, n_states = 3 + kaldi::Rand() % 10, n_arcs = 5 + kaldi::Rand() % 30, n_final = 1 + kaldi::Rand() % 3;  // Up to 2 unique symbols.
   cout << "Testing pre-determinize with " << n_syms << " symbols, " << n_states << " states and " << n_arcs << " arcs and " << n_final << " final states.\n";
   SymbolTable *sptr = new SymbolTable("my-symbol-table");
+  sptr->AddSymbol("<eps>");
+  delete sptr;
+  sptr = new SymbolTable("my-symbol-table");
   vector<Label> all_syms;  // including epsilon.
   // Put symbols in the symbol table from 1..n_syms-1.

@@ -20,6 +20,7 @@
 #include "fstext/push-special.h"
 #include "base/kaldi-error.h"
+#include "base/kaldi-math.h"

 namespace fst {
@@ -101,10 +102,10 @@ class PushSpecialClass {
                                 !aiter.Done(); aiter.Next()) {
         const Arc &arc = aiter.Value();
         StateId t = arc.nextstate;
-        double weight = exp(-arc.weight.Value());
+        double weight = kaldi::Exp(-arc.weight.Value());
         pred_[t].push_back(std::make_pair(s, weight));
       }
-      double final = exp(-fst_->Final(s).Value());
+      double final = kaldi::Exp(-fst_->Final(s).Value());
       if (final != 0.0)
         pred_[initial_state_].push_back(std::make_pair(s, final));
     }
@@ -121,9 +122,9 @@ class PushSpecialClass {
                                 !aiter.Done(); aiter.Next()) {
         const Arc &arc = aiter.Value();
         StateId t = arc.nextstate;
-        sum += exp(-arc.weight.Value()) * occ_[t] / occ_[s];
+        sum += kaldi::Exp(-arc.weight.Value()) * occ_[t] / occ_[s];
       }
-      sum += exp(-(fst_->Final(s).Value())) * occ_[initial_state_] / occ_[s];
+      sum += kaldi::Exp(-(fst_->Final(s).Value())) * occ_[initial_state_] / occ_[s];
       if (s == 0) {
         min_sum = sum;
         max_sum = sum;
@@ -133,7 +134,7 @@ class PushSpecialClass {
       }
     }
     KALDI_VLOG(4) << "min,max is " << min_sum << " " << max_sum;
-    return log(max_sum / min_sum);  // In the FST world we're actually
+    return kaldi::Log(max_sum / min_sum);  // In the FST world we're actually
     // dealing with logs, so the log of the ratio is more suitable
     // to compare with delta (makes testing the algorithm easier).
   }
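Restating what the loop above computes (this is read off the code, not a formula from the commit): with arc weights storing negated logs and occ(.) the state occupancies, each state's weighted outflow is

```latex
\sigma(s) \;=\; \sum_{s \xrightarrow{\,a\,} t} e^{-w(a)}\,\frac{\mathrm{occ}(t)}{\mathrm{occ}(s)}
\;+\; e^{-\mathrm{final}(s)}\,\frac{\mathrm{occ}(i)}{\mathrm{occ}(s)},
\qquad
\text{returned value} \;=\; \log\frac{\max_s \sigma(s)}{\min_s \sigma(s)},
```

where i is the initial state; the log of the max/min ratio measures how far the FST is from being stochastic in this weighted sense.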
@@ -187,7 +188,7 @@ class PushSpecialClass {
     // First get the potentials as negative-logs, like the values
     // in the FST.
     for (StateId s = 0; s < num_states_; s++) {
-      occ_[s] = -log(occ_[s]);
+      occ_[s] = -kaldi::Log(occ_[s]);
       if (KALDI_ISNAN(occ_[s]) || KALDI_ISINF(occ_[s]))
         KALDI_WARN << "NaN or inf found: " << occ_[s];
     }

@@ -21,6 +21,7 @@
 #define KALDI_FSTEXT_RESCALE_INL_H_
 #include <cstring>
 #include "base/kaldi-common.h"
+#include "base/kaldi-math.h"
 #include "util/stl-utils.h"
 #include "fstext/fstext-utils.h"
@@ -119,7 +120,7 @@ inline LogWeight RescaleToStochastic(MutableFst<LogArc> *fst,
     return Weight::One();  // can't rescale empty FST.
   // total weight).
-  Weight max = Weight(-log(2.0));
+  Weight max = Weight(-kaldi::Log(2.0));
   // upper_bound and lower_bound are in terms of weight.Value(),
   // in terms of weight they would have the reversed names.
@@ -132,6 +133,11 @@ inline LogWeight RescaleToStochastic(MutableFst<LogArc> *fst,
   Weight cur_rescale = Weight::One();
   Weight cur_tot;
   while (1) {
+    {
+      FstPrinter<LogArc> fstprinter(*fst, NULL, NULL, NULL, false, true);
+      fstprinter.Print(&std::cout, "standard output");
+    }
     cur_tot = ComputeTotalWeight(*fst, max, delta);
     std::cerr << "Current rescaling factor is " << cur_rescale << ", total is: " << cur_tot << '\n';
     if (cur_tot.Value() < Weight::One().Value()) {  // read as: cur_tot > 1.

@@ -20,11 +20,14 @@
 #include "fstext/rescale.h"
 #include "fstext/fstext-utils.h"
 #include "fstext/fst-test-utils.h"
+#include "base/kaldi-math.h"

 // Just check that it compiles, for now.

 namespace fst
 {
+using kaldi::Exp;
+using kaldi::Log;

 template<class Arc> void TestComputeTotalWeight() {
   typedef typename Arc::Weight Weight;
@@ -40,7 +43,7 @@ template<class Arc> void TestComputeTotalWeight() {
     fstprinter.Print(&std::cout, "standard output");
   }
-  Weight max(-log(2.0));
+  Weight max(-Log(2.0));
   Weight tot = ComputeTotalWeight(*fst, max);
   std::cout << "Total weight is: " << tot.Value() << '\n';
@@ -80,7 +83,7 @@ void TestRescaleToStochastic() {
   RescaleToStochastic(fst, diff);
   Weight tot = ShortestDistance(*fst),
-      tot2 = ComputeTotalWeight(*fst, Weight(-log(2.0)));
+      tot2 = ComputeTotalWeight(*fst, Weight(-Log(2.0)));
   std::cerr << " tot is " << tot << ", tot2 = " << tot2 << '\n';
   assert(ApproxEqual(tot2, Weight::One(), diff));

@@ -34,10 +34,10 @@ void InitRandomGmm(DiagGmm *gmm_in) {
   Vector<BaseFloat> weights(num_gauss);
   for (int32 i = 0; i < num_gauss; i++) {
     for (int32 j = 0; j < dim; j++) {
-      inv_vars(i, j) = exp(RandGauss() * (1.0 / (1 + j)));
+      inv_vars(i, j) = Exp(RandGauss() * (1.0 / (1 + j)));
       means(i, j) = RandGauss() * (1.0 / (1 + j));
     }
-    weights(i) = exp(RandGauss());
+    weights(i) = Exp(RandGauss());
   }
   weights.Scale(1.0 / weights.Sum());
   gmm.SetWeights(weights);
@@ -107,7 +107,7 @@ void UnitTestDiagGmm() {
     weights(m) = kaldi::RandUniform();
     for (size_t d = 0; d < dim; d++) {
       means(m, d) = kaldi::RandGauss();
-      vars(m, d) = exp(kaldi::RandGauss()) + 1e-5;
+      vars(m, d) = Exp(kaldi::RandGauss()) + 1e-5;
     }
     tot_weight += weights(m);
   }
@@ -116,10 +116,10 @@ void UnitTestDiagGmm() {
   for (size_t m = 0; m < nMix; m++) {
     weights(m) /= tot_weight;
     for (size_t d = 0; d < dim; d++) {
-      loglikes(m) += -0.5 * (M_LOG_2PI + log(vars(m, d)) + (feat(d) -
+      loglikes(m) += -0.5 * (M_LOG_2PI + Log(vars(m, d)) + (feat(d) -
           means(m, d)) * (feat(d) - means(m, d)) / vars(m, d));
     }
-    loglikes(m) += log(weights(m));
+    loglikes(m) += Log(weights(m));
   }
   loglike = loglikes.LogSumExp();
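For reference, the quantity assembled in this loop is the standard diagonal-covariance GMM log-likelihood (restated from the code above):

```latex
\log p(x) \;=\; \log \sum_m w_m \prod_d \mathcal{N}\!\left(x_d;\, \mu_{md}, \sigma^2_{md}\right)
\;=\; \operatorname*{LogSumExp}_m \Big( \log w_m
  \;-\; \tfrac{1}{2} \sum_d \big( \log 2\pi + \log \sigma^2_{md}
  + \tfrac{(x_d - \mu_{md})^2}{\sigma^2_{md}} \big) \Big).
```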
@@ -282,8 +282,8 @@ void UnitTestDiagGmm() {
   gmm1.ComputeGconsts();
   std::vector<std::pair<BaseFloat, const DiagGmm*> > vec;
-  vec.push_back(std::make_pair(0.4, (const DiagGmm*)(&gmm1)));
-  vec.push_back(std::make_pair(0.6, (const DiagGmm*)(&gmm1)));
+  vec.push_back(std::make_pair(static_cast<BaseFloat>(0.4), (const DiagGmm*)(&gmm1)));
+  vec.push_back(std::make_pair(static_cast<BaseFloat>(0.6), (const DiagGmm*)(&gmm1)));
   DiagGmm gmm2(vec);
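The static_cast<BaseFloat> additions matter when BaseFloat is float: std::make_pair(0.4, ...) deduces double for the first member, so storing the result goes through a narrowing pair conversion, which MSVC2013 appears to object to (at minimum with truncation warnings). A hypothetical reduction:

```cpp
// Hypothetical reduction, not from the commit.
#include <utility>
#include <vector>

typedef float BaseFloat;  // assumption: single-precision build

int main() {
  std::vector<std::pair<BaseFloat, const int*> > vec;
  static const int dummy = 0;
  // std::make_pair(0.4, &dummy) deduces std::pair<double, const int*>, so
  // storing it narrows double -> float via pair's converting constructor;
  // casting first makes the deduced type match the element type exactly.
  vec.push_back(std::make_pair(static_cast<BaseFloat>(0.4), &dummy));
  return 0;
}
```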
@@ -123,9 +123,9 @@ int32 DiagGmm::ComputeGconsts() {
   for (int32 mix = 0; mix < num_mix; mix++) {
     KALDI_ASSERT(weights_(mix) >= 0);  // Cannot have negative weights.
-    BaseFloat gc = log(weights_(mix)) + offset;  // May be -inf if weights == 0
+    BaseFloat gc = Log(weights_(mix)) + offset;  // May be -inf if weights == 0
     for (int32 d = 0; d < dim; d++) {
-      gc += 0.5 * log(inv_vars_(mix, d)) - 0.5 * means_invvars_(mix, d)
+      gc += 0.5 * Log(inv_vars_(mix, d)) - 0.5 * means_invvars_(mix, d)
           * means_invvars_(mix, d) / inv_vars_(mix, d);
     }
     // Change sign for logdet because var is inverted. Also, note that
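Restating what this loop accumulates (an assumption not visible in the excerpt: offset = -(D/2) log 2pi, the usual convention; note means_invvars_ stores mu/sigma^2, so the loop's means_invvars_^2 / inv_vars_ term equals mu^2/sigma^2):

```latex
\mathrm{gconst}_m \;=\; \log w_m \;-\; \tfrac{D}{2}\log 2\pi
\;+\; \tfrac{1}{2}\sum_{d=1}^{D}\log \sigma_{md}^{-2}
\;-\; \tfrac{1}{2}\sum_{d=1}^{D}\frac{\mu_{md}^2}{\sigma_{md}^2}.
```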
@@ -348,7 +348,7 @@ void DiagGmm::Merge(int32 target_components, std::vector<int32> *history) {
   for (int32 i = 0; i < num_comp; i++) {
     discarded_component[i] = false;
     for (int32 d = 0; d < dim; d++) {
-      logdet(i) += 0.5 * log(inv_vars_(i, d));  // +0.5 because var is inverted
+      logdet(i) += 0.5 * Log(inv_vars_(i, d));  // +0.5 because var is inverted
     }
   }
@@ -431,7 +431,7 @@ void DiagGmm::Merge(int32 target_components, std::vector<int32> *history) {
   // Update logdet for merged component
   logdet(max_i) = 0.0;
   for (int32 d = 0; d < dim; d++) {
-    logdet(max_i) += 0.5 * log(inv_vars_(max_i, d));
+    logdet(max_i) += 0.5 * Log(inv_vars_(max_i, d));
     // +0.5 because var is inverted
   }
@@ -488,7 +488,7 @@ BaseFloat DiagGmm::merged_components_logdet(BaseFloat w1, BaseFloat w2,
   tmp_var.AddVec2(-1.0, tmp_mean);
   BaseFloat merged_logdet = 0.0;
   for (int32 d = 0; d < dim; d++) {
-    merged_logdet -= 0.5 * log(tmp_var(d));
+    merged_logdet -= 0.5 * Log(tmp_var(d));
     // -0.5 because var is not inverted
   }
   return merged_logdet;
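For context, merged_components_logdet evaluates the log-determinant of the moment-matched merge of two diagonal Gaussians; the statistics prepared above with AddVec2 follow the standard formulas (restated here, with w1, w2 the component occupancies):

```latex
\mu \;=\; \frac{w_1\mu_1 + w_2\mu_2}{w_1 + w_2},\qquad
\sigma^2 \;=\; \frac{w_1(\sigma_1^2 + \mu_1^2) + w_2(\sigma_2^2 + \mu_2^2)}{w_1 + w_2} \;-\; \mu^2,\qquad
\text{merged\_logdet} \;=\; -\tfrac{1}{2}\sum_{d}\log \sigma_d^2 .
```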
@@ -43,7 +43,7 @@ void UnitTestEstimateMmieDiagGmm() {
   for (size_t m = 0; m < nMix; m++) {
     for (size_t d = 0; d < dim; d++) {
       means_f(m, d) = kaldi::RandGauss() * 100.0F;
-      vars_f(m, d) = exp(kaldi::RandGauss()) * 1000.0F + 1.0F;
+      vars_f(m, d) = Exp(kaldi::RandGauss()) * 1000.0F + 1.0F;
     }
     // std::cout << "Gauss " << m << ": Mean = " << means_f.Row(m) << '\n'
     //           << "Vars = " << vars_f.Row(m) << '\n';
@@ -104,7 +104,7 @@ void UnitTestEstimateMmieDiagGmm() {
   Matrix<BaseFloat> means(1, dim), vars(1, dim), invvars(1, dim);
   for (size_t d = 0; d < dim; d++) {
     means(0, d) = kaldi::RandGauss() * 100.0F;
-    vars(0, d) = exp(kaldi::RandGauss()) * 10.0F + 1e-5F;
+    vars(0, d) = Exp(kaldi::RandGauss()) * 10.0F + 1e-5F;
   }
   weights(0) = 1.0F;
   invvars.CopyFromMat(vars);

@@ -78,10 +78,10 @@ static bool EBWUpdateGaussian(
   int32 dim = orig_mean.Dim();
   for (int32 i = 0; i < dim; i++) {
     BaseFloat mean_diff = (*mean)(i) - orig_mean(i);
-    old_auxf += (occ + D) * -0.5 * (log(orig_var(i)) +
+    old_auxf += (occ + D) * -0.5 * (Log(orig_var(i)) +
                                     ((*var)(i) + mean_diff * mean_diff)
                                     / orig_var(i));
-    new_auxf += (occ + D) * -0.5 * (log((*var)(i)) + 1.0);
+    new_auxf += (occ + D) * -0.5 * (Log((*var)(i)) + 1.0);
   }
   *auxf_impr = new_auxf - old_auxf;
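The two auxf lines implement the per-dimension EBW (extended Baum-Welch) auxiliary function; restated from the code, with occ the Gaussian's occupancy, D the smoothing constant, and hats marking the updated statistics:

```latex
\mathcal{Q}(\mu,\sigma^2) \;=\; -\tfrac{1}{2}\,(occ + D)\Big(\log\sigma^2
  + \frac{\hat\sigma^2 + (\hat\mu-\mu)^2}{\sigma^2}\Big).
```

old_auxf evaluates this at the original parameters; at the updated parameters the bracket collapses to log(sigma-hat^2) + 1, which is exactly the new_auxf line.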
@@ -61,7 +61,7 @@ void init_rand_diag_gmm(DiagGmm *gmm) {
     weights(m) = kaldi::RandUniform();
     for (size_t d = 0; d < dim; d++) {
       means(m, d) = kaldi::RandGauss();
-      vars(m, d) = exp(kaldi::RandGauss()) + 1e-5;
+      vars(m, d) = Exp(kaldi::RandGauss()) + 1e-5;
     }
     tot_weight += weights(m);
   }
@@ -156,7 +156,7 @@ UnitTestFullGmm() {
         + VecSpVec(means.Row(m), invcovars[m], means.Row(m))
         + VecSpVec(feat, invcovars[m], feat))
         + VecSpVec(means.Row(m), invcovars[m], feat);
-    loglikes(m) += log(weights(m));
+    loglikes(m) += Log(weights(m));
   }
   loglike = loglikes.LogSumExp();

@@ -101,7 +101,7 @@ int32 FullGmm::ComputeGconsts() {
   for (int32 mix = 0; mix < num_mix; mix++) {
     KALDI_ASSERT(weights_(mix) >= 0);  // Cannot have negative weights.
-    BaseFloat gc = log(weights_(mix)) + offset;  // May be -inf if weights == 0
+    BaseFloat gc = Log(weights_(mix)) + offset;  // May be -inf if weights == 0
     SpMatrix<BaseFloat> covar(inv_covars_[mix]);
     covar.InvertDouble();
     BaseFloat logdet = covar.LogPosDefDet();
@@ -449,8 +449,8 @@ BaseFloat FullGmm::MergePreselect(int32 target_components,
        removed < num_comp - target_components && !queue.empty(); ) {
     QueueElem qelem = queue.top();
     queue.pop();
-    BaseFloat delta_log_like_old = qelem.first,
-        idx1 = qelem.second.first, idx2 = qelem.second.second;
+    BaseFloat delta_log_like_old = qelem.first;
+    int32 idx1 = qelem.second.first, idx2 = qelem.second.second;
     // the next 3 lines are to handle when components got merged
     // and moved to different indices, but we still want to consider
     // merging their descendants. [descendant = current index where

@@ -217,7 +217,7 @@ void DoRescalingUpdate(const AccumDiagGmm &old_ml_acc,
       double divergence =
           0.5 * (((new_model_mean - old_model_mean) * (new_model_mean - old_model_mean) +
                   new_model_var - old_model_var) / old_model_var +
-                 log(old_model_var / new_model_var));
+                 Log(old_model_var / new_model_var));
       if (divergence < 0.0)
         KALDI_WARN << "Negative divergence " << divergence;
       *tot_divergence += divergence * new_ml_count;
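The divergence accumulated here is the closed-form KL divergence between the new and old univariate Gaussians; rearranged, the expression above equals:

```latex
D_{\mathrm{KL}}\!\left(\mathcal{N}(\mu_{\mathrm{new}}, \sigma^2_{\mathrm{new}})
  \,\big\|\, \mathcal{N}(\mu_{\mathrm{old}}, \sigma^2_{\mathrm{old}})\right)
\;=\; \tfrac{1}{2}\left(
  \frac{(\mu_{\mathrm{new}} - \mu_{\mathrm{old}})^2 + \sigma^2_{\mathrm{new}} - \sigma^2_{\mathrm{old}}}{\sigma^2_{\mathrm{old}}}
  \;+\; \log\frac{\sigma^2_{\mathrm{old}}}{\sigma^2_{\mathrm{new}}}\right),
```

which is nonnegative for exact arithmetic, hence the warning when rounding produces a negative value.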
@@ -115,7 +115,7 @@ void UnitTestMleAmDiagGmm() {
   for (int32 m = 0; m < num_feat_comp; m++) {
     for (int32 d = 0; d < dim; d++) {
       means(m, d) = kaldi::RandGauss();
-      vars(m, d) = exp(kaldi::RandGauss()) + 1e-2;
+      vars(m, d) = Exp(kaldi::RandGauss()) + 1e-2;
     }
   }
   // Now generate random features with those means and variances.

@@ -215,7 +215,7 @@ UnitTestEstimateDiagGmm() {
   for (size_t m = 0; m < nMix; m++) {
     for (size_t d = 0; d < dim; d++) {
       means_f(m, d) = kaldi::RandGauss() * 100.0F;
-      vars_f(m, d) = exp(kaldi::RandGauss()) * 1000.0F + 1.0F;
+      vars_f(m, d) = Exp(kaldi::RandGauss()) * 1000.0F + 1.0F;
     }
     // std::cout << "Gauss " << m << ": Mean = " << means_f.Row(m) << '\n'
     //           << "Vars = " << vars_f.Row(m) << '\n';
@@ -259,7 +259,7 @@ UnitTestEstimateDiagGmm() {
   Matrix<BaseFloat> means(1, dim), vars(1, dim), invvars(1, dim);
   for (size_t d = 0; d < dim; d++) {
     means(0, d) = kaldi::RandGauss() * 100.0F;
-    vars(0, d) = exp(kaldi::RandGauss()) * 10.0F + 1e-5F;
+    vars(0, d) = Exp(kaldi::RandGauss()) * 10.0F + 1e-5F;
   }
   weights(0) = 1.0F;
   invvars.CopyFromMat(vars);

@@ -132,7 +132,7 @@ BaseFloat GetLogLikeTest(const FullGmm &gmm,
   for (int32 i = 0; i < gmm.NumGauss(); i++) {
     BaseFloat logdet = -(inv_covars[i].LogPosDefDet());
-    BaseFloat log_like = log(gmm.weights()(i))
+    BaseFloat log_like = Log(gmm.weights()(i))
         - 0.5 * (gmm.Dim() * M_LOG_2PI + logdet);
     Vector<BaseFloat> offset(feats);
     offset.AddVec(-1.0, means.Row(i));

@@ -93,10 +93,10 @@ void InitRandDiagGmm(int32 dim, int32 num_comp, DiagGmm *gmm) {
   Matrix<BaseFloat> means(num_comp, dim), inv_vars(num_comp, dim);
   for (int32 m = 0; m < num_comp; m++) {
-    weights(m) = exp(RandGauss());
+    weights(m) = Exp(RandGauss());
     for (int32 d = 0; d < dim; d++) {
       means(m, d) = RandGauss() / (1 + d);
-      inv_vars(m, d) = exp(RandGauss() / (1 + d)) + 1e-2;
+      inv_vars(m, d) = Exp(RandGauss() / (1 + d)) + 1e-2;
     }
   }
   weights.Scale(1.0 / weights.Sum());

@@ -125,7 +125,7 @@ fst::VectorFst<fst::StdArc> *GetHmmAsFst(
         // no pdf, hence non-estimated probability.
         // [would not happen with normal topology] . There is no transition-state
         // involved in this case.
-        log_prob = log(entry[hmm_state].transitions[trans_idx].second);
+        log_prob = Log(entry[hmm_state].transitions[trans_idx].second);
         label = 0;
       } else {  // normal probability.
         int32 trans_state =
@@ -225,7 +225,7 @@ GetHmmAsFstSimple(std::vector<int32> phone_window,
         // [would not happen with normal topology] . There is no transition-state
         // involved in this case.
         KALDI_ASSERT(!is_self_loop);
-        log_prob = log(entry[hmm_state].transitions[trans_idx].second);
+        log_prob = Log(entry[hmm_state].transitions[trans_idx].second);
         label = 0;
       } else {  // normal probability.
         int32 trans_state =

@@ -110,7 +110,7 @@ void TransitionModel::InitializeProbs() {
           "probability [should remove that entry in the topology]";
     if (prob > 1.0)
       KALDI_WARN << "TransitionModel::InitializeProbs, prob greater than one.";
-    log_probs_(trans_id) = log(prob);
+    log_probs_(trans_id) = Log(prob);
   }
   ComputeDerivedOfProbs();
 }
@@ -260,13 +260,13 @@ void TransitionModel::ComputeDerivedOfProbs() {
     if (tid == 0) {  // no self-loop
       non_self_loop_log_probs_(tstate) = 0.0;  // log(1.0)
     } else {
-      BaseFloat self_loop_prob = exp(GetTransitionLogProb(tid)),
+      BaseFloat self_loop_prob = Exp(GetTransitionLogProb(tid)),
           non_self_loop_prob = 1.0 - self_loop_prob;
       if (non_self_loop_prob <= 0.0) {
         KALDI_WARN << "ComputeDerivedOfProbs(): non-self-loop prob is " << non_self_loop_prob;
         non_self_loop_prob = 1.0e-10;  // just so we can continue...
       }
-      non_self_loop_log_probs_(tstate) = log(non_self_loop_prob);  // will be negative.
+      non_self_loop_log_probs_(tstate) = Log(non_self_loop_prob);  // will be negative.
     }
   }
 }
@@ -318,7 +318,7 @@ void TransitionModel::Write(std::ostream &os, bool binary) const {
 }

 BaseFloat TransitionModel::GetTransitionProb(int32 trans_id) const {
-  return exp(log_probs_(trans_id));
+  return Exp(log_probs_(trans_id));
 }

 BaseFloat TransitionModel::GetTransitionLogProb(int32 trans_id) const {
@@ -376,14 +376,14 @@ void TransitionModel::MleUpdate(const Vector<double> &stats,
       // Compute objf change
       for (int32 tidx = 0; tidx < n; tidx++) {
         if (new_probs(tidx) == cfg.floor) num_floored++;
-        double objf_change = counts(tidx) * (log(new_probs(tidx))
-                                             - log(old_probs(tidx)));
+        double objf_change = counts(tidx) * (Log(new_probs(tidx))
+                                             - Log(old_probs(tidx)));
         objf_impr_sum += objf_change;
       }
       // Commit updated values.
       for (int32 tidx = 0; tidx < n; tidx++) {
         int32 tid = PairToTransitionId(tstate, tidx);
-        log_probs_(tid) = log(new_probs(tidx));
+        log_probs_(tid) = Log(new_probs(tidx));
         if (log_probs_(tid) - log_probs_(tid) != 0.0)
           KALDI_ERR << "Log probs is inf or NaN: error in update or bad stats?";
       }
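In all four update routines touched below and above (MleUpdate, MapUpdate, and their shared variants), the objective-function improvement has the same form; restated from the code, with c the occupation counts and p, p-hat the old and floored/renormalized new transition probabilities:

```latex
\Delta \mathcal{F} \;=\; \sum_{\mathrm{tidx}} c(\mathrm{tidx})
\left( \log \hat{p}(\mathrm{tidx}) - \log p(\mathrm{tidx}) \right).
```

The odd-looking test `log_probs_(tid) - log_probs_(tid) != 0.0` is a NaN/infinity check: x - x is zero for every finite x and NaN (hence nonzero in comparison) otherwise.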
@@ -435,14 +435,14 @@ void TransitionModel::MapUpdate(const Vector<double> &stats,
           (cfg.tau + tstate_tot);
       // Compute objf change
       for (int32 tidx = 0; tidx < n; tidx++) {
-        double objf_change = counts(tidx) * (log(new_probs(tidx))
-                                             - log(old_probs(tidx)));
+        double objf_change = counts(tidx) * (Log(new_probs(tidx))
+                                             - Log(old_probs(tidx)));
         objf_impr_sum += objf_change;
       }
       // Commit updated values.
       for (int32 tidx = 0; tidx < n; tidx++) {
         int32 tid = PairToTransitionId(tstate, tidx);
-        log_probs_(tid) = log(new_probs(tidx));
+        log_probs_(tid) = Log(new_probs(tidx));
         if (log_probs_(tid) - log_probs_(tid) != 0.0)
           KALDI_ERR << "Log probs is inf or NaN: error in update or bad stats?";
       }
@@ -524,8 +524,8 @@ void TransitionModel::MleUpdateShared(const Vector<double> &stats,
       // Compute objf change
       for (int32 tidx = 0; tidx < n; tidx++) {
         if (new_probs(tidx) == cfg.floor) num_floored++;
-        double objf_change = counts(tidx) * (log(new_probs(tidx))
-                                             - log(old_probs(tidx)));
+        double objf_change = counts(tidx) * (Log(new_probs(tidx))
+                                             - Log(old_probs(tidx)));
         objf_impr_sum += objf_change;
       }
       // Commit updated values.
@@ -535,7 +535,7 @@ void TransitionModel::MleUpdateShared(const Vector<double> &stats,
         int32 tstate = *iter;
         for (int32 tidx = 0; tidx < n; tidx++) {
           int32 tid = PairToTransitionId(tstate, tidx);
-          log_probs_(tid) = log(new_probs(tidx));
+          log_probs_(tid) = Log(new_probs(tidx));
           if (log_probs_(tid) - log_probs_(tid) != 0.0)
             KALDI_ERR << "Log probs is inf or NaN: error in update or bad stats?";
         }
@@ -612,8 +612,8 @@ void TransitionModel::MapUpdateShared(const Vector<double> &stats,
           (pdf_tot + cfg.tau);
       // Compute objf change
       for (int32 tidx = 0; tidx < n; tidx++) {
-        double objf_change = counts(tidx) * (log(new_probs(tidx))
-                                             - log(old_probs(tidx)));
+        double objf_change = counts(tidx) * (Log(new_probs(tidx))
+                                             - Log(old_probs(tidx)));
         objf_impr_sum += objf_change;
       }
       // Commit updated values.
@@ -623,7 +623,7 @@ void TransitionModel::MapUpdateShared(const Vector<double> &stats,
         int32 tstate = *iter;
         for (int32 tidx = 0; tidx < n; tidx++) {
           int32 tid = PairToTransitionId(tstate, tidx);
-          log_probs_(tid) = log(new_probs(tidx));
+          log_probs_(tid) = Log(new_probs(tidx));
           if (log_probs_(tid) - log_probs_(tid) != 0.0)
             KALDI_ERR << "Log probs is inf or NaN: error in update or bad stats?";
         }