Commit 7afae3f8 authored by nichongjia

google style code

parent e9439dd3
@@ -120,7 +120,7 @@ Component* Component::NewComponentOfType(ComponentType comp_type,
       ans = new LstmProjectedStreams(input_dim, output_dim);
       break;
     case Component::kBLstmProjectedStreams :
       ans = new BLstmProjectedStreams(input_dim, output_dim);
       break;
     case Component::kSoftmax :
       ans = new Softmax(input_dim, output_dim);
...
@@ -32,7 +32,7 @@ namespace nnet1 {
 Nnet::Nnet(const Nnet& other) {
   // copy the components
-  for(int32 i=0; i<other.NumComponents(); i++) {
+  for(int32 i = 0; i < other.NumComponents(); i++) {
     components_.push_back(other.GetComponent(i).Copy());
   }
   // create empty buffers
@@ -40,13 +40,13 @@ Nnet::Nnet(const Nnet& other) {
   backpropagate_buf_.resize(NumComponents()+1);
   // copy train opts
   SetTrainOptions(other.opts_);
   Check();
 }

 Nnet & Nnet::operator = (const Nnet& other) {
   Destroy();
   // copy the components
-  for(int32 i=0; i<other.NumComponents(); i++) {
+  for(int32 i = 0; i < other.NumComponents(); i++) {
     components_.push_back(other.GetComponent(i).Copy());
   }
   // create empty buffers
...
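Both the copy constructor and the assignment operator in this hunk deep-copy the network: every component is cloned through its virtual Copy(), so two Nnet objects never share layer storage. A minimal sketch of that clone-through-virtual-method idiom, using hypothetical stand-in types (Layer and Net are illustrative, not the real Kaldi classes):

// Sketch of the deep-copy idiom used by Nnet above; Layer and Net are
// hypothetical stand-ins for Kaldi's Component and Nnet.
#include <cstddef>
#include <vector>

struct Layer {
  virtual ~Layer() {}
  virtual Layer* Copy() const = 0;  // each concrete layer clones itself
};

struct Affine : Layer {
  std::vector<float> weights;
  Layer* Copy() const override { return new Affine(*this); }
};

class Net {
 public:
  Net() {}
  Net(const Net& other) {
    for (std::size_t i = 0; i < other.layers_.size(); i++)
      layers_.push_back(other.layers_[i]->Copy());  // clone, never alias
  }
  // Assignment would follow the same destroy-then-clone pattern as
  // Nnet::operator= above; omitted here for brevity.
  ~Net() {
    for (std::size_t i = 0; i < layers_.size(); i++) delete layers_[i];
  }
 private:
  std::vector<Layer*> layers_;
};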
@@ -36,23 +36,23 @@ namespace nnet1 {
 class Nnet {
  public:
   Nnet() {}
   Nnet(const Nnet& other);  // Copy constructor.
   Nnet &operator = (const Nnet& other);  // Assignment operator.
   ~Nnet();

  public:
   /// Perform forward pass through the network
   void Propagate(const CuMatrixBase<BaseFloat> &in, CuMatrix<BaseFloat> *out);
   /// Perform backward pass through the network
   void Backpropagate(const CuMatrixBase<BaseFloat> &out_diff, CuMatrix<BaseFloat> *in_diff);
   /// Perform forward pass through the network, don't keep buffers (use it when not training)
   void Feedforward(const CuMatrixBase<BaseFloat> &in, CuMatrix<BaseFloat> *out);

   /// Dimensionality on network input (input feature dim.)
   int32 InputDim() const;
   /// Dimensionality of network outputs (posteriors | bn-features | etc.)
   int32 OutputDim() const;

   /// Returns number of components-- think of this as similar to # of layers, but
   /// e.g. the nonlinearity and the linear part count as separate components,
@@ -65,7 +65,7 @@ class Nnet {
   /// Sets the c'th component to "component", taking ownership of the pointer
   /// and deleting the corresponding one that we own.
   void SetComponent(int32 c, Component *component);

   /// Appends this component to the components already in the neural net.
   /// Takes ownership of the pointer
   void AppendComponent(Component *dynamically_allocated_comp);
@@ -77,12 +77,12 @@ class Nnet {
   void RemoveLastComponent() { RemoveComponent(NumComponents()-1); }

   /// Access to forward pass buffers
   const std::vector<CuMatrix<BaseFloat> >& PropagateBuffer() const {
     return propagate_buf_;
   }
   /// Access to backward pass buffers
   const std::vector<CuMatrix<BaseFloat> >& BackpropagateBuffer() const {
     return backpropagate_buf_;
   }

   /// Get the number of parameters in the network
@@ -96,7 +96,7 @@ class Nnet {
   /// Get the gradient stored in the network
   void GetGradient(Vector<BaseFloat>* grad_copy) const;

   /// Set the dropout rate
   void SetDropoutRetention(BaseFloat r);

   /// Reset streams in LSTM multi-stream training,
   void ResetLstmStreams(const std::vector<int32> &stream_reset_flag);
@@ -107,14 +107,14 @@ class Nnet {
   /// Initialize MLP from config
   void Init(const std::string &config_file);
   /// Read the MLP from file (can add layers to existing instance of Nnet)
   void Read(const std::string &file);
   /// Read the MLP from stream (can add layers to existing instance of Nnet)
   void Read(std::istream &in, bool binary);
   /// Write MLP to file
   void Write(const std::string &file, bool binary) const;
   /// Write MLP to stream
   void Write(std::ostream &out, bool binary) const;

   /// Create string with human readable description of the nnet
   std::string Info() const;
   /// Create string with per-component gradient statistics
@@ -138,18 +138,17 @@ class Nnet {
  private:
   /// Vector which contains all the components composing the neural network,
   /// the components are for example: AffineTransform, Sigmoid, Softmax
   std::vector<Component*> components_;

   std::vector<CuMatrix<BaseFloat> > propagate_buf_;  ///< buffers for forward pass
   std::vector<CuMatrix<BaseFloat> > backpropagate_buf_;  ///< buffers for backward pass

   /// Option class with hyper-parameters passed to UpdatableComponent(s)
   NnetTrainOptions opts_;
 };

 }  // namespace nnet1
 }  // namespace kaldi

 #endif  // KALDI_NNET_NNET_NNET_H_
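To make the interface above concrete, here is a hedged usage sketch of how a decoding-style client drives this class: load a model, check dimensions, and run the buffer-free forward pass. It assumes the standard kaldi::nnet1 headers and types; error handling and GPU selection are omitted, and this snippet is not part of the commit.

// Hedged usage sketch of the nnet1::Nnet interface declared above.
#include <string>
#include "nnet/nnet-nnet.h"
#include "cudamatrix/cu-matrix.h"

void ForwardOneUtterance(const std::string &model_file,
                         const kaldi::CuMatrix<kaldi::BaseFloat> &feats,
                         kaldi::CuMatrix<kaldi::BaseFloat> *out) {
  using namespace kaldi;
  using namespace kaldi::nnet1;
  Nnet nnet;
  nnet.Read(model_file);  // load a trained model from disk
  KALDI_ASSERT(feats.NumCols() == nnet.InputDim());
  // Feedforward() skips the per-component buffers kept by Propagate(),
  // so it is the right call outside of training.
  nnet.Feedforward(feats, out);  // out: frames x OutputDim()
}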
 // nnetbin/nnet-train-blstm-parallel.cc

 // Copyright 2015  Chongjia Ni

 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 //  http://www.apache.org/licenses/LICENSE-2.0
 //
 // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
@@ -27,8 +27,8 @@
 int main(int argc, char *argv[]) {
   using namespace kaldi;
   using namespace kaldi::nnet1;
   typedef kaldi::int32 int32;

   try {
     const char *usage =
         "Perform one iteration of senones training by SGD.\n"
@@ -39,11 +39,11 @@ int main(int argc, char *argv[]) {
       " nnet-train-blstm-streams scp:feature.scp ark:labels.ark nnet.init nnet.iter1\n";

     ParseOptions po(usage);
-    NnetTrainOptions trn_opts;  // training options
+    // training options
+    NnetTrainOptions trn_opts;
     trn_opts.Register(&po);

     bool binary = true,
       crossvalidate = false;
     po.Register("binary", &binary, "Write model in binary mode");
     po.Register("cross-validate", &crossvalidate, "Perform cross-validation (no backpropagation)");
@@ -53,7 +53,7 @@ int main(int argc, char *argv[]) {
     int32 length_tolerance = 5;
     po.Register("length-tolerance", &length_tolerance, "Allowed length difference of features/targets (frames)");

     std::string frame_weights;
     po.Register("frame-weights", &frame_weights, "Per-frame weights to scale gradients (frame selection/weighting).");
@@ -66,11 +66,11 @@ int main(int argc, char *argv[]) {
     double frame_limit = 100000;
     po.Register("frame-limit", &frame_limit, "Max number of frames to be processed");

-    int32 report_step=100;
+    int32 report_step = 100;
     po.Register("report-step", &report_step, "Step (number of sequences) for status reporting");

-    std::string use_gpu="yes";
+    std::string use_gpu = "yes";
     // po.Register("use-gpu", &use_gpu, "yes|no|optional, only has effect if compiled with CUDA");

     po.Read(argc, argv);
@@ -82,7 +82,7 @@ int main(int argc, char *argv[]) {
     std::string feature_rspecifier = po.GetArg(1),
       targets_rspecifier = po.GetArg(2),
       model_filename = po.GetArg(3);

     std::string target_model_filename;
     if (!crossvalidate) {
       target_model_filename = po.GetArg(4);
@@ -92,13 +92,13 @@ int main(int argc, char *argv[]) {
     using namespace kaldi::nnet1;
     typedef kaldi::int32 int32;

     Vector<BaseFloat> weights;

-    //Select the GPU
-#if HAVE_CUDA==1
+    // Select the GPU
+#if HAVE_CUDA == 1
     CuDevice::Instantiate().SelectGpuId(use_gpu);
 #endif

     Nnet nnet_transf;
-    if(feature_transform != "") {
+    if ( feature_transform != "" ) {
       nnet_transf.Read(feature_transform);
     }
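The #if HAVE_CUDA == 1 guard in this hunk is how Kaldi binaries stay buildable without a GPU: CuDevice exists only in CUDA builds, so device selection is compiled out otherwise. A minimal sketch of the same pattern, where MyGpuContext is a hypothetical stand-in for CuDevice and HAVE_CUDA is assumed to come from the build system:

// Hedged sketch of the compile-time GPU guard pattern used above.
#include <string>
#if HAVE_CUDA == 1
#include "my-gpu-context.h"  // hypothetical header, not Kaldi's
#endif

void SelectDevice(const std::string &use_gpu) {
#if HAVE_CUDA == 1
  // "yes" demands a GPU, "optional" falls back to CPU, "no" skips it.
  MyGpuContext::Instance().Select(use_gpu);
#else
  (void)use_gpu;  // CPU-only build: nothing to select
#endif
}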
@@ -115,17 +115,18 @@ int main(int argc, char *argv[]) {
     if (frame_weights != "") {
       weights_reader.Open(frame_weights);
     }

     Xent xent;
     Mse mse;

     CuMatrix<BaseFloat> feats, feats_transf, nnet_out, obj_diff;

     Timer time;
     KALDI_LOG << (crossvalidate?"CROSS-VALIDATION":"TRAINING") << " STARTED";

-    std::vector< Matrix<BaseFloat> > feats_utt(num_streams);  // Feature matrix of every utterance
-    std::vector< Posterior > labels_utt(num_streams);  // Label vector of every utterance
+    // Feature matrix of every utterance
+    std::vector< Matrix<BaseFloat> > feats_utt(num_streams);
+    // Label vector of every utterance
+    std::vector< Posterior > labels_utt(num_streams);
     std::vector< Vector<BaseFloat> > weights_utt(num_streams);

     int32 feat_dim = nnet.InputDim();
@@ -134,7 +135,7 @@ int main(int argc, char *argv[]) {
     while (1) {

       std::vector<int32> frame_num_utt;
       int32 sequence_index = 0, max_frame_num = 0;

       for ( ; !feature_reader.Done(); feature_reader.Next()) {
         std::string utt = feature_reader.Key();
@@ -150,7 +151,7 @@ int main(int argc, char *argv[]) {
         if (frame_weights != "") {
           weights = weights_reader.Value(utt);
         } else {  // all per-frame weights are 1.0
           weights.Resize(mat.NumRows());
           weights.Set(1.0);
         }
@@ -162,13 +163,13 @@ int main(int argc, char *argv[]) {
         lenght.push_back(targets.size());
         lenght.push_back(weights.Dim());
         // find min, max
-        int32 min = *std::min_element(lenght.begin(),lenght.end());
-        int32 max = *std::max_element(lenght.begin(),lenght.end());
+        int32 min = *std::min_element(lenght.begin(), lenght.end());
+        int32 max = *std::max_element(lenght.begin(), lenght.end());
         // fix or drop ?
         if (max - min < length_tolerance) {
-          if(mat.NumRows() != min) mat.Resize(min, mat.NumCols(), kCopyData);
-          if(targets.size() != min) targets.resize(min);
-          if(weights.Dim() != min) weights.Resize(min, kCopyData);
+          if (mat.NumRows() != min) mat.Resize(min, mat.NumCols(), kCopyData);
+          if (targets.size() != min) targets.resize(min);
+          if (weights.Dim() != min) weights.Resize(min, kCopyData);
         } else {
           KALDI_WARN << utt << ", length mismatch of targets " << targets.size()
                      << " and features " << mat.NumRows();
@@ -191,7 +192,7 @@ int main(int argc, char *argv[]) {
         }
       }

       int32 cur_sequence_num = frame_num_utt.size();

       // Create the final feature matrix. Every utterance is padded to the max length within this group of utterances
       Matrix<BaseFloat> feat_mat_host(cur_sequence_num * max_frame_num, feat_dim, kSetZero);
       Posterior target_host;
@@ -200,26 +201,25 @@ int main(int argc, char *argv[]) {
       target_host.resize(cur_sequence_num * max_frame_num);
       weight_host.Resize(cur_sequence_num * max_frame_num, kSetZero);

-      ///
       for (int s = 0; s < cur_sequence_num; s++) {
         Matrix<BaseFloat> mat_tmp = feats_utt[s];
         for (int r = 0; r < frame_num_utt[s]; r++) {
           feat_mat_host.Row(r*cur_sequence_num + s).CopyFromVec(mat_tmp.Row(r));
         }
       }

-      ///
       for (int s = 0; s < cur_sequence_num; s++) {
         Posterior target_tmp = labels_utt[s];
         for (int r = 0; r < frame_num_utt[s]; r++) {
           target_host[r*cur_sequence_num+s] = target_tmp[r];
         }
         Vector<BaseFloat> weight_tmp = weights_utt[s];
         for (int r = 0; r < frame_num_utt[s]; r++) {
           weight_host(r*cur_sequence_num+s) = weight_tmp(r);
         }
       }

-      ////create
+      // transform feature
       nnet_transf.Feedforward(CuMatrix<BaseFloat>(feat_mat_host), &feats_transf);

       // Set the original lengths of utterances before padding
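The two copy loops above lay the minibatch out frame-major across streams: row r*cur_sequence_num + s of the padded matrix holds frame r of utterance s, and rows beyond an utterance's true length stay zero (their frame weights stay zero too, so padding contributes nothing to the gradient). A small self-contained illustration of that index mapping, with made-up dimensions and no Kaldi dependencies:

// Self-contained illustration of the stream-interleaved padding layout
// built above; the lengths here are invented for the example.
#include <cstdio>
#include <vector>

int main() {
  const int num_streams = 3;
  const std::vector<int> frame_num_utt = {4, 2, 3};  // true lengths
  const int max_frame_num = 4;                       // padded length

  // One slot per (frame, stream) pair; -1 marks zero padding.
  std::vector<int> owner(num_streams * max_frame_num, -1);
  for (int s = 0; s < num_streams; s++)
    for (int r = 0; r < frame_num_utt[s]; r++)
      owner[r * num_streams + s] = s;  // same mapping as feat_mat_host.Row(...)

  // Each block of num_streams rows interleaves one frame from every stream.
  for (int r = 0; r < max_frame_num; r++) {
    for (int s = 0; s < num_streams; s++)
      std::printf("%3d", owner[r * num_streams + s]);
    std::printf("   <- frame %d of each stream (-1 = padding)\n", r);
  }
  return 0;
}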
@@ -230,21 +230,21 @@ int main(int argc, char *argv[]) {
       if (objective_function == "xent") {
         // gradients re-scaled by weights in Eval,
         xent.Eval(weight_host, nnet_out, target_host, &obj_diff);
       } else if (objective_function == "mse") {
         // gradients re-scaled by weights in Eval,
         mse.Eval(weight_host, nnet_out, target_host, &obj_diff);
       } else {
         KALDI_ERR << "Unknown objective function code : " << objective_function;
       }

       // Backward pass
       if (!crossvalidate) {
         nnet.Backpropagate(obj_diff, NULL);
       }

       // 1st minibatch : show what happens in network
       if (kaldi::g_kaldi_verbose_level >= 2 && total_frames == 0) {  // vlog-1
         KALDI_VLOG(1) << "### After " << total_frames << " frames,";
         KALDI_VLOG(1) << nnet.InfoPropagate();
         if (!crossvalidate) {
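For intuition on the "gradients re-scaled by weights in Eval" comments: with per-frame weights w_t, the weighted cross-entropy objective is sum_t w_t * (-log p_t(y_t)), so frames with w_t = 0 (e.g. the padding rows built earlier) drop out of both the loss and the gradient. A hedged numeric sketch of that weighting, independent of Kaldi's Xent class:

// Hedged sketch of weighted cross-entropy over a few frames; this mirrors
// the role of the frame weights above but is not Kaldi's Xent::Eval.
#include <cmath>
#include <cstdio>
#include <vector>

int main() {
  // Posterior of the correct label at each frame, and that frame's weight.
  std::vector<double> p = {0.9, 0.5, 0.8, 0.7};
  std::vector<double> w = {1.0, 1.0, 1.0, 0.0};  // last frame is padding

  double loss = 0.0, frames = 0.0;
  for (std::size_t t = 0; t < p.size(); t++) {
    loss += w[t] * -std::log(p[t]);  // zero-weight frames contribute nothing
    frames += w[t];
  }
  std::printf("xent per frame: %.4f over %.0f weighted frames\n",
              loss / frames, frames);
  return 0;
}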
@@ -252,15 +252,15 @@ int main(int argc, char *argv[]) {
           KALDI_VLOG(1) << nnet.InfoGradient();
         }
       }

       num_done += cur_sequence_num;
       total_frames += feats_transf.NumRows();

       if (feature_reader.Done()) break;  // end loop of while(1)
     }

     // Check network parameters and gradients when training finishes
     if (kaldi::g_kaldi_verbose_level >= 1) {  // vlog-1
       KALDI_VLOG(1) << "### After " << total_frames << " frames,";
       KALDI_VLOG(1) << nnet.InfoPropagate();
       if (!crossvalidate) {
@@ -278,10 +278,10 @@ int main(int argc, char *argv[]) {
               << " with other errors. "
               << "[" << (crossvalidate?"CROSS-VALIDATION":"TRAINING")
               << ", " << time.Elapsed()/60 << " min, fps" << total_frames/time.Elapsed()
               << "]";

     KALDI_LOG << xent.Report();

-#if HAVE_CUDA==1
+#if HAVE_CUDA == 1
     CuDevice::Instantiate().PrintProfile();
 #endif
...