http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/134c891a/src/neuralnet/optional_layer.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/optional_layer.cc b/src/neuralnet/optional_layer.cc
index 097751d..a51258b 100644
--- a/src/neuralnet/optional_layer.cc
+++ b/src/neuralnet/optional_layer.cc
@@ -1,114 +1,112 @@
-#ifdef USE_LMDB
 #include "neuralnet/optional_layer.h"
+
 namespace singa {
+#ifdef USE_LMDB
 /*********************LMDBDataLayer**********************************/
+LMDBDataLayer::~LMDBDataLayer() {
+  mdb_cursor_close(mdb_cursor_);
+  mdb_txn_abort(mdb_txn_);
+  mdb_cursor_ = nullptr;
+}
+
+void LMDBDataLayer::Setup(const LayerProto& proto, int npartitions) {
+  Layer::Setup(proto, npartitions);
+  OpenLMDB(proto.lmdbdata_conf().path());
+  CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, MDB_NEXT),
+           MDB_SUCCESS);
+  mdb_cursor_close(mdb_cursor_);
+  mdb_txn_abort(mdb_txn_);
+  mdb_cursor_ = nullptr;
+  CaffeDatum datum;
+  datum.ParseFromArray(mdb_value_.mv_data, mdb_value_.mv_size);
+  SingleLabelImageRecord* record = sample_.mutable_image();
+  ConvertCaffeDatumToRecord(datum, record);
+  batchsize_ = proto.lmdbdata_conf().batchsize();
+  if (partition_dim() == 0)
+    batchsize_ /= npartitions;
+  records_.resize(batchsize_);
+  random_skip_ = proto.lmdbdata_conf().random_skip();
+}
+
+void LMDBDataLayer::OpenLMDB(const std::string& path) {
+  CHECK_EQ(mdb_env_create(&mdb_env_), MDB_SUCCESS) << "mdb_env_create failed";
+  CHECK_EQ(mdb_env_set_mapsize(mdb_env_, 1099511627776), MDB_SUCCESS);  // 1TB
+  CHECK_EQ(mdb_env_open(mdb_env_, path.c_str(),
+           MDB_RDONLY, 0664), MDB_SUCCESS) << "cannot open lmdb " << path;
+  CHECK_EQ(mdb_txn_begin(mdb_env_, NULL, MDB_RDONLY, &mdb_txn_), MDB_SUCCESS)
+    << "mdb_txn_begin failed";
+  CHECK_EQ(mdb_open(mdb_txn_, NULL, 0, &mdb_dbi_), MDB_SUCCESS)
+    << "mdb_open failed";
+  CHECK_EQ(mdb_cursor_open(mdb_txn_, mdb_dbi_, &mdb_cursor_), MDB_SUCCESS)
+    << "mdb_cursor_open failed";
+  LOG(INFO) << "Opening lmdb " << path;
+  CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, MDB_FIRST),
+           MDB_SUCCESS) << "mdb_cursor_get failed";
+}
+
 void LMDBDataLayer::ComputeFeature(Phase phase, Metric* perf) {
   if (mdb_cursor_ == nullptr)
     OpenLMDB(layer_proto_.lmdbdata_conf().path());
-  if(random_skip_){
+  if (random_skip_) {
     int nskip = rand() % random_skip_;
-    int n=0;
+    int n = 0;
     CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_,
-          &mdb_value_, MDB_FIRST), MDB_SUCCESS);
+             &mdb_value_, MDB_FIRST), MDB_SUCCESS);
     while (mdb_cursor_get(mdb_cursor_, &mdb_key_,
-          &mdb_value_, MDB_NEXT) == MDB_SUCCESS)
+           &mdb_value_, MDB_NEXT) == MDB_SUCCESS)
       n++;
-    LOG(INFO)<<"Random Skip "<<nskip<<" records of total "<<n<<"records";
+    LOG(INFO) << "Random Skip " << nskip << " records of total "
+              << n << "records";
     // We have reached the end. Restart from the first.
     CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_,
-          &mdb_value_, MDB_FIRST), MDB_SUCCESS);
-    for(int i=0;i<nskip;i++){
+             &mdb_value_, MDB_FIRST), MDB_SUCCESS);
+    for (int i = 0; i < nskip; i++) {
       if (mdb_cursor_get(mdb_cursor_, &mdb_key_,
-            &mdb_value_, MDB_NEXT) != MDB_SUCCESS) {
+          &mdb_value_, MDB_NEXT) != MDB_SUCCESS) {
        // We have reached the end. Restart from the first.
        DLOG(INFO) << "Restarting data prefetching from start.";
        CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_,
-            &mdb_value_, MDB_FIRST), MDB_SUCCESS);
+              &mdb_value_, MDB_FIRST), MDB_SUCCESS);
       }
     }
-    random_skip_=0;
+    random_skip_ = 0;
   }
   CaffeDatum datum;
-  for(auto& record: records_){
-    SingleLabelImageRecord* image=record.mutable_image();
+  for (auto& record : records_) {
+    SingleLabelImageRecord* image = record.mutable_image();
     CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_,
-          &mdb_value_, MDB_GET_CURRENT), MDB_SUCCESS);
+             &mdb_value_, MDB_GET_CURRENT), MDB_SUCCESS);
     datum.ParseFromArray(mdb_value_.mv_data, mdb_value_.mv_size);
     ConvertCaffeDatumToRecord(datum, image);
     if (mdb_cursor_get(mdb_cursor_, &mdb_key_,
-          &mdb_value_, MDB_NEXT) != MDB_SUCCESS) {
+        &mdb_value_, MDB_NEXT) != MDB_SUCCESS) {
       // We have reached the end. Restart from the first.
       DLOG(INFO) << "Restarting data prefetching from start.";
       CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_,
-            &mdb_value_, MDB_FIRST), MDB_SUCCESS);
+               &mdb_value_, MDB_FIRST), MDB_SUCCESS);
     }
   }
 }
 
 void LMDBDataLayer::ConvertCaffeDatumToRecord(const CaffeDatum& datum,
-    SingleLabelImageRecord* record){
+    SingleLabelImageRecord* record) {
   record->set_label(datum.label());
   record->clear_shape();
-  if(datum.has_channels())
+  if (datum.has_channels())
     record->add_shape(datum.channels());
-  if(datum.has_height())
+  if (datum.has_height())
     record->add_shape(datum.height());
-  if(datum.has_width())
+  if (datum.has_width())
     record->add_shape(datum.width());
-  if(datum.has_data())
+  if (datum.has_data())
     record->set_pixel(datum.data());
-  if(datum.float_data_size()){
+  if (datum.float_data_size()) {
     record->clear_data();
-    for(float x: datum.float_data())
+    for (float x : datum.float_data())
       record->add_data(x);
   }
 }
-
-void LMDBDataLayer::OpenLMDB(const std::string& path) {
-  CHECK_EQ(mdb_env_create(&mdb_env_), MDB_SUCCESS) << "mdb_env_create failed";
-  CHECK_EQ(mdb_env_set_mapsize(mdb_env_, 1099511627776), MDB_SUCCESS); // 1TB
-  CHECK_EQ(mdb_env_open(mdb_env_, path.c_str(),
-      MDB_RDONLY, 0664), MDB_SUCCESS) << "cannot open lmdb " << path;
-  CHECK_EQ(mdb_txn_begin(mdb_env_, NULL, MDB_RDONLY, &mdb_txn_), MDB_SUCCESS)
-    << "mdb_txn_begin failed";
-  CHECK_EQ(mdb_open(mdb_txn_, NULL, 0, &mdb_dbi_), MDB_SUCCESS)
-    << "mdb_open failed";
-  CHECK_EQ(mdb_cursor_open(mdb_txn_, mdb_dbi_, &mdb_cursor_), MDB_SUCCESS)
-    << "mdb_cursor_open failed";
-  LOG(INFO) << "Opening lmdb " << path;
-  CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, MDB_FIRST),
-      MDB_SUCCESS) << "mdb_cursor_get failed";
-}
-
-void LMDBDataLayer::Setup(const LayerProto& proto, int npartitions) {
-  Layer::Setup(proto, npartitions);
-  OpenLMDB(proto.lmdbdata_conf().path());
-  CHECK_EQ(mdb_cursor_get(mdb_cursor_, &mdb_key_, &mdb_value_, MDB_NEXT),
-      MDB_SUCCESS);
-  mdb_cursor_close(mdb_cursor_);
-  mdb_txn_abort(mdb_txn_);
-  mdb_cursor_ = nullptr;
-
-  CaffeDatum datum;
-  datum.ParseFromArray(mdb_value_.mv_data, mdb_value_.mv_size);
-  SingleLabelImageRecord* record=sample_.mutable_image();
-  ConvertCaffeDatumToRecord(datum, record);
-
-  batchsize_ = proto.lmdbdata_conf().batchsize();
-  if(partition_dim() == 0)
-    batchsize_ /= npartitions;
-  records_.resize(batchsize_);
-  random_skip_=proto.lmdbdata_conf().random_skip();
-}
-
-LMDBDataLayer::~LMDBDataLayer() {
-  mdb_cursor_close(mdb_cursor_);
-  mdb_txn_abort(mdb_txn_);
-  mdb_cursor_ = nullptr;
-}
-
-} /* singa */
-
 #endif
+} // namespace singa
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/134c891a/src/proto/job.proto
----------------------------------------------------------------------
diff --git a/src/proto/job.proto b/src/proto/job.proto
index 1c79aea..bea5234 100644
--- a/src/proto/job.proto
+++ b/src/proto/job.proto
@@ -523,7 +523,6 @@ enum LayerType {
   kRGBImage = 10;
   // Neuron layers
   //  - Feature transformation
-  kConcate = 2;
   kConvolution = 1;
   kDropout = 4;
   kInnerProduct = 5;
@@ -538,10 +537,11 @@ enum LayerType {
   //  - Compute objective loss
   kSoftmaxLoss = 11;
   kEuclideanLoss = 25;
-  // Other layers
+  // Connection layers
   //  - Connect layers when neural net is partitioned
   kBridgeDst = 16;
   kBridgeSrc = 15;
+  kConcate = 2;
   kSlice = 12;
   kSplit = 13;
 
@@ -556,6 +556,7 @@ enum PartitionType {
 }
 
 enum Phase {
+  kUnknown = 0;
   kTrain = 1;
   kValidation = 2;
   kTest= 4;

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/134c891a/src/trainer/worker.cc
----------------------------------------------------------------------
diff --git a/src/trainer/worker.cc b/src/trainer/worker.cc
index a22a8ef..4137230 100644
--- a/src/trainer/worker.cc
+++ b/src/trainer/worker.cc
@@ -361,7 +361,7 @@ void BPWorker::Backward(int step, shared_ptr<NeuralNet> net) {
     if(layer->is_bridgesrclayer()) {
       // ReceiveBlobs(false, true, layer, net);
     }
-    layer->ComputeGradient(kTrain | kBackward);
+    layer->ComputeGradient(kTrain | kBackward, nullptr);
     if (DisplayDebugInfo(step))
       LOG(INFO) << layer->DebugString(step, kTrain | kBackward);
     for (Param* p : layer->GetParams())
@@ -398,7 +398,7 @@ void CDWorker::TrainOneBatch(int step, Metric* perf) {
     }
   }
   for (auto* layer : layers) {
-    layer->ComputeGradient(kTrain, nullptr);
+    layer->ComputeGradient(kTrain, nullptr);
     for (Param* p : layer->GetParams()) {
       Update(p, step);
     }
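The worker.cc call sites above combine phases with bitwise OR (kTrain | kBackward), which works because the Phase values in job.proto are distinct powers of two (kTrain = 1, kValidation = 2, kTest = 4); the new kUnknown = 0 gives the enum a neutral "no phase" value. Below is a small hypothetical sketch of that flag convention; the kBackward = 8 value is assumed for illustration and does not appear in this diff.

// Sketch only: how power-of-two Phase values compose as bit flags.
#include <cstdio>

enum Phase {
  kUnknown = 0,     // neutral value: no flags set
  kTrain = 1,       // matches the diff
  kValidation = 2,  // matches the diff
  kTest = 4,        // matches the diff
  kBackward = 8,    // assumed value, for illustration only
};

int main() {
  int phase = kTrain | kBackward;  // compose two phases into one flag word
  if (phase & kTrain)
    printf("kTrain is set\n");
  if ((phase & kTest) == 0)
    printf("kTest is not set\n");
  if (phase == kUnknown)
    printf("no phase set\n");  // not reached here
  return 0;
}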