# CC4.0

Caffe for Windows: a simple Caffe C++ interface that makes it easy and convenient to dig deeper into deep-learning research. https://github.com/dlunion/CC4.0

# Features

1. Only one header file and one dependency (libcaffe.lib) are needed.
2. Training and inference pipelines are easy to write in pure C++.
3. Custom layers are easy to define (no need to rebuild Caffe or modify caffe.proto; just write code), including custom data layers, so training is efficient even without LMDB.
4. Custom layers can be debugged during training to inspect intermediate results.
5. Supports variable-length LSTM OCR (example included) and makes SSD much easier to get training.
6. With 4.0, any new network architecture is easy to implement.
7. Custom layers let you watch the training results as they evolve, which makes it much clearer what the CNN is doing and how well it is learning, instead of training blindly.
8. For the API, see [cc.h](https://github.com/dlunion/CC4.0/blob/master/include/caffe/cc/core/cc.h) and [cc_utils.h](https://github.com/dlunion/CC4.0/blob/master/include/caffe/cc/core/cc_utils.h).

# Building

Build environment: VS2013<br/>
CUDA version: 8.0<br/>
cuDNN version: 5.0<br/>

Download the archives linked under the `3rd` directory and extract them; once CUDA 8.0 is installed you can build. If you would rather not build it yourself, download the prebuilt library below: the package includes the CUDA 8.0 download link and the download links or files for all required tools, so you can open the solution in Visual Studio and build directly. Select the ReleaseDLL configuration when building.

# Downloading the prebuilt library, examples, and data

VS2013 is recommended. The downloaded archive comes with the environment already configured and bundles the OpenCV 2.4.10 static libraries.

<del>[CC4.0.3.rar - Baidu Netdisk](https://pan.baidu.com/s/1OQDmxWwVpVohER2YMqGbZQ)</del>: the dependencies inside are usable, but the headers and libcaffe.dll are not (they contain a few bugs); a rebuilt package is pending upload.

# Examples

It is very easy to implement your own data layer, loss layer, and so on in C++, including fully custom data input. Define the layer in the prototxt as follows (the `param_str` is parsed by your own code in `setup()`; a sketch follows the training example below):

``` protobuf
layer {
  name: "data"
  type: "CPP"
  top: "data"
  top: "label"
  include {
    phase: TRAIN
  }
  cpp_param {
    type: "LstmDataLayer"
    param_str: "batch_size: 16; width: 150; height: 60; num: 6"
  }
}
```

# Training in C++:

``` c++
#include <cc_utils.h>
#pragma comment(lib, "libcaffe.lib")

//define my LstmDataLayer
class LstmDataLayer : public DataLayer{
public:
    SETUP_LAYERFUNC(LstmDataLayer);

    virtual int getBatchCacheSize(){
        return 3;
    }

    //fill one batch: top[0] is the image blob, top[1] the label blob
    virtual void loadBatch(Blob** top, int numTop){
        Blob* image = top[0];
        Blob* label = top[1];
        float* image_ptr = image->mutable_cpu_data();
        float* label_ptr = label->mutable_cpu_data();

        int batch_size = image->num();
        int w = image->width();
        int h = image->height();
        for (int i = 0; i < batch_size; ++i){
            //...
        }
    }

    virtual void setup(const char* name, const char* type, const char* param_str, int phase,
                       Blob** bottom, int numBottom, Blob** top, int numTop){
        //...
    }
};

int main(){
    installRegister();

    //register LstmDataLayer
    INSTALL_LAYER(LstmDataLayer);

    WPtr<Solver> solver = loadSolverFromPrototxt("solver-gpu.prototxt");
    //solver->Restore("models/blstmctc_iter_12111.solverstate");
    solver->Solve();
    return 0;
}
```
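The `setup()` callback receives `param_str` exactly as written in the prototxt (here `"batch_size: 16; width: 150; height: 60; num: 6"`), and CC4.0 leaves parsing it to your layer code. Below is a minimal sketch of one way to read integers out of that semicolon-separated format; `readIntParam` is purely illustrative and not part of the CC4.0 API:

``` c++
#include <cstdio>
#include <cstring>
#include <string>

//illustrative helper (not part of CC4.0): read an integer "key: value"
//entry from a param_str like "batch_size: 16; width: 150; height: 60; num: 6"
static int readIntParam(const std::string& param_str, const char* key, int defaultValue){
    size_t pos = param_str.find(key);
    if (pos == std::string::npos)
        return defaultValue;

    int value = defaultValue;
    //skip past the key, then match ": <integer>"
    sscanf(param_str.c_str() + pos + strlen(key), ": %d", &value);
    return value;
}

//inside LstmDataLayer::setup one could then write, for example:
//int batch_size = readIntParam(param_str, "batch_size", 16);
//int width      = readIntParam(param_str, "width", 150);
//int height     = readIntParam(param_str, "height", 60);
```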
# Forward pass

``` c++
void test(){
    //... im is the input image (cv::Mat), prepared in the elided code

    WPtr<Net> net = loadNetFromPrototxt("deploy.prototxt");
    net->copyTrainedParamFromFile("models/blstmctc_iter_6044.caffemodel");

    //scale the image into [-1, 1]
    im.convertTo(im, CV_32F, 1 / 127.5, -1);

    Blob* input = net->input_blob(0);
    input->Reshape(1, 3, im.rows, im.cols);
    net->Reshape();

    //wrap each channel plane of the input blob in a Mat header,
    //then split the image directly into the blob's memory
    Mat ms[3];
    float* ptr = input->mutable_cpu_data();
    for (int i = 0; i < 3; ++i){
        ms[i] = Mat(input->height(), input->width(), CV_32F, ptr);
        ptr += input->width() * input->height();
    }
    split(im, ms);

    net->Forward();
    Blob* out = net->output_blob(0);
    //... out holds the result
}
```

# One-step SSD training

A sketch of extending `getAnnDatum()` to iterate over a real dataset list follows the prototxt below.

``` c++
#include <cc_utils.h>
using namespace cc;

class SSDMyDataLayer : public SSDDataLayer{
public:
    SETUP_LAYERFUNC(SSDMyDataLayer);

    SSDMyDataLayer(){
        this->datum_ = createAnnDatum();
        this->label_map_ = loadLabelMap("labelmap_voc.prototxt");
    }

    virtual ~SSDMyDataLayer(){
        releaseAnnDatum(this->datum_);
    }

    virtual int getBatchCacheSize(){
        return 3;
    }

    //return one annotated sample (image + bounding-box annotation)
    virtual void* getAnnDatum(){
        if (!loadAnnDatum("00001.jpg", "00001.xml", 0, 0, 0, 0, true, "jpg", "xml",
                          this->label_map_, this->datum_)){
            printf("failed to load.\n");
            exit(0);
        }
        return this->datum_;
    }

    virtual void releaseAnnDatum(void* datum){
    }

private:
    void* datum_;
    void* label_map_;
};

int main(){
    installRegister();
    INSTALL_LAYER(SSDMyDataLayer);

    WPtr<Solver> solver = loadSolverFromPrototxt("solver.prototxt");
    solver->net()->copyTrainedParamFromFile("VGG_ILSVRC_16_layers_fc_reduced.caffemodel");
    //solver->Restore("models/blstmctc_iter_12111.solverstate");
    solver->Solve();
    return 0;
}
```

# The data layer in SSD's train.prototxt:

``` protobuf
layer {
  name: "data"
  type: "CPP"
  top: "data"
  top: "label"
  include {
    phase: TRAIN
  }
  cpp_param {
    type: "SSDMyDataLayer"
  }
  transform_param {
    mirror: true
    mean_value: 104
    mean_value: 117
    mean_value: 123
    resize_param {
      prob: 1
      resize_mode: WARP
      height: 300
      width: 300
    }
    emit_constraint {
      emit_type: CENTER
    }
  }
  #... refer to the data layer of the standard SSD prototxt; mainly type and cpp_param were changed
}
```
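The `getAnnDatum()` above hard-codes a single `00001.jpg`/`00001.xml` pair, which is only enough to smoke-test the layer; a real data layer would cycle through the whole dataset. Here is a minimal sketch of that idea, reusing the `loadAnnDatum` call exactly as shown above; the `trainval.txt` list file (one `image xml` pair per line) is a hypothetical format of this sketch, not something CC4.0 requires:

``` c++
#include <cc_utils.h>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <string>
#include <utility>
#include <vector>
using namespace cc;

class SSDListDataLayer : public SSDDataLayer{
public:
    SETUP_LAYERFUNC(SSDListDataLayer);

    SSDListDataLayer(){
        cursor_ = 0;
        this->datum_ = createAnnDatum();
        this->label_map_ = loadLabelMap("labelmap_voc.prototxt");

        //hypothetical list file: one "image.jpg annotation.xml" pair per line
        std::ifstream list("trainval.txt");
        std::string image, xml;
        while (list >> image >> xml)
            samples_.push_back(std::make_pair(image, xml));
    }

    virtual int getBatchCacheSize(){
        return 3;
    }

    //cycle through the dataset, one annotated sample per call
    virtual void* getAnnDatum(){
        const std::pair<std::string, std::string>& s = samples_[cursor_++ % samples_.size()];
        if (!loadAnnDatum(s.first.c_str(), s.second.c_str(), 0, 0, 0, 0, true, "jpg", "xml",
                          this->label_map_, this->datum_)){
            printf("failed to load %s.\n", s.first.c_str());
            exit(0);
        }
        return this->datum_;
    }

    virtual void releaseAnnDatum(void* datum){
    }

private:
    void* datum_;
    void* label_map_;
    size_t cursor_;
    std::vector<std::pair<std::string, std::string> > samples_;
};
```

A production layer would also shuffle `samples_` once per epoch rather than always replaying the list in file order.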