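The listing below is a self-contained C++ implementation of a small fully connected network built directly on Eigen matrices: sigmoid activations, mean-squared-error loss with optional L2 regularization, and mini-batch gradient descent. As a reading aid, the quantities it maintains follow the standard backpropagation rules. In the notation below (introduced here for explanation only, not taken from the original post), $W_l$, $b_l$, $a_l$ and $\delta_l$ correspond to `train_weights`, `train_bias`, `feedforword_a` and `error_term`, $\eta$ to `learning_rate`, $\lambda$ to `lambda`, $m$ to `mini_batch_size`, and $a_0 = x$ is the input batch:

$$
\begin{aligned}
a_l &= \sigma(W_l a_{l-1} + b_l), \qquad \sigma(z) = \frac{1}{1 + e^{-z}},\\
\delta_L &= -(y - a_L) \odot a_L \odot (1 - a_L), \qquad
\delta_l = \left(W_{l+1}^{\top} \delta_{l+1}\right) \odot a_l \odot (1 - a_l),\\
W_l &\leftarrow W_l - \eta \left(\tfrac{1}{m}\, \delta_l a_{l-1}^{\top} + \lambda W_l\right), \qquad
b_l \leftarrow b_l - \eta\, \tfrac{1}{m} \sum_{i=1}^{m} \delta_l^{(i)},
\end{aligned}
$$

where $\odot$ is the element-wise product and $\delta_l^{(i)}$ is the $i$-th column of $\delta_l$ (one column per sample in the mini-batch).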
```
#include <iostream>
#include <vector>
#include <cstdlib>      // rand(), RAND_MAX
#include <cmath>        // std::abs, used in evaluate()
#include <Eigen/Dense>

class NeuralNetwork
{
public:
    NeuralNetwork(std::vector<int> _architecture
        , const Eigen::MatrixXd _train_dataX
        , const Eigen::MatrixXd _train_LabelY
        , double _learning_rate = 0.5
        , int _mini_batch_size = 30
        , int _iteration_size = 1000
        , double _lambda = 0.0);
    // Run training
    void train();
    // Compute the output for a single sample
    Eigen::MatrixXd predict(const Eigen::MatrixXd& _input);
    // Evaluate the model; labels are one-hot encoded by default
    double evaluate(const Eigen::MatrixXd& _test_dataX, const Eigen::MatrixXd& _test_dataY, bool one_hot = true);

private:
    // _z is the linear combination of the previous layer's outputs: [z1, z2, z3, ...], where e.g. z1 belongs to one sample
    Eigen::MatrixXd sigmoid(const Eigen::MatrixXd& _z);
    // Gradient of the activation function; _a is the activation output
    Eigen::MatrixXd sigmoid_grad(const Eigen::MatrixXd& _a);
    // Loss function
    double loss(const Eigen::MatrixXd& pre_y, const Eigen::MatrixXd& ori_y, int m);
    // Forward pass; _x is the sample matrix [x1, x2, x3, ...], where e.g. x1 is one sample
    Eigen::MatrixXd feedforword(const Eigen::MatrixXd& _x);
    // Backward pass; _x are the training samples, _y the corresponding target outputs
    void backforward(const Eigen::MatrixXd& _x, const Eigen::MatrixXd& _y);
    // Row index of the largest entry of a column vector
    int argmax(const Eigen::MatrixXd& _y);
    // Return a matrix consisting of _m copies of the column vector _bias
    Eigen::MatrixXd replicate(const Eigen::MatrixXd& _bias, int _m);

private:
    std::vector<int> architecture;                   // Network structure: (4, 4, 1) means an input layer (4 neurons, matching the input dimension),
                                                     // one hidden layer (4 neurons) and one output layer (1 neuron)
    const Eigen::MatrixXd train_dataX;               // Training data (n, m): m training samples, each an n-dimensional vector
    const Eigen::MatrixXd train_dataY;               // Training labels
    std::vector<Eigen::MatrixXd> train_weights;      // Weights
    std::vector<Eigen::MatrixXd> train_weights_grad; // Weight gradients
    std::vector<Eigen::MatrixXd> train_bias;         // Biases
    std::vector<Eigen::MatrixXd> train_bias_grad;    // Bias gradients
    std::vector<Eigen::MatrixXd> feedforword_a;      // Intermediate activations from the forward pass
    std::vector<Eigen::MatrixXd> error_term;         // Error terms (residuals)
    std::vector<Eigen::MatrixXd> predict_a;          // Intermediate activations used when predicting a single sample
    double learning_rate;                            // Learning rate for gradient descent
    double lambda;                                   // Regularization parameter (against overfitting)
    int mini_batch_size;                             // Number of samples per mini-batch
    int iteration_size;                              // Number of iterations
};

NeuralNetwork::NeuralNetwork(std::vector<int> _architecture
    , const Eigen::MatrixXd _train_dataX
    , const Eigen::MatrixXd _train_LabelY
    , double _learning_rate
    , int _mini_batch_size
    , int _iteration_size
    , double _lambda)
    : architecture(_architecture)
    , train_dataX(_train_dataX)
    , train_dataY(_train_LabelY)
    , learning_rate(_learning_rate)
    , mini_batch_size(_mini_batch_size)
    , iteration_size(_iteration_size)
    , lambda(_lambda)
{
    // Build the network
    for (int i = 1; i < architecture.size(); ++i)
    {
        // Initialize the weights with small random values
        Eigen::MatrixXd w(architecture[i], architecture[i - 1]);
        w.setRandom();
        train_weights.push_back(w);
        // Initialize the weight gradients
        Eigen::MatrixXd wd(architecture[i], architecture[i - 1]);
        wd.setZero();
        train_weights_grad.push_back(wd);
        // Initialize the biases with random values
        Eigen::MatrixXd b(architecture[i], 1);
        b.setRandom();
        train_bias.push_back(b);
        // Initialize the bias gradients
        Eigen::MatrixXd bd(architecture[i], mini_batch_size);
        bd.setZero();
        train_bias_grad.push_back(bd);
        // Initialize the activations
        Eigen::MatrixXd a(architecture[i], mini_batch_size);
        a.setZero();
        feedforword_a.push_back(a);
        // Initialize the error terms
        Eigen::MatrixXd e(architecture[i], mini_batch_size);
        e.setZero();
        error_term.push_back(e);
        // Initialize the intermediate prediction values
        Eigen::MatrixXd pa(architecture[i], 1);
        pa.setZero();
        predict_a.push_back(pa);
    }// for
}// end

// Forward pass; _x is the sample matrix [x1, x2, x3, ...], where e.g. x1 is one sample
Eigen::MatrixXd NeuralNetwork::feedforword(const Eigen::MatrixXd& _x)
{
    for (int i = 0; i < feedforword_a.size(); ++i)
    {
        if (i == 0) // the input is the sample batch itself
        {
            // the bias must have as many columns as there are samples
            feedforword_a.at(i) = sigmoid(train_weights.at(i) * _x + replicate(train_bias.at(i), mini_batch_size));
        }// if
        else
        {
            feedforword_a.at(i) = sigmoid(train_weights.at(i) * feedforword_a.at(i - 1) + replicate(train_bias.at(i), mini_batch_size));
        }// else
    }// for
    return feedforword_a.at(feedforword_a.size() - 1);
}// end

// _z is the linear combination of the previous layer's outputs: [z1, z2, z3, ...]; _z.array() means element-wise operations
Eigen::MatrixXd NeuralNetwork::sigmoid(const Eigen::MatrixXd& _z)
{
    return 1.0 / (1.0 + (-_z.array()).exp());
}// end

// Return a matrix consisting of _m copies of the column vector _bias
Eigen::MatrixXd NeuralNetwork::replicate(const Eigen::MatrixXd& _bias, int _m)
{
    Eigen::MatrixXd ret_bias(_bias.rows(), _m);
    for (int i = 0; i < _m; ++i)
    {
        ret_bias.col(i) = _bias;
    }
    return ret_bias;
}

// Backward pass; _x are the training samples, _y the corresponding target outputs
void NeuralNetwork::backforward(const Eigen::MatrixXd& _x, const Eigen::MatrixXd& _y)
{
    // 1. Compute the activations of layer 2 through the last layer
    feedforword(_x);
    // Work backwards, layer by layer
    for (int i = error_term.size() - 1; i >= 0; --i)
    {
        // 2. Compute the error terms backwards
        if (i == error_term.size() - 1) // output layer
        {
            error_term.at(i) = -(_y.array() - feedforword_a.at(i).array()) * sigmoid_grad(feedforword_a.at(i)).array();
        }// if
        else
        {
            error_term.at(i) = (train_weights.at(i + 1).transpose() * error_term.at(i + 1)).array() * sigmoid_grad(feedforword_a.at(i)).array();
        }// else
        // 3. Gradient computation; the result has mini_batch_size columns and is summed up once the mini-batch is processed
        train_bias_grad.at(i) = error_term.at(i);
        if (i > 0)
            train_weights_grad.at(i) = error_term.at(i) * feedforword_a.at(i - 1).transpose();
        else
            train_weights_grad.at(i) = error_term.at(i) * _x.transpose();
    }// for
}// end

// Gradient of the activation function; _a is the activation output
Eigen::MatrixXd NeuralNetwork::sigmoid_grad(const Eigen::MatrixXd& _a)
{
    return _a.array() * (1.0 - _a.array());
}// end

// Run training
void NeuralNetwork::train()
{
    std::cout << "training..." << std::endl;
    for (int i = 0; i < train_weights.size(); ++i)
    {
        std::cout << "train_weights: " << train_weights.at(i) << std::endl;
        std::cout << "train_bias: " << train_bias.at(i) << std::endl;
    }// for
    // Mini-batch gradient descent iterations
    for (int i = 0; i < iteration_size; ++i)
    {
        for (int k = 0; k < train_dataX.cols() - mini_batch_size; k += mini_batch_size)
        {
            // Take a slice of mini_batch_size samples
            Eigen::MatrixXd mini_train_x = train_dataX.middleCols(k, mini_batch_size);
            Eigen::MatrixXd mini_train_y = train_dataY.middleCols(k, mini_batch_size);
            // Compute the gradients
            backforward(mini_train_x, mini_train_y);
            // Update the weights
            for (int j = 0; j < train_weights.size(); ++j)
            {
                // The batch sum of the weight gradients was already obtained by the matrix product in the backward pass
                train_weights.at(j) = train_weights.at(j) - learning_rate * (train_weights_grad.at(j) / mini_batch_size + lambda * train_weights.at(j));
                Eigen::MatrixXd tempBias(mini_batch_size, 1); // this vector of ones sums the bias gradients over the batch
                tempBias.setOnes();
                train_bias.at(j) = train_bias.at(j) - learning_rate * (train_bias_grad.at(j) * tempBias / mini_batch_size);
            }// for
            if (0 == i % 50)
            {//if110
                if (0 == k % 500)
                {//if220
                    std::cout << "iter " << i << "\t[k:" << k << "-->loss:\t"
                        << loss(feedforword_a.at(feedforword_a.size() - 1), mini_train_y, mini_batch_size) << std::endl;
                }//if220
            }//if110
            // std::cout << "iter " << i << "-->loss : " << loss(feedforword_a.at(feedforword_a.size() - 1), mini_train_y, mini_batch_size) << std::endl;
        }// for
    }// for
    for (int i = 0; i < train_weights.size(); ++i)
    {
        std::cout << "train_weights: " << train_weights.at(i) << std::endl;
        std::cout << "train_bias: " << train_bias.at(i) << std::endl;
    }// for
    std::cout << "trained..." << std::endl;
}// end

// Loss function; it follows the usual formula directly
double NeuralNetwork::loss(const Eigen::MatrixXd& pre_y, const Eigen::MatrixXd& ori_y, int m)
{
    // Error term
    double left_term = 0.0;
    for (int i = 0; i < m; ++i)
    {
        Eigen::MatrixXd temp_m = (pre_y.col(i) - ori_y.col(i)).transpose() * (pre_y.col(i) - ori_y.col(i)) / 2.0;
        left_term += temp_m(0, 0); // accumulate over the batch (the original assigned instead of accumulating)
    }
    left_term /= m;
    // Regularization term
    double norm_term = 0.0;
    for (int i = 0; i < train_weights.size(); ++i)
    {
        Eigen::MatrixXd temp_m = train_weights.at(i);
        for (int j = 0; j < temp_m.cols(); ++j)
        {
            for (int k = 0; k < temp_m.rows(); ++k)
            {
                norm_term += temp_m(k, j) * temp_m(k, j);
            }// for
        }// for
    }// for
    norm_term *= (lambda / 2);
    return left_term + norm_term;
}// end

// Evaluate the model; labels are one-hot encoded by default
double NeuralNetwork::evaluate(const Eigen::MatrixXd& _test_dataX, const Eigen::MatrixXd& _test_dataY, bool one_hot)
{
    int cnt = 0;
    for (int i = 0; i < _test_dataX.cols(); ++i)
    {
        // Take one test sample
        Eigen::MatrixXd x = _test_dataX.col(i);
        // Feed it through the network
        Eigen::MatrixXd y_pre = predict(x);
        if (one_hot)
        {
            if (argmax(y_pre) == argmax(_test_dataY.col(i)))
            {
                ++cnt;
            }// if
        }// if
        else
        {
            if (std::abs(y_pre(0, 0) - _test_dataY(0, i)) < 0.1) // absolute error (the original omitted std::abs)
            {
                ++cnt;
            }// if
        }// else
    }// for
    return cnt * 1.0 / _test_dataX.cols();
}// end

// Row index of the largest entry of a column vector
int NeuralNetwork::argmax(const Eigen::MatrixXd& _y)
{
    double _max = _y(0, 0);
    int ret = 0;
    for (int i = 1; i < _y.rows(); ++i)
    {
        if (_y(i, 0) > _max)
        {
            _max = _y(i, 0);
            ret = i;
        }
    }
    return ret;
}

// Compute the output for a single sample
Eigen::MatrixXd NeuralNetwork::predict(const Eigen::MatrixXd& _input)
{
    for (int i = 0; i < predict_a.size(); ++i)
    {
        if (i == 0)
        {
            predict_a.at(i) = sigmoid(train_weights.at(i) * _input + train_bias.at(i));
        }// if
        else
        {
            predict_a.at(i) = sigmoid(train_weights.at(i) * predict_a.at(i - 1) + train_bias.at(i));
        }// else
    }// for
    return predict_a.at(predict_a.size() - 1);
}// end

// Synthetic data generator: 4-dimensional, nearly one-hot inputs with scalar labels in {0.25, 0.5, 0.75, 1.0}
class CustomData
{
public:
    CustomData(int numberOfTrainData, int numberOfTestData);
    const Eigen::MatrixXd getTrainData() const;
    const Eigen::MatrixXd getTrainLabel() const;
    const Eigen::MatrixXd getTestData() const;
    const Eigen::MatrixXd getTestLabel() const;
private:
    void generatorData(int numberOfTrainData, int numberOfTestData);
private:
    Eigen::MatrixXd mtrain_x, mtrain_y, mtest_x, mtest_y;
};

CustomData::CustomData(int numberOfTrainData, int numberOfTestData)
{
    generatorData(numberOfTrainData, numberOfTestData);
}
const Eigen::MatrixXd CustomData::getTrainData() const { return mtrain_x; }
const Eigen::MatrixXd CustomData::getTrainLabel() const { return mtrain_y; }
const Eigen::MatrixXd CustomData::getTestData() const { return mtest_x; }
const Eigen::MatrixXd CustomData::getTestLabel() const { return mtest_y; }

void CustomData::generatorData(int numberOfTrainData, int numberOfTestData)
{
    mtrain_x.resize(4, numberOfTrainData);
    mtrain_x.setZero();
    mtrain_y.resize(1, numberOfTrainData);
    mtest_x.resize(4, numberOfTestData);
    mtest_x.setZero();
    mtest_y.resize(1, numberOfTestData);
    for (int i = 0; i < numberOfTrainData; ++i)
    {
        int index = i % 4;
        mtrain_x(index, i) = 1;
        for (size_t j = 0; j != mtrain_x.rows(); ++j)
        {
            mtrain_x(j, i) += (5e-3 * rand() / RAND_MAX - 2.5e-3);
        }
        mtrain_y(0, i) = (index + 1) * 1.0 / 4;
    }
    for (int i = 0; i < numberOfTestData; ++i)
    {
        int index = i % 4;
        mtest_x(index, i) = 1;
        for (int j = 0; j < mtest_x.rows(); ++j)
        {
            mtest_x(j, i) += (5e-3 * rand() / RAND_MAX - 2.5e-3);
        }
        mtest_y(0, i) = (index + 1) * 1.0 / 4;
    }
}

int main(int argc, char** argv)
{
    CustomData data(1000, 300);
    //CustomData data(10000, 3000);
    std::vector<int> architecture = { 4, 4, 1 };
    NeuralNetwork network(architecture, data.getTrainData(), data.getTrainLabel());
    network.train();

    Eigen::MatrixXd input1(4, 1);
    input1 << 0.990, 0.002, 0.003, 0.00013;
    std::cout << "predict:" << network.predict(input1) << std::endl;

    Eigen::MatrixXd input2(4, 1);
    input2 << 0.0103, 0.987, 0.0006, 0.00014;
    std::cout << "predict:" << network.predict(input2) << std::endl;

    Eigen::MatrixXd input3(4, 1);
    input3 << 0.0201, 0.002, 0.9579, 0.00015;
    std::cout << "predict:" << network.predict(input3) << std::endl;

    Eigen::MatrixXd input4(4, 1);
    input4 << 0.004, 0.001, 0.005, 0.9399;
    std::cout << "predict:" << network.predict(input4) << std::endl;

    return 0;
}
// Reference: https://blog.csdn.net/xmdxcsj/article/details/52643891
// Original article: https://blog.csdn.net/licaizi1025365743/article/details/78073291
```

Console output:

```
training...
train_weights: -0.663259 -0.807855  0.815424 -0.882382
 0.476608  0.468307  0.613758 -0.983581
 0.985534  0.403241   0.30015  0.855098
-0.229835 -0.860714  0.311869  0.283792
train_bias: -0.485397
-0.780755
-0.930723
  0.16129
train_weights: -0.643422 0.164586 0.136265 -0.697195
train_bias: 0.7528
iter 0    [k:0-->loss:    0.000253159
iter 50   [k:0-->loss:    3.40094e-08
iter 100  [k:0-->loss:    5.13223e-10
iter 150  [k:0-->loss:    2.98455e-09
iter 200  [k:0-->loss:    5.47251e-09
iter 250  [k:0-->loss:    7.31469e-09
iter 300  [k:0-->loss:    8.65317e-09
iter 350  [k:0-->loss:    9.64792e-09
iter 400  [k:0-->loss:    1.04079e-08
iter 450  [k:0-->loss:    1.10033e-08
iter 500  [k:0-->loss:    1.14801e-08
iter 550  [k:0-->loss:    1.18691e-08
iter 600  [k:0-->loss:    1.21916e-08
iter 650  [k:0-->loss:    1.24625e-08
iter 700  [k:0-->loss:    1.26927e-08
iter 750  [k:0-->loss:    1.28905e-08
iter 800  [k:0-->loss:    1.30618e-08
iter 850  [k:0-->loss:    1.32113e-08
iter 900  [k:0-->loss:    1.33428e-08
iter 950  [k:0-->loss:    1.34591e-08
train_weights:  0.274916 -0.610794  0.522179  -1.56288
  2.80653   1.07326 0.0791943  -2.67451
0.0604637  0.235923  0.330263   2.11216
 -0.23768 -0.792924  0.100121  0.559474
train_bias: -0.324225
-0.0721563
 -0.73569
 0.285205
train_weights: -0.970355 -4.65688 2.03796 1.11265
train_bias: 2.49186
trained...
predict:0.251846
predict:0.496524
predict:0.737199
predict:0.987601
```
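The four predictions land close to the scalar labels 0.25, 0.5, 0.75 and 1.0 that `CustomData` assigns to the four input classes. The driver only prints these hand-picked cases and never calls `evaluate`, even though `CustomData` also generates a test split. A minimal sketch of how it could be used for this scalar-label setup is shown below; this is a hypothetical continuation of `main()`, not part of the original post:

```
// Hypothetical continuation of main(): score the trained network on the held-out split.
// Labels here are scalars in {0.25, 0.5, 0.75, 1.0}, so pass one_hot = false and
// evaluate() counts a sample as correct when |prediction - label| < 0.1.
double accuracy = network.evaluate(data.getTestData(), data.getTestLabel(), false);
std::cout << "test accuracy: " << accuracy << std::endl;
```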