Question: Write a complete artificial neural network in C++. The input layer must have 4 nodes, one hidden layer must have 3 nodes, and the output layer must have 2 nodes; the program must include backpropagation and gradient-descent updates of the weights and biases.

To build the simple neural network described, we need to define the following parts:

1. The network structure
2. Forward propagation
3. Backpropagation
4. Updating the weights and biases (gradient descent)

Below is a basic implementation. (Note: this listing was enlarged to a 9-12-5 topology to classify 3×3 pixel patterns; the layer sizes live in the globals `Ninpu9t`, `Nhidde12n`, and `nOutpu2t` and can be changed.)

```
// c++人工神經網絡反向傳播梯度下降更新權重偏置230810a18.cpp :
// This file contains the "main" function. Program execution begins and ends here.
#include <iostream>
#include <vector>
#include <cmath>
#include <ctime>
#include <cstdlib>

// Layer sizes (read by the NeuralNetwork constructor)
int Ninpu9t = 9;
int Nhidde12n = 12;
int nOutpu2t = 5;

// Sigmoid is used as the activation function
double sigmoid(double x) {
    return 1.0 / (1.0 + std::exp(-x));
}

double sigmoid_derivative(double x) {
    double s = sigmoid(x);
    return s * (1 - s);
}

class NeuralNetwork {
private:
    std::vector<std::vector<double>> weights1, weights2; // weights
    std::vector<double> bias1, bias2;                    // biases
    double learning_rate;

public:
    NeuralNetwork() : learning_rate(0.1) {
        srand(time(nullptr));
        // Initialize weights and biases with random values in [-1, 1]
        weights1.resize(Ninpu9t, std::vector<double>(Nhidde12n));
        weights2.resize(Nhidde12n, std::vector<double>(nOutpu2t));
        bias1.resize(Nhidde12n);
        bias2.resize(nOutpu2t);

        for (int i = 0; i < Ninpu9t; ++i)
            for (int j = 0; j < Nhidde12n; ++j)
                weights1[i][j] = (rand() % 2000 - 1000) / 1000.0; // [-1, 1]

        for (int i = 0; i < Nhidde12n; ++i) {
            bias1[i] = (rand() % 2000 - 1000) / 1000.0;           // [-1, 1]
            for (int j = 0; j < nOutpu2t; ++j)
                weights2[i][j] = (rand() % 2000 - 1000) / 1000.0; // [-1, 1]
        }

        for (int i = 0; i < nOutpu2t; ++i)
            bias2[i] = (rand() % 2000 - 1000) / 1000.0;           // [-1, 1]
    }

    std::vector<double> forward(const std::vector<double>& input) {
        std::vector<double> hidden(Nhidde12n);
        std::vector<double> output(nOutpu2t);

        // Input layer -> hidden layer
        for (int i = 0; i < Nhidde12n; ++i) {
            hidden[i] = 0;
            for (int j = 0; j < Ninpu9t; ++j)
                hidden[i] += input[j] * weights1[j][i];
            hidden[i] += bias1[i];
            hidden[i] = sigmoid(hidden[i]);
        }

        // Hidden layer -> output layer
        for (int i = 0; i < nOutpu2t; ++i) {
            output[i] = 0;
            for (int j = 0; j < Nhidde12n; ++j)
                output[i] += hidden[j] * weights2[j][i];
            output[i] += bias2[i];
            output[i] = sigmoid(output[i]);
        }
        return output;
    }

    void train(const std::vector<double>& input, const std::vector<double>& target) {
        // Forward pass, keeping the pre-activation sums for the derivatives
        std::vector<double> hidden(Nhidde12n);
        std::vector<double> output(nOutpu2t);
        std::vector<double> hidden_sum(Nhidde12n, 0);
        std::vector<double> output_sum(nOutpu2t, 0);

        for (int i = 0; i < Nhidde12n; ++i) {
            for (int j = 0; j < Ninpu9t; ++j)
                hidden_sum[i] += input[j] * weights1[j][i];
            hidden_sum[i] += bias1[i];
            hidden[i] = sigmoid(hidden_sum[i]);
        }

        for (int i = 0; i < nOutpu2t; ++i) {
            for (int j = 0; j < Nhidde12n; ++j)
                output_sum[i] += hidden[j] * weights2[j][i];
            output_sum[i] += bias2[i];
            output[i] = sigmoid(output_sum[i]);
        }

        // Backpropagation: output-layer error and delta
        std::vector<double> output_errors(nOutpu2t);
        for (int i = 0; i < nOutpu2t; ++i)
            output_errors[i] = target[i] - output[i];

        std::vector<double> d_output(nOutpu2t);
        for (int i = 0; i < nOutpu2t; ++i)
            d_output[i] = output_errors[i] * sigmoid_derivative(output_sum[i]);

        // Propagate the error back to the hidden layer
        std::vector<double> hidden_errors(Nhidde12n, 0);
        for (int i = 0; i < Nhidde12n; ++i)
            for (int j = 0; j < nOutpu2t; ++j)
                hidden_errors[i] += weights2[i][j] * d_output[j];

        std::vector<double> d_hidden(Nhidde12n);
        for (int i = 0; i < Nhidde12n; ++i)
            d_hidden[i] = hidden_errors[i] * sigmoid_derivative(hidden_sum[i]);

        // Update weights and biases (gradient descent)
        for (int i = 0; i < Nhidde12n; ++i)
            for (int j = 0; j < nOutpu2t; ++j)
                weights2[i][j] += learning_rate * d_output[j] * hidden[i];
        for (int i = 0; i < nOutpu2t; ++i)
            bias2[i] += learning_rate * d_output[i];

        for (int i = 0; i < Ninpu9t; ++i)
            for (int j = 0; j < Nhidde12n; ++j)
                weights1[i][j] += learning_rate * d_hidden[j] * input[i];
        for (int i = 0; i < Nhidde12n; ++i)
            bias1[i] += learning_rate * d_hidden[i];
    }
};

int main() {
    NeuralNetwork nn;

    // Example training samples: 3x3 pixel patterns
    std::vector<double> input[5];
    input[0] = { 0,1,0, 0,1,0, 0,1,0 };    // '1'
    input[1] = { 0,0,0, 1,1,1, 0,0,0 };    // '-'
    input[2] = { 0,1,0, 1,1,1, 0,1,0 };    // '+'
    input[3] = { 0,1,0, 0,2,0, 0,3,0.12 }; // 'O' or '[]' shape
    input[4] = { 1,1,0, 9,0,9.8, 1,1,1 };  // (unused below)

    std::vector<double> target[5];
    target[0] = { 1.0, 0, 0, 0, 0 };
    target[1] = { 0, 1.0, 0, 0, 0 };
    target[2] = { 0, 0, 1, 0, 0 };
    target[3] = { 1, 0, 0, 1, 0 };
    target[4] = { 0, 0, 0, 0, 5 };         // (unused below)

    // Training: only the first four samples are used
    for (int i = 0; i < 10000; ++i)
        for (int jj = 0; jj < 4; ++jj)
            nn.train(input[jj], target[jj]);

    // Test
    input[0] = { 0,1,0, 0,1,0, 0,1,0 };    // '1'
    std::vector<double> output = nn.forward(input[0]);
    for (auto& val : output) std::cout << val << " ";
    std::cout << std::endl;

    input[1] = { 0,0,0, 1,1,1, 0,0,0 };    // '-'
    output = nn.forward(input[1]);         // reuse output; re-declaring it would not compile
    for (auto& val : output) std::cout << val << " ";
    std::cout << std::endl;

    return 0;
}
```

Run result:

```
0.978955 0.00350907 0.0190363 0.0246372 0.00523369
0.00402561 0.981827 0.0193466 6.64613e-05 0.00696526
```

F:\嘗試加多輸入c++神經網絡反向傳播梯度下降更新權重偏置230810a18\x64\Debug\c++人工神經網絡反向傳播梯度下降更新權重偏置230810a18.exe
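The heart of `train()` above is the sigmoid delta rule: every parameter moves by `learning_rate * delta * activation`, where `delta` is the error scaled by the sigmoid derivative. A minimal, self-contained sketch of that same update for a single sigmoid neuron (illustrative only; the names here are not part of the original program):

```
#include <cmath>
#include <iostream>

// One sigmoid neuron with two inputs, trained by gradient descent.
// Same update rule as NeuralNetwork::train():
//   w += learning_rate * (target - y) * sigmoid'(sum) * x
int main() {
    double w[2] = { 0.5, -0.5 }, b = 0.0, lr = 0.1;
    const double x[2] = { 1.0, 0.0 }, target = 1.0;

    for (int epoch = 0; epoch < 1000; ++epoch) {
        double sum = w[0] * x[0] + w[1] * x[1] + b;
        double y = 1.0 / (1.0 + std::exp(-sum));     // sigmoid activation
        double delta = (target - y) * y * (1.0 - y); // error * sigmoid derivative
        for (int i = 0; i < 2; ++i)
            w[i] += lr * delta * x[i];               // weight update
        b += lr * delta;                             // bias update
    }

    double sum = w[0] * x[0] + w[1] * x[1] + b;
    std::cout << 1.0 / (1.0 + std::exp(-sum)) << std::endl; // approaches 1.0
    return 0;
}
```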
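To match the 4-3-2 topology the question actually asks for, the globals can simply be reset before the network is constructed, since the constructor sizes its vectors from them at that point. Below is a sketch of a replacement `main()`, assuming the `NeuralNetwork` class and the globals from the listing above are in scope; the sample input and target values are taken from the commented-out figures in the original listing:

```
int main() {
    // Set the layer sizes BEFORE constructing the network:
    // the constructor reads these globals.
    Ninpu9t = 4;
    Nhidde12n = 3;
    nOutpu2t = 2;

    NeuralNetwork nn;
    std::vector<double> in  = { 1.0, 0.5, 0.25, 0.125 };
    std::vector<double> tgt = { 0.0, 1.0 };

    for (int i = 0; i < 10000; ++i)
        nn.train(in, tgt);

    for (auto& val : nn.forward(in))
        std::cout << val << " ";   // should approach 0 and 1
    std::cout << std::endl;
    return 0;
}
```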