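A hand-rolled neural network in C++ (file `c++神經網絡手擼20梯度下降22_230820a.cpp`): two hidden layers, sigmoid activations, and plain gradient descent, trained to classify 3×3 pixel patterns such as a vertical bar and a horizontal bar. After training, the program reads a comma-separated 3×3 pattern from stdin and prints the network's two outputs.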
```
// c++神經網絡手擼20梯度下降22_230820a.cpp : This file contains the "main" function.
// Program execution begins and ends there.
#include <iostream>
#include <vector>
#include <iomanip>   // std::setprecision
#include <sstream>   // std::stringstream, std::getline
#include <string>    // std::string, std::stof
#include <cmath>     // exp, fabs
#include <cstdlib>   // rand, srand
#include <ctime>     // time
using namespace std;

float Loss誤差損失之合001 = 0.0;  // running sum of absolute output errors

class NN神經網絡NN {
private:
    int inputNode輸入層之節點數s, hidden01Nodes, hidden22Nodes, outputNode輸出層結點數s;
    // Weight matrices: input->hidden1, hidden1->hidden2, hidden2->output
    vector<vector<float>> 輸入層到第1隱藏層之權重矩陣, 隱藏層1到第二隱藏層2之權重矩陣1to2, 隱藏22到輸出層de權重矩陣;
    vector<float> 隱藏層1偏置1, 隱藏層2偏置2, O輸出偏置o;       // bias vectors
    vector<float> 隱藏層1數據1, 隱藏層2數據2, 輸出數據output;   // per-layer activations

    // Randomly initialize one layer's weights and biases, uniform in [-1, 1].
    // (The original ((rand() % 2) - 1) / 1.0 produced only -1 or 0.)
    void initLayer每一層的WeightsAndBiases(vector<vector<float>>& weights權重, vector<float>& biases偏置) {
        for (size_t i = 0; i < weights權重.size(); ++i) {
            for (size_t j = 0; j < weights權重[0].size(); ++j) {
                weights權重[i][j] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
            }
            biases偏置[i] = (rand() / (float)RAND_MAX) * 2.0f - 1.0f;
        }
    }

    void initWeightsAndBiases初始化權重和偏置矩陣() {
        initLayer每一層的WeightsAndBiases(輸入層到第1隱藏層之權重矩陣, 隱藏層1偏置1);
        initLayer每一層的WeightsAndBiases(隱藏層1到第二隱藏層2之權重矩陣1to2, 隱藏層2偏置2);
        initLayer每一層的WeightsAndBiases(隱藏22到輸出層de權重矩陣, O輸出偏置o);
    }

    // One layer's forward step: weighted sum plus bias, then sigmoid.
    vector<float> activate(const vector<float>& inputs,
                           const vector<vector<float>>& weights,
                           const vector<float>& biases) {
        vector<float> layer_output(weights.size(), 0.0f);
        for (size_t i = 0; i < weights.size(); i++) {
            for (size_t j = 0; j < inputs.size(); j++) {
                layer_output[i] += inputs[j] * weights[i][j];
            }
            layer_output[i] += biases[i];
            layer_output[i] = sigmoid(layer_output[i]);
        }
        return layer_output;
    }

    // Element-wise difference of two vectors: a - b.
    std::vector<float> subtract(const std::vector<float>& a, const std::vector<float>& b) {
        std::vector<float> result(a.size(), 0.0f);
        for (size_t i = 0; i < a.size(); i++) result[i] = a[i] - b[i];
        return result;
    }

    // dotT: multiply vector a by the transpose of matrix b, i.e. result = bᵀ·a.
    // Used to propagate errors backwards through a weight matrix.
    std::vector<float> dotT(const std::vector<float>& a, const std::vector<std::vector<float>>& b) {
        std::vector<float> result(b[0].size(), 0.0f);
        for (size_t i = 0; i < b[0].size(); i++) {
            for (size_t j = 0; j < a.size(); j++) {
                result[i] += a[j] * b[j][i];
            }
        }
        return result;
    }

    // Delta-rule update for one weight matrix and its bias vector.
    void updateWeights(const std::vector<float>& inputs, const std::vector<float>& errors,
                       const std::vector<float>& outputs,
                       std::vector<std::vector<float>>& weights, std::vector<float>& biases,
                       float lr) {
        for (size_t i = 0; i < weights.size(); i++) {
            for (size_t j = 0; j < weights[0].size(); j++) {
                weights[i][j] += lr * errors[i] * sigmoid導函數prime(outputs[i]) * inputs[j];
            }
            biases[i] += lr * errors[i] * sigmoid導函數prime(outputs[i]);
        }
    }

public:
    NN神經網絡NN(int inputNode輸入層之節點數s, int hidden01Nodes, int hidden22Nodes, int outputNode輸出層結點數s)
        : inputNode輸入層之節點數s(inputNode輸入層之節點數s), hidden01Nodes(hidden01Nodes),
          hidden22Nodes(hidden22Nodes), outputNode輸出層結點數s(outputNode輸出層結點數s) {
        srand((unsigned)time(NULL));
        // Allocate the weight matrices and bias vectors, then randomize them.
        輸入層到第1隱藏層之權重矩陣.resize(hidden01Nodes, vector<float>(inputNode輸入層之節點數s));
        隱藏層1到第二隱藏層2之權重矩陣1to2.resize(hidden22Nodes, vector<float>(hidden01Nodes));
        隱藏22到輸出層de權重矩陣.resize(outputNode輸出層結點數s, vector<float>(hidden22Nodes));
        隱藏層1偏置1.resize(hidden01Nodes);
        隱藏層2偏置2.resize(hidden22Nodes);
        O輸出偏置o.resize(outputNode輸出層結點數s);
        initWeightsAndBiases初始化權重和偏置矩陣();
    }

    // Sigmoid activation and its derivative. Note that sigmoid導函數prime takes
    // the already-activated output o, hence the o*(1-o) form.
    float sigmoid(float x) { return 1.0f / (1.0f + exp(-x)); }
    float sigmoid導函數prime(float x) { return x * (1 - x); }

    // Forward pass through both hidden layers to the output layer.
    vector<float> predict(const vector<float>& input輸入數據) {
        隱藏層1數據1 = activate(input輸入數據, 輸入層到第1隱藏層之權重矩陣, 隱藏層1偏置1);
        隱藏層2數據2 = activate(隱藏層1數據1, 隱藏層1到第二隱藏層2之權重矩陣1to2, 隱藏層2偏置2);
        輸出數據output = activate(隱藏層2數據2, 隱藏22到輸出層de權重矩陣, O輸出偏置o);
        return 輸出數據output;
    }

    // Backpropagation: one gradient-descent step on a single sample.
    void train(const vector<float>& inputs, const vector<float>& target目標數據s, float lr學習率) {
        vector<float> output嘗試的輸出數據s = predict(inputs);
        // Output-layer error, and the L1 loss reported during training.
        vector<float> output_error輸出誤差s = subtract(target目標數據s, output嘗試的輸出數據s);
        Loss誤差損失之合001 = 0.0;
        for (int ii = 0; ii < outputNode輸出層結點數s; ++ii) {
            Loss誤差損失之合001 += fabs(output_error輸出誤差s[ii]);
        }
        // Hidden-layer errors, propagated back through the transposed weights.
        // (The sigmoid-derivative factor is applied later inside updateWeights,
        // not during propagation — a simplification relative to textbook backprop.)
        vector<float> hidden2_errors = dotT(output_error輸出誤差s, 隱藏22到輸出層de權重矩陣);
        vector<float> hidden1_errors = dotT(hidden2_errors, 隱藏層1到第二隱藏層2之權重矩陣1to2);
        // Update hidden2->output, hidden1->hidden2, then input->hidden1.
        updateWeights(隱藏層2數據2, output_error輸出誤差s, output嘗試的輸出數據s, 隱藏22到輸出層de權重矩陣, O輸出偏置o, lr學習率);
        updateWeights(隱藏層1數據1, hidden2_errors, 隱藏層2數據2, 隱藏層1到第二隱藏層2之權重矩陣1to2, 隱藏層2偏置2, lr學習率);
        updateWeights(inputs, hidden1_errors, 隱藏層1數據1, 輸入層到第1隱藏層之權重矩陣, 隱藏層1偏置1, lr學習率);
    }
};//class NN神經網絡NN

#define Num訓練數據的個數s 5

int main() {
    NN神經網絡NN nn(9, 6, 4, 2);  // 9 inputs (3x3 pixels), hidden layers of 6 and 4, 2 outputs
    std::vector<float> input[Num訓練數據的個數s];
    vector<float> target[Num訓練數據的個數s];
    /* Earlier experiment, kept for reference: five 3x3 patterns, four outputs.
    input[0] = { 0,1,0, 0,1,0, 0,1,0 };   target[0] = { 1.0, 0, 0, 0 };   // vertical bar "1"
    input[1] = { 0,0,0, 1,1,1, 0,0,0 };   target[1] = { 0, 1.0, 0, 0 };   // horizontal bar "-"
    input[2] = { 0,1,0, 1,1,1, 0,1,0 };   target[2] = { 0, 0, 1.0, 0 };   // plus sign "+"
    input[3] = { 0,1,0, 0,1.2,0, 0,1,0 }; target[3] = { 1.0, 0, 0, 0.5 }; // '1' / '|' shape
    input[4] = { 1,1,0, 1,0,1.2, 1,1,1 }; target[4] = { 0, 0, 0, 0 };     // box "口" shape
    */
    input[0] = { 0,0,0, 1,1,1, 0,0,0 }; target[0] = { 0, 1 };   // horizontal bar "-"
    input[1] = { 0,1,0, 0,1,0, 0,1,0 }; target[1] = { 1.0, 0 }; // vertical bar
    input[2] = { 1,1,1, 0,0,0, 0,0,0 }; target[2] = { 0, 0.5 }; // top row
    input[3] = { 0,1,0, 0,1,0, 0,1,0 }; target[3] = { 0.9, 0 }; // vertical bar again, slightly different target

    // Train on the first four samples for 90000 epochs; print the (last sample's)
    // loss every 1000 epochs.
    for (int i = 0; i < 90000; ++i) {
        for (int jj = 0; jj < Num訓練數據的個數s - 1; ++jj) {
            nn.train(input[jj], target[jj], 0.001);
            if (0 == i % 1000) { cout << "[Loss: " << Loss誤差損失之合001 << "]" << endl; }
        }
    }
    cout << endl;

    input[1] = { 0,0,0, 1,1,0.98, 0,0,0 };  // perturbed "-" pattern (note: the predict call below uses input[0])
    std::vector<float> outpu輸出數據001t = nn.predict(input[0]);
    for (auto& val : outpu輸出數據001t)
        std::cout << std::fixed << std::setprecision(9) << val << " ";
    cout << endl;

    // Interactive loop: read a 3x3 pattern from stdin and classify it.
    std::string str0001;
    do {
        std::cout << std::endl
                  << "Enter a string of 9 comma-separated numbers, e.g. 1,2,0,0,5,0,0,8,9: "
                  << std::endl;
        std::getline(std::cin, str0001);
        std::stringstream s01s001(str0001);
        for (int i = 0; i < 9; ++i) {
            std::string temp;
            std::getline(s01s001, temp, ',');
            input[1][i] = std::stof(temp);  // convert each field to float (throws std::invalid_argument on malformed input)
        }
        std::cout << "Parsed array: ";
        for (int i = 0; i < 9; ++i) { std::cout << input[1][i] << " "; }
        outpu輸出數據001t = nn.predict(input[1]);
        std::cout << std::endl;
        for (auto& val : outpu輸出數據001t)
            std::cout << std::fixed << std::setprecision(9) << val << " ";
    } while (true);
    cout << "Hello World!\n";  // unreachable: the loop above never exits
}
```
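For reference, the update that `updateWeights` applies to each weight and bias is the classic delta rule. A sketch of the math the loops implement, where e_i is the backpropagated error of neuron i, x_j its j-th input, and eta the learning rate; sigma' is evaluated on the already-activated output o_i:

```
w_{ij} \leftarrow w_{ij} + \eta \, e_i \, \sigma'(o_i) \, x_j,
\qquad
b_i \leftarrow b_i + \eta \, e_i \, \sigma'(o_i),
\qquad
\sigma'(o_i) = o_i \,(1 - o_i).
```

One detail worth noting: `dotT` propagates errors as e^(l) = (W^(l+1))ᵀ e^(l+1), without the upstream sigma' factor, which `updateWeights` only applies at update time. Textbook backpropagation would use delta^(l) = ((W^(l+1))ᵀ delta^(l+1)) ⊙ sigma'(h^(l)), so the propagated quantity here is an approximation of the exact gradient, which is common in toy implementations like this one. The listing uses brace-initialized vectors and `std::stof`, so it should build with any C++11 compiler (e.g. `g++ -std=c++11`, assuming the file name above).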