<ruby id="bdb3f"></ruby>

    <p id="bdb3f"><cite id="bdb3f"></cite></p>

      <p id="bdb3f"><cite id="bdb3f"><th id="bdb3f"></th></cite></p><p id="bdb3f"></p>
        <p id="bdb3f"><cite id="bdb3f"></cite></p>

          <pre id="bdb3f"></pre>
          <pre id="bdb3f"><del id="bdb3f"><thead id="bdb3f"></thead></del></pre>

          <ruby id="bdb3f"><mark id="bdb3f"></mark></ruby><ruby id="bdb3f"></ruby>
          <pre id="bdb3f"><pre id="bdb3f"><mark id="bdb3f"></mark></pre></pre><output id="bdb3f"></output><p id="bdb3f"></p><p id="bdb3f"></p>

          <pre id="bdb3f"><del id="bdb3f"><progress id="bdb3f"></progress></del></pre>

                <ruby id="bdb3f"></ruby>

                碼云GVP開源項目 12k star Uniapp+ElementUI 功能強大 支持多語言、二開方便! 廣告
                這次先上 運行“結果”: ``` 訓練次數 誤差 0 16.519723 500 0.135468 1000 0.105337 1500 0.082751 2000 0.076986 2500 0.073428 3000 0.070383 3500 0.067702 4000 0.065119 4500 0.062573 5000 0.060117 5500 0.057829 6000 0.055517 6500 0.053316 7000 0.051277 6*3=:17.721119 檢驗 訓練“BP網絡”訓練的 成果: 3.200000*6=:19.920839 4.300000*8=:35.636405 6.000000*8=:49.607321 2.100000*7=:15.587592 4.300000*8=:35.636405 ``` 結果說明: “BP神經網絡”學習乘法……成果是, TA學的是那個意思(精度一般般)…… 但就像小動物,能猜個大概其……也是一種進步。 二、 再上 C的 程序代碼 和 注釋: ``` // 在微軟VS-C++2017調試運行通過-ConA1neuron20200310a.cpp : 此文件包含 "main" 函數。程序執行將在此處開始并結束。 // #define _CRT_SECURE_NO_WARNINGS #include "pch.h" #include <iostream> #include <stdio.h> #include <time.h> #include <math.h> #include <stdlib.h> #define Error001Precision 0.05 //誤差允許精度 #define Display01Frequency 500 //顯示訓練次數的頻次//頻率 #define Data 820 //Data 用來表示已經知道的數據樣本的數量,也就是訓練樣本的數量 #define In 2 //In 表示對于每個樣本有多少個輸入變量 #define Out 1 #define Neuron 45 //Neuron 表示神經元的數量 #define TrainC 20000 //TrainC 來表示訓練的次數 #define A 0.2 //在具體實現對誤差修改中,我們再加上學習率,并且對先前學習到的修正誤差量進行繼承,直白的說就是都乘上一個0到1之間的數,具體的見如下實現參考代碼 #define B 0.4 #define a 0.2 #define b 0.3 double d_in[Data][In]; // ,d_in[Data][In] 存儲 Data 個樣本,每個樣本的 In 個輸入 double d_out[Data][Out]; //d_out[Data][Out] 存儲 Data 個樣本,每個樣本的 Out 個輸出。 double w[Neuron][In];//w[Neuron][In] 表示某個輸入對某個神經元的權重 double o[Neuron]; //數組 o[Neuron] 記錄的是神經元通過激活函數對外的輸出, , double v[Out][Neuron];//v[Out][Neuron] 來表示某個神經元對某個輸出的權重; double Maxin[In], Minin[In], Maxout[Out], Minout[Out]; double OutputData[Out];//OutputData[Out] 存儲BP神經網絡的輸出。 double dv[Out][Neuron], dw[Neuron][In]; //與之對應的保存它們兩個修正量的數組 dw[Neuron][In] 和 dv[Out][Neuron] double e01ErrorPrecision; //誤差 void writeTest() { FILE *fp1, *fp2; double r1, r2; int i; srand((unsigned)time(NULL)); if ((fp1 = fopen("D:\\in.txt", "w")) == NULL) { printf("can not open the in file\n"); exit(0); } if ((fp2 = fopen("D:\\out.txt", "w")) == NULL) { printf("can not open the out file\n"); exit(0); } for (i = 0; i < Data; i++) { r1 = rand() % 1000 / 100.0; r2 = rand() % 
1000 / 100.0; fprintf(fp1, "%lf %lf\n", r1, r2); fprintf(fp2, "%lf \n", r1*r2); //訓練 bp網絡學習乘法! r1 + r2); } fclose(fp1); fclose(fp2); } void readData() { FILE *fp1, *fp2; int i, j; if ((fp1 = fopen("D:\\in.txt", "r")) == NULL) { printf("can not open the in file\n"); exit(0); } for (i = 0; i < Data; i++) for (j = 0; j < In; j++) fscanf(fp1, "%lf", &d_in[i][j]); fclose(fp1); if ((fp2 = fopen("D:\\out.txt", "r")) == NULL) { printf("can not open the out file\n"); exit(0); } for (i = 0; i < Data; i++) for (j = 0; j < Out; j++) fscanf(fp1, "%lf", &d_out[i][j]); fclose(fp2); } void initBPNework() { int i, j; //第01步,先:找到數據最小、最大值: for (i = 0; i < In; i++) { Minin[i] = Maxin[i] = d_in[0][i]; for (j = 0; j < Data; j++) { Maxin[i] = Maxin[i] > d_in[j][i] ? Maxin[i] : d_in[j][i]; Minin[i] = Minin[i] < d_in[j][i] ? Minin[i] : d_in[j][i]; } } //找輸出數據的最小、最大值: for (i = 0; i < Out; i++) { Minout[i] = Maxout[i] = d_out[0][i]; for (j = 0; j < Data; j++) { Maxout[i] = Maxout[i] > d_out[j][i] ? Maxout[i] : d_out[j][i]; Minout[i] = Minout[i] < d_out[j][i] ? 
Minout[i] : d_out[j][i]; } } //第02:歸一化處理 for (i = 0; i < In; i++) for (j = 0; j < Data; j++) d_in[j][i] = (d_in[j][i] - Minin[i] + 1) / (Maxin[i] - Minin[i] + 1); for (i = 0; i < Out; i++) for (j = 0; j < Data; j++) d_out[j][i] = (d_out[j][i] - Minout[i] + 1) / (Maxout[i] - Minout[i] + 1); //第03:初始化神經元 for (i = 0; i < Neuron; ++i) for (j = 0; j < In; ++j) { w[i][j] = rand()*2.0 / RAND_MAX - 1; dw[i][j] = 0; } for (i = 0; i < Neuron; ++i) for (j = 0; j < Out; ++j) { v[j][i] = rand()*2.0 / RAND_MAX - 1; dv[j][i] = 0; } } //BP神經網絡 //函數 computO(i) 負責的是通過BP神經網絡的機制對樣本 i 的輸入,預測其輸出。回想BP神經網絡的基本模型(詳情見 基本模型)對應的公式(1)還有 激活函數對應的公式(2): void computO(int var) { int i, j; double sum, y; for (i = 0; i < Neuron; ++i) { sum = 0; for (j = 0; j < In; ++j) sum += w[i][j] * d_in[var][j]; o[i] = 1 / (1 + exp(-1 * sum)); } for (i = 0; i < Out; ++i) { sum = 0; for (j = 0; j < Neuron; ++j) sum += v[i][j] * o[j]; OutputData[i] = sum; } } //函數 backUpdate(i) 負責的是將預測輸出的結果與樣本真實的結果進行比對,然后對神經網絡中涉及到的權重進行修正,也這是BP神經網絡實現的關鍵所在。如何求到對于 w[Neuron][In] 和 v[Out][Neuron] 進行修正的誤差量便是關鍵所在!誤差修正量的求法在基本模型一文中數學分析部分有解答,具體問題具體分析,落實到我們設計的這個BP神經網絡上來說,需要得到的是對w[Neuron][In] 和 v[Out][Neuron] 兩個數據進行修正誤差,誤差量用數據結構 dw[Neuron][In] 和 dv[Out][Neuron] 來進行存儲 void backUpdate(int var) { int i, j; double t; for (i = 0; i < Neuron; ++i) { t = 0; for (j = 0; j < Out; ++j) { t += (OutputData[j] - d_out[var][j])*v[j][i]; dv[j][i] = A * dv[j][i] + B * (OutputData[j] - d_out[var][j])*o[i]; v[j][i] -= dv[j][i]; } for (j = 0; j < In; ++j) { dw[i][j] = a * dw[i][j] + b * t*o[i] * (1 - o[i])*d_in[var][j]; w[i][j] -= dw[i][j]; } } } double result(double var1, double var2) { int i, j; double sum, y; var1 = (var1 - Minin[0] + 1) / (Maxin[0] - Minin[0] + 1); var2 = (var2 - Minin[1] + 1) / (Maxin[1] - Minin[1] + 1); for (i = 0; i < Neuron; ++i) { sum = 0; sum = w[i][0] * var1 + w[i][1] * var2; o[i] = 1 / (1 + exp(-1 * sum)); } sum = 0; for (j = 0; j < Neuron; ++j) sum += v[0][j] * o[j]; return sum * (Maxout[0] - Minout[0] + 1) + Minout[0] - 1; } void 
writeNeuron() { FILE *fp1; int i, j; if ((fp1 = fopen("D:\\neuron.txt", "w")) == NULL) { printf("can not open the neuron file\n"); exit(0); } for (i = 0; i < Neuron; ++i) for (j = 0; j < In; ++j) { fprintf(fp1, "%lf ", w[i][j]); } fprintf(fp1, "\n\n\n\n"); for (i = 0; i < Neuron; ++i) for (j = 0; j < Out; ++j) { fprintf(fp1, "%lf ", v[j][i]); } fclose(fp1); } void trainNetwork() //訓練 { int i, c01Count = 0;// c01Count訓練次數 計數 int j; do //do110 { e01ErrorPrecision = 0; for (i = 0; i < Data; ++i) { computO(i); for (j = 0; j < Out; ++j) e01ErrorPrecision += fabs((OutputData[j] - d_out[i][j]) / d_out[i][j]); backUpdate(i); } if (0 == c01Count % Display01Frequency) //1000) //20) //if220 { printf("%d %lf\n", c01Count, e01ErrorPrecision / Data); }//if220 c01Count++; } while (c01Count<TrainC && e01ErrorPrecision / Data> Error001Precision); // 0.05);// 1); // 0.01); //do110 }//訓練-End int main() { std::cout << "Hello World!\n"; writeTest(); readData(); initBPNework(); trainNetwork(); printf("%d*%d=:%lf \n",6,3, result(6, 3)); printf("%f*%d=:%lf \n",3.2, 6, result(3.2, 6)); printf("%f*%d=:%lf \n",4.3,8, result(4.3, 8)); printf("%f*%d=:%lf \n",6.0,8, result(6, 8)); printf("%f*%d=:%lf \n",2.1,7, result(2.1, 7)); printf("%f*%d=:%lf \n",4.3,8, result(4.3, 8)); writeNeuron(); return 0; }// // ``` 再運行一次, 訓練BP網絡學會“乘法”: ``` 訓練次數 誤差 8500 0.081898 9000 0.079318 9500 0.075935 10000 0.071483 10500 0.066695 11000 0.063684 11500 0.062921 12000 0.062295 12500 0.062092 13000 0.061443 13500 0.060544 14000 0.059567 14500 0.058434 15000 0.057286 15500 0.056034 16000 0.054670 16500 0.053297 17000 0.051820 17500 0.050263 檢驗“BP網絡”訓練的成果: 6*3=:16.459228 3.200000*6=:18.756765 4.300000*8=:33.943728 6.000000*8=:47.677582 2.100000*7=:14.412758 4.300000*8=:33.943728 ```
                  <ruby id="bdb3f"></ruby>

                  <p id="bdb3f"><cite id="bdb3f"></cite></p>

                    <p id="bdb3f"><cite id="bdb3f"><th id="bdb3f"></th></cite></p><p id="bdb3f"></p>
                      <p id="bdb3f"><cite id="bdb3f"></cite></p>

                        <pre id="bdb3f"></pre>
                        <pre id="bdb3f"><del id="bdb3f"><thead id="bdb3f"></thead></del></pre>

                        <ruby id="bdb3f"><mark id="bdb3f"></mark></ruby><ruby id="bdb3f"></ruby>
                        <pre id="bdb3f"><pre id="bdb3f"><mark id="bdb3f"></mark></pre></pre><output id="bdb3f"></output><p id="bdb3f"></p><p id="bdb3f"></p>

                        <pre id="bdb3f"><del id="bdb3f"><progress id="bdb3f"></progress></del></pre>

                              <ruby id="bdb3f"></ruby>

                              哎呀哎呀视频在线观看