```
// neural21ConApp1.cpp : This file contains the 'main' function. Program execution begins and ends there.
//
#include <iostream>
#include <cmath>   // exp()
#include <cstdlib> // rand(), RAND_MAX
float x[4][2] = { {-2,-1},{25,6},{17,4} ,{-15,-6} };
float all_y_trues[4] = { 1,0,0,1 };
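// Dataset presumably taken from Victor Zhou's introductory tutorial:
// each row is (weight - 135, height - 66); label 1 = female, 0 = male.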
//
double ww[3][2]; // ww[0], ww[1]: hidden-neuron weights; ww[2]: output-neuron weights
double b[3];     // b[0], b[1]: hidden biases; b[2]: output bias
//
double sigmoid01(double x)
{
return 1.0 / (1.0 + exp(-x));
}
double deriv_sigmoid(double x)
{
//# Derivative of sigmoid : f'(x) = f(x) * (1 - f(x))
double fx = sigmoid01(x);
return fx * (1 - fx);
}
float learn_rate = 0.1f;
int epochs = 1000; // number of times to loop through the entire dataset
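// Network: 2 inputs -> 2 sigmoid hidden neurons (h1, h2) -> 1 sigmoid output neuron (o1).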
double feedforward(const float x[])
{
//# x has 2 elements: the two input features.
double h1 = sigmoid01(ww[0][0] * x[0] + ww[0][1] * x[1] + b[0]);
double h2 = sigmoid01(ww[1][0] * x[0] + ww[1][1] * x[1] + b[1]);
double o1 = sigmoid01(ww[2][0] * h1 + ww[2][1] * h2 + b[2]);
return o1;
}
int main()
{
std::cout << "Hello World!\n";
double sum_h[2];
double h[2];
// Initialize all weights and biases with random values in [0, 1).
for (int i = 0; i < 3; ++i) {
for (int j = 0; j < 2; ++j) { ww[i][j] = rand() / (double)RAND_MAX; }
}
for (int i = 0; i < 3; ++i) { b[i] = rand() / (double)RAND_MAX; }
//
for (int epoch = 0; epoch < epochs; ++epoch) {// epoch loop
for (int kk = 0; kk < 4; ++kk) {// loop over the 4 samples: stochastic gradient descent, one sample at a time
//
for (int i = 0; i < 2; ++i) {// hidden neurons h1, h2
sum_h[i] = 0;
for (int j = 0; j < 2; ++j) {
sum_h[i] += ww[i][j] * x[kk][j];
}
sum_h[i] = sum_h[i] + b[i];
h[i] = sigmoid01(sum_h[i]);
}// end hidden-neuron loop
double sum_o1 = ww[2][0] * h[0] + ww[2][1] * h[1] + b[2];
double o1 = sigmoid01(sum_o1);
double y_pred = o1;
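// Squared-error loss per sample: L = (y_true - y_pred)^2, so dL/dy_pred = -2 * (y_true - y_pred).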
double d_L_d_ypred = -2 * (all_y_trues[kk] - y_pred);
//
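// Partial derivatives via the chain rule, e.g. dL/dw5 = dL/dy_pred * dy_pred/dw5, with w5 = ww[2][0].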
//# Neuron o1
double d_ypred_d_w5 = h[0] * deriv_sigmoid(sum_o1);
double d_ypred_d_w6 = h[1] * deriv_sigmoid(sum_o1);
double d_ypred_d_b3 = deriv_sigmoid(sum_o1);
double d_ypred_d_h1 = ww[2][0] * deriv_sigmoid(sum_o1);
double d_ypred_d_h2 = ww[2][1] * deriv_sigmoid(sum_o1);
//# Neuron h1
double d_h1_d_w1 = x[kk][0] * deriv_sigmoid(sum_h[0]);
double d_h1_d_w2 = x[kk][1] * deriv_sigmoid(sum_h[0]);
double d_h1_d_b1 = deriv_sigmoid(sum_h[0]);
//# Neuron h2
double d_h2_d_w3 = x[kk][0] * deriv_sigmoid(sum_h[1]);
double d_h2_d_w4 = x[kk][1] * deriv_sigmoid(sum_h[1]);
double d_h2_d_b2 = deriv_sigmoid(sum_h[1]);
//# --- Update weights and biases: backpropagation complete (parameter adjustment)
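//# Gradient-descent step: each parameter moves against its gradient, p -= learn_rate * dL/dp.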
//# Neuron h1
ww[0][0] -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w1;
ww[0][1] -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_w2;
b[0] -= learn_rate * d_L_d_ypred * d_ypred_d_h1 * d_h1_d_b1;
//# Neuron h2
ww[1][0] -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w3;
ww[1][1] -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_w4;
b[1] -= learn_rate * d_L_d_ypred * d_ypred_d_h2 * d_h2_d_b2;
//# Neuron o1
ww[2][0] -= learn_rate * d_L_d_ypred * d_ypred_d_w5;
ww[2][1] -= learn_rate * d_L_d_ypred * d_ypred_d_w6;
b[2] -= learn_rate * d_L_d_ypred * d_ypred_d_b3;
}// end sample loop
if (0 == (epoch % 10)) {
// Report the mean-squared-error loss over the whole dataset every 10 epochs.
double loss = 0;
for (int k = 0; k < 4; ++k) {
double y_pred = feedforward(x[k]);
loss += (all_y_trues[k] - y_pred) * (all_y_trues[k] - y_pred);
}
loss /= 4;
std::cout << "Epoch " << epoch << " loss: " << loss << "\n";
// The numpy original plots here: y_preds = np.apply_along_axis(self.feedforward, 1, data)
}
} // end epoch loop
return 0;
}
```
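Backpropagation above depends entirely on deriv_sigmoid being the true derivative of sigmoid01, so it is worth checking in isolation. Below is a minimal, self-contained sketch (not part of the original program) that compares the analytic derivative against a central finite difference; the step size eps and the test points are arbitrary choices for illustration.

```
// Standalone sanity check: the analytic derivative f'(x) = f(x) * (1 - f(x))
// should match a central finite difference of sigmoid01 to many decimal places.
#include <cmath>
#include <cstdio>

double sigmoid01(double x) { return 1.0 / (1.0 + exp(-x)); }
double deriv_sigmoid(double x) { double fx = sigmoid01(x); return fx * (1 - fx); }

int main()
{
double eps = 1e-6; // finite-difference step (arbitrary)
for (double v = -2.0; v <= 2.0; v += 1.0) {
double numeric = (sigmoid01(v + eps) - sigmoid01(v - eps)) / (2 * eps);
printf("x=%+.1f analytic=%.6f numeric=%.6f\n", v, deriv_sigmoid(v), numeric);
}
return 0;
}
```

If the two columns agree, the gradient formulas driving the weight updates in the training loop can be trusted.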
- BP neural networks through to a C++ implementation, etc. -- a machine-learning "crash course"
- Training a BP (neural) network to learn "multiplication" -- training an anti-aircraft gun with "mosquitoes"
- An ANN computing XOR & feedforward neural networks 20200302
- Representing a neural network (ANN) 20200312
- The backpropagation (BP) algorithm for a simple neural network
- Newton's method for finding a local optimum (solution) 20200310
- Installing numpy, pip3, etc. on ubuntu
- Implementing a neural network from scratch -- numpy edition 01
- Improving and translating VictorZhou's (Princeton) celebrated neural-network article 20200311
- A C-language implementation of Princeton VictorZhou's neural network 210301
- A C-language implementation of XOR with a BP network 202102
- A BP network computing XOR -- inputs entered automatically (hard-coded) 20210202
- Running tensorFlow2.0 for Mnist on python3.6, one pitfall per step 20210210
- Handwritten-digit recognition with numpy -- recognizing directly with a BP network 210201