It is a good idea to encapsulate, in a single function, feeding the signal from one layer to the next together with computing the action potentials and activation levels of the neurons in that next layer.
#include <cassert>
#include <cstddef>
#include <numeric>
#include <vector>
/*
 * NOTE:
 *
 * std::inner_product could be implemented as follows:
 *
 *   Result result = 0;
 *   for (; begin1 != end1; ++begin1, ++begin2)
 *       result += (*begin1) * (*begin2);
 *   return result;
 */
// this function calculates the dot product of two vectors
template <class InputIt1, class InputIt2, class Result = double>
inline Result dot(InputIt1 begin1, InputIt1 end1, InputIt2 begin2) {
    return std::inner_product(
        std::move(begin1), std::move(end1), std::move(begin2),
        static_cast<Result>(0));
}
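As a quick sanity check (not part of the network itself), dot should behave like an ordinary dot product; for { 1, 2, 3 } and { 4, 5, 6 } it yields 1*4 + 2*5 + 3*6 = 32:
std::vector<double> a = { 1, 2, 3 };
std::vector<double> b = { 4, 5, 6 };
double d = dot(a.begin(), a.end(), b.begin()); // d == 32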
// feeds the signal from one layer to the next: computes each
// neuron's action potential (the dot product of the input with
// that neuron's weights) and compares it against the neuron's
// threshold in theta to obtain its binary activation level
void feed_forward(
    const std::vector<int> & input,
    const std::vector<double> & weights,
    const std::vector<double> & theta,
    std::vector<int> & output // must already hold one slot per neuron
) {
    // one pass per neuron in the next layer; the flat weights
    // vector stores the neurons' weight rows back to back
    const size_t passes = weights.size() / input.size();
    std::vector<double> fields(passes, 0);
    auto weights_begin = weights.begin();
    for (size_t i = 0; i < passes; ++i) {
        fields[i] = dot(input.begin(), input.end(), weights_begin);
        weights_begin += input.size();
    }
    for (size_t i = 0; i < fields.size(); ++i)
        output[i] = static_cast<int>(fields[i] >= theta[i]);
}
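To see how the flat weights vector is laid out, consider a hypothetical call with the hidden-layer parameters used below. For the input { 1, 0 } the first neuron's field is 0.15*1 + 0.15*0 = 0.15, which stays below its threshold of 0.2, while the second neuron's field is 0.3*1 + 0.3*0 = 0.3, which exceeds 0.2:
std::vector<int> in = { 1, 0 };
std::vector<double> w = { 0.15, 0.15, 0.3, 0.3 }; // two weight rows of two
std::vector<double> t = { 0.2, 0.2 };
std::vector<int> out(2, 0);
feed_forward(in, w, t, out); // out == { 0, 1 }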
// a hand-crafted multilayer feed-forward network computing XOR
std::vector<int> xor_mfnn(const std::vector<int> & input) {
    assert(input.size() == 2);
    // hidden layer: two neurons, each with one weight per input
    std::vector<double> weights_hidden = { 0.15, 0.15, 0.3, 0.3 };
    std::vector<double> theta_hidden = { 0.2, 0.2 };
    std::vector<int> hidden_output(2, 0);
    // output layer: a single neuron fed by the two hidden neurons
    std::vector<double> weights_output = { -0.3, 0.3 };
    std::vector<double> theta_output = { 0.2 };
    std::vector<int> network_output(1, 0);
    // feed-forward from input layer to hidden layer
    feed_forward(
        input, weights_hidden, theta_hidden, hidden_output);
    // feed-forward from hidden layer to output layer
    feed_forward(
        hidden_output, weights_output, theta_output, network_output);
    return network_output;
}
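A minimal driver (an assumption, not part of the listing above; it only needs <iostream> in addition to the code shown) can confirm that the network reproduces the XOR truth table:
#include <iostream>

int main() {
    // hypothetical driver: exercise all four input combinations
    for (int a = 0; a <= 1; ++a)
        for (int b = 0; b <= 1; ++b) {
            std::vector<int> result = xor_mfnn({ a, b });
            std::cout << a << " XOR " << b << " = " << result[0] << '\n';
        }
    return 0;
}
Running it should print 0, 1, 1, 0 for the inputs (0,0), (0,1), (1,0), and (1,1), which is exactly the XOR truth table.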