Commit c32b1643 by Nazrul_being

Added lab 3

parent 41f3c9b1
@@ -41,6 +41,15 @@ public:
void vectorReLU(std::vector<double>& inputVector, std::vector<double>& outputVector);
void vectorSigmoid(std::vector<double>& inputVector, std::vector<double>& outputVector);
// Print a 2D matrix
void printMatrix(int rows, int cols, const std::vector<std::vector<double>>& matrix);
// Compute cost for logistic regression
double computeCost(int m, const std::vector<std::vector<double>>& yhat, const std::vector<std::vector<double>>& y);
// Normalize a 2D matrix
int normalizeData2D(const std::vector<std::vector<double>>& inputMatrix, std::vector<std::vector<double>>& outputMatrix);
// Save network
void saveNetwork(const std::string& filename, int numOfFeatures, int numOfHiddenNodes, int numOfOutputNodes,
std::vector<std::vector<double>>& inputToHiddenWeights, std::vector<double>& hiddenLayerBias,
...
@@ -2,8 +2,10 @@
#include <cmath>
#include <vector>
#include <fstream>
#include <limits>
#include "../includes/NeuralNetwork.h"
double NeuralNetwork::singleNeuron(double input, double weight)
{
return 0;
@@ -68,10 +70,51 @@ void NeuralNetwork::vectorSigmoid(std::vector<double>& inputVector, std::vector<
return;
}
void NeuralNetwork::printMatrix(int rows, int cols, const std::vector<std::vector<double>>& matrix)
{
return;
}
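// printMatrix above is still a stub in this commit. Below is a minimal sketch of
// one possible row-major implementation; the helper name printMatrixSketch and the
// assumption that the data is laid out as matrix[row][col] are illustrative only.
static void printMatrixSketch(int rows, int cols, const std::vector<std::vector<double>>& matrix)
{
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            std::cout << matrix[i][j] << " ";
        }
        std::cout << "\n";
    }
}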
double NeuralNetwork::computeCost(int m, const std::vector<std::vector<double>>& yhat, const std::vector<std::vector<double>>& y)
{
return 0;
}
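// computeCost above is also a stub. A sketch of the standard binary cross-entropy
// cost for logistic regression, J = -(1/m) * sum_i [ y_i*log(yhat_i) + (1 - y_i)*log(1 - yhat_i) ],
// is shown below. It assumes yhat and y are indexed as [example][output] and that
// yhat values lie strictly in (0, 1); the helper name computeCostSketch is illustrative.
static double computeCostSketch(int m, const std::vector<std::vector<double>>& yhat,
                                const std::vector<std::vector<double>>& y)
{
    double cost = 0.0;
    for (int i = 0; i < m; i++) {
        int outputs = static_cast<int>(yhat[i].size());
        for (int k = 0; k < outputs; k++) {
            cost += y[i][k] * std::log(yhat[i][k])
                  + (1.0 - y[i][k]) * std::log(1.0 - yhat[i][k]);
        }
    }
    return -cost / m;
}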
int NeuralNetwork::normalizeData2D(const std::vector<std::vector<double>>& inputMatrix, std::vector<std::vector<double>>& outputMatrix)
{
// Validate the row count before touching inputMatrix[0]
int rows = inputMatrix.size();
if (rows <= 1) {
std::cerr << "ERROR: At least 2 examples are required. Current dataset length is " << rows << std::endl;
return 1;
} else {
int cols = inputMatrix[0].size();
for (int j = 0; j < cols; j++) {
// Initialize with extreme values from <limits> instead of magic sentinels
double max = std::numeric_limits<double>::lowest();
double min = std::numeric_limits<double>::max();
// Find MIN and MAX values in the given column
for (int i = 0; i < rows; i++) {
if (inputMatrix[i][j] > max) {
max = inputMatrix[i][j];
}
if (inputMatrix[i][j] < min) {
min = inputMatrix[i][j];
}
}
// Normalization: guard against a constant column (max == min) to avoid dividing by zero
double range = max - min;
for (int i = 0; i < rows; i++) {
outputMatrix[i][j] = (range == 0.0) ? 0.0 : (inputMatrix[i][j] - min) / range;
}
}
}
return 0;
}
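// Usage sketch for normalizeData2D: the function writes into outputMatrix[i][j]
// without resizing it, so the caller is expected to pre-size outputMatrix to the
// input's dimensions. The 3x2 data and the helper name are made up for illustration.
static void normalizeData2DUsageSketch(NeuralNetwork& nn)
{
    std::vector<std::vector<double>> raw = {{1.0, 10.0}, {2.0, 20.0}, {3.0, 30.0}};
    std::vector<std::vector<double>> scaled(raw.size(), std::vector<double>(raw[0].size()));
    nn.normalizeData2D(raw, scaled); // each column is min-max scaled into [0, 1]
}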
void NeuralNetwork::saveNetwork(const std::string& filename, int numOfFeatures, int numOfHiddenNodes, int numOfOutputNodes,
std::vector<std::vector<double>>& inputToHiddenWeights, std::vector<double>& hiddenLayerBias,
std::vector<std::vector<double>>& hiddenToOutputWeights, std::vector<double>& outputLayerBias) {
std::ofstream file(filename);
if (!file.is_open()) {
@@ -113,8 +156,18 @@ void NeuralNetwork::saveNetwork(const std::string& filename, int numOfFeatures,
void NeuralNetwork::loadNetwork(const std::string& filename, int numOfFeatures, int numOfHiddenNodes, int numOfOutputNodes,
std::vector<std::vector<double>>& inputToHiddenWeights, std::vector<double>& hiddenLayerBias,
std::vector<std::vector<double>>& hiddenToOutputWeights, std::vector<double>& outputLayerBias) {
// Clear vectors and resize to the correct dimensions
inputToHiddenWeights.clear();
hiddenLayerBias.clear();
hiddenToOutputWeights.clear();
outputLayerBias.clear();
inputToHiddenWeights.resize(numOfHiddenNodes, std::vector<double>(numOfFeatures));
hiddenLayerBias.resize(numOfHiddenNodes);
hiddenToOutputWeights.resize(numOfOutputNodes, std::vector<double>(numOfHiddenNodes));
outputLayerBias.resize(numOfOutputNodes);
std::ifstream file(filename);
if (!file.is_open()) {
@@ -123,30 +176,58 @@ void NeuralNetwork::loadNetwork(const std::string& filename, int numOfFeatures,
}
std::string temp;
file >> temp >> temp; // Skip "Hidden Layer Weights:"
// Read "Hidden Layer Weights:" line and skip to the next line
std::getline(file, temp);
for (int i = 0; i < numOfHiddenNodes; i++) {
for (int j = 0; j < numOfFeatures; j++) {
if (!(file >> inputToHiddenWeights[i][j])) {
std::cerr << "Error reading input-to-hidden weight at (" << i << ", " << j << ")\n";
return;
}
}
}
// Flush remaining newline characters
file.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
file >> temp >> temp; // Skip "Hidden Layer Biases:"
// Read "Hidden Layer Biases:" line and skip to the next line
std::getline(file, temp);
for (int i = 0; i < numOfHiddenNodes; i++) {
if (!(file >> hiddenLayerBias[i])) {
std::cerr << "Error reading hidden layer bias at index " << i << "\n";
return;
}
}
// Flush remaining newline characters
file.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
file >> temp >> temp; // Skip "Output Layer Weights:"
// Read "Output Layer Weights:" line and skip to the next line
std::getline(file, temp);
for (int i = 0; i < numOfOutputNodes; i++) {
for (int j = 0; j < numOfHiddenNodes; j++) {
if (!(file >> hiddenToOutputWeights[i][j])) {
std::cerr << "Error reading hidden-to-output weight at (" << i << ", " << j << ")\n";
return;
}
}
}
// Flush remaining newline characters
file.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
file >> temp >> temp; // Skip "Output Layer Biases:"
// Read "Output Layer Biases:" line and skip to the next line
std::getline(file, temp);
for (int i = 0; i < numOfOutputNodes; i++) {
if (!(file >> outputLayerBias[i])) {
std::cerr << "Error reading output layer bias at index " << i << "\n";
return;
}
}
if (file.fail()) {
std::cerr << "File stream encountered an error.\n";
return;
}
file.close();
std::cout << "Network loaded from file: " << filename << "\n";
}
\ No newline at end of file
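// Round-trip usage sketch for saveNetwork/loadNetwork, assuming a network with
// 2 features, 2 hidden nodes and 1 output node. The weight layouts follow the
// resize calls in loadNetwork ([hidden][feature] and [output][hidden]); the
// values and the file name "network.txt" are made up for illustration.
static void saveLoadRoundTripSketch()
{
    NeuralNetwork nn;
    std::vector<std::vector<double>> inputToHidden = {{0.1, 0.2}, {0.3, 0.4}};
    std::vector<double> hiddenBias = {0.0, 0.0};
    std::vector<std::vector<double>> hiddenToOutput = {{0.5, 0.6}};
    std::vector<double> outputBias = {0.0};

    nn.saveNetwork("network.txt", 2, 2, 1, inputToHidden, hiddenBias, hiddenToOutput, outputBias);

    // loadNetwork clears and resizes its output vectors itself, so empty vectors are fine here.
    std::vector<std::vector<double>> loadedInputToHidden;
    std::vector<double> loadedHiddenBias;
    std::vector<std::vector<double>> loadedHiddenToOutput;
    std::vector<double> loadedOutputBias;
    nn.loadNetwork("network.txt", 2, 2, 1, loadedInputToHidden, loadedHiddenBias,
                   loadedHiddenToOutput, loadedOutputBias);
}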