From da1113ff50127a71ee166995335911ec65f24fcd Mon Sep 17 00:00:00 2001
From: andreimesquita
Date: Mon, 3 Aug 2020 16:41:31 -0300
Subject: [PATCH 01/20] Adding library and export option

---
 CMakeLists.txt      |  3 +++
 Export/Exporter.cpp | 20 ++++++++++++++++++++
 2 files changed, 23 insertions(+)
 create mode 100644 Export/Exporter.cpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5d8a7ee..d56ce77 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -6,3 +6,6 @@ add_subdirectory(AdversarialSearch/Minimax)
 add_subdirectory(NeuralNetwork/Common)
 add_subdirectory(NeuralNetwork/Perceptron)
 add_subdirectory(NeuralNetwork/MLP)
+add_definitions(-D_EXPORT_DLL_)
+set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/dll)
+add_library(AIEngine SHARED Export/Exporter.cpp)
diff --git a/Export/Exporter.cpp b/Export/Exporter.cpp
new file mode 100644
index 0000000..514282f
--- /dev/null
+++ b/Export/Exporter.cpp
@@ -0,0 +1,20 @@
+#ifndef AIENGINE_EXPORTER_C
+#define AIENGINE_EXPORTER_C
+
+#ifdef _EXPORT_DLL_
+#define API __declspec(dllexport)
+#else
+#define API __declspec(dllimport)
+#endif // _EXPORT_DLL_
+
+#include "../AdversarialSearch/Minimax/BitMath.h"
+#include "../AdversarialSearch/Minimax/TicTacToeMinimax.h"
+#include "../NeuralNetwork/Common/Random.h"
+#include "../NeuralNetwork/Common/Matrix.h"
+#include "../NeuralNetwork/Common/ActivationFunctions.h"
+#include "../NeuralNetwork/Perceptron/Neuron.h"
+#include "../NeuralNetwork/Perceptron/Perceptron.h"
+#include "../NeuralNetwork/MLP/Layer.h"
+#include "../NeuralNetwork/MLP/MLP.h"
+
+#endif //AIENGINE_EXPORTER_C

From 9548665d42812f7c0568706b5684c766d7a2f101 Mon Sep 17 00:00:00 2001
From: andreimesquita
Date: Mon, 17 Aug 2020 14:24:17 -0300
Subject: [PATCH 02/20] Using pragma once in header files

---
 AdversarialSearch/Minimax/MinimaxBenchmarker.h | 4 +---
 AdversarialSearch/Minimax/TicTacToeMinimax.h   | 4 +---
 Export/Exporter.cpp                            | 5 -----
 NeuralNetwork/Common/ActivationFunctions.h     | 6 ++----
 NeuralNetwork/Common/Matrix.h                  | 5 ++---
 NeuralNetwork/Common/Random.h                  | 5 ++---
 NeuralNetwork/MLP/Layer.h                      | 5 +----
 NeuralNetwork/MLP/MLP.h                        | 6 ++----
 NeuralNetwork/Perceptron/Neuron.h              | 5 +----
 NeuralNetwork/Perceptron/Perceptron.h          | 5 ++---
 10 files changed, 14 insertions(+), 36 deletions(-)

diff --git a/AdversarialSearch/Minimax/MinimaxBenchmarker.h b/AdversarialSearch/Minimax/MinimaxBenchmarker.h
index 2a816d6..2e8cda8 100644
--- a/AdversarialSearch/Minimax/MinimaxBenchmarker.h
+++ b/AdversarialSearch/Minimax/MinimaxBenchmarker.h
@@ -1,5 +1,4 @@
-#ifndef _MINIMAX_BENCHMARKER_H
-#define _MINIMAX_BENCHMARKER_H
+#pragma once
 
 #include
 #include
@@ -21,4 +20,3 @@ class MinimaxBenchmarker
     void printBoard(unsigned int board);
     void printState(int state);
 };
-#endif
diff --git a/AdversarialSearch/Minimax/TicTacToeMinimax.h b/AdversarialSearch/Minimax/TicTacToeMinimax.h
index df387f4..11de0d4 100644
--- a/AdversarialSearch/Minimax/TicTacToeMinimax.h
+++ b/AdversarialSearch/Minimax/TicTacToeMinimax.h
@@ -1,5 +1,4 @@
-#ifndef _TIC_TAC_TOE_MINIMAX_H
-#define _TIC_TAC_TOE_MINIMAX_H
+#pragma once
 
 #include
 #include
@@ -27,4 +26,3 @@ class TicTacToeMinimax
     unsigned int evaluate(unsigned int board, bool isMaxTurn);
     std::vector evaluateAll(unsigned int board, bool isMaxTurn);
 };
-#endif
diff --git a/Export/Exporter.cpp b/Export/Exporter.cpp
index 514282f..85104d4 100644
--- a/Export/Exporter.cpp
+++ b/Export/Exporter.cpp
@@ -1,6 +1,3 @@
-#ifndef AIENGINE_EXPORTER_C
-#define AIENGINE_EXPORTER_C
-
 #ifdef _EXPORT_DLL_
 #define API __declspec(dllexport)
 #else
@@ -16,5 +13,3 @@ #include
 #include "../NeuralNetwork/Perceptron/Perceptron.h"
 #include "../NeuralNetwork/MLP/Layer.h"
 #include "../NeuralNetwork/MLP/MLP.h"
-
-#endif //AIENGINE_EXPORTER_C
diff --git a/NeuralNetwork/Common/ActivationFunctions.h b/NeuralNetwork/Common/ActivationFunctions.h
index 689484c..4a1ee77 100644
--- a/NeuralNetwork/Common/ActivationFunctions.h
+++ b/NeuralNetwork/Common/ActivationFunctions.h
@@ -1,5 +1,5 @@
-#ifndef AIENGINE_ACTIVATIONFUNCTIONS_H
-#define AIENGINE_ACTIVATIONFUNCTIONS_H
+#pragma once
+
 #include
 
 float sigmoid(float value);
@@ -7,5 +7,3 @@ float sign(float value);
 float zeroOrOne(float value);
 float noActivation(float value);
 void setZeroOrOneThreshold(float threshold);
-
-#endif //AIENGINE_ACTIVATIONFUNCTIONS_H
diff --git a/NeuralNetwork/Common/Matrix.h b/NeuralNetwork/Common/Matrix.h
index c9917e3..fd518b8 100644
--- a/NeuralNetwork/Common/Matrix.h
+++ b/NeuralNetwork/Common/Matrix.h
@@ -1,5 +1,5 @@
-#ifndef NEURALNETWORK_MATRIX_H
-#define NEURALNETWORK_MATRIX_H
+#pragma once
+
 #include
 #include
 #include
@@ -32,4 +32,3 @@ class Matrix
     static void multiply(Matrix& target, float scalar);
     static void add(Matrix& target, float value);
 };
-#endif //NEURALNETWORK_MATRIX_H
diff --git a/NeuralNetwork/Common/Random.h b/NeuralNetwork/Common/Random.h
index fc7a352..b267e0d 100644
--- a/NeuralNetwork/Common/Random.h
+++ b/NeuralNetwork/Common/Random.h
@@ -1,8 +1,7 @@
-#ifndef AIENGINE_RANDOM_H
-#define AIENGINE_RANDOM_H
+#pragma once
+
 namespace random
 {
     void initRandomSeed();
     float range(float a, float b);
 }
-#endif //AIENGINE_RANDOM_H
diff --git a/NeuralNetwork/MLP/Layer.h b/NeuralNetwork/MLP/Layer.h
index a6875f2..fba14fa 100644
--- a/NeuralNetwork/MLP/Layer.h
+++ b/NeuralNetwork/MLP/Layer.h
@@ -1,5 +1,4 @@
-#ifndef AIENGINE_LAYER_H
-#define AIENGINE_LAYER_H
+#pragma once
 
 #include
 #include "../Perceptron/Perceptron.h"
@@ -19,5 +18,3 @@ class Layer
     void feedforward(const Matrix& inputs);
     Matrix& getOutputs() const;
 };
-
-#endif //AIENGINE_LAYER_H
diff --git a/NeuralNetwork/MLP/MLP.h b/NeuralNetwork/MLP/MLP.h
index 48d8701..f7b2831 100644
--- a/NeuralNetwork/MLP/MLP.h
+++ b/NeuralNetwork/MLP/MLP.h
@@ -1,5 +1,5 @@
-#ifndef AIENGINE_MLP_H
-#define AIENGINE_MLP_H
+#pragma once
+
 #include "Layer.h"
 
 class MultiLayerPerceptron
@@ -22,5 +22,3 @@ class MultiLayerPerceptron
     void recover(std::istream& stream);
     void setOutputActivationFunction(FloatFunction function);
 };
-
-#endif //AIENGINE_MLP_H
diff --git a/NeuralNetwork/Perceptron/Neuron.h b/NeuralNetwork/Perceptron/Neuron.h
index c7afb1f..82f444f 100644
--- a/NeuralNetwork/Perceptron/Neuron.h
+++ b/NeuralNetwork/Perceptron/Neuron.h
@@ -1,5 +1,4 @@
-#ifndef NEURALNETWORK_NEURON_H
-#define NEURALNETWORK_NEURON_H
+#pragma once
 
 #include "../Common/Matrix.h"
 
@@ -18,5 +17,3 @@ class Neuron
     float getBias() const;
     void setBias(float bias);
 };
-
-#endif //NEURALNETWORK_NEURON_H
diff --git a/NeuralNetwork/Perceptron/Perceptron.h b/NeuralNetwork/Perceptron/Perceptron.h
index 40c84cc..7bc70f6 100644
--- a/NeuralNetwork/Perceptron/Perceptron.h
+++ b/NeuralNetwork/Perceptron/Perceptron.h
@@ -1,5 +1,5 @@
-#ifndef NEURALNETWORK_PERCEPTRON_H
-#define NEURALNETWORK_PERCEPTRON_H
+#pragma once
+
 #include "Neuron.h"
 #include "../Common/ActivationFunctions.h"
 
@@ -21,4 +21,3 @@ class Perceptron
     void train(const Matrix& inputs, const float output);
     Neuron& getNeuron();
 };
-#endif //NEURALNETWORK_PERCEPTRON_H
From 3a01849d5825ffbf1d14e892e8ed3cdee1db7329 Mon Sep 17 00:00:00 2001
From: andreimesquita
Date: Sat, 28 Nov 2020 22:26:03 -0300
Subject: [PATCH 03/20] Implementing simple methods in the export API (working example)

---
 .gitignore            |  1 +
 CMakeLists.txt        |  4 +---
 Export/CMakeLists.txt |  3 +++
 Export/Exporter.cpp   | 23 +++++++++++++----------
 4 files changed, 18 insertions(+), 13 deletions(-)
 create mode 100644 Export/CMakeLists.txt

diff --git a/.gitignore b/.gitignore
index f28f2b5..7f9110d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,4 @@ build
 *.asta.lock
 *.jude
 *.idea
+cmake-build-release
\ No newline at end of file
diff --git a/CMakeLists.txt b/CMakeLists.txt
index d56ce77..78f8a82 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -6,6 +6,4 @@ add_subdirectory(AdversarialSearch/Minimax)
 add_subdirectory(NeuralNetwork/Common)
 add_subdirectory(NeuralNetwork/Perceptron)
 add_subdirectory(NeuralNetwork/MLP)
-add_definitions(-D_EXPORT_DLL_)
-set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/dll)
-add_library(AIEngine SHARED Export/Exporter.cpp)
+add_subdirectory(Export)
diff --git a/Export/CMakeLists.txt b/Export/CMakeLists.txt
new file mode 100644
index 0000000..d6bb5e3
--- /dev/null
+++ b/Export/CMakeLists.txt
@@ -0,0 +1,3 @@
+add_definitions(-D_EXPORT_DLL_)
+set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/dll)
+add_library(DLL SHARED Exporter.cpp)
diff --git a/Export/Exporter.cpp b/Export/Exporter.cpp
index 85104d4..32583f5 100644
--- a/Export/Exporter.cpp
+++ b/Export/Exporter.cpp
@@ -2,14 +2,17 @@
 #define API __declspec(dllexport)
 #else
 #define API __declspec(dllimport)
-#endif // _EXPORT_DLL_
+#endif
 
-#include "../AdversarialSearch/Minimax/BitMath.h"
-#include "../AdversarialSearch/Minimax/TicTacToeMinimax.h"
-#include "../NeuralNetwork/Common/Random.h"
-#include "../NeuralNetwork/Common/Matrix.h"
-#include "../NeuralNetwork/Common/ActivationFunctions.h"
-#include "../NeuralNetwork/Perceptron/Neuron.h"
-#include "../NeuralNetwork/Perceptron/Perceptron.h"
-#include "../NeuralNetwork/MLP/Layer.h"
-#include "../NeuralNetwork/MLP/MLP.h"
+extern "C"
+{
+API int sum(int lhs, int rhs)
+{
+    return lhs + rhs;
+}
+
+API int getOne()
+{
+    return 1;
+}
+}

From 17eb885e90cad0b5292d1841d661162b1fb14a62 Mon Sep 17 00:00:00 2001
From: andreimesquita
Date: Sat, 28 Nov 2020 22:34:44 -0300
Subject: [PATCH 04/20] Adding forgotten spacings

---
 Export/Exporter.cpp | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/Export/Exporter.cpp b/Export/Exporter.cpp
index 32583f5..29b492c 100644
--- a/Export/Exporter.cpp
+++ b/Export/Exporter.cpp
@@ -6,13 +6,13 @@
 
 extern "C"
 {
-API int sum(int lhs, int rhs)
-{
-    return lhs + rhs;
-}
+    API int sum(int lhs, int rhs)
+    {
+        return lhs + rhs;
+    }
 
-API int getOne()
-{
-    return 1;
-}
+    API int getOne()
+    {
+        return 1;
+    }
 }
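The DLL built by the two patches above exports sum and getOne with C linkage, so the symbols keep their unmangled names and can be resolved at runtime. A minimal POSIX consumer sketch; the library file name is an assumption derived from the target name DLL, and on Windows LoadLibrary/GetProcAddress play the same role as dlopen/dlsym (link with -ldl on older glibc):

    #include <dlfcn.h>
    #include <cstdio>

    int main()
    {
        // hypothetical path; CMake places the library under <build>/dll
        void* handle = dlopen("./libDLL.so", RTLD_LAZY);
        if (!handle) return 1;

        // C linkage keeps the symbol name literally "sum"
        auto sum = reinterpret_cast<int (*)(int, int)>(dlsym(handle, "sum"));
        if (sum) std::printf("sum(2, 3) = %d\n", sum(2, 3));

        dlclose(handle);
        return 0;
    }
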
From dea42149e267e54bb02ebc7d6e76029c62521632 Mon Sep 17 00:00:00 2001
From: AndreiSchuch
Date: Mon, 24 Jan 2022 15:41:53 -0300
Subject: [PATCH 05/20] Creating a MathLibrary to be used as an example when exporting shared libraries from this project

---
 Export/CMakeLists.txt  | 10 ++++++++--
 Export/Exporter.cpp    | 18 ------------------
 Export/MathLibrary.cpp | 16 ++++++++++++++++
 Export/MathLibrary.h   | 12 ++++++++++++
 4 files changed, 36 insertions(+), 20 deletions(-)
 delete mode 100644 Export/Exporter.cpp
 create mode 100644 Export/MathLibrary.cpp
 create mode 100644 Export/MathLibrary.h

diff --git a/Export/CMakeLists.txt b/Export/CMakeLists.txt
index d6bb5e3..f5561df 100644
--- a/Export/CMakeLists.txt
+++ b/Export/CMakeLists.txt
@@ -1,3 +1,9 @@
-add_definitions(-D_EXPORT_DLL_)
+add_definitions(-D MATHLIBRARY_EXPORTS)
 set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/dll)
-add_library(DLL SHARED Exporter.cpp)
+
+set(FILES
+    MathLibrary.cpp
+    )
+add_executable(MathLibrary ${FILES})
+
+add_library(MathLibrary_DLL SHARED MathLibrary.cpp)
diff --git a/Export/Exporter.cpp b/Export/Exporter.cpp
deleted file mode 100644
index 29b492c..0000000
--- a/Export/Exporter.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifdef _EXPORT_DLL_
-#define API __declspec(dllexport)
-#else
-#define API __declspec(dllimport)
-#endif
-
-extern "C"
-{
-    API int sum(int lhs, int rhs)
-    {
-        return lhs + rhs;
-    }
-
-    API int getOne()
-    {
-        return 1;
-    }
-}
diff --git a/Export/MathLibrary.cpp b/Export/MathLibrary.cpp
new file mode 100644
index 0000000..aac091f
--- /dev/null
+++ b/Export/MathLibrary.cpp
@@ -0,0 +1,16 @@
+#include "MathLibrary.h"
+
+int sum(int lhs, int rhs)
+{
+    return lhs + rhs;
+}
+
+int getOne()
+{
+    return 1;
+}
+
+int main(int argc, char *argv[])
+{
+    return 0;
+}
\ No newline at end of file
diff --git a/Export/MathLibrary.h b/Export/MathLibrary.h
new file mode 100644
index 0000000..a0d5bd9
--- /dev/null
+++ b/Export/MathLibrary.h
@@ -0,0 +1,12 @@
+// MathLibrary.h - Contains declarations of math functions
+#pragma once
+
+#ifdef MATHLIBRARY_EXPORTS
+#define MATHLIBRARY_API __declspec(dllexport)
+#else
+#define MATHLIBRARY_API __declspec(dllimport)
+#endif
+
+extern "C" MATHLIBRARY_API int sum(int lhs, int rhs);
+
+extern "C" MATHLIBRARY_API int getOne();

From 3a3a746b51482ffa92035081f0659e46be325b6d Mon Sep 17 00:00:00 2001
From: AndreiSchuch
Date: Fri, 4 Feb 2022 18:51:21 -0300
Subject: [PATCH 06/20] Updating LIBRARY_OUTPUT_PATH to be in UPPERCASE

---
 Export/CMakeLists.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Export/CMakeLists.txt b/Export/CMakeLists.txt
index f5561df..09f76a5 100644
--- a/Export/CMakeLists.txt
+++ b/Export/CMakeLists.txt
@@ -1,5 +1,5 @@
 add_definitions(-D MATHLIBRARY_EXPORTS)
-set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/dll)
+set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/DLL)
 
 set(FILES
     MathLibrary.cpp
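The MathLibrary.h introduced above is designed to be shared between the library and its clients: when MATHLIBRARY_EXPORTS is defined (only while building the DLL, via the CMake add_definitions line), the macro expands to dllexport; any consumer that merely includes the header gets dllimport. A minimal consumer sketch, assuming the program links against the import library produced for MathLibrary:

    // consumer.cpp -- hypothetical client of the DLL above, not part of this series
    #include "MathLibrary.h" // MATHLIBRARY_EXPORTS is NOT defined here -> dllimport
    #include <iostream>

    int main()
    {
        std::cout << "sum(2, 3) = " << sum(2, 3) << '\n';
        std::cout << "getOne()  = " << getOne() << '\n';
        return 0;
    }
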
From 0bb268f34bdac662ec1b36367a99be7fa8b36698 Mon Sep 17 00:00:00 2001
From: AndreiSchuch
Date: Mon, 7 Feb 2022 16:16:21 -0300
Subject: [PATCH 07/20] Enhancing code structure

---
 .../Minimax/{ => Tests}/CMakeLists.txt         |  4 ++--
 .../{Main.cpp => Tests/MinimaxTestMain.cpp}    | 12 ++----------
 .../Minimax/{ => Tests}/MinimaxTester.cpp      | 11 +++++++++++
 .../Minimax/{ => Tests}/MinimaxTester.h        | 18 +++---------------
 AdversarialSearch/Minimax/TicTacToeMinimax.h   |  2 +-
 CMakeLists.txt                                 | 15 ++++++++++++---
 Examples/MathLibrary/CMakeLists.txt            | 15 +++++++++++++++
 .../MathLibrary}/MathLibrary.cpp               |  2 +-
 {Export => Examples/MathLibrary}/MathLibrary.h |  0
 Export/CMakeLists.txt                          |  9 ---------
 .../Minimax => Miscellaneous}/BitMath.h        |  0
 Miscellaneous/CMakeLists.txt                   |  5 -----
 Miscellaneous/Tests/CMakeLists.txt             |  5 +++++
 .../MatrixTestMain.cpp}                        |  2 +-
 NeuralNetwork/Perceptron/Perceptron.cpp        |  2 +-
 NeuralNetwork/Perceptron/Perceptron.h          |  6 +++---
 16 files changed, 57 insertions(+), 51 deletions(-)
 rename AdversarialSearch/Minimax/{ => Tests}/CMakeLists.txt (54%)
 rename AdversarialSearch/Minimax/{Main.cpp => Tests/MinimaxTestMain.cpp} (50%)
 rename AdversarialSearch/Minimax/{ => Tests}/MinimaxTester.cpp (95%)
 rename AdversarialSearch/Minimax/{ => Tests}/MinimaxTester.h (56%)
 create mode 100644 Examples/MathLibrary/CMakeLists.txt
 rename {Export => Examples/MathLibrary}/MathLibrary.cpp (98%)
 rename {Export => Examples/MathLibrary}/MathLibrary.h (100%)
 delete mode 100644 Export/CMakeLists.txt
 rename {AdversarialSearch/Minimax => Miscellaneous}/BitMath.h (100%)
 delete mode 100644 Miscellaneous/CMakeLists.txt
 create mode 100644 Miscellaneous/Tests/CMakeLists.txt
 rename Miscellaneous/{MatrixTests.cpp => Tests/MatrixTestMain.cpp} (98%)

diff --git a/AdversarialSearch/Minimax/CMakeLists.txt b/AdversarialSearch/Minimax/Tests/CMakeLists.txt
similarity index 54%
rename from AdversarialSearch/Minimax/CMakeLists.txt
rename to AdversarialSearch/Minimax/Tests/CMakeLists.txt
index ad474b9..058fbdc 100644
--- a/AdversarialSearch/Minimax/CMakeLists.txt
+++ b/AdversarialSearch/Minimax/Tests/CMakeLists.txt
@@ -1,6 +1,6 @@
 set(FILES
-    TicTacToeMinimax.cpp
+    ../TicTacToeMinimax.cpp
     MinimaxTester.cpp
-    Main.cpp
+    MinimaxTestMain.cpp
 )
 add_executable(Minimax ${FILES})
diff --git a/AdversarialSearch/Minimax/Main.cpp b/AdversarialSearch/Minimax/Tests/MinimaxTestMain.cpp
similarity index 50%
rename from AdversarialSearch/Minimax/Main.cpp
rename to AdversarialSearch/Minimax/Tests/MinimaxTestMain.cpp
index 448fe0b..229db87 100644
--- a/AdversarialSearch/Minimax/Main.cpp
+++ b/AdversarialSearch/Minimax/Tests/MinimaxTestMain.cpp
@@ -1,7 +1,5 @@
 #include "MinimaxTester.h"
 
-#define RUN_MINIMAX_TESTS 0
-
 int main(int argc, char *argv[])
 {
     if (argc > 1)
@@ -11,14 +9,8 @@ int main(int argc, char *argv[])
     }
     else
     {
-        if (RUN_MINIMAX_TESTS)
-        {
-            TicTacToeMinimax::RunMinimaxTests();
-        }
-        else
-        {
-            TicTacToeMinimax::benchmarkMinimaxVsMinimax(0, true);
-        }
+        TicTacToeMinimax::RunMinimaxTests();
+        TicTacToeMinimax::benchmarkMinimaxVsMinimax(0, true);
     }
     return 0;
 }
diff --git a/AdversarialSearch/Minimax/MinimaxTester.cpp b/AdversarialSearch/Minimax/Tests/MinimaxTester.cpp
similarity index 95%
rename from AdversarialSearch/Minimax/MinimaxTester.cpp
rename to AdversarialSearch/Minimax/Tests/MinimaxTester.cpp
index 5386983..c822906 100644
--- a/AdversarialSearch/Minimax/MinimaxTester.cpp
+++ b/AdversarialSearch/Minimax/Tests/MinimaxTester.cpp
@@ -1,5 +1,16 @@
 #include "MinimaxTester.h"
 
+string getStateText(int state)
+{
+    if (state == TicTacToeMinimax::PLAYING)
+        return "Playing";
+    else if (state == TicTacToeMinimax::DRAW)
+        return "Draw";
+    else if (state == TicTacToeMinimax::CROSS_WINS)
+        return "CrossWins";
+    return "CircleWins";
+}
+
 struct MinimaxTestCase
 {
 public:
diff --git a/AdversarialSearch/Minimax/MinimaxTester.h b/AdversarialSearch/Minimax/Tests/MinimaxTester.h
similarity index 56%
rename from AdversarialSearch/Minimax/MinimaxTester.h
rename to AdversarialSearch/Minimax/Tests/MinimaxTester.h
index f1d3e15..bde26c2 100644
--- a/AdversarialSearch/Minimax/MinimaxTester.h
+++ b/AdversarialSearch/Minimax/Tests/MinimaxTester.h
@@ -1,4 +1,7 @@
 #pragma once
+
+#include "../TicTacToeMinimax.h"
+
 #include
 #include
 #include
@@ -7,26 +10,11 @@
 #include
 #include
 #include
-#include "TicTacToeMinimax.h"
 
 using namespace std;
 
 namespace TicTacToeMinimax
 {
-    namespace
-    {
-        string getStateText(int state)
-        {
-            if (state == TicTacToeMinimax::PLAYING)
-                return "Playing";
-            else if (state == TicTacToeMinimax::DRAW)
-                return "Draw";
-            else if (state == TicTacToeMinimax::CROSS_WINS)
-                return "CrossWins";
-            return "CircleWins";
-        }
-    }
-
     void RunMinimaxTests();
     void benchmarkMinimax(int board, bool isMaxTurn);
     void benchmarkMinimaxVsMinimax(int board, bool isMaxTurn);
diff --git a/AdversarialSearch/Minimax/TicTacToeMinimax.h b/AdversarialSearch/Minimax/TicTacToeMinimax.h
index ca78c17..80114ef 100644
--- a/AdversarialSearch/Minimax/TicTacToeMinimax.h
+++ b/AdversarialSearch/Minimax/TicTacToeMinimax.h
@@ -1,6 +1,6 @@
 #pragma once
 
-#include
+#include
 #include
 
 namespace TicTacToeMinimax
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ac09fb4..72e7d5d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,9 +1,18 @@
+# set up cmake version
 cmake_minimum_required(VERSION 3.14.5)
+
+# create the project
 project(AIEngine)
+
+# set project properties
 set(CMAKE_CXX_STANDARD 14)
 set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build)
-add_subdirectory(AdversarialSearch/Minimax)
-add_subdirectory(Miscellaneous)
+
+# include tests from subdirectories
+add_subdirectory(AdversarialSearch/Minimax/Tests)
+add_subdirectory(Miscellaneous/Tests)
 add_subdirectory(NeuralNetwork/Perceptron)
 add_subdirectory(NeuralNetwork/MLP)
-add_subdirectory(Export)
+
+# include examples from subdirectories
+add_subdirectory(Examples/MathLibrary)
diff --git a/Examples/MathLibrary/CMakeLists.txt b/Examples/MathLibrary/CMakeLists.txt
new file mode 100644
index 0000000..25f453c
--- /dev/null
+++ b/Examples/MathLibrary/CMakeLists.txt
@@ -0,0 +1,15 @@
+add_definitions(-D MATHLIBRARY_EXPORTS)
+set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/Libs)
+
+# list of source files
+set(FILES
+    MathLibrary.cpp
+    )
+
+# shared and static libraries built from the same object files
+add_library(MathLibraryShared SHARED ${FILES})
+add_library(MathLibraryStatic STATIC ${FILES})
+
+# rename output name of DLL and LIB files
+set_target_properties(MathLibraryShared PROPERTIES OUTPUT_NAME MathLibrary)
+set_target_properties(MathLibraryStatic PROPERTIES OUTPUT_NAME MathLibrary)
diff --git a/Export/MathLibrary.cpp b/Examples/MathLibrary/MathLibrary.cpp
similarity index 98%
rename from Export/MathLibrary.cpp
rename to Examples/MathLibrary/MathLibrary.cpp
index aac091f..1586af6 100644
--- a/Export/MathLibrary.cpp
+++ b/Examples/MathLibrary/MathLibrary.cpp
@@ -13,4 +13,4 @@ int getOne()
 int main(int argc, char *argv[])
 {
     return 0;
-}
\ No newline at end of file
+}
diff --git a/Export/MathLibrary.h b/Examples/MathLibrary/MathLibrary.h
similarity index 100%
rename from Export/MathLibrary.h
rename to Examples/MathLibrary/MathLibrary.h
diff --git a/Export/CMakeLists.txt b/Export/CMakeLists.txt
deleted file mode 100644
index 09f76a5..0000000
--- a/Export/CMakeLists.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-add_definitions(-D MATHLIBRARY_EXPORTS)
-set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/DLL)
-
-set(FILES
-    MathLibrary.cpp
-    )
-add_executable(MathLibrary ${FILES})
-
-add_library(MathLibrary_DLL SHARED MathLibrary.cpp)
diff --git a/AdversarialSearch/Minimax/BitMath.h b/Miscellaneous/BitMath.h
similarity index 100%
rename from AdversarialSearch/Minimax/BitMath.h
rename to Miscellaneous/BitMath.h
diff --git a/Miscellaneous/CMakeLists.txt b/Miscellaneous/CMakeLists.txt
deleted file mode 100644
index 02f5806..0000000
--- a/Miscellaneous/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-set(FILES
-    Matrix.cpp
-    MatrixTests.cpp
-)
-add_executable(Matrix ${FILES})
diff --git a/Miscellaneous/Tests/CMakeLists.txt b/Miscellaneous/Tests/CMakeLists.txt
new file mode 100644
index 0000000..1a43817
--- /dev/null
+++ b/Miscellaneous/Tests/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(FILES
+    ../Matrix.cpp
+    MatrixTestMain.cpp
+)
+add_executable(Matrix ${FILES})
diff --git a/Miscellaneous/MatrixTests.cpp b/Miscellaneous/Tests/MatrixTestMain.cpp
similarity index 98%
rename from Miscellaneous/MatrixTests.cpp
rename to Miscellaneous/Tests/MatrixTestMain.cpp
index 2482689..885c520 100644
--- a/Miscellaneous/MatrixTests.cpp
+++ b/Miscellaneous/Tests/MatrixTestMain.cpp
@@ -1,4 +1,4 @@
-#include "Matrix.h"
+#include "../Matrix.h"
 #include
 
 using namespace std;
diff --git a/NeuralNetwork/Perceptron/Perceptron.cpp b/NeuralNetwork/Perceptron/Perceptron.cpp
index f70d28b..224f358 100644
--- a/NeuralNetwork/Perceptron/Perceptron.cpp
+++ b/NeuralNetwork/Perceptron/Perceptron.cpp
@@ -1,6 +1,6 @@
 #include "Perceptron.h"
 
-Perceptron::Perceptron(int weights)
+Perceptron::Perceptron(int weights) : _bias(0.0)
 {
     _neuron = std::make_unique(weights);
 }
diff --git a/NeuralNetwork/Perceptron/Perceptron.h b/NeuralNetwork/Perceptron/Perceptron.h
index 5fd25d3..5eaf24d 100644
--- a/NeuralNetwork/Perceptron/Perceptron.h
+++ b/NeuralNetwork/Perceptron/Perceptron.h
@@ -11,9 +11,9 @@ class Perceptron final : public ISerializable
     std::unique_ptr _neuron;
 
 public:
-    Perceptron(int weights);
-    double feedforward(const std::vector inputs);
-    void train(const std::vector inputs, double target, double learningRate);
+    explicit Perceptron(int weights);
+    double feedforward(std::vector inputs);
+    void train(std::vector inputs, double target, double learningRate);
     void setActivationFunction(activationfunction::function activationFunction);
     void randomizeWeights();
     void serialize(std::ostream &stream) const override;
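Besides the structural moves, the patch above also marks Perceptron(int) as explicit and default-initializes _bias. The explicit keyword matters because a one-argument constructor otherwise doubles as an implicit conversion from int. A minimal sketch of the pitfall, using a hypothetical type rather than project code:

    // hypothetical illustration of why 'explicit' is worth adding
    struct Net
    {
        explicit Net(int inputs) { (void)inputs; }
    };

    void consume(const Net&) {}

    int main()
    {
        // consume(3);       // compiles without 'explicit': a silent int -> Net conversion
        consume(Net(3));     // with 'explicit', the conversion must be spelled out
        return 0;
    }
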
From 9fca7b5cd5be4c97827785388134a0285f2f5400 Mon Sep 17 00:00:00 2001
From: AndreiSchuch
Date: Tue, 8 Feb 2022 11:10:24 -0300
Subject: [PATCH 08/20] Fixing DLL exporting on Unix platforms

---
 CMakeLists.txt                                |  4 ++--
 Examples/MathLibrary/MathLibrary.h            | 11 ++++++++---
 NeuralNetwork/MLP/CMakeLists.txt              | 13 -------------
 NeuralNetwork/MLP/Tests/CMakeLists.txt        | 13 +++++++++++++
 NeuralNetwork/MLP/{ => Tests}/MLPMain.cpp     |  2 +-
 NeuralNetwork/Perceptron/CMakeLists.txt       | 12 ------------
 NeuralNetwork/Perceptron/Tests/CMakeLists.txt | 12 ++++++++++++
 .../Perceptron/{ => Tests}/PerceptronMain.cpp |  2 +-
 8 files changed, 37 insertions(+), 32 deletions(-)
 delete mode 100644 NeuralNetwork/MLP/CMakeLists.txt
 create mode 100644 NeuralNetwork/MLP/Tests/CMakeLists.txt
 rename NeuralNetwork/MLP/{ => Tests}/MLPMain.cpp (99%)
 delete mode 100644 NeuralNetwork/Perceptron/CMakeLists.txt
 create mode 100644 NeuralNetwork/Perceptron/Tests/CMakeLists.txt
 rename NeuralNetwork/Perceptron/{ => Tests}/PerceptronMain.cpp (99%)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 72e7d5d..9201c93 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -11,8 +11,8 @@ set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build)
 # include tests from subdirectories
 add_subdirectory(AdversarialSearch/Minimax/Tests)
 add_subdirectory(Miscellaneous/Tests)
-add_subdirectory(NeuralNetwork/Perceptron)
-add_subdirectory(NeuralNetwork/MLP)
+add_subdirectory(NeuralNetwork/Perceptron/Tests)
+add_subdirectory(NeuralNetwork/MLP/Tests)
 
 # include examples from subdirectories
 add_subdirectory(Examples/MathLibrary)
diff --git a/Examples/MathLibrary/MathLibrary.h b/Examples/MathLibrary/MathLibrary.h
index a0d5bd9..1332f1e 100644
--- a/Examples/MathLibrary/MathLibrary.h
+++ b/Examples/MathLibrary/MathLibrary.h
@@ -1,10 +1,15 @@
 // MathLibrary.h - Contains declarations of math functions
 #pragma once
 
-#ifdef MATHLIBRARY_EXPORTS
-#define MATHLIBRARY_API __declspec(dllexport)
+#if defined(_WIN32) || defined(WIN32)
+
+# ifdef MATHLIBRARY_EXPORTS
+# define MATHLIBRARY_API __declspec(dllexport)
+# else
+# define MATHLIBRARY_API __declspec(dllimport)
+# endif
 #else
-#define MATHLIBRARY_API __declspec(dllimport)
+# define MATHLIBRARY_API
 #endif
 
 extern "C" MATHLIBRARY_API int sum(int lhs, int rhs);
diff --git a/NeuralNetwork/MLP/CMakeLists.txt b/NeuralNetwork/MLP/CMakeLists.txt
deleted file mode 100644
index 44ded0b..0000000
--- a/NeuralNetwork/MLP/CMakeLists.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-# uncomment to persist weights from MLP tests
-#add_definitions(-DPERSIST_WEIGHTS)
-
-set(FILES
-    ../../Miscellaneous/Random.cpp
-    ../../Miscellaneous/ISerializable.cpp
-    ../Common/ActivationFunctions.cpp
-    ../Common/Neuron.cpp
-    Layer.cpp
-    MLP.cpp
-    MLPMain.cpp
-)
-add_executable(MultiLayerPerceptron ${FILES})
diff --git a/NeuralNetwork/MLP/Tests/CMakeLists.txt b/NeuralNetwork/MLP/Tests/CMakeLists.txt
new file mode 100644
index 0000000..c28111c
--- /dev/null
+++ b/NeuralNetwork/MLP/Tests/CMakeLists.txt
@@ -0,0 +1,13 @@
+# uncomment to persist weights from MLP tests
+#add_definitions(-DPERSIST_WEIGHTS)
+
+set(FILES
+    ../../../Miscellaneous/Random.cpp
+    ../../../Miscellaneous/ISerializable.cpp
+    ../../Common/ActivationFunctions.cpp
+    ../../Common/Neuron.cpp
+    ../Layer.cpp
+    ../MLP.cpp
+    MLPMain.cpp
+)
+add_executable(MultiLayerPerceptron ${FILES})
diff --git a/NeuralNetwork/MLP/MLPMain.cpp b/NeuralNetwork/MLP/Tests/MLPMain.cpp
similarity index 99%
rename from NeuralNetwork/MLP/MLPMain.cpp
rename to NeuralNetwork/MLP/Tests/MLPMain.cpp
index 3f4a725..1b80a9c 100644
--- a/NeuralNetwork/MLP/MLPMain.cpp
+++ b/NeuralNetwork/MLP/Tests/MLPMain.cpp
@@ -1,4 +1,4 @@
-#include "MLP.h"
+#include "../MLP.h"
 
 #include
 
diff --git a/NeuralNetwork/Perceptron/CMakeLists.txt b/NeuralNetwork/Perceptron/CMakeLists.txt
deleted file mode 100644
index 19de707..0000000
--- a/NeuralNetwork/Perceptron/CMakeLists.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-# uncomment to persist weights from MLP tests
-#add_definitions(-DPERSIST_WEIGHTS)
-
-set(FILES
-    ../../Miscellaneous/Random.cpp
-    ../../Miscellaneous/ISerializable.cpp
-    ../Common/ActivationFunctions.cpp
-    ../Common/Neuron.cpp
-    Perceptron.cpp
-    PerceptronMain.cpp
-)
-add_executable(Perceptron ${FILES})
diff --git a/NeuralNetwork/Perceptron/Tests/CMakeLists.txt b/NeuralNetwork/Perceptron/Tests/CMakeLists.txt
new file mode 100644
index 0000000..dd61cef
--- /dev/null
+++ b/NeuralNetwork/Perceptron/Tests/CMakeLists.txt
@@ -0,0 +1,12 @@
+# uncomment to persist weights from MLP tests
+#add_definitions(-DPERSIST_WEIGHTS)
+
+set(FILES
+    ../../../Miscellaneous/Random.cpp
+    ../../../Miscellaneous/ISerializable.cpp
+    ../../Common/ActivationFunctions.cpp
+    ../../Common/Neuron.cpp
+    ../Perceptron.cpp
+    PerceptronMain.cpp
+)
+add_executable(Perceptron ${FILES})
diff --git a/NeuralNetwork/Perceptron/PerceptronMain.cpp b/NeuralNetwork/Perceptron/Tests/PerceptronMain.cpp
similarity index 99%
rename from NeuralNetwork/Perceptron/PerceptronMain.cpp
rename to NeuralNetwork/Perceptron/Tests/PerceptronMain.cpp
index 711f2ea..67317c6 100644
--- a/NeuralNetwork/Perceptron/PerceptronMain.cpp
+++ b/NeuralNetwork/Perceptron/Tests/PerceptronMain.cpp
@@ -1,4 +1,4 @@
-#include "Perceptron.h"
+#include "../Perceptron.h"
 
 #include
 
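The revised MathLibrary.h guard above works because __declspec is a Microsoft extension: on non-Windows toolchains the macro now expands to nothing, leaving a plain declaration with ordinary external linkage. A condensed, self-contained restatement of the same logic (collapsed to the export branch so it compiles anywhere; this is a sketch, not the project header):

    #if defined(_WIN32) || defined(WIN32)
    #  define MATHLIBRARY_API __declspec(dllexport)   // Windows: building the library
    #else
    #  define MATHLIBRARY_API                         // elsewhere: expands to nothing
    #endif

    extern "C" MATHLIBRARY_API int sum(int lhs, int rhs);
    extern "C" MATHLIBRARY_API int sum(int lhs, int rhs) { return lhs + rhs; }

    int main() { return sum(2, 2) == 4 ? 0 : 1; }
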
From 0d4d90725bed5b7755fe0712709c4ef8fba43061 Mon Sep 17 00:00:00 2001
From: AndreiSchuch
Date: Tue, 8 Feb 2022 12:00:50 -0300
Subject: [PATCH 09/20] Fixing warning due to invalid formatting parameter when using size_t metadata

---
 NeuralNetwork/MLP/Tests/MLPMain.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/NeuralNetwork/MLP/Tests/MLPMain.cpp b/NeuralNetwork/MLP/Tests/MLPMain.cpp
index 1b80a9c..3c99b92 100644
--- a/NeuralNetwork/MLP/Tests/MLPMain.cpp
+++ b/NeuralNetwork/MLP/Tests/MLPMain.cpp
@@ -51,7 +51,7 @@ void ApplySupervisedLearning(MultiLayerPerceptron& mlp,
         trainingIndex++;
         trainingIndex %= trainingInputs.size();
     }
-    printf("The network has been trained in '%ld' iterations.\n", totalIterations);
+    printf("The network has been trained in '%zu' iterations.\n", totalIterations);
 }
 
 void TEST_MLP_XOR_2_2_1()
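The warning fixed above comes from formatting a size_t value with '%ld', which expects long; size_t's width varies by platform, so the match only holds where size_t happens to equal unsigned long. Since C99/C++11, printf has a dedicated length modifier for it. A minimal sketch with hypothetical names mirroring the test code:

    #include <cstdio>
    #include <vector>

    int main()
    {
        std::vector<int> samples{1, 2, 3};
        std::size_t totalIterations = samples.size();
        // '%zu' matches size_t on every platform
        std::printf("trained in '%zu' iterations\n", totalIterations);
        return 0;
    }
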
From f3ebe75b8b71aaf1c2aa967909b99fced45191d6 Mon Sep 17 00:00:00 2001
From: AndreiSchuch
Date: Tue, 8 Feb 2022 12:04:14 -0300
Subject: [PATCH 10/20] Renaming some TestMain files

---
 NeuralNetwork/MLP/Tests/CMakeLists.txt                   | 2 +-
 NeuralNetwork/MLP/Tests/{MLPMain.cpp => MLPTestMain.cpp} | 0
 NeuralNetwork/Perceptron/Tests/CMakeLists.txt            | 2 +-
 .../Tests/{PerceptronMain.cpp => PerceptronTestMain.cpp} | 0
 4 files changed, 2 insertions(+), 2 deletions(-)
 rename NeuralNetwork/MLP/Tests/{MLPMain.cpp => MLPTestMain.cpp} (100%)
 rename NeuralNetwork/Perceptron/Tests/{PerceptronMain.cpp => PerceptronTestMain.cpp} (100%)

diff --git a/NeuralNetwork/MLP/Tests/CMakeLists.txt b/NeuralNetwork/MLP/Tests/CMakeLists.txt
index c28111c..74dab09 100644
--- a/NeuralNetwork/MLP/Tests/CMakeLists.txt
+++ b/NeuralNetwork/MLP/Tests/CMakeLists.txt
@@ -8,6 +8,6 @@ set(FILES
     ../../Common/Neuron.cpp
     ../Layer.cpp
     ../MLP.cpp
-    MLPMain.cpp
+    MLPTestMain.cpp
 )
 add_executable(MultiLayerPerceptron ${FILES})
diff --git a/NeuralNetwork/MLP/Tests/MLPMain.cpp b/NeuralNetwork/MLP/Tests/MLPTestMain.cpp
similarity index 100%
rename from NeuralNetwork/MLP/Tests/MLPMain.cpp
rename to NeuralNetwork/MLP/Tests/MLPTestMain.cpp
diff --git a/NeuralNetwork/Perceptron/Tests/CMakeLists.txt b/NeuralNetwork/Perceptron/Tests/CMakeLists.txt
index dd61cef..24a19fc 100644
--- a/NeuralNetwork/Perceptron/Tests/CMakeLists.txt
+++ b/NeuralNetwork/Perceptron/Tests/CMakeLists.txt
@@ -7,6 +7,6 @@ set(FILES
     ../../Common/ActivationFunctions.cpp
     ../../Common/Neuron.cpp
     ../Perceptron.cpp
-    PerceptronMain.cpp
+    PerceptronTestMain.cpp
 )
 add_executable(Perceptron ${FILES})
diff --git a/NeuralNetwork/Perceptron/Tests/PerceptronMain.cpp b/NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp
similarity index 100%
rename from NeuralNetwork/Perceptron/Tests/PerceptronMain.cpp
rename to NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp

From 77e91a584fbd6a746ea2fee8b1c75e6bc45e6613 Mon Sep 17 00:00:00 2001
From: AndreiSchuch
Date: Tue, 8 Feb 2022 12:18:29 -0300
Subject: [PATCH 11/20] Removing Equals(vector, vector) function

---
 NeuralNetwork/MLP/Tests/MLPTestMain.cpp | 12 +-----------
 1 file changed, 1 insertion(+), 11 deletions(-)

diff --git a/NeuralNetwork/MLP/Tests/MLPTestMain.cpp b/NeuralNetwork/MLP/Tests/MLPTestMain.cpp
index 3c99b92..04d30e9 100644
--- a/NeuralNetwork/MLP/Tests/MLPTestMain.cpp
+++ b/NeuralNetwork/MLP/Tests/MLPTestMain.cpp
@@ -4,16 +4,6 @@
 
 using namespace NeuralNetwork;
 
-bool equals(const std::vector& lhs, const std::vector& rhs)
-{
-    for (int i = 0; i < lhs.size(); ++i)
-    {
-        if (lhs[i] != rhs[i])
-            return false;
-    }
-    return true;
-}
-
 void ApplySupervisedLearning(MultiLayerPerceptron& mlp,
     // training inputs and outputs
    const std::vector>& trainingInputs,
@@ -30,7 +20,7 @@ void ApplySupervisedLearning(MultiLayerPerceptron& mlp,
     const std::vector& expectedOutputs = trainingOutputs[trainingIndex];
     const std::vector outputs = mlp.Feedforward(inputs);
 
-    if (equals(outputs, expectedOutputs))
+    if (outputs == expectedOutputs)
     {
         rightGuesses++;
     }
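The removal above leans on std::vector's built-in operator==, which compares sizes first and then elements in order. That makes it both shorter and safer than the hand-rolled loop it replaces, since the old helper iterated only over the left-hand vector and never checked that the two lengths matched. A small sketch:

    #include <cassert>
    #include <vector>

    int main()
    {
        const std::vector<double> a{1.0, 0.0};
        const std::vector<double> b{1.0, 0.0};
        const std::vector<double> c{1.0};

        assert(a == b);  // same size, same elements
        assert(a != c);  // different sizes compare unequal immediately
        return 0;
    }
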
From ec476e47d4f7e48365b8da5ae46341faf029e1fd Mon Sep 17 00:00:00 2001
From: AndreiSchuch
Date: Tue, 8 Feb 2022 15:04:08 -0300
Subject: [PATCH 12/20] Integrating c++11 random library into RandomUtils

---
 Miscellaneous/Random.cpp                    | 50 ++++++++++++++++---
 Miscellaneous/Random.h                      | 17 ++++---
 NeuralNetwork/Common/Neuron.cpp             |  2 +-
 NeuralNetwork/MLP/Tests/MLPTestMain.cpp     | 10 ++--
 NeuralNetwork/Perceptron/Perceptron.cpp     |  2 +-
 .../Perceptron/Tests/PerceptronTestMain.cpp |  4 +-
 6 files changed, 62 insertions(+), 23 deletions(-)

diff --git a/Miscellaneous/Random.cpp b/Miscellaneous/Random.cpp
index c814a63..38e67ea 100644
--- a/Miscellaneous/Random.cpp
+++ b/Miscellaneous/Random.cpp
@@ -1,15 +1,49 @@
 #include "Random.h"
 
-void RandomUtils::initRandomSeed()
+#include
+#include
+
+std::mt19937& GetEngine()
+{
+    static std::random_device randomDevice;
+    static std::mt19937 engine(randomDevice());
+    return engine;
+}
+
+void RandomUtils::SetRandomSeed()
+{
+    SetSeed(std::time(nullptr));
+}
+
+void RandomUtils::SetSeed(long long seed)
+{
+    GetEngine().seed(seed);
+}
+
+int RandomUtils::Range(int min, int max)
+{
+    std::uniform_int_distribution<> intDistribution(min, max);
+    int value = intDistribution(GetEngine());
+    return value;
+}
+
+double RandomUtils::Range(double min, double max)
 {
-    auto seed = static_cast(time(nullptr));
-    srand(seed);
+    std::uniform_real_distribution<> realDistribution(min, max);
+    double value = realDistribution(GetEngine());
+    return value;
 }
 
-double RandomUtils::range(double min, double max)
+int RandomUtils::Int()
 {
-    double randomValue = ((double) std::rand()) / (double) RAND_MAX;
-    double difference = max - min;
-    double result = randomValue * difference;
-    return min + result;
+    std::uniform_int_distribution<> realDistribution;
+    int value = realDistribution(GetEngine());
+    return value;
+}
+
+double RandomUtils::Double()
+{
+    std::uniform_real_distribution<> realDistribution;
+    double value = realDistribution(GetEngine());
+    return value;
 }
diff --git a/Miscellaneous/Random.h b/Miscellaneous/Random.h
index 65a140e..2eef2ef 100644
--- a/Miscellaneous/Random.h
+++ b/Miscellaneous/Random.h
@@ -1,11 +1,16 @@
 #pragma once
 
-#include
-#include
-#include
-
 namespace RandomUtils
 {
-    void initRandomSeed();
-    double range(double a, double b);
+    // seed
+    void SetRandomSeed();
+    void SetSeed(long long seed);
+
+    // random fixed and floating point values
+    int Int();
+    double Double();
+
+    // range of values
+    int Range(int min, int max);
+    double Range(double min, double max);
 }
diff --git a/NeuralNetwork/Common/Neuron.cpp b/NeuralNetwork/Common/Neuron.cpp
index e7a9d2a..0006e45 100644
--- a/NeuralNetwork/Common/Neuron.cpp
+++ b/NeuralNetwork/Common/Neuron.cpp
@@ -28,7 +28,7 @@ void Neuron::randomizeWeights()
 {
     for (double& weight : _weights)
     {
-        weight = RandomUtils::range(-1.0, 1.0);
+        weight = RandomUtils::Range(-1.0, 1.0);
 
         // fix absolute zero weights
         if (weight == 0.0)
diff --git a/NeuralNetwork/MLP/Tests/MLPTestMain.cpp b/NeuralNetwork/MLP/Tests/MLPTestMain.cpp
index 04d30e9..146df9b 100644
--- a/NeuralNetwork/MLP/Tests/MLPTestMain.cpp
+++ b/NeuralNetwork/MLP/Tests/MLPTestMain.cpp
@@ -49,7 +49,7 @@ void TEST_MLP_XOR_2_2_1()
     std::cout << __FUNCTION__ << "... ";
     std::string weightsFilePath = ".\\" + std::string(__FUNCTION__);
     Activation::setThreshold(0.7);
-    RandomUtils::initRandomSeed();
+    RandomUtils::SetRandomSeed();
     std::vector neuronsByLayer({2,1});
     MultiLayerPerceptron mlp(2, neuronsByLayer);
     mlp.ActivationFunction = Activation::EActivationFunctionType::Threshold;
@@ -72,7 +72,7 @@ void TEST_MLP_XOR_2_3_1()
     std::cout << __FUNCTION__ << "... ";
     std::string weightsFilePath = ".\\" + std::string(__FUNCTION__);
     Activation::setThreshold(0.7);
-    RandomUtils::initRandomSeed();
+    RandomUtils::SetRandomSeed();
     std::vector neuronsByLayer({3,1});
     MultiLayerPerceptron mlp(2, neuronsByLayer);
     mlp.ActivationFunction = Activation::EActivationFunctionType::Threshold;
@@ -96,7 +96,7 @@ void TEST_MLP_XOR_2_3_3_3_1()
     std::cout << __FUNCTION__ << "... ";
     std::string weightsFilePath = ".\\" + std::string(__FUNCTION__);
     Activation::setThreshold(0.7);
-    RandomUtils::initRandomSeed();
+    RandomUtils::SetRandomSeed();
     std::vector neuronsByLayer({3, 3, 3, 1});
     MultiLayerPerceptron mlp(2, neuronsByLayer);
     mlp.ActivationFunction = Activation::EActivationFunctionType::Threshold;
@@ -120,7 +120,7 @@ void TEST_MLP_XOR_2_3_2()
     std::cout << __FUNCTION__ << "... ";
     std::string weightsFilePath = ".\\" + std::string(__FUNCTION__);
     Activation::setThreshold(0.7);
-    RandomUtils::initRandomSeed();
+    RandomUtils::SetRandomSeed();
     std::vector neuronsByLayer({3, 2});
     MultiLayerPerceptron mlp(2, neuronsByLayer);
     mlp.ActivationFunction = Activation::EActivationFunctionType::Threshold;
@@ -144,7 +144,7 @@ void TEST_MLP_number_recognition_digital_clock_0_to_9()
     std::cout << __FUNCTION__ << "... ";
     std::string weightsFilePath = ".\\" + std::string(__FUNCTION__);
     Activation::setThreshold(0.7);
-    RandomUtils::initRandomSeed();
+    RandomUtils::SetRandomSeed();
     std::vector neuronsByLayer({10, 10});
     MultiLayerPerceptron mlp(7, neuronsByLayer);
     mlp.ActivationFunction = Activation::EActivationFunctionType::Threshold;
diff --git a/NeuralNetwork/Perceptron/Perceptron.cpp b/NeuralNetwork/Perceptron/Perceptron.cpp
index 438ceff..cc082d9 100644
--- a/NeuralNetwork/Perceptron/Perceptron.cpp
+++ b/NeuralNetwork/Perceptron/Perceptron.cpp
@@ -42,7 +42,7 @@ void Perceptron::Deserialize(std::istream &stream)
 void Perceptron::randomizeWeights()
 {
     _neuron->randomizeWeights();
-    _bias = RandomUtils::range(-0.1, 0.1);
+    _bias = RandomUtils::Range(-0.1, 0.1);
 }
 
 Neuron *Perceptron::GetNeuron()
diff --git a/NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp b/NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp
index 67317c6..7fcb6bd 100644
--- a/NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp
+++ b/NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp
@@ -5,7 +5,7 @@
 void Test_Neuron_W1_greater_than_W2()
 {
     std::cout << __FUNCTION__ << "... ";
-    RandomUtils::initRandomSeed();
+    RandomUtils::SetRandomSeed();
     std::unique_ptr perceptron = std::make_unique(2);
     perceptron->GetNeuron()->ActivationFunction = NeuralNetwork::Activation::EActivationFunctionType::Sign;
     perceptron->randomizeWeights();
@@ -51,7 +51,7 @@
 void Test_Neuron_W2_greater_than_W1()
 {
     std::cout << __FUNCTION__ << "... ";
-    RandomUtils::initRandomSeed();
+    RandomUtils::SetRandomSeed();
     std::unique_ptr perceptron = std::make_unique(2);
     perceptron->GetNeuron()->ActivationFunction = NeuralNetwork::Activation::EActivationFunctionType::Sign;
     perceptron->randomizeWeights();

From bbb904eda3bc88e88f19d5839110e8a92cf3260f Mon Sep 17 00:00:00 2001
From: AndreiSchuch
Date: Tue, 8 Feb 2022 16:08:52 -0300
Subject: [PATCH 13/20] Returning CMakeLists to its default state

---
 CMakeLists.txt                      | 1 +
 Examples/MathLibrary/CMakeLists.txt | 1 -
 2 files changed, 1 insertion(+), 1 deletion(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9201c93..bf29d12 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -7,6 +7,7 @@ project(AIEngine)
 # set project properties
 set(CMAKE_CXX_STANDARD 14)
 set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build)
+set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/Libs)
 
 # include tests from subdirectories
 add_subdirectory(AdversarialSearch/Minimax/Tests)
diff --git a/Examples/MathLibrary/CMakeLists.txt b/Examples/MathLibrary/CMakeLists.txt
index 25f453c..7ea2a0a 100644
--- a/Examples/MathLibrary/CMakeLists.txt
+++ b/Examples/MathLibrary/CMakeLists.txt
@@ -1,5 +1,4 @@
 add_definitions(-D MATHLIBRARY_EXPORTS)
-set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/Libs)
 
 # list of source files
 set(FILES
a/AdversarialSearch/Minimax/Tests/MinimaxTestMain.cpp b/AdversarialSearch/Minimax/Tests/MinimaxTestMain.cpp index 229db87..2a8535a 100644 --- a/AdversarialSearch/Minimax/Tests/MinimaxTestMain.cpp +++ b/AdversarialSearch/Minimax/Tests/MinimaxTestMain.cpp @@ -1,16 +1,16 @@ -#include "MinimaxTester.h" - -int main(int argc, char *argv[]) -{ - if (argc > 1) - { - int boardState = atoi(argv[1]); - TicTacToeMinimax::benchmarkMinimax(boardState, true); - } - else - { - TicTacToeMinimax::RunMinimaxTests(); - TicTacToeMinimax::benchmarkMinimaxVsMinimax(0, true); - } - return 0; -} +#include "MinimaxTester.h" + +int main(int argc, char *argv[]) +{ + if (argc > 1) + { + int boardState = atoi(argv[1]); + TicTacToeMinimax::benchmarkMinimax(boardState, true); + } + else + { + TicTacToeMinimax::RunMinimaxTests(); + TicTacToeMinimax::benchmarkMinimaxVsMinimax(0, true); + } + return 0; +} diff --git a/AdversarialSearch/Minimax/Tests/MinimaxTester.cpp b/AdversarialSearch/Minimax/Tests/MinimaxTester.cpp index c822906..68b26a9 100644 --- a/AdversarialSearch/Minimax/Tests/MinimaxTester.cpp +++ b/AdversarialSearch/Minimax/Tests/MinimaxTester.cpp @@ -1,160 +1,167 @@ -#include "MinimaxTester.h" - -string getStateText(int state) -{ - if (state == TicTacToeMinimax::PLAYING) - return "Playing"; - else if (state == TicTacToeMinimax::DRAW) - return "Draw"; - else if (state == TicTacToeMinimax::CROSS_WINS) - return "CrossWins"; - return "CircleWins"; -} - -struct MinimaxTestCase -{ -public: - const int CurrentBoard; - const vector ExpectedBoards; - const bool IsMaxTurn; - MinimaxTestCase(int currentBoard, bool isMaxTurn, vector expectedNextBoard) - : CurrentBoard(currentBoard), ExpectedBoards(expectedNextBoard), IsMaxTurn(isMaxTurn) - { - // - } -}; - -void TicTacToeMinimax::RunMinimaxTests() -{ - vector testCases = { - MinimaxTestCase(0b10'10'00'00'11'00'00'00'00, true, {0b10'10'11'00'11'00'00'00'00}), - MinimaxTestCase(0b11'11'00'00'10'00'00'00'00, false, {0b11'11'10'00'10'00'00'00'00}), - MinimaxTestCase(0b00'00'00'00'11'00'00'10'10, true, {0b00'00'00'00'11'00'11'10'10}), - MinimaxTestCase(0b00'00'00'00'10'00'00'11'11, false, {0b00'00'00'00'10'00'10'11'11}), - MinimaxTestCase(0b00'10'10'00'11'11'00'00'00, true, {0b00'10'10'11'11'11'00'00'00}), - MinimaxTestCase(0b00'11'11'00'10'10'00'00'00, false, {0b00'11'11'10'10'10'00'00'00}), - MinimaxTestCase(0b00'11'00'11'10'11'10'00'10, true, {0b11'11'00'11'10'11'10'00'10, - 0b00'11'11'11'10'11'10'00'10, - 0b00'11'00'11'10'11'10'11'10}), - MinimaxTestCase(0b00'10'00'10'11'10'11'00'11, false, {0b10'10'00'10'11'10'11'00'11, - 0b00'10'10'10'11'10'11'00'11, - 0b00'10'00'10'11'10'11'10'11}), - }; - - for(unsigned int i = 0; i < testCases.size(); i++) - { - std::cout << "Test Case " << (i + 1) << " ("; - MinimaxTestCase testCase = testCases[i]; - std::cout << std::boolalpha; - std::cout << "IsMaxTurn=" << testCase.IsMaxTurn << " / "; - std::cout << "Board=" << testCase.CurrentBoard << " / "; - std::cout << "Expected="; - bool isResultExpected = false; - int result = TicTacToeMinimax::predict(testCase.CurrentBoard, testCase.IsMaxTurn, 6); - for(unsigned int x = 0; x < testCase.ExpectedBoards.size(); x++) - { - if (i > 0U && i < testCase.ExpectedBoards.size() - 1U) - { - std::cout << ";"; - } - int expected = testCase.ExpectedBoards[x]; - std::cout << expected; - isResultExpected |= result == expected; - } - std::cout << ") "; - if (isResultExpected) - { - std::cout << "[PASSED]"; - } - else - { - std::cout << "[FAILED] Result was=" << result; - std::cout << "CurrentBoard:" << 
std::endl; - printBoard(testCase.CurrentBoard); - std::cout << "Predicted board state:" << std::endl; - printBoard(result); - } - std::cout << std::endl; - } -} - -void TicTacToeMinimax::benchmarkMinimax(int board, bool isMaxTurn) -{ - long long int totalDuration = 0ll; - printBoard(board); - int state = TicTacToeMinimax::getState(board); - if (state == PLAYING) - { - auto begin = std::chrono::steady_clock::now(); - board = TicTacToeMinimax::predict(board, isMaxTurn, 6); - auto end = std::chrono::steady_clock::now(); - totalDuration += (end - begin).count(); - } - printBoard(board); - printState(state); - cout << "benchmarkMinimax: " << totalDuration << " ns"; - cout << endl << endl; -} - -void TicTacToeMinimax::benchmarkMinimaxVsMinimax(int board, bool isMaxTurn) -{ - std::vector durationByMove; - long long int totalDuration = 0ll; - int firstBoard = board; - int currentBoard = firstBoard; - int state = TicTacToeMinimax::getState(currentBoard); - while (state == PLAYING) - { - auto begin = std::chrono::steady_clock::now(); - int bestMove = TicTacToeMinimax::predict(currentBoard, isMaxTurn, 6); - auto end = std::chrono::steady_clock::now(); - long long int nanos = (end - begin).count(); - totalDuration += nanos; - durationByMove.emplace_back(nanos); - isMaxTurn = !isMaxTurn; - currentBoard = bestMove; - state = TicTacToeMinimax::getState(currentBoard); - if (state != PLAYING && state != DRAW) - { - std::cerr << "'" << getStateText(state) << "' state must never happen during a Minimax vs Minimax battle!" << std::endl; - return; - } - } - printBoard(firstBoard); - printBoard(currentBoard); - printState(state); - for (int i = 0; i < durationByMove.size(); ++i) - { - long long int nanoseconds = durationByMove[i]; - printf("Move %i took %.6f ms\n", (i + 1), (nanoseconds / 1'000'000.0)); - } - printf("Total duration: %.6f ms\n", (totalDuration / 1'000'000.0)); - cout << endl << endl; -} - -void TicTacToeMinimax::printBoard(int board) -{ - int crossMask = 3; - cout << endl; - for (int x = 0; x < 9; x++) - { - if (x > 0 && x % 3 == 0) cout << endl; - if ((board & crossMask) == 0) - { - cout << "[ ]"; - } - else - { - if ((board & crossMask) == crossMask) cout << "[X]"; - else cout << "[O]"; - } - crossMask <<= 2; - } - cout << endl; -} - -void TicTacToeMinimax::printState(int state) -{ - string stateText = getStateText(state); - cout << stateText << endl; -} +#include "MinimaxTester.h" + +string getStateText(int state) +{ + if (state == TicTacToeMinimax::PLAYING) + return "Playing"; + else if (state == TicTacToeMinimax::DRAW) + return "Draw"; + else if (state == TicTacToeMinimax::CROSS_WINS) + return "CrossWins"; + return "CircleWins"; +} + +struct MinimaxTestCase +{ +public: + const int CurrentBoard; + const vector ExpectedBoards; + const bool IsMaxTurn; + MinimaxTestCase(int currentBoard, bool isMaxTurn, vector expectedNextBoard) + : CurrentBoard(currentBoard), ExpectedBoards(expectedNextBoard), IsMaxTurn(isMaxTurn) + { + // + } +}; + +void TicTacToeMinimax::RunMinimaxTests() +{ + const vector testCases = { + MinimaxTestCase(0b10'10'00'00'11'00'00'00'00, true, {0b10'10'11'00'11'00'00'00'00}), + MinimaxTestCase(0b11'11'00'00'10'00'00'00'00, false, {0b11'11'10'00'10'00'00'00'00}), + MinimaxTestCase(0b00'00'00'00'11'00'00'10'10, true, {0b00'00'00'00'11'00'11'10'10}), + MinimaxTestCase(0b00'00'00'00'10'00'00'11'11, false, {0b00'00'00'00'10'00'10'11'11}), + MinimaxTestCase(0b00'10'10'00'11'11'00'00'00, true, {0b00'10'10'11'11'11'00'00'00}), + MinimaxTestCase(0b00'11'11'00'10'10'00'00'00, false, 
{0b00'11'11'10'10'10'00'00'00}), + MinimaxTestCase(0b00'11'00'11'10'11'10'00'10, true, {0b11'11'00'11'10'11'10'00'10, + 0b00'11'11'11'10'11'10'00'10, + 0b00'11'00'11'10'11'10'11'10}), + MinimaxTestCase(0b00'10'00'10'11'10'11'00'11, false, {0b10'10'00'10'11'10'11'00'11, + 0b00'10'10'10'11'10'11'00'11, + 0b00'10'00'10'11'10'11'10'11}), + }; + + for(unsigned int i = 0; i < testCases.size(); i++) + { + std::cout << "Test Case " << (i + 1) << " ("; + const MinimaxTestCase testCase = testCases[i]; + std::cout << std::boolalpha; + std::cout << "IsMaxTurn=" << testCase.IsMaxTurn << " / "; + std::cout << "Board=" << testCase.CurrentBoard << " / "; + std::cout << "Expected="; + bool isResultExpected = false; + int result = TicTacToeMinimax::predict(testCase.CurrentBoard, testCase.IsMaxTurn, 6); + for(unsigned int x = 0; x < testCase.ExpectedBoards.size(); x++) + { + if (i > 0U && i < testCase.ExpectedBoards.size() - 1U) + { + std::cout << ";"; + } + int expected = testCase.ExpectedBoards[x]; + std::cout << expected; + isResultExpected |= result == expected; + } + std::cout << ") "; + if (isResultExpected) + { + std::cout << "[PASSED]"; + } + else + { + std::cout << "[FAILED] Result was=" << result; + std::cout << "CurrentBoard:" << std::endl; + printBoard(testCase.CurrentBoard); + std::cout << "Predicted board state:" << std::endl; + printBoard(result); + } + std::cout << std::endl; + } +} + +void TicTacToeMinimax::benchmarkMinimax(int board, bool isMaxTurn) +{ + long long int totalDuration = 0ll; + printBoard(board); + int state = TicTacToeMinimax::getState(board); + if (state == PLAYING) + { + auto begin = std::chrono::steady_clock::now(); + board = TicTacToeMinimax::predict(board, isMaxTurn, 6); + auto end = std::chrono::steady_clock::now(); + totalDuration += (end - begin).count(); + } + printBoard(board); + printState(state); + cout << "benchmarkMinimax: " << totalDuration << " ns"; + cout << endl << endl; +} + +void TicTacToeMinimax::benchmarkMinimaxVsMinimax(int board, bool isMaxTurn) +{ + std::vector durationByMove; + long long int totalDuration = 0ll; + int firstBoard = board; + int currentBoard = firstBoard; + int state = TicTacToeMinimax::getState(currentBoard); + while (state == PLAYING) + { + auto begin = std::chrono::steady_clock::now(); + + // actually run the minimax algorithm + int bestMove = TicTacToeMinimax::predict(currentBoard, isMaxTurn, 6); + + auto end = std::chrono::steady_clock::now(); + // calculate the algorithm's duration in nanoseconds + long long int nanos = (end - begin).count(); + totalDuration += nanos; + // store the duration + durationByMove.emplace_back(nanos); + + isMaxTurn = !isMaxTurn; + currentBoard = bestMove; + state = TicTacToeMinimax::getState(currentBoard); + // print an error in case the algorithm ends up doing something completely wrong + if (state != PLAYING && state != DRAW) + { + std::cerr << "'" << getStateText(state) << "' state must never happen during a Minimax vs Minimax battle!" 
<< std::endl; + return; + } + } + printBoard(firstBoard); + printBoard(currentBoard); + printState(state); + for (int i = 0; i < durationByMove.size(); ++i) + { + long long int nanoseconds = durationByMove[i]; + printf("Move %i took %.6f ms\n", (i + 1), (nanoseconds / 1'000'000.0)); + } + printf("Total duration: %.6f ms\n", (totalDuration / 1'000'000.0)); + cout << endl << endl; +} + +void TicTacToeMinimax::printBoard(int board) +{ + int crossMask = 3; + cout << endl; + for (int x = 0; x < 9; x++) + { + if (x > 0 && x % 3 == 0) cout << endl; + if ((board & crossMask) == 0) + { + cout << "[ ]"; + } + else + { + if ((board & crossMask) == crossMask) cout << "[X]"; + else cout << "[O]"; + } + crossMask <<= 2; + } + cout << endl; +} + +void TicTacToeMinimax::printState(int state) +{ + string stateText = getStateText(state); + cout << stateText << endl; +} diff --git a/AdversarialSearch/Minimax/Tests/MinimaxTester.h b/AdversarialSearch/Minimax/Tests/MinimaxTester.h index bde26c2..be5e7d5 100644 --- a/AdversarialSearch/Minimax/Tests/MinimaxTester.h +++ b/AdversarialSearch/Minimax/Tests/MinimaxTester.h @@ -1,25 +1,25 @@ -#pragma once - -#include "../TicTacToeMinimax.h" - -#include -#include -#include -#include -#include -#include -#include -#include - -using namespace std; - -namespace TicTacToeMinimax -{ - void RunMinimaxTests(); - void benchmarkMinimax(int board, bool isMaxTurn); - void benchmarkMinimaxVsMinimax(int board, bool isMaxTurn); - void benchmarkEvaluate(int board, bool isMaxTurn); - void benchmarkEvaluateAll(int board, bool isMaxTurn); - void printBoard(int board); - void printState(int state); -} +#pragma once + +#include "../TicTacToeMinimax.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace std; + +namespace TicTacToeMinimax +{ + void RunMinimaxTests(); + void benchmarkMinimax(int board, bool isMaxTurn); + void benchmarkMinimaxVsMinimax(int board, bool isMaxTurn); + void benchmarkEvaluate(int board, bool isMaxTurn); + void benchmarkEvaluateAll(int board, bool isMaxTurn); + void printBoard(int board); + void printState(int state); +} diff --git a/AdversarialSearch/Minimax/TicTacToeMinimax.cpp b/AdversarialSearch/Minimax/TicTacToeMinimax.cpp index 0a9c49d..1921d1e 100644 --- a/AdversarialSearch/Minimax/TicTacToeMinimax.cpp +++ b/AdversarialSearch/Minimax/TicTacToeMinimax.cpp @@ -1,214 +1,214 @@ -#include "TicTacToeMinimax.h" - -#define MAXIMUM_MASK_VALUE 0b1'00'00'00'00'00'00'00'00'00 -#define DRAW_BOARD_STATE 0b10'10'10'10'10'10'10'10'10 -const int winnerMasks[8] = -{ - 0b10'10'10'00'00'00'00'00'00, - 0b10'00'00'10'00'00'10'00'00, - 0b00'00'00'00'00'00'10'10'10, - 0b00'00'10'00'00'10'00'00'10, - 0b10'00'00'00'10'00'00'00'10, - 0b00'10'00'00'10'00'00'10'00, - 0b00'00'00'10'10'10'00'00'00, - 0b00'00'10'00'10'00'10'00'00, -}; -int _maxDepth; - -namespace TicTacToeMinimax -{ - int predictMinTreeScore(const int board, int alpha, int beta, int depth); - - int predictMaxTreeScore(const int board, int alpha, int beta, int depth) - { - const int state = getState(board); - if (state != PLAYING || depth >= _maxDepth) - { - return state; - } - int bestScore = INT_MIN; - int mask = 0b11; - depth++; - while (mask < MAXIMUM_MASK_VALUE) - { - if ((board & mask) == 0) - { - const int newBoard = board | mask; - const int score = predictMinTreeScore(newBoard, alpha, beta, depth); - if (score >= beta) - return score; - if (score > alpha) - alpha = score; - if (score > bestScore) - bestScore = score; - } - mask <<= 2; - } - return bestScore; - } - - 
int predictMinTreeScore(int board, int alpha, int beta, int depth) - { - const int state = getState(board); - if (state != PLAYING || depth >= _maxDepth) - { - return state; - } - int bestScore = INT_MAX; - int mask = 0b10; - depth++; - while (mask < MAXIMUM_MASK_VALUE) - { - if ((board & mask) == 0) - { - const int newBoard = board | mask; - const int score = predictMaxTreeScore(newBoard, alpha, beta, depth); - if (score <= alpha) - return score; - if (score < beta) - beta = score; - if (score < bestScore) - bestScore = score; - } - mask <<= 2; - } - return bestScore; - } - - int predictMaxDecision(int board) - { - int bestMove = 0; - int bestMoveScore = INT_MIN; - int mask = 0b11; - while (mask < MAXIMUM_MASK_VALUE) - { - if ((board & mask) == 0) - { - const int newBoard = board | mask; - const int score = predictMinTreeScore(newBoard, INT_MIN, INT_MAX, 1); - if (score > bestMoveScore) - { - bestMoveScore = score; - bestMove = newBoard; - } - } - mask <<= 2; - } - return bestMove; - } - - int predictMinDecision(int board) - { - int bestMove = 0; - int bestMoveScore = INT_MAX; - int mask = 0b10; - while (mask < MAXIMUM_MASK_VALUE) - { - if ((board & mask) == 0) - { - const int newBoard = board | mask; - const int score = predictMaxTreeScore(newBoard, INT_MIN, INT_MAX, 1); - if (score < bestMoveScore) - { - bestMoveScore = score; - bestMove = newBoard; - } - } - mask <<= 2; - } - return bestMove; - } - - std::vector predictMaxDecisions(int board) - { - std::vector bestMoves; - int bestMoveScore = INT_MIN; - int mask = 0b11; - while (mask < MAXIMUM_MASK_VALUE) - { - if ((board & mask) == 0) - { - const int newBoard = (board | mask); - const int score = predictMinTreeScore(newBoard, INT_MIN, INT_MAX, 1); - if (score > bestMoveScore) - { - bestMoveScore = score; - bestMoves.clear(); - bestMoves.push_back(newBoard); - } - else if (score == bestMoveScore) - { - bestMoves.push_back(newBoard); - } - } - mask <<= 2; - } - return bestMoves; - } - - std::vector predictMinDecisions(int board) - { - std::vector bestMoves; - int bestMoveScore = INT_MAX; - int mask = 0b10; - while (mask < MAXIMUM_MASK_VALUE) - { - if ((board & mask) == 0) - { - const int newBoard = (board | mask); - const int score = predictMaxTreeScore(newBoard, INT_MIN, INT_MAX, 1); - if (score < bestMoveScore) - { - bestMoveScore = score; - bestMoves.clear(); - bestMoves.push_back(newBoard); - } - else if (score == bestMoveScore) - { - bestMoves.push_back(newBoard); - } - } - mask <<= 2; - } - return bestMoves; - } - - int predict(int board, bool isMaxTurn, int maxDepth) - { - _maxDepth = maxDepth; - if (isMaxTurn) - { - return predictMaxDecision(board); - } - return predictMinDecision(board); - } - - std::vector predictAll(int board, bool isMaxTurn, int maxDepth) - { - _maxDepth = maxDepth; - if (isMaxTurn) - { - return predictMaxDecisions(board); - } - return predictMinDecisions(board); - } - - int getState(int board) - { - for(int winnerMask : winnerMasks) - { - if ((board & winnerMask) == winnerMask) - { - winnerMask = winnerMask >> 1; - int piecesXor = (winnerMask ^ board) & winnerMask; - if (piecesXor == 0) - return CROSS_WINS; - if (piecesXor == winnerMask) - return CIRCLE_WINS; - } - } - if ((board & DRAW_BOARD_STATE) == DRAW_BOARD_STATE) - return DRAW; - return PLAYING; - } -} +#include "TicTacToeMinimax.h" + +#define MAXIMUM_MASK_VALUE 0b1'00'00'00'00'00'00'00'00'00 +#define DRAW_BOARD_STATE 0b10'10'10'10'10'10'10'10'10 +const int winnerMasks[8] = +{ + 0b10'10'10'00'00'00'00'00'00, + 0b10'00'00'10'00'00'10'00'00, + 
0b00'00'00'00'00'00'10'10'10, + 0b00'00'10'00'00'10'00'00'10, + 0b10'00'00'00'10'00'00'00'10, + 0b00'10'00'00'10'00'00'10'00, + 0b00'00'00'10'10'10'00'00'00, + 0b00'00'10'00'10'00'10'00'00, +}; +int _maxDepth; + +namespace TicTacToeMinimax +{ + int predictMinTreeScore(const int board, int alpha, int beta, int depth); + + int predictMaxTreeScore(const int board, int alpha, int beta, int depth) + { + const int state = getState(board); + if (state != PLAYING || depth >= _maxDepth) + { + return state; + } + int bestScore = INT_MIN; + int mask = 0b11; + depth++; + while (mask < MAXIMUM_MASK_VALUE) + { + if ((board & mask) == 0) + { + const int newBoard = board | mask; + const int score = predictMinTreeScore(newBoard, alpha, beta, depth); + if (score >= beta) + return score; + if (score > alpha) + alpha = score; + if (score > bestScore) + bestScore = score; + } + mask <<= 2; + } + return bestScore; + } + + int predictMinTreeScore(int board, int alpha, int beta, int depth) + { + const int state = getState(board); + if (state != PLAYING || depth >= _maxDepth) + { + return state; + } + int bestScore = INT_MAX; + int mask = 0b10; + depth++; + while (mask < MAXIMUM_MASK_VALUE) + { + if ((board & mask) == 0) + { + const int newBoard = board | mask; + const int score = predictMaxTreeScore(newBoard, alpha, beta, depth); + if (score <= alpha) + return score; + if (score < beta) + beta = score; + if (score < bestScore) + bestScore = score; + } + mask <<= 2; + } + return bestScore; + } + + int predictMaxDecision(int board) + { + int bestMove = 0; + int bestMoveScore = INT_MIN; + int mask = 0b11; + while (mask < MAXIMUM_MASK_VALUE) + { + if ((board & mask) == 0) + { + const int newBoard = board | mask; + const int score = predictMinTreeScore(newBoard, INT_MIN, INT_MAX, 1); + if (score > bestMoveScore) + { + bestMoveScore = score; + bestMove = newBoard; + } + } + mask <<= 2; + } + return bestMove; + } + + int predictMinDecision(int board) + { + int bestMove = 0; + int bestMoveScore = INT_MAX; + int mask = 0b10; + while (mask < MAXIMUM_MASK_VALUE) + { + if ((board & mask) == 0) + { + const int newBoard = board | mask; + const int score = predictMaxTreeScore(newBoard, INT_MIN, INT_MAX, 1); + if (score < bestMoveScore) + { + bestMoveScore = score; + bestMove = newBoard; + } + } + mask <<= 2; + } + return bestMove; + } + + std::vector predictMaxDecisions(int board) + { + std::vector bestMoves; + int bestMoveScore = INT_MIN; + int mask = 0b11; + while (mask < MAXIMUM_MASK_VALUE) + { + if ((board & mask) == 0) + { + const int newBoard = (board | mask); + const int score = predictMinTreeScore(newBoard, INT_MIN, INT_MAX, 1); + if (score > bestMoveScore) + { + bestMoveScore = score; + bestMoves.clear(); + bestMoves.push_back(newBoard); + } + else if (score == bestMoveScore) + { + bestMoves.push_back(newBoard); + } + } + mask <<= 2; + } + return bestMoves; + } + + std::vector predictMinDecisions(int board) + { + std::vector bestMoves; + int bestMoveScore = INT_MAX; + int mask = 0b10; + while (mask < MAXIMUM_MASK_VALUE) + { + if ((board & mask) == 0) + { + const int newBoard = (board | mask); + const int score = predictMaxTreeScore(newBoard, INT_MIN, INT_MAX, 1); + if (score < bestMoveScore) + { + bestMoveScore = score; + bestMoves.clear(); + bestMoves.push_back(newBoard); + } + else if (score == bestMoveScore) + { + bestMoves.push_back(newBoard); + } + } + mask <<= 2; + } + return bestMoves; + } + + int predict(int board, bool isMaxTurn, int maxDepth) + { + _maxDepth = maxDepth; + if (isMaxTurn) + { + return 
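
One behaviour of the tree search above that may be worth double-checking: the cutoff `if (state != PLAYING || depth >= _maxDepth) return state;` returns PLAYING (2) when the depth cap is hit on an unresolved board, and 2 compares greater than CROSS_WINS (1), so a depth-limited max search can rank an unresolved branch above a forced win. With a maxDepth deep enough to always reach terminal states the issue never triggers; if a neutral score at the cap is the intent, a guard along these lines would do it (a sketch of one possible fix, not part of the patch):

    if (state != PLAYING)
        return state;
    if (depth >= _maxDepth)
        return DRAW; // assumes DRAW (0) is the intended neutral cutoff score
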
predictMaxDecision(board); + } + return predictMinDecision(board); + } + + std::vector predictAll(int board, bool isMaxTurn, int maxDepth) + { + _maxDepth = maxDepth; + if (isMaxTurn) + { + return predictMaxDecisions(board); + } + return predictMinDecisions(board); + } + + int getState(int board) + { + for(int winnerMask : winnerMasks) + { + if ((board & winnerMask) == winnerMask) + { + winnerMask = winnerMask >> 1; + int piecesXor = (winnerMask ^ board) & winnerMask; + if (piecesXor == 0) + return CROSS_WINS; + if (piecesXor == winnerMask) + return CIRCLE_WINS; + } + } + if ((board & DRAW_BOARD_STATE) == DRAW_BOARD_STATE) + return DRAW; + return PLAYING; + } +} diff --git a/AdversarialSearch/Minimax/TicTacToeMinimax.h b/AdversarialSearch/Minimax/TicTacToeMinimax.h index 80114ef..bad8eb0 100644 --- a/AdversarialSearch/Minimax/TicTacToeMinimax.h +++ b/AdversarialSearch/Minimax/TicTacToeMinimax.h @@ -1,16 +1,16 @@ -#pragma once - -#include -#include - -namespace TicTacToeMinimax -{ - const int DRAW = 0; - const int CROSS_WINS = 1; - const int CIRCLE_WINS = -1; - const int PLAYING = 2; - - int predict(int board, bool isMaxTurn, int maxDepth); - std::vector predictAll(int board, bool isMaxTurn, int maxDepth); - int getState(int board); -} +#pragma once + +#include +#include + +namespace TicTacToeMinimax +{ + const int DRAW = 0; + const int CROSS_WINS = 1; + const int CIRCLE_WINS = -1; + const int PLAYING = 2; + + int predict(int board, bool isMaxTurn, int maxDepth); + std::vector predictAll(int board, bool isMaxTurn, int maxDepth); + int getState(int board); +} diff --git a/Examples/MathLibrary/CMakeLists.txt b/Examples/MathLibrary/CMakeLists.txt index 7ea2a0a..2f7ab0a 100644 --- a/Examples/MathLibrary/CMakeLists.txt +++ b/Examples/MathLibrary/CMakeLists.txt @@ -1,14 +1,14 @@ -add_definitions(-D MATHLIBRARY_EXPORTS) - -# list of source files -set(FILES - MathLibrary.cpp - ) - -# shared and static libraries built from the same object files -add_library(MathLibraryShared SHARED ${FILES}) -add_library(MathLibraryStatic STATIC ${FILES}) - -# rename output name of DLL and LIB files -set_target_properties(MathLibraryShared PROPERTIES OUTPUT_NAME MathLibrary) -set_target_properties(MathLibraryStatic PROPERTIES OUTPUT_NAME MathLibrary) +add_definitions(-D MATHLIBRARY_EXPORTS) + +# list of source files +set(FILES + MathLibrary.cpp + ) + +# shared and static libraries built from the same object files +add_library(MathLibraryShared SHARED ${FILES}) +add_library(MathLibraryStatic STATIC ${FILES}) + +# rename output name of DLL and LIB files +set_target_properties(MathLibraryShared PROPERTIES OUTPUT_NAME MathLibrary) +set_target_properties(MathLibraryStatic PROPERTIES OUTPUT_NAME MathLibrary) diff --git a/Examples/MathLibrary/MathLibrary.cpp b/Examples/MathLibrary/MathLibrary.cpp index 1586af6..24f887c 100644 --- a/Examples/MathLibrary/MathLibrary.cpp +++ b/Examples/MathLibrary/MathLibrary.cpp @@ -1,16 +1,16 @@ -#include "MathLibrary.h" - -int sum(int lhs, int rhs) -{ - return lhs + rhs; -} - -int getOne() -{ - return 1; -} - -int main(int argc, char *argv[]) -{ - return 0; -} +#include "MathLibrary.h" + +int sum(int lhs, int rhs) +{ + return lhs + rhs; +} + +int getOne() +{ + return 1; +} + +int main(int argc, char *argv[]) +{ + return 0; +} diff --git a/Examples/MathLibrary/MathLibrary.h b/Examples/MathLibrary/MathLibrary.h index 1332f1e..09a9c7e 100644 --- a/Examples/MathLibrary/MathLibrary.h +++ b/Examples/MathLibrary/MathLibrary.h @@ -1,17 +1,16 @@ -// MathLibrary.h - Contains declarations of math 
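
The CMake setup above builds the shared and static MathLibrary from the same object list, and `add_definitions(-D MATHLIBRARY_EXPORTS)` is what flips MATHLIBRARY_API (declared in the header that follows) to __declspec(dllexport) while the library itself compiles; a consumer that merely includes the header gets dllimport instead, and the extern "C" declarations keep the exported symbol names unmangled. A hypothetical consumer sketch, assuming it links against the built MathLibrary:

    // consumer.cpp - hypothetical; compiled WITHOUT MATHLIBRARY_EXPORTS defined,
    // so MATHLIBRARY_API expands to dllimport on Windows
    #include "MathLibrary.h"

    int main()
    {
        return sum(getOne(), getOne()) == 2 ? 0 : 1; // exits 0 on success
    }
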
functions -#pragma once - -#if defined(_WIN32) || defined(WIN32) - -# ifdef MATHLIBRARY_EXPORTS -# define MATHLIBRARY_API __declspec(dllexport) -# else -# define MATHLIBRARY_API __declspec(dllimport) -# endif -#else -# define MATHLIBRARY_API -#endif - -extern "C" MATHLIBRARY_API int sum(int lhs, int rhs); - -extern "C" MATHLIBRARY_API int getOne(); +// MathLibrary.h - Contains declarations of math functions +#pragma once + +#if defined(_WIN32) || defined(WIN32) +# ifdef MATHLIBRARY_EXPORTS +# define MATHLIBRARY_API __declspec(dllexport) +# else +# define MATHLIBRARY_API __declspec(dllimport) +# endif +#else +# define MATHLIBRARY_API +#endif + +extern "C" MATHLIBRARY_API int sum(int lhs, int rhs); + +extern "C" MATHLIBRARY_API int getOne(); diff --git a/Miscellaneous/BitMath.h b/Miscellaneous/BitMath.h index e2680ac..54d4c45 100644 --- a/Miscellaneous/BitMath.h +++ b/Miscellaneous/BitMath.h @@ -1,24 +1,24 @@ -#include - -namespace BitMath -{ - int reverseBits(const int value, int maxBits) - { - int reversedBits = 0; - maxBits = (maxBits % 2 == 0) ? maxBits : maxBits - 1; - int halfBits = (int) floor(maxBits / 2); - for (int bit = 0; bit < halfBits; bit++) - { - int transposeDifference = (maxBits - (bit * 2) - 1); - - int rBit = value & (1 << bit); - int lBit = rBit << transposeDifference; - reversedBits |= lBit; - - lBit = value & (1 << (maxBits - 1 - bit)); - rBit = lBit >> transposeDifference; - reversedBits |= rBit; - } - return reversedBits; - } -} +#include + +namespace BitMath +{ + int reverseBits(const int value, int maxBits) + { + int reversedBits = 0; + maxBits = (maxBits % 2 == 0) ? maxBits : maxBits - 1; + int halfBits = (int) floor(maxBits / 2); + for (int bit = 0; bit < halfBits; bit++) + { + int transposeDifference = (maxBits - (bit * 2) - 1); + + int rBit = value & (1 << bit); + int lBit = rBit << transposeDifference; + reversedBits |= lBit; + + lBit = value & (1 << (maxBits - 1 - bit)); + rBit = lBit >> transposeDifference; + reversedBits |= rBit; + } + return reversedBits; + } +} diff --git a/Miscellaneous/ISerializable.cpp b/Miscellaneous/ISerializable.cpp index 0eee1c4..b44b111 100644 --- a/Miscellaneous/ISerializable.cpp +++ b/Miscellaneous/ISerializable.cpp @@ -1,26 +1,26 @@ -#include "ISerializable.h" - -namespace serialization -{ - void loadWeightsFromFile(const char* filePath, ISerializable& target) - { - std::fstream stream; - stream.open (filePath, std::fstream::in | std::fstream::binary); - if (stream.good()) - { - target.Deserialize(stream); - } - stream.close(); - } - - void serializeToFile(const char* filePath, const ISerializable& target) - { - std::fstream stream; - stream.open (filePath, std::fstream::out | std::fstream::binary | std::fstream::trunc); - if (stream.good()) - { - target.Serialize(stream); - } - stream.close(); - } -} +#include "ISerializable.h" + +namespace serialization +{ + void loadWeightsFromFile(const char* filePath, ISerializable& target) + { + std::fstream stream; + stream.open (filePath, std::fstream::in | std::fstream::binary); + if (stream.good()) + { + target.Deserialize(stream); + } + stream.close(); + } + + void serializeToFile(const char* filePath, const ISerializable& target) + { + std::fstream stream; + stream.open (filePath, std::fstream::out | std::fstream::binary | std::fstream::trunc); + if (stream.good()) + { + target.Serialize(stream); + } + stream.close(); + } +} diff --git a/Miscellaneous/ISerializable.h b/Miscellaneous/ISerializable.h index e479033..25cda09 100644 --- a/Miscellaneous/ISerializable.h +++ 
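
BitMath::reverseBits above mirrors the low maxBits bits of value, swapping bit i with bit (maxBits - 1 - i) in pairs from both ends; an odd maxBits is first rounded down to an even width. Two hand-checkable cases, as a usage sketch against the definition above:

    #include <cassert>
    #include "BitMath.h" // path as in this repo's Miscellaneous folder

    int main()
    {
        assert(BitMath::reverseBits(0b0001, 4) == 0b1000); // bit 0 <-> bit 3
        assert(BitMath::reverseBits(0b0110, 4) == 0b0110); // palindrome unchanged
        return 0;
    }
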
b/Miscellaneous/ISerializable.h @@ -1,17 +1,17 @@ -#pragma once - -#include -#include - -namespace serialization -{ - class ISerializable - { - public: - virtual void Serialize(std::ostream& stream) const = 0; - virtual void Deserialize(std::istream& stream) = 0; - }; - - void loadWeightsFromFile(const char* filePath, ISerializable& target); - void serializeToFile(const char* filePath, const ISerializable& target); -} +#pragma once + +#include +#include + +namespace serialization +{ + class ISerializable + { + public: + virtual void Serialize(std::ostream& stream) const = 0; + virtual void Deserialize(std::istream& stream) = 0; + }; + + void loadWeightsFromFile(const char* filePath, ISerializable& target); + void serializeToFile(const char* filePath, const ISerializable& target); +} diff --git a/Miscellaneous/Matrix.cpp b/Miscellaneous/Matrix.cpp index cc79e01..8219ddd 100644 --- a/Miscellaneous/Matrix.cpp +++ b/Miscellaneous/Matrix.cpp @@ -1,147 +1,147 @@ -#include "Matrix.h" - -Matrix::Matrix(int rows, int columns) : _rows(rows), - _columns(columns) -{ - assert(columns >= 1 && rows >= 1); - int size = columns * rows; - _values = std::vector(size); -} - -Matrix::Matrix(const Matrix& source) -{ - _values = source._values; - _rows = source._rows; - _columns = source._columns; -} - -double Matrix::get(const int row, const int column) const -{ - int index = row * _columns + column; - assert(index < (_rows * _columns)); - double value = _values[index]; - return value; -} - -void Matrix::set(const int row, const int column, const double value) -{ - assert((row >= 0 && row < _rows) && (column >= 0 && column < _columns)); - const int index = row * _columns + column; - _values[index] = value; -} - -int Matrix::getRows() const -{ - return _rows; -} - -int Matrix::getColumns() const -{ - return _columns; -} - -void Matrix::print(std::ostream& stream) const -{ - print(stream, 3); -} - -void Matrix::print(std::ostream& stream, int decimalPlace) const -{ - int decimalFactor = static_cast(pow(10, decimalPlace)); - assert(decimalFactor > 0); - for (int r = 0; r < _rows; r++) - { - for (int c = 0; c < _columns; c++) - { - double value = get(r, c); - double truncatedValue = floor(value * decimalFactor) / decimalFactor; - stream << "[" << truncatedValue << "] "; - } - stream << std::endl; - } -} - -//STATIC FUNCTIONS -std::unique_ptr Matrix::multiply(const Matrix &left, const Matrix &right) -{ - std::unique_ptr result = std::make_unique(left.getRows(),right.getColumns()); - multiply(left, right, *result); - return result; -} - -void Matrix::multiply(const Matrix& left, const Matrix& right, Matrix& target) -{ - assert(left.getColumns() == right.getRows()); - for(int row = 0; row < target.getRows(); ++row) - { - for(int column = 0; column < target.getColumns(); ++column) - { - double sum = 0.0; - for(int leftColumn = 0; leftColumn < left.getColumns(); ++leftColumn) - { - sum += left.get(row, leftColumn) * right.get(leftColumn, column); - } - target.set(row, column, sum); - } - } -} - -void Matrix::multiply(Matrix& target, const double scalar) -{ - for (int r = 0; r < target.getRows(); ++r) - { - for (int c = 0; c < target.getColumns(); ++c) - { - double value = target.get(r,c) * scalar; - target.set(r,c,value); - } - } -} - -void Matrix::add(Matrix& target, const double value) -{ - for (int r = 0; r < target.getRows(); ++r) - { - for (int c = 0; c < target.getColumns(); ++c) - { - double sum = target.get(r, c) + value; - target.set(r, c, sum); - } - } -} - -std::unique_ptr Matrix::fromVectorRows(const 
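
ISerializable gives every trainable component one stream-based contract, and the two free functions above wrap the fstream open/good/close ceremony around it. A minimal implementor, as a sketch (Scalar is a hypothetical example type, not part of the patch):

    #include "ISerializable.h"

    // hypothetical smallest-possible ISerializable implementor
    class Scalar final : public serialization::ISerializable
    {
        double _value = 0.0;
    public:
        void Serialize(std::ostream& stream) const override { stream << _value << std::endl; }
        void Deserialize(std::istream& stream) override { stream >> _value; }
    };

    // usage: Scalar s; serialization::serializeToFile("scalar.txt", s);
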
std::vector& vector) -{ - int size = vector.size(); - std::unique_ptr matrix = std::make_unique(size, 1); - for (int i = 0; i < size; ++i) { - const double number = vector[i]; - matrix->set(i, 0, number); - } - return matrix; -} - -std::unique_ptr Matrix::fromVectorColumns(const std::vector& vector) -{ - int size = vector.size(); - std::unique_ptr matrix = std::make_unique(1, size); - for (int i = 0; i < size; ++i) { - const double number = vector[i]; - matrix->set(0, i, number); - } - return matrix; -} - -void Matrix::copy(const Matrix& source, Matrix& target) -{ - assert(source.getColumns() == target.getColumns()); - assert(source.getRows() == target.getRows()); - for (int r = 0; r < source.getRows(); ++r) - { - for (int c = 0; c < source.getRows(); ++c) - { - double value = source.get(r, c); - target.set(r, c, value); - } - } -} +#include "Matrix.h" + +Matrix::Matrix(int rows, int columns) : _rows(rows), + _columns(columns) +{ + assert(columns >= 1 && rows >= 1); + int size = columns * rows; + _values = std::vector(size); +} + +Matrix::Matrix(const Matrix& source) +{ + _values = source._values; + _rows = source._rows; + _columns = source._columns; +} + +double Matrix::get(const int row, const int column) const +{ + int index = row * _columns + column; + assert(index < (_rows * _columns)); + double value = _values[index]; + return value; +} + +void Matrix::set(const int row, const int column, const double value) +{ + assert((row >= 0 && row < _rows) && (column >= 0 && column < _columns)); + const int index = row * _columns + column; + _values[index] = value; +} + +int Matrix::getRows() const +{ + return _rows; +} + +int Matrix::getColumns() const +{ + return _columns; +} + +void Matrix::print(std::ostream& stream) const +{ + print(stream, 3); +} + +void Matrix::print(std::ostream& stream, int decimalPlace) const +{ + int decimalFactor = static_cast(pow(10, decimalPlace)); + assert(decimalFactor > 0); + for (int r = 0; r < _rows; r++) + { + for (int c = 0; c < _columns; c++) + { + double value = get(r, c); + double truncatedValue = floor(value * decimalFactor) / decimalFactor; + stream << "[" << truncatedValue << "] "; + } + stream << std::endl; + } +} + +//STATIC FUNCTIONS +std::unique_ptr Matrix::multiply(const Matrix &left, const Matrix &right) +{ + std::unique_ptr result = std::make_unique(left.getRows(),right.getColumns()); + multiply(left, right, *result); + return result; +} + +void Matrix::multiply(const Matrix& left, const Matrix& right, Matrix& target) +{ + assert(left.getColumns() == right.getRows()); + for(int row = 0; row < target.getRows(); ++row) + { + for(int column = 0; column < target.getColumns(); ++column) + { + double sum = 0.0; + for(int leftColumn = 0; leftColumn < left.getColumns(); ++leftColumn) + { + sum += left.get(row, leftColumn) * right.get(leftColumn, column); + } + target.set(row, column, sum); + } + } +} + +void Matrix::multiply(Matrix& target, const double scalar) +{ + for (int r = 0; r < target.getRows(); ++r) + { + for (int c = 0; c < target.getColumns(); ++c) + { + double value = target.get(r,c) * scalar; + target.set(r,c,value); + } + } +} + +void Matrix::add(Matrix& target, const double value) +{ + for (int r = 0; r < target.getRows(); ++r) + { + for (int c = 0; c < target.getColumns(); ++c) + { + double sum = target.get(r, c) + value; + target.set(r, c, sum); + } + } +} + +std::unique_ptr Matrix::fromVectorRows(const std::vector& vector) +{ + int size = vector.size(); + std::unique_ptr matrix = std::make_unique(size, 1); + for (int i = 0; i < size; 
++i) { + const double number = vector[i]; + matrix->set(i, 0, number); + } + return matrix; +} + +std::unique_ptr Matrix::fromVectorColumns(const std::vector& vector) +{ + int size = vector.size(); + std::unique_ptr matrix = std::make_unique(1, size); + for (int i = 0; i < size; ++i) { + const double number = vector[i]; + matrix->set(0, i, number); + } + return matrix; +} + +void Matrix::copy(const Matrix& source, Matrix& target) +{ + assert(source.getColumns() == target.getColumns()); + assert(source.getRows() == target.getRows()); + for (int r = 0; r < source.getRows(); ++r) + { + for (int c = 0; c < source.getRows(); ++c) + { + double value = source.get(r, c); + target.set(r, c, value); + } + } +} diff --git a/Miscellaneous/Matrix.h b/Miscellaneous/Matrix.h index 786ea5f..156f9ef 100644 --- a/Miscellaneous/Matrix.h +++ b/Miscellaneous/Matrix.h @@ -1,34 +1,34 @@ -#pragma once -#include -#include -#include -#include -#include -#include -#include - -class Matrix -{ -private: - std::vector _values; - int _rows; - int _columns; - -public: - Matrix(int rows, int columns); - Matrix(const Matrix& source); - double get(int row, int column) const; - void set(int row, int column, double value); - int getRows() const; - int getColumns() const; - void print(std::ostream& stream) const; - void print(std::ostream& stream, int decimalPlace) const; - - static std::unique_ptr multiply(const Matrix& left, const Matrix& right); - static void multiply(const Matrix& left, const Matrix& right, Matrix& target); - static void multiply(Matrix& target, double scalar); - static void add(Matrix& target, double value); - static std::unique_ptr fromVectorRows(const std::vector& vector); - static std::unique_ptr fromVectorColumns(const std::vector& vector); - static void copy(const Matrix& source, Matrix& target); -}; +#pragma once +#include +#include +#include +#include +#include +#include +#include + +class Matrix +{ +private: + std::vector _values; + int _rows; + int _columns; + +public: + Matrix(int rows, int columns); + Matrix(const Matrix& source); + double get(int row, int column) const; + void set(int row, int column, double value); + int getRows() const; + int getColumns() const; + void print(std::ostream& stream) const; + void print(std::ostream& stream, int decimalPlace) const; + + static std::unique_ptr multiply(const Matrix& left, const Matrix& right); + static void multiply(const Matrix& left, const Matrix& right, Matrix& target); + static void multiply(Matrix& target, double scalar); + static void add(Matrix& target, double value); + static std::unique_ptr fromVectorRows(const std::vector& vector); + static std::unique_ptr fromVectorColumns(const std::vector& vector); + static void copy(const Matrix& source, Matrix& target); +}; diff --git a/Miscellaneous/Random.cpp b/Miscellaneous/Random.cpp index 38e67ea..5b20628 100644 --- a/Miscellaneous/Random.cpp +++ b/Miscellaneous/Random.cpp @@ -1,49 +1,49 @@ -#include "Random.h" - -#include -#include - -std::mt19937& GetEngine() -{ - static std::random_device randomDevice; - static std::mt19937 engine(randomDevice()); - return engine; -} - -void RandomUtils::SetRandomSeed() -{ - SetSeed(std::time(nullptr)); -} - -void RandomUtils::SetSeed(long long seed) -{ - GetEngine().seed(seed); -} - -int RandomUtils::Range(int min, int max) -{ - std::uniform_int_distribution<> intDistribution(min, max); - int value = intDistribution(GetEngine()); - return value; -} - -double RandomUtils::Range(double min, double max) -{ - std::uniform_real_distribution<> 
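
Matrix stores its cells in one row-major vector, so element (row, column) lives at index row * _columns + column, which is the arithmetic that get and set share above. One detail in Matrix::copy worth flagging: the inner loop bound is `c < source.getRows()` rather than `c < source.getColumns()`, so non-square matrices copy the wrong number of columns; the presumably intended loop is:

    for (int r = 0; r < source.getRows(); ++r)
    {
        for (int c = 0; c < source.getColumns(); ++c) // was: c < source.getRows()
        {
            target.set(r, c, source.get(r, c));
        }
    }
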
realDistribution(min, max); - double value = realDistribution(GetEngine()); - return value; -} - -int RandomUtils::Int() -{ - std::uniform_int_distribution<> realDistribution; - int value = realDistribution(GetEngine()); - return value; -} - -double RandomUtils::Double() -{ - std::uniform_real_distribution<> realDistribution; - double value = realDistribution(GetEngine()); - return value; -} +#include "Random.h" + +#include +#include + +std::mt19937& GetEngine() +{ + static std::random_device randomDevice; + static std::mt19937 engine(randomDevice()); + return engine; +} + +void RandomUtils::SetRandomSeed() +{ + SetSeed(std::time(nullptr)); +} + +void RandomUtils::SetSeed(long long seed) +{ + GetEngine().seed(seed); +} + +int RandomUtils::Range(int min, int max) +{ + std::uniform_int_distribution<> intDistribution(min, max); + int value = intDistribution(GetEngine()); + return value; +} + +double RandomUtils::Range(double min, double max) +{ + std::uniform_real_distribution<> realDistribution(min, max); + double value = realDistribution(GetEngine()); + return value; +} + +int RandomUtils::Int() +{ + std::uniform_int_distribution<> realDistribution; + int value = realDistribution(GetEngine()); + return value; +} + +double RandomUtils::Double() +{ + std::uniform_real_distribution<> realDistribution; + double value = realDistribution(GetEngine()); + return value; +} diff --git a/Miscellaneous/Random.h b/Miscellaneous/Random.h index 2eef2ef..672a7ac 100644 --- a/Miscellaneous/Random.h +++ b/Miscellaneous/Random.h @@ -1,16 +1,16 @@ -#pragma once - -namespace RandomUtils -{ - // seed - void SetRandomSeed(); - void SetSeed(long long seed); - - // random fixed and floating point values - int Int(); - double Double(); - - // range of values - int Range(int min, int max); - double Range(double min, double max); -} +#pragma once + +namespace RandomUtils +{ + // seed + void SetRandomSeed(); + void SetSeed(long long seed); + + // random fixed and floating point values + int Int(); + double Double(); + + // range of values + int Range(int min, int max); + double Range(double min, double max); +} diff --git a/Miscellaneous/Tests/CMakeLists.txt b/Miscellaneous/Tests/CMakeLists.txt index 1a43817..01ac72e 100644 --- a/Miscellaneous/Tests/CMakeLists.txt +++ b/Miscellaneous/Tests/CMakeLists.txt @@ -1,5 +1,5 @@ -set(FILES - ../Matrix.cpp - MatrixTestMain.cpp -) -add_executable(Matrix ${FILES}) +set(FILES + ../Matrix.cpp + MatrixTestMain.cpp +) +add_executable(Matrix ${FILES}) diff --git a/Miscellaneous/Tests/MatrixTestMain.cpp b/Miscellaneous/Tests/MatrixTestMain.cpp index fc17c46..edef05f 100644 --- a/Miscellaneous/Tests/MatrixTestMain.cpp +++ b/Miscellaneous/Tests/MatrixTestMain.cpp @@ -1,76 +1,76 @@ -#include "../Matrix.h" -#include - -using namespace std; - -int main() -{ - cout << "Matrix * Matrix" << endl; - auto left = make_unique(1,3); - left->set(0,0,1); - left->set(0,1,2); - left->set(0,2,3); - auto right = make_unique(3,1); - right->set(0,0,4); - right->set(1,0,5); - right->set(2,0,6); - auto dotProduct = Matrix::multiply(*left, *right); - dotProduct->print(cout); - - cout << endl; - left = make_unique(3,1); - left->set(0,0,4); - left->set(1,0,5); - left->set(2,0,6); - right = make_unique(1,3); - right->set(0,0,1); - right->set(0,1,2); - right->set(0,2,3); - dotProduct = Matrix::multiply(*left, *right); - dotProduct->print(cout); - - cout << endl; - left = make_unique(1,3); - left->set(0,0,3); - left->set(0,1,4); - left->set(0,2,2); - right = make_unique(3,4); - right->set(0,0,13); - 
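
GetEngine above keeps one function-local static std::mt19937 seeded once from std::random_device, and SetSeed reseeds that same engine, so fixing the seed makes every later Range/Int/Double call reproducible. Note the two Range overloads differ at the upper end: uniform_int_distribution includes both bounds, while uniform_real_distribution samples the half-open interval. A usage sketch against the RandomUtils interface above:

    RandomUtils::SetSeed(42);                 // deterministic sequence from here on
    int die = RandomUtils::Range(1, 6);       // uniform over {1, ..., 6}, inclusive
    double w = RandomUtils::Range(-1.0, 1.0); // uniform over [-1.0, 1.0)
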
right->set(0,1,9); - right->set(0,2,7); - right->set(0,3,15); - right->set(1,0,8); - right->set(1,1,7); - right->set(1,2,4); - right->set(1,3,6); - right->set(2,0,6); - right->set(2,1,4); - right->set(2,2,0); - right->set(2,3,3); - dotProduct = Matrix::multiply(*left, *right); - dotProduct->print(cout); - - cout << endl; - left = make_unique(3,3); - left->set(0,0,0.9); - left->set(0,1,0.3); - left->set(0,2,0.4); - left->set(1,0,0.2); - left->set(1,1,0.8); - left->set(1,2,0.2); - left->set(2,0,0.1); - left->set(2,1,0.5); - left->set(2,2,0.6); - right = make_unique(3, 1); - right->set(0,0,0.9); - right->set(1,0,0.1); - right->set(2,0,0.8); - dotProduct = Matrix::multiply(*left, *right); - dotProduct->print(cout); - - cout << endl << "Matrix * Scalar" << endl; - Matrix::multiply(*dotProduct, 2.0); - dotProduct->print(cout); - - return 0; -} +#include "../Matrix.h" +#include + +using namespace std; + +int main() +{ + cout << "Matrix * Matrix" << endl; + auto left = make_unique(1,3); + left->set(0,0,1); + left->set(0,1,2); + left->set(0,2,3); + auto right = make_unique(3,1); + right->set(0,0,4); + right->set(1,0,5); + right->set(2,0,6); + auto dotProduct = Matrix::multiply(*left, *right); + dotProduct->print(cout); + + cout << endl; + left = make_unique(3,1); + left->set(0,0,4); + left->set(1,0,5); + left->set(2,0,6); + right = make_unique(1,3); + right->set(0,0,1); + right->set(0,1,2); + right->set(0,2,3); + dotProduct = Matrix::multiply(*left, *right); + dotProduct->print(cout); + + cout << endl; + left = make_unique(1,3); + left->set(0,0,3); + left->set(0,1,4); + left->set(0,2,2); + right = make_unique(3,4); + right->set(0,0,13); + right->set(0,1,9); + right->set(0,2,7); + right->set(0,3,15); + right->set(1,0,8); + right->set(1,1,7); + right->set(1,2,4); + right->set(1,3,6); + right->set(2,0,6); + right->set(2,1,4); + right->set(2,2,0); + right->set(2,3,3); + dotProduct = Matrix::multiply(*left, *right); + dotProduct->print(cout); + + cout << endl; + left = make_unique(3,3); + left->set(0,0,0.9); + left->set(0,1,0.3); + left->set(0,2,0.4); + left->set(1,0,0.2); + left->set(1,1,0.8); + left->set(1,2,0.2); + left->set(2,0,0.1); + left->set(2,1,0.5); + left->set(2,2,0.6); + right = make_unique(3, 1); + right->set(0,0,0.9); + right->set(1,0,0.1); + right->set(2,0,0.8); + dotProduct = Matrix::multiply(*left, *right); + dotProduct->print(cout); + + cout << endl << "Matrix * Scalar" << endl; + Matrix::multiply(*dotProduct, 2.0); + dotProduct->print(cout); + + return 0; +} diff --git a/NeuralNetwork/Common/ActivationFunctions.cpp b/NeuralNetwork/Common/ActivationFunctions.cpp index 0356b21..fd708b6 100644 --- a/NeuralNetwork/Common/ActivationFunctions.cpp +++ b/NeuralNetwork/Common/ActivationFunctions.cpp @@ -1,102 +1,102 @@ -#include "ActivationFunctions.h" - -namespace NeuralNetwork -{ - namespace Activation - { - double thresholdValue = 0.5; - - double sigmoid(const double& value) - { - double result = 1.0 / (1.0 + std::exp(-value)); - return result; - } - - void sigmoid(std::vector& vector) - { - for (double& i : vector) - { - i = sigmoid(i); - } - } - - double sign(const double& value) - { - if (value > 0.0) - { - return 1.0; - } - return -1.0; - } - - void sign(std::vector& vector) - { - for (double& i : vector) - { - i = sign(i); - } - } - - double threshold(const double& value) - { - if (value >= thresholdValue) - { - return 1.0; - } - return 0.0; - } - - void threshold(std::vector& vector) - { - for (double& i : vector) - { - i = threshold(i); - } - } - - void setThreshold(const double&& 
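
For reference, the 1x3 by 3x4 case in the test above is hand-checkable:

    [3 4 2] * | 13  9  7 15 |
              |  8  7  4  6 |  =  [3*13+4*8+2*6, ...]  =  [83 63 37 75]
              |  6  4  0  3 |

so that print call should emit a single row of 83, 63, 37 and 75. The later 3x3 by 3x1 case similarly yields (1.16, 0.42, 0.62), which the final scalar step doubles to (2.32, 0.84, 1.24).
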
threshold) - { - thresholdValue = threshold; - } - - void Apply(const EActivationFunctionType& activationFunctionType, double& value) - { - switch(activationFunctionType) - { - case EActivationFunctionType::None: break; - - case EActivationFunctionType::Sigmoid: - value = sigmoid(value); - break; - - case EActivationFunctionType::Sign: - value = sign(value); - break; - - case EActivationFunctionType::Threshold: - value = threshold(value); - break; - } - } - - void Apply(const EActivationFunctionType &activationFunctionType, std::vector &vector) - { - switch(activationFunctionType) - { - case EActivationFunctionType::None: break; - - case EActivationFunctionType::Sigmoid: - sigmoid(vector); - break; - - case EActivationFunctionType::Sign: - sign(vector); - break; - - case EActivationFunctionType::Threshold: - threshold(vector); - break; - } - } - } -} +#include "ActivationFunctions.h" + +namespace NeuralNetwork +{ + namespace Activation + { + double thresholdValue = 0.5; + + double sigmoid(const double& value) + { + double result = 1.0 / (1.0 + std::exp(-value)); + return result; + } + + void sigmoid(std::vector& vector) + { + for (double& i : vector) + { + i = sigmoid(i); + } + } + + double sign(const double& value) + { + if (value > 0.0) + { + return 1.0; + } + return -1.0; + } + + void sign(std::vector& vector) + { + for (double& i : vector) + { + i = sign(i); + } + } + + double threshold(const double& value) + { + if (value >= thresholdValue) + { + return 1.0; + } + return 0.0; + } + + void threshold(std::vector& vector) + { + for (double& i : vector) + { + i = threshold(i); + } + } + + void setThreshold(const double&& threshold) + { + thresholdValue = threshold; + } + + void Apply(const EActivationFunctionType& activationFunctionType, double& value) + { + switch(activationFunctionType) + { + case EActivationFunctionType::None: break; + + case EActivationFunctionType::Sigmoid: + value = sigmoid(value); + break; + + case EActivationFunctionType::Sign: + value = sign(value); + break; + + case EActivationFunctionType::Threshold: + value = threshold(value); + break; + } + } + + void Apply(const EActivationFunctionType &activationFunctionType, std::vector &vector) + { + switch(activationFunctionType) + { + case EActivationFunctionType::None: break; + + case EActivationFunctionType::Sigmoid: + sigmoid(vector); + break; + + case EActivationFunctionType::Sign: + sign(vector); + break; + + case EActivationFunctionType::Threshold: + threshold(vector); + break; + } + } + } +} diff --git a/NeuralNetwork/Common/ActivationFunctions.h b/NeuralNetwork/Common/ActivationFunctions.h index 4ecb870..387dbe5 100644 --- a/NeuralNetwork/Common/ActivationFunctions.h +++ b/NeuralNetwork/Common/ActivationFunctions.h @@ -1,31 +1,31 @@ -#pragma once - -#include -#include - -namespace NeuralNetwork -{ - namespace Activation - { - double sigmoid(const double&); - void sigmoid(std::vector&); - - double sign(const double&); - void sign(std::vector&); - - double threshold(const double&); - void threshold(std::vector&); - void setThreshold(const double&&); - - enum class EActivationFunctionType - { - None, - Sigmoid, - Sign, - Threshold, - }; - - void Apply(const EActivationFunctionType& activationFunctionType, double& value); - void Apply(const EActivationFunctionType& activationFunctionType, std::vector& vector); - } -} +#pragma once + +#include +#include + +namespace NeuralNetwork +{ + namespace Activation + { + double sigmoid(const double&); + void sigmoid(std::vector&); + + double sign(const double&); + void 
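
Two behavioural notes on the activation set above: sign maps zero to -1.0 (only strictly positive inputs give +1.0), and threshold compares against the module-level thresholdValue (default 0.5) and collapses every output to exactly 0.0 or 1.0, which is what later lets the MLP tests compare thresholded outputs to targets with plain operator==. A quick sanity sketch against those definitions:

    #include <cassert>
    #include "ActivationFunctions.h"

    int main()
    {
        using namespace NeuralNetwork::Activation;
        assert(sigmoid(0.0) == 0.5);   // 1 / (1 + e^0)
        assert(sign(0.0) == -1.0);     // zero is not strictly positive
        assert(threshold(0.5) == 1.0); // default thresholdValue is 0.5
        return 0;
    }

(The `const double&&` parameter of setThreshold is unusual; a plain by-value `double` would serve the same callers.)
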
sign(std::vector&); + + double threshold(const double&); + void threshold(std::vector&); + void setThreshold(const double&&); + + enum class EActivationFunctionType + { + None, + Sigmoid, + Sign, + Threshold, + }; + + void Apply(const EActivationFunctionType& activationFunctionType, double& value); + void Apply(const EActivationFunctionType& activationFunctionType, std::vector& vector); + } +} diff --git a/NeuralNetwork/Common/Neuron.cpp b/NeuralNetwork/Common/Neuron.cpp index 0006e45..1484469 100644 --- a/NeuralNetwork/Common/Neuron.cpp +++ b/NeuralNetwork/Common/Neuron.cpp @@ -1,67 +1,67 @@ -#include "Neuron.h" - -#include - -using namespace NeuralNetwork; - -Neuron::Neuron(int weights) -{ - _weights = std::vector(weights); -} - -double Neuron::feedforward(const std::vector& inputs, double bias) const -{ - // calculate Neuron's output - double output = bias; - for (int i = 0; i < _weights.size(); ++i) - { - output += _weights[i] * inputs[i]; - } - - // apply the Activation function (if existing) - Activation::Apply(ActivationFunction, output); - - return output; -} - -void Neuron::randomizeWeights() -{ - for (double& weight : _weights) - { - weight = RandomUtils::Range(-1.0, 1.0); - - // fix absolute zero weights - if (weight == 0.0) - { - weight = 0.000001; - } - } -} - -void Neuron::Serialize(std::ostream &stream) const -{ - stream << std::setprecision(18) << std::hex; - for (const double& _weight : _weights) - { - stream << _weight << std::endl; - } -} - -void Neuron::Deserialize(std::istream& stream) -{ - stream >> std::setprecision(18) >> std::hex; - for (double& _weight : _weights) - { - stream >> _weight; - } -} - -const std::vector& Neuron::getWeights() const -{ - return _weights; -} - -void Neuron::setWeights(const std::vector& weights) -{ - _weights = weights; -} +#include "Neuron.h" + +#include + +using namespace NeuralNetwork; + +Neuron::Neuron(int weights) +{ + _weights = std::vector(weights); +} + +double Neuron::feedforward(const std::vector& inputs, double bias) const +{ + // calculate Neuron's output + double output = bias; + for (int i = 0; i < _weights.size(); ++i) + { + output += _weights[i] * inputs[i]; + } + + // apply the Activation function (if existing) + Activation::Apply(ActivationFunction, output); + + return output; +} + +void Neuron::randomizeWeights() +{ + for (double& weight : _weights) + { + weight = RandomUtils::Range(-1.0, 1.0); + + // fix absolute zero weights + if (weight == 0.0) + { + weight = 0.000001; + } + } +} + +void Neuron::Serialize(std::ostream &stream) const +{ + stream << std::setprecision(18) << std::hex; + for (const double& _weight : _weights) + { + stream << _weight << std::endl; + } +} + +void Neuron::Deserialize(std::istream& stream) +{ + stream >> std::setprecision(18) >> std::hex; + for (double& _weight : _weights) + { + stream >> _weight; + } +} + +const std::vector& Neuron::getWeights() const +{ + return _weights; +} + +void Neuron::setWeights(const std::vector& weights) +{ + _weights = weights; +} diff --git a/NeuralNetwork/Common/Neuron.h b/NeuralNetwork/Common/Neuron.h index c9e749e..d7e95e2 100644 --- a/NeuralNetwork/Common/Neuron.h +++ b/NeuralNetwork/Common/Neuron.h @@ -1,29 +1,29 @@ -#pragma once - -#include "../../Miscellaneous/Matrix.h" -#include "../../Miscellaneous/Random.h" -#include "../../Miscellaneous/ISerializable.h" -#include "ActivationFunctions.h" - -namespace NeuralNetwork -{ - class Neuron final : public serialization::ISerializable - { - private: - std::vector _weights; - - public: - 
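
Neuron::feedforward above is a plain weighted sum seeded with the caller-supplied bias, followed by the optional activation. One caveat on the serialization: std::hex only affects integer insertion and extraction, so `stream << std::hex` does not change how the double weights are written; the manipulator that round-trips doubles exactly in text is std::hexfloat (C++11), as in this sketch of an alternative (an assumption about the intent, not what the patch does):

    #include <iostream>

    int main()
    {
        const double weight = 0.625;
        std::cout << std::hexfloat << weight << std::endl; // prints 0x1.4p-1, parses back exactly
        return 0;
    }
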
Activation::EActivationFunctionType ActivationFunction = Activation::EActivationFunctionType::None; - - explicit Neuron(int weightsLength); - void randomizeWeights(); - - double feedforward(const std::vector& inputs, double bias) const; - void setWeights(const std::vector& weights); - const std::vector& getWeights() const; - - // serialization - void Serialize(std::ostream &stream) const override; - void Deserialize(std::istream &stream) override; - }; -} +#pragma once + +#include "../../Miscellaneous/Matrix.h" +#include "../../Miscellaneous/Random.h" +#include "../../Miscellaneous/ISerializable.h" +#include "ActivationFunctions.h" + +namespace NeuralNetwork +{ + class Neuron final : public serialization::ISerializable + { + private: + std::vector _weights; + + public: + Activation::EActivationFunctionType ActivationFunction = Activation::EActivationFunctionType::None; + + explicit Neuron(int weightsLength); + void randomizeWeights(); + + double feedforward(const std::vector& inputs, double bias) const; + void setWeights(const std::vector& weights); + const std::vector& getWeights() const; + + // serialization + void Serialize(std::ostream &stream) const override; + void Deserialize(std::istream &stream) override; + }; +} diff --git a/NeuralNetwork/MLP/Layer.cpp b/NeuralNetwork/MLP/Layer.cpp index 70b59ee..2b416b0 100644 --- a/NeuralNetwork/MLP/Layer.cpp +++ b/NeuralNetwork/MLP/Layer.cpp @@ -1,55 +1,55 @@ -#include "Layer.h" - -using namespace NeuralNetwork; - -Layer::Layer(size_t neurons, size_t weights) -{ - for(size_t i = 0; i < neurons; i++) - { - std::unique_ptr neuron = std::make_unique(weights); - neuron->ActivationFunction = Activation::EActivationFunctionType::Sigmoid; - neuron->randomizeWeights(); - _neurons.push_back(std::move(neuron)); - } -} - -void Layer::Serialize(std::ostream& stream) const -{ - for(const auto& _neuron : _neurons) - { - Neuron& neuron = *_neuron; - neuron.Serialize(stream); - } -} - -void Layer::Deserialize(std::istream &stream) -{ - for(auto& _neuron : _neurons) - { - Neuron& neuron = *_neuron; - neuron.Deserialize(stream); - } -} - -std::vector Layer::feedforward(const std::vector& inputs) -{ - std::vector outputs(_neurons.size()); - for(size_t n = 0; n < _neurons.size(); n++) - { - Neuron& neuron = *_neurons[n]; - outputs[n] = neuron.feedforward(inputs, 0.0); - } - return outputs; -} - -size_t Layer::getNeuronsLength() const -{ - return _neurons.size(); -} - -Neuron& Layer::getNeuron(int index) const -{ - assert(index < _neurons.size()); - Neuron& neuron = *_neurons[index]; - return neuron; -} +#include "Layer.h" + +using namespace NeuralNetwork; + +Layer::Layer(size_t neurons, size_t weights) +{ + for(size_t i = 0; i < neurons; i++) + { + std::unique_ptr neuron = std::make_unique(weights); + neuron->ActivationFunction = Activation::EActivationFunctionType::Sigmoid; + neuron->randomizeWeights(); + _neurons.push_back(std::move(neuron)); + } +} + +void Layer::Serialize(std::ostream& stream) const +{ + for(const auto& _neuron : _neurons) + { + Neuron& neuron = *_neuron; + neuron.Serialize(stream); + } +} + +void Layer::Deserialize(std::istream &stream) +{ + for(auto& _neuron : _neurons) + { + Neuron& neuron = *_neuron; + neuron.Deserialize(stream); + } +} + +std::vector Layer::feedforward(const std::vector& inputs) +{ + std::vector outputs(_neurons.size()); + for(size_t n = 0; n < _neurons.size(); n++) + { + Neuron& neuron = *_neurons[n]; + outputs[n] = neuron.feedforward(inputs, 0.0); + } + return outputs; +} + +size_t Layer::getNeuronsLength() const +{ + 
return _neurons.size(); +} + +Neuron& Layer::getNeuron(int index) const +{ + assert(index < _neurons.size()); + Neuron& neuron = *_neurons[index]; + return neuron; +} diff --git a/NeuralNetwork/MLP/Layer.h b/NeuralNetwork/MLP/Layer.h index a1f29a1..e299d4d 100644 --- a/NeuralNetwork/MLP/Layer.h +++ b/NeuralNetwork/MLP/Layer.h @@ -1,23 +1,23 @@ -#pragma once - -#include -#include "../../Miscellaneous/ISerializable.h" -#include "../Common/ActivationFunctions.h" -#include "../Common/Neuron.h" - -namespace NeuralNetwork -{ - class Layer final : public serialization::ISerializable - { - private: - std::vector> _neurons; - - public: - Layer(size_t neurons, size_t weights); - std::vector feedforward(const std::vector& inputs); - void Serialize(std::ostream &stream) const override; - void Deserialize(std::istream &stream) override; - Neuron& getNeuron(int index) const; - size_t getNeuronsLength() const; - }; -} +#pragma once + +#include +#include "../../Miscellaneous/ISerializable.h" +#include "../Common/ActivationFunctions.h" +#include "../Common/Neuron.h" + +namespace NeuralNetwork +{ + class Layer final : public serialization::ISerializable + { + private: + std::vector> _neurons; + + public: + Layer(size_t neurons, size_t weights); + std::vector feedforward(const std::vector& inputs); + void Serialize(std::ostream &stream) const override; + void Deserialize(std::istream &stream) override; + Neuron& getNeuron(int index) const; + size_t getNeuronsLength() const; + }; +} diff --git a/NeuralNetwork/MLP/MLP.cpp b/NeuralNetwork/MLP/MLP.cpp index 303011e..82ef109 100644 --- a/NeuralNetwork/MLP/MLP.cpp +++ b/NeuralNetwork/MLP/MLP.cpp @@ -1,143 +1,143 @@ -#include "MLP.h" - -using namespace NeuralNetwork; - -MultiLayerPerceptron::MultiLayerPerceptron(int inputsLength, const std::vector &neuronsByLayerArr) -{ - int layersLength = neuronsByLayerArr.size(); - _inputsByLayer = std::vector>(layersLength); - int previousLayerLength = inputsLength + 1; // insert bias neuron - for (int l = 0; l < layersLength; l++) - { - _inputsByLayer[l] = std::vector(previousLayerLength); - int weightsLength = neuronsByLayerArr[l]; - if (l < (layersLength - 1)) - { - weightsLength++; // insert bias neuron - } - std::unique_ptr layer = std::make_unique(weightsLength, previousLayerLength); - _layers.push_back(std::move(layer)); - previousLayerLength = weightsLength; - } -} - -std::vector MultiLayerPerceptron::Feedforward(const std::vector& inputs) -{ - std::vector outputs(inputs); - for (int l = 0; l < _layers.size(); l++) - { - const std::unique_ptr &layer = _layers[l]; - outputs.emplace_back(1.0); // insert bias - _inputsByLayer[l] = outputs; - outputs = layer->feedforward(outputs); - } - - // apply activation function - Activation::Apply(ActivationFunction, outputs); - - return outputs; -} - -void GetPreviousLayerErrors(const Neuron& neuron, double error, std::vector& previousLayerErrors) -{ - const std::vector& weights = neuron.getWeights(); - previousLayerErrors.resize(weights.size()); - - double weightsSum = 0.0; - for (double weight : weights) - { - weightsSum += weight; - } - - for (int w = 0; w < weights.size(); ++w) - { - double percentage = weights[w] / weightsSum; - previousLayerErrors[w] += (error * percentage); - } -} - -void UpdateNeuronWeights(Neuron &neuron, const std::vector &inputs, const double output, const double error, const double learningRate) -{ - auto& weights = const_cast&>(neuron.getWeights()); - for (size_t w = 0; w < weights.size(); ++w) - { - double weightDifference = -(error * (output * (1.0 - 
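
A wiring note for the MLP constructor above: bias is not handled through the Neuron's bias parameter here (Layer::feedforward passes a fixed 0.0); instead the constructor sizes each layer for previousLayerLength = inputs + 1 and Feedforward appends a constant 1.0 to the input vector, so the bias is learned as an ordinary weight. Concretely, under that scheme:

    // shape sketch for MultiLayerPerceptron(2, {2, 1}) as wired above:
    //   inputs: 2, plus one constant 1.0 appended by Feedforward
    //   layer 0: 2 requested neurons + 1 bias neuron, 3 weights each
    //   layer 1: 1 output neuron, 3 weights
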
output)) * inputs[w]); - weightDifference = -(learningRate * weightDifference); - weights[w] += weightDifference; - } -} - -void MultiLayerPerceptron::BackPropagate(const std::vector& inputs, const std::vector& targets, const double learningRate) -{ - std::vector outputs = Feedforward(inputs); - std::vector errors(outputs.size()); - for (int o = 0; o < outputs.size(); ++o) - { - errors[o] = targets[o] - outputs[o]; - } - // BackPropagate errors - for (int l = (_layers.size() - 1); l >= 0; --l) - { - // reset previous layer errors - size_t size = inputs.size(); - if (l > 0) - { - const std::unique_ptr &layer = _layers[l - 1]; - size = layer->getNeuronsLength(); - } - std::vector previousLayerErrors(size); - - //BackPropagate errors - Layer &layer = GetLayer(l); - std::vector layerInputs = _inputsByLayer[l]; - std::vector layerOutputs = layer.feedforward(layerInputs); - for (int n = 0; n < layer.getNeuronsLength(); ++n) - { - Neuron &neuron = layer.getNeuron(n); - GetPreviousLayerErrors(neuron, errors[n], previousLayerErrors); - UpdateNeuronWeights(neuron, layerInputs, layerOutputs[n], errors[n], learningRate); - } - errors = previousLayerErrors; - } -} - -size_t MultiLayerPerceptron::GetLayersLength() const -{ - return _layers.size(); -} - -Layer &MultiLayerPerceptron::GetLayer(const size_t index) const -{ - assert(index < _layers.size()); - const std::unique_ptr &layer = _layers[index]; - return *layer; -} - -void MultiLayerPerceptron::Serialize(std::ostream &stream) const -{ - for (const auto& layer : _layers) - { - layer->Serialize(stream); - } -} - -void MultiLayerPerceptron::Deserialize(std::istream &stream) -{ - for (auto& layer : _layers) - { - layer->Deserialize(stream); - } -} - -void MultiLayerPerceptron::RandomizeWeights() const -{ - for (int l = 0; l < GetLayersLength(); ++l) - { - Layer &layer = GetLayer(l); - for (int n = 0; n < layer.getNeuronsLength(); ++n) - { - Neuron &neuron = layer.getNeuron(n); - neuron.randomizeWeights(); - } - } -} +#include "MLP.h" + +using namespace NeuralNetwork; + +MultiLayerPerceptron::MultiLayerPerceptron(int inputsLength, const std::vector &neuronsByLayerArr) +{ + int layersLength = neuronsByLayerArr.size(); + _inputsByLayer = std::vector>(layersLength); + int previousLayerLength = inputsLength + 1; // insert bias neuron + for (int l = 0; l < layersLength; l++) + { + _inputsByLayer[l] = std::vector(previousLayerLength); + int weightsLength = neuronsByLayerArr[l]; + if (l < (layersLength - 1)) + { + weightsLength++; // insert bias neuron + } + std::unique_ptr layer = std::make_unique(weightsLength, previousLayerLength); + _layers.push_back(std::move(layer)); + previousLayerLength = weightsLength; + } +} + +std::vector MultiLayerPerceptron::Feedforward(const std::vector& inputs) +{ + std::vector outputs(inputs); + for (int l = 0; l < _layers.size(); l++) + { + const std::unique_ptr &layer = _layers[l]; + outputs.emplace_back(1.0); // insert bias + _inputsByLayer[l] = outputs; + outputs = layer->feedforward(outputs); + } + + // apply activation function + Activation::Apply(ActivationFunction, outputs); + + return outputs; +} + +void GetPreviousLayerErrors(const Neuron& neuron, double error, std::vector& previousLayerErrors) +{ + const std::vector& weights = neuron.getWeights(); + previousLayerErrors.resize(weights.size()); + + double weightsSum = 0.0; + for (double weight : weights) + { + weightsSum += weight; + } + + for (int w = 0; w < weights.size(); ++w) + { + double percentage = weights[w] / weightsSum; + previousLayerErrors[w] += (error * 
percentage); + } +} + +void UpdateNeuronWeights(Neuron &neuron, const std::vector &inputs, const double output, const double error, const double learningRate) +{ + auto& weights = const_cast&>(neuron.getWeights()); + for (size_t w = 0; w < weights.size(); ++w) + { + double weightDifference = -(error * (output * (1.0 - output)) * inputs[w]); + weightDifference = -(learningRate * weightDifference); + weights[w] += weightDifference; + } +} + +void MultiLayerPerceptron::BackPropagate(const std::vector& inputs, const std::vector& targets, const double learningRate) +{ + std::vector outputs = Feedforward(inputs); + std::vector errors(outputs.size()); + for (int o = 0; o < outputs.size(); ++o) + { + errors[o] = targets[o] - outputs[o]; + } + // BackPropagate errors + for (int l = (_layers.size() - 1); l >= 0; --l) + { + // reset previous layer errors + size_t size = inputs.size(); + if (l > 0) + { + const std::unique_ptr &layer = _layers[l - 1]; + size = layer->getNeuronsLength(); + } + std::vector previousLayerErrors(size); + + //BackPropagate errors + Layer &layer = GetLayer(l); + std::vector layerInputs = _inputsByLayer[l]; + std::vector layerOutputs = layer.feedforward(layerInputs); + for (int n = 0; n < layer.getNeuronsLength(); ++n) + { + Neuron &neuron = layer.getNeuron(n); + GetPreviousLayerErrors(neuron, errors[n], previousLayerErrors); + UpdateNeuronWeights(neuron, layerInputs, layerOutputs[n], errors[n], learningRate); + } + errors = previousLayerErrors; + } +} + +size_t MultiLayerPerceptron::GetLayersLength() const +{ + return _layers.size(); +} + +Layer &MultiLayerPerceptron::GetLayer(const size_t index) const +{ + assert(index < _layers.size()); + const std::unique_ptr &layer = _layers[index]; + return *layer; +} + +void MultiLayerPerceptron::Serialize(std::ostream &stream) const +{ + for (const auto& layer : _layers) + { + layer->Serialize(stream); + } +} + +void MultiLayerPerceptron::Deserialize(std::istream &stream) +{ + for (auto& layer : _layers) + { + layer->Deserialize(stream); + } +} + +void MultiLayerPerceptron::RandomizeWeights() const +{ + for (int l = 0; l < GetLayersLength(); ++l) + { + Layer &layer = GetLayer(l); + for (int n = 0; n < layer.getNeuronsLength(); ++n) + { + Neuron &neuron = layer.getNeuron(n); + neuron.randomizeWeights(); + } + } +} diff --git a/NeuralNetwork/MLP/MLP.h b/NeuralNetwork/MLP/MLP.h index 2c888fb..4171154 100644 --- a/NeuralNetwork/MLP/MLP.h +++ b/NeuralNetwork/MLP/MLP.h @@ -1,26 +1,26 @@ -#pragma once - -#include "../Common/ActivationFunctions.h" -#include "Layer.h" - -namespace NeuralNetwork -{ - class MultiLayerPerceptron final : public serialization::ISerializable - { - private: - std::vector> _layers; - std::vector> _inputsByLayer; - - public: - Activation::EActivationFunctionType ActivationFunction = Activation::EActivationFunctionType::None; - - MultiLayerPerceptron(int inputsLength, const std::vector &neuronsByLayerArr); - std::vector Feedforward(const std::vector& inputs); - void BackPropagate(const std::vector& inputs, const std::vector& targets, const double learningRate); - void Serialize(std::ostream& stream) const override; - void Deserialize(std::istream& stream) override; - size_t GetLayersLength() const; - Layer& GetLayer(size_t index) const; - void RandomizeWeights() const; - }; -} +#pragma once + +#include "../Common/ActivationFunctions.h" +#include "Layer.h" + +namespace NeuralNetwork +{ + class MultiLayerPerceptron final : public serialization::ISerializable + { + private: + std::vector> _layers; + std::vector> 
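
Two details of the backpropagation above are easy to miss. GetPreviousLayerErrors splits a neuron's error across its inputs in proportion to weight / weightsSum (note weightsSum can pass near zero for mixed-sign weights, which would inflate the division; weighting by each weight times the derivative is the more common formulation). And the pair of negations in UpdateNeuronWeights cancels into a conventional delta-rule step with the sigmoid derivative output * (1 - output):

    // the two negations in UpdateNeuronWeights reduce to:
    weights[w] += learningRate * error * output * (1.0 - output) * inputs[w];
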
_inputsByLayer; + + public: + Activation::EActivationFunctionType ActivationFunction = Activation::EActivationFunctionType::None; + + MultiLayerPerceptron(int inputsLength, const std::vector &neuronsByLayerArr); + std::vector Feedforward(const std::vector& inputs); + void BackPropagate(const std::vector& inputs, const std::vector& targets, const double learningRate); + void Serialize(std::ostream& stream) const override; + void Deserialize(std::istream& stream) override; + size_t GetLayersLength() const; + Layer& GetLayer(size_t index) const; + void RandomizeWeights() const; + }; +} diff --git a/NeuralNetwork/MLP/Tests/CMakeLists.txt b/NeuralNetwork/MLP/Tests/CMakeLists.txt index 74dab09..7d1b8e7 100644 --- a/NeuralNetwork/MLP/Tests/CMakeLists.txt +++ b/NeuralNetwork/MLP/Tests/CMakeLists.txt @@ -1,13 +1,13 @@ -# uncomment to persist weights from MLP tests -#add_definitions(-DPERSIST_WEIGHTS) - -set(FILES - ../../../Miscellaneous/Random.cpp - ../../../Miscellaneous/ISerializable.cpp - ../../Common/ActivationFunctions.cpp - ../../Common/Neuron.cpp - ../Layer.cpp - ../MLP.cpp - MLPTestMain.cpp -) -add_executable(MultiLayerPerceptron ${FILES}) +# uncomment to persist weights from MLP tests +#add_definitions(-DPERSIST_WEIGHTS) + +set(FILES + ../../../Miscellaneous/Random.cpp + ../../../Miscellaneous/ISerializable.cpp + ../../Common/ActivationFunctions.cpp + ../../Common/Neuron.cpp + ../Layer.cpp + ../MLP.cpp + MLPTestMain.cpp +) +add_executable(MultiLayerPerceptron ${FILES}) diff --git a/NeuralNetwork/MLP/Tests/MLPTestMain.cpp b/NeuralNetwork/MLP/Tests/MLPTestMain.cpp index 146df9b..e131038 100644 --- a/NeuralNetwork/MLP/Tests/MLPTestMain.cpp +++ b/NeuralNetwork/MLP/Tests/MLPTestMain.cpp @@ -1,198 +1,198 @@ -#include "../MLP.h" - -#include - -using namespace NeuralNetwork; - -void ApplySupervisedLearning(MultiLayerPerceptron& mlp, - // training inputs and outputs - const std::vector>& trainingInputs, - const std::vector>& trainingOutputs) -{ - size_t totalIterations = 0L; - size_t iteration = 0L; - size_t trainingIndex = 0; - size_t rightGuesses = 0; - - while (rightGuesses < trainingInputs.size()) - { - const std::vector& inputs = trainingInputs[trainingIndex]; - const std::vector& expectedOutputs = trainingOutputs[trainingIndex]; - const std::vector outputs = mlp.Feedforward(inputs); - - if (outputs == expectedOutputs) - { - rightGuesses++; - } - else - { - iteration++; - totalIterations++; - mlp.BackPropagate(inputs, expectedOutputs, 0.1); - rightGuesses = 0; - - // if number of iteration exploits a threshold - if (iteration >= 100000ll) - { - iteration = 0l; - mlp.RandomizeWeights(); - } - } - trainingIndex++; - trainingIndex %= trainingInputs.size(); - } - printf("The network has been trained in '%zu' iterations.\n", totalIterations); -} - -void TEST_MLP_XOR_2_2_1() -{ - std::cout << __FUNCTION__ << "... 
"; - std::string weightsFilePath = ".\\" + std::string(__FUNCTION__); - Activation::setThreshold(0.7); - RandomUtils::SetRandomSeed(); - std::vector neuronsByLayer({2,1}); - MultiLayerPerceptron mlp(2, neuronsByLayer); - mlp.ActivationFunction = Activation::EActivationFunctionType::Threshold; - mlp.RandomizeWeights(); -#ifdef PERSIST_WEIGHTS - loadWeightsFromFile(weightsFilePath.c_str(), mlp); -#endif - - std::vector> trainingInputs = { { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } }; - std::vector> trainingOutputs = { {0.0}, {1.0}, {1.0}, {0.0} }; - ApplySupervisedLearning(mlp, trainingInputs, trainingOutputs); - -#ifdef PERSIST_WEIGHTS - serializeToFile(weightsFilePath.c_str(), mlp); -#endif -} - -void TEST_MLP_XOR_2_3_1() -{ - std::cout << __FUNCTION__ << "... "; - std::string weightsFilePath = ".\\" + std::string(__FUNCTION__); - Activation::setThreshold(0.7); - RandomUtils::SetRandomSeed(); - std::vector neuronsByLayer({3,1}); - MultiLayerPerceptron mlp(2, neuronsByLayer); - mlp.ActivationFunction = Activation::EActivationFunctionType::Threshold; - mlp.RandomizeWeights(); - -#ifdef PERSIST_WEIGHTS - loadWeightsFromFile(weightsFilePath.c_str(), mlp); -#endif - - std::vector> trainingInputs = { { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } }; - std::vector> trainingOutputs = { {0.0}, {1.0}, {1.0}, {0.0} }; - ApplySupervisedLearning(mlp, trainingInputs, trainingOutputs); - -#ifdef PERSIST_WEIGHTS - serializeToFile(weightsFilePath.c_str(), mlp); -#endif -} - -void TEST_MLP_XOR_2_3_3_3_1() -{ - std::cout << __FUNCTION__ << "... "; - std::string weightsFilePath = ".\\" + std::string(__FUNCTION__); - Activation::setThreshold(0.7); - RandomUtils::SetRandomSeed(); - std::vector neuronsByLayer({3, 3, 3, 1}); - MultiLayerPerceptron mlp(2, neuronsByLayer); - mlp.ActivationFunction = Activation::EActivationFunctionType::Threshold; - mlp.RandomizeWeights(); - -#ifdef PERSIST_WEIGHTS - loadWeightsFromFile(weightsFilePath.c_str(), mlp); -#endif - - std::vector> trainingInputs = { { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } }; - std::vector> trainingOutputs = { {0.0}, {1.0}, {1.0}, {0.0} }; - ApplySupervisedLearning(mlp, trainingInputs, trainingOutputs); - -#ifdef PERSIST_WEIGHTS - serializeToFile(weightsFilePath.c_str(), mlp); -#endif -} - -void TEST_MLP_XOR_2_3_2() -{ - std::cout << __FUNCTION__ << "... "; - std::string weightsFilePath = ".\\" + std::string(__FUNCTION__); - Activation::setThreshold(0.7); - RandomUtils::SetRandomSeed(); - std::vector neuronsByLayer({3, 2}); - MultiLayerPerceptron mlp(2, neuronsByLayer); - mlp.ActivationFunction = Activation::EActivationFunctionType::Threshold; - mlp.RandomizeWeights(); - -#ifdef PERSIST_WEIGHTS - loadWeightsFromFile(weightsFilePath.c_str(), mlp); -#endif - - std::vector> trainingInputs = { { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } }; - std::vector> trainingOutputs = { {1.0, 0.0}, {0.0, 1.0}, {0.0, 1.0}, {1.0, 0.0} }; - ApplySupervisedLearning(mlp, trainingInputs, trainingOutputs); - -#ifdef PERSIST_WEIGHTS - serializeToFile(weightsFilePath.c_str(), mlp); -#endif -} - -void TEST_MLP_number_recognition_digital_clock_0_to_9() -{ - std::cout << __FUNCTION__ << "... 
"; - std::string weightsFilePath = ".\\" + std::string(__FUNCTION__); - Activation::setThreshold(0.7); - RandomUtils::SetRandomSeed(); - std::vector neuronsByLayer({10, 10}); - MultiLayerPerceptron mlp(7, neuronsByLayer); - mlp.ActivationFunction = Activation::EActivationFunctionType::Threshold; - mlp.RandomizeWeights(); - -#ifdef PERSIST_WEIGHTS - loadWeightsFromFile(weightsFilePath.c_str(), mlp); -#endif - - std::vector> trainingInputs = - { - { 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0 }, //0 - { 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0 }, //1 - { 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0 }, //2 - { 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0 }, //3 - { 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0 }, //4 - { 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0 }, //5 - { 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0 }, //6 - { 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0 }, //7 - { 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 }, //8 - { 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0 }, //9 - }; - std::vector> trainingOutputs = - { - { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, - { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, - { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, - { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, - { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, - { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 }, - { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 }, - { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 }, - { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 }, - { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }, - }; - ApplySupervisedLearning(mlp, trainingInputs, trainingOutputs); - -#ifdef PERSIST_WEIGHTS - serialization::serializeToFile(weightsFilePath.c_str(), mlp); -#endif -} - -int main() -{ - TEST_MLP_XOR_2_2_1(); - TEST_MLP_XOR_2_3_1(); - TEST_MLP_XOR_2_3_3_3_1(); - TEST_MLP_XOR_2_3_2(); - TEST_MLP_number_recognition_digital_clock_0_to_9(); - return 0; -} +#include "../MLP.h" + +#include + +using namespace NeuralNetwork; + +void ApplySupervisedLearning(MultiLayerPerceptron& mlp, + // training inputs and outputs + const std::vector>& trainingInputs, + const std::vector>& trainingOutputs) +{ + size_t totalIterations = 0L; + size_t iteration = 0L; + size_t trainingIndex = 0; + size_t rightGuesses = 0; + + while (rightGuesses < trainingInputs.size()) + { + const std::vector& inputs = trainingInputs[trainingIndex]; + const std::vector& expectedOutputs = trainingOutputs[trainingIndex]; + const std::vector outputs = mlp.Feedforward(inputs); + + if (outputs == expectedOutputs) + { + rightGuesses++; + } + else + { + iteration++; + totalIterations++; + mlp.BackPropagate(inputs, expectedOutputs, 0.1); + rightGuesses = 0; + + // if number of iteration exploits a threshold + if (iteration >= 100000ll) + { + iteration = 0l; + mlp.RandomizeWeights(); + } + } + trainingIndex++; + trainingIndex %= trainingInputs.size(); + } + printf("The network has been trained in '%zu' iterations.\n", totalIterations); +} + +void TEST_MLP_XOR_2_2_1() +{ + std::cout << __FUNCTION__ << "... 
"; + std::string weightsFilePath = ".\\" + std::string(__FUNCTION__); + Activation::setThreshold(0.7); + RandomUtils::SetRandomSeed(); + std::vector neuronsByLayer({2,1}); + MultiLayerPerceptron mlp(2, neuronsByLayer); + mlp.ActivationFunction = Activation::EActivationFunctionType::Threshold; + mlp.RandomizeWeights(); +#ifdef PERSIST_WEIGHTS + loadWeightsFromFile(weightsFilePath.c_str(), mlp); +#endif + + std::vector> trainingInputs = { { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } }; + std::vector> trainingOutputs = { {0.0}, {1.0}, {1.0}, {0.0} }; + ApplySupervisedLearning(mlp, trainingInputs, trainingOutputs); + +#ifdef PERSIST_WEIGHTS + serializeToFile(weightsFilePath.c_str(), mlp); +#endif +} + +void TEST_MLP_XOR_2_3_1() +{ + std::cout << __FUNCTION__ << "... "; + std::string weightsFilePath = ".\\" + std::string(__FUNCTION__); + Activation::setThreshold(0.7); + RandomUtils::SetRandomSeed(); + std::vector neuronsByLayer({3,1}); + MultiLayerPerceptron mlp(2, neuronsByLayer); + mlp.ActivationFunction = Activation::EActivationFunctionType::Threshold; + mlp.RandomizeWeights(); + +#ifdef PERSIST_WEIGHTS + loadWeightsFromFile(weightsFilePath.c_str(), mlp); +#endif + + std::vector> trainingInputs = { { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } }; + std::vector> trainingOutputs = { {0.0}, {1.0}, {1.0}, {0.0} }; + ApplySupervisedLearning(mlp, trainingInputs, trainingOutputs); + +#ifdef PERSIST_WEIGHTS + serializeToFile(weightsFilePath.c_str(), mlp); +#endif +} + +void TEST_MLP_XOR_2_3_3_3_1() +{ + std::cout << __FUNCTION__ << "... "; + std::string weightsFilePath = ".\\" + std::string(__FUNCTION__); + Activation::setThreshold(0.7); + RandomUtils::SetRandomSeed(); + std::vector neuronsByLayer({3, 3, 3, 1}); + MultiLayerPerceptron mlp(2, neuronsByLayer); + mlp.ActivationFunction = Activation::EActivationFunctionType::Threshold; + mlp.RandomizeWeights(); + +#ifdef PERSIST_WEIGHTS + loadWeightsFromFile(weightsFilePath.c_str(), mlp); +#endif + + std::vector> trainingInputs = { { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } }; + std::vector> trainingOutputs = { {0.0}, {1.0}, {1.0}, {0.0} }; + ApplySupervisedLearning(mlp, trainingInputs, trainingOutputs); + +#ifdef PERSIST_WEIGHTS + serializeToFile(weightsFilePath.c_str(), mlp); +#endif +} + +void TEST_MLP_XOR_2_3_2() +{ + std::cout << __FUNCTION__ << "... "; + std::string weightsFilePath = ".\\" + std::string(__FUNCTION__); + Activation::setThreshold(0.7); + RandomUtils::SetRandomSeed(); + std::vector neuronsByLayer({3, 2}); + MultiLayerPerceptron mlp(2, neuronsByLayer); + mlp.ActivationFunction = Activation::EActivationFunctionType::Threshold; + mlp.RandomizeWeights(); + +#ifdef PERSIST_WEIGHTS + loadWeightsFromFile(weightsFilePath.c_str(), mlp); +#endif + + std::vector> trainingInputs = { { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } }; + std::vector> trainingOutputs = { {1.0, 0.0}, {0.0, 1.0}, {0.0, 1.0}, {1.0, 0.0} }; + ApplySupervisedLearning(mlp, trainingInputs, trainingOutputs); + +#ifdef PERSIST_WEIGHTS + serializeToFile(weightsFilePath.c_str(), mlp); +#endif +} + +void TEST_MLP_number_recognition_digital_clock_0_to_9() +{ + std::cout << __FUNCTION__ << "... 
"; + std::string weightsFilePath = ".\\" + std::string(__FUNCTION__); + Activation::setThreshold(0.7); + RandomUtils::SetRandomSeed(); + std::vector neuronsByLayer({10, 10}); + MultiLayerPerceptron mlp(7, neuronsByLayer); + mlp.ActivationFunction = Activation::EActivationFunctionType::Threshold; + mlp.RandomizeWeights(); + +#ifdef PERSIST_WEIGHTS + loadWeightsFromFile(weightsFilePath.c_str(), mlp); +#endif + + std::vector> trainingInputs = + { + { 1.0, 1.0, 1.0, 0.0, 1.0, 1.0, 1.0 }, //0 + { 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0 }, //1 + { 1.0, 0.0, 1.0, 1.0, 1.0, 0.0, 1.0 }, //2 + { 1.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0 }, //3 + { 0.0, 1.0, 1.0, 1.0, 0.0, 1.0, 0.0 }, //4 + { 1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 1.0 }, //5 + { 1.0, 1.0, 0.0, 1.0, 1.0, 1.0, 1.0 }, //6 + { 1.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0 }, //7 + { 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 }, //8 + { 1.0, 1.0, 1.0, 1.0, 0.0, 1.0, 1.0 }, //9 + }; + std::vector> trainingOutputs = + { + { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, + { 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, + { 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, + { 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, + { 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, + { 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 }, + { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0 }, + { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0 }, + { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0 }, + { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }, + }; + ApplySupervisedLearning(mlp, trainingInputs, trainingOutputs); + +#ifdef PERSIST_WEIGHTS + serialization::serializeToFile(weightsFilePath.c_str(), mlp); +#endif +} + +int main() +{ + TEST_MLP_XOR_2_2_1(); + TEST_MLP_XOR_2_3_1(); + TEST_MLP_XOR_2_3_3_3_1(); + TEST_MLP_XOR_2_3_2(); + TEST_MLP_number_recognition_digital_clock_0_to_9(); + return 0; +} diff --git a/NeuralNetwork/Perceptron/Perceptron.cpp b/NeuralNetwork/Perceptron/Perceptron.cpp index cc082d9..9227be8 100644 --- a/NeuralNetwork/Perceptron/Perceptron.cpp +++ b/NeuralNetwork/Perceptron/Perceptron.cpp @@ -1,51 +1,51 @@ -#include "Perceptron.h" - -using namespace NeuralNetwork; - -Perceptron::Perceptron(int weights) : _bias(0.0) -{ - _neuron = std::make_unique(weights); -} - -double Perceptron::feedforward(const std::vector& inputs) -{ - double output = _neuron->feedforward(inputs, _bias); - return output; -} - -void Perceptron::train(const std::vector& inputs, double target, double learningRate) -{ - double output = feedforward(inputs); - double error = (target - output); - std::vector weights = _neuron->getWeights(); - for (int c = 0; c < weights.size(); ++c) - { - weights[c] += inputs[c] * error * learningRate; - } - _neuron->setWeights(weights); - _bias += error; -} - -void Perceptron::Serialize(std::ostream &stream) const -{ - stream << std::hex; - stream << _bias << std::endl; - _neuron->Serialize(stream); -} - -void Perceptron::Deserialize(std::istream &stream) -{ - stream >> _bias; - _neuron->Deserialize(stream); -} - -void Perceptron::randomizeWeights() -{ - _neuron->randomizeWeights(); - _bias = RandomUtils::Range(-0.1, 0.1); -} - -Neuron *Perceptron::GetNeuron() -{ - return _neuron.get(); -} +#include "Perceptron.h" + +using namespace NeuralNetwork; + +Perceptron::Perceptron(int weights) : _bias(0.0) +{ + _neuron = std::make_unique(weights); +} + +double Perceptron::feedforward(const std::vector& inputs) +{ + double output = _neuron->feedforward(inputs, _bias); + return output; +} + +void Perceptron::train(const std::vector& inputs, double 
target, double learningRate) +{ + double output = feedforward(inputs); + double error = (target - output); + std::vector weights = _neuron->getWeights(); + for (int c = 0; c < weights.size(); ++c) + { + weights[c] += inputs[c] * error * learningRate; + } + _neuron->setWeights(weights); + _bias += error; +} + +void Perceptron::Serialize(std::ostream &stream) const +{ + stream << std::hex; + stream << _bias << std::endl; + _neuron->Serialize(stream); +} + +void Perceptron::Deserialize(std::istream &stream) +{ + stream >> _bias; + _neuron->Deserialize(stream); +} + +void Perceptron::randomizeWeights() +{ + _neuron->randomizeWeights(); + _bias = RandomUtils::Range(-0.1, 0.1); +} + +Neuron *Perceptron::GetNeuron() +{ + return _neuron.get(); +} diff --git a/NeuralNetwork/Perceptron/Perceptron.h b/NeuralNetwork/Perceptron/Perceptron.h index 1bb0993..40c1c8a 100644 --- a/NeuralNetwork/Perceptron/Perceptron.h +++ b/NeuralNetwork/Perceptron/Perceptron.h @@ -1,24 +1,24 @@ -#pragma once - -#include -#include "../../Miscellaneous/ISerializable.h" -#include "../Common/Neuron.h" - -namespace NeuralNetwork -{ - class Perceptron final : public serialization::ISerializable - { - private: - double _bias; - std::unique_ptr _neuron; - - public: - explicit Perceptron(int weights); - Neuron* GetNeuron(); - double feedforward(const std::vector& inputs); - void train(const std::vector& inputs, double target, double learningRate); - void randomizeWeights(); - void Serialize(std::ostream &stream) const override; - void Deserialize(std::istream &stream) override; - }; -} +#pragma once + +#include +#include "../../Miscellaneous/ISerializable.h" +#include "../Common/Neuron.h" + +namespace NeuralNetwork +{ + class Perceptron final : public serialization::ISerializable + { + private: + double _bias; + std::unique_ptr _neuron; + + public: + explicit Perceptron(int weights); + Neuron* GetNeuron(); + double feedforward(const std::vector& inputs); + void train(const std::vector& inputs, double target, double learningRate); + void randomizeWeights(); + void Serialize(std::ostream &stream) const override; + void Deserialize(std::istream &stream) override; + }; +} diff --git a/NeuralNetwork/Perceptron/Tests/CMakeLists.txt b/NeuralNetwork/Perceptron/Tests/CMakeLists.txt index 24a19fc..9944f74 100644 --- a/NeuralNetwork/Perceptron/Tests/CMakeLists.txt +++ b/NeuralNetwork/Perceptron/Tests/CMakeLists.txt @@ -1,12 +1,12 @@ -# uncomment to persist weights from MLP tests -#add_definitions(-DPERSIST_WEIGHTS) - -set(FILES - ../../../Miscellaneous/Random.cpp - ../../../Miscellaneous/ISerializable.cpp - ../../Common/ActivationFunctions.cpp - ../../Common/Neuron.cpp - ../Perceptron.cpp - PerceptronTestMain.cpp -) -add_executable(Perceptron ${FILES}) +# uncomment to persist weights from MLP tests +#add_definitions(-DPERSIST_WEIGHTS) + +set(FILES + ../../../Miscellaneous/Random.cpp + ../../../Miscellaneous/ISerializable.cpp + ../../Common/ActivationFunctions.cpp + ../../Common/Neuron.cpp + ../Perceptron.cpp + PerceptronTestMain.cpp +) +add_executable(Perceptron ${FILES}) diff --git a/NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp b/NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp index 7fcb6bd..50b3cd6 100644 --- a/NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp +++ b/NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp @@ -1,103 +1,103 @@ -#include "../Perceptron.h" - -#include - -void Test_Neuron_W1_greater_than_W2() -{ - std::cout << __FUNCTION__ << "... 
"; - RandomUtils::SetRandomSeed(); - std::unique_ptr perceptron = std::make_unique(2); - perceptron->GetNeuron()->ActivationFunction = NeuralNetwork::Activation::EActivationFunctionType::Sign; - perceptron->randomizeWeights(); -#ifdef PERSIST_WEIGHTS - std::string weightsFilePath = ".\\" + std::string(__FUNCTION__); - loadWeightsFromFile(weightsFilePath.c_str(), *perceptron); -#endif - - int iterations = 0; - std::vector> inputs = - { - std::vector {1.0, 0.5}, // 1.0 - std::vector {0.5, 1.0}, // -1.0 - std::vector {10.0, 0.0}, // 1.0 - std::vector {0.0, 10.0}, // -1.0 - std::vector {0.0, -10.0}, // 1.0 - std::vector {-10.0, 0.0}, // -1.0 - std::vector {10.0, -10.0}, // 1.0 - std::vector {-10.0, 10.0}, // -1.0 - std::vector {1000.0, -1000.0}, // 1.0 - std::vector {-1000.0, 1000.0}, // -1.0 - std::vector {351.0, -179.0}, // 1.0 - std::vector {-499.8, 732.0}, // -1.0 - }; - std::vector expectedOutputs = {1.0, -1.0, 1.0, -1.0, 1.0, -1.0, - 1.0, -1.0, 1.0, -1.0, 1.0, -1.0}; - for (int i = 0; i < inputs.size(); ++i) - { - double guess = perceptron->feedforward(inputs[i]); - if (guess != expectedOutputs[i]) - { - iterations++; - perceptron->train(inputs[i], expectedOutputs[i], 0.1); - i = -1; - } - } - std::cout << "The network has been trained! (iterations: " << iterations << ")" << std::endl; -#ifdef PERSIST_WEIGHTS - serializeToFile(weightsFilePath.c_str(), *perceptron); -#endif -} - -void Test_Neuron_W2_greater_than_W1() -{ - std::cout << __FUNCTION__ << "... "; - RandomUtils::SetRandomSeed(); - std::unique_ptr perceptron = std::make_unique(2); - perceptron->GetNeuron()->ActivationFunction = NeuralNetwork::Activation::EActivationFunctionType::Sign; - perceptron->randomizeWeights(); -#ifdef PERSIST_WEIGHTS - std::string weightsFilePath = ".\\" + std::string(__FUNCTION__); - loadWeightsFromFile(weightsFilePath.c_str(), *perceptron); -#endif - int iterations = 0; - std::vector> inputs = - { - std::vector {1.0, 0.5}, // -1.0 - std::vector {0.5, 1.0}, // 1.0 - std::vector {10.0, 0.0}, // -1.0 - std::vector {0.0, 10.0}, // 1.0 - std::vector {0.0, -10.0}, // -1.0 - std::vector {-10.0, 0.0}, // 1.0 - std::vector {10.0, -10.0}, // -1.0 - std::vector {-10.0, 10.0}, // 1.0 - }; - std::vector expectedOutputs = {-1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0}; - - // apply sigmoid on inputs - for (std::vector& input : inputs) - { - NeuralNetwork::Activation::sigmoid(input); - } - - for (int i = 0; i < inputs.size(); ++i) - { - double guess = perceptron->feedforward(inputs[i]); - if (guess != expectedOutputs[i]) - { - iterations++; - perceptron->train(inputs[i], expectedOutputs[i], 0.1); - i = -1; - } - } - std::cout << "The network has been trained! (iterations: " << iterations << ")" << std::endl; -#ifdef PERSIST_WEIGHTS - serializeToFile(weightsFilePath.c_str(), *perceptron); -#endif -} - -int main() -{ - Test_Neuron_W1_greater_than_W2(); - Test_Neuron_W2_greater_than_W1(); - return 0; -} +#include "../Perceptron.h" + +#include + +void Test_Neuron_W1_greater_than_W2() +{ + std::cout << __FUNCTION__ << "... 
"; + RandomUtils::SetRandomSeed(); + std::unique_ptr perceptron = std::make_unique(2); + perceptron->GetNeuron()->ActivationFunction = NeuralNetwork::Activation::EActivationFunctionType::Sign; + perceptron->randomizeWeights(); +#ifdef PERSIST_WEIGHTS + std::string weightsFilePath = ".\\" + std::string(__FUNCTION__); + loadWeightsFromFile(weightsFilePath.c_str(), *perceptron); +#endif + + int iterations = 0; + std::vector> inputs = + { + std::vector {1.0, 0.5}, // 1.0 + std::vector {0.5, 1.0}, // -1.0 + std::vector {10.0, 0.0}, // 1.0 + std::vector {0.0, 10.0}, // -1.0 + std::vector {0.0, -10.0}, // 1.0 + std::vector {-10.0, 0.0}, // -1.0 + std::vector {10.0, -10.0}, // 1.0 + std::vector {-10.0, 10.0}, // -1.0 + std::vector {1000.0, -1000.0}, // 1.0 + std::vector {-1000.0, 1000.0}, // -1.0 + std::vector {351.0, -179.0}, // 1.0 + std::vector {-499.8, 732.0}, // -1.0 + }; + std::vector expectedOutputs = {1.0, -1.0, 1.0, -1.0, 1.0, -1.0, + 1.0, -1.0, 1.0, -1.0, 1.0, -1.0}; + for (int i = 0; i < inputs.size(); ++i) + { + double guess = perceptron->feedforward(inputs[i]); + if (guess != expectedOutputs[i]) + { + iterations++; + perceptron->train(inputs[i], expectedOutputs[i], 0.1); + i = -1; + } + } + std::cout << "The network has been trained! (iterations: " << iterations << ")" << std::endl; +#ifdef PERSIST_WEIGHTS + serializeToFile(weightsFilePath.c_str(), *perceptron); +#endif +} + +void Test_Neuron_W2_greater_than_W1() +{ + std::cout << __FUNCTION__ << "... "; + RandomUtils::SetRandomSeed(); + std::unique_ptr perceptron = std::make_unique(2); + perceptron->GetNeuron()->ActivationFunction = NeuralNetwork::Activation::EActivationFunctionType::Sign; + perceptron->randomizeWeights(); +#ifdef PERSIST_WEIGHTS + std::string weightsFilePath = ".\\" + std::string(__FUNCTION__); + loadWeightsFromFile(weightsFilePath.c_str(), *perceptron); +#endif + int iterations = 0; + std::vector> inputs = + { + std::vector {1.0, 0.5}, // -1.0 + std::vector {0.5, 1.0}, // 1.0 + std::vector {10.0, 0.0}, // -1.0 + std::vector {0.0, 10.0}, // 1.0 + std::vector {0.0, -10.0}, // -1.0 + std::vector {-10.0, 0.0}, // 1.0 + std::vector {10.0, -10.0}, // -1.0 + std::vector {-10.0, 10.0}, // 1.0 + }; + std::vector expectedOutputs = {-1.0, 1.0, -1.0, 1.0, -1.0, 1.0, -1.0, 1.0}; + + // apply sigmoid on inputs + for (std::vector& input : inputs) + { + NeuralNetwork::Activation::sigmoid(input); + } + + for (int i = 0; i < inputs.size(); ++i) + { + double guess = perceptron->feedforward(inputs[i]); + if (guess != expectedOutputs[i]) + { + iterations++; + perceptron->train(inputs[i], expectedOutputs[i], 0.1); + i = -1; + } + } + std::cout << "The network has been trained! 
(iterations: " << iterations << ")" << std::endl; +#ifdef PERSIST_WEIGHTS + serializeToFile(weightsFilePath.c_str(), *perceptron); +#endif +} + +int main() +{ + Test_Neuron_W1_greater_than_W2(); + Test_Neuron_W2_greater_than_W1(); + return 0; +} From da12d925d4a5b7d4140209ddc8e7d4b080e278ec Mon Sep 17 00:00:00 2001 From: AndreiSchuch Date: Mon, 28 Aug 2023 17:05:26 -0300 Subject: [PATCH 15/20] Organizing gitignore contents --- .gitignore | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/.gitignore b/.gitignore index f679647..2674946 100644 --- a/.gitignore +++ b/.gitignore @@ -1,11 +1,11 @@ -*.DS_Store -cmake-build-debug -cmake-build-release -AdversarialSearch/Minimax/Main -AdversarialSearch/Minimax/Main.dSYM/ -build -*.asta -*.asta.lock -*.jude -*.idea +*.DS_Store +cmake-build-debug +cmake-build-release +AdversarialSearch/Minimax/Main +AdversarialSearch/Minimax/Main.dSYM/ +build +*.asta +*.asta.lock +*.jude +*.idea cmake-build-release \ No newline at end of file From f80703d2c2e107fd863d7d07ba3e0e44df1debbf Mon Sep 17 00:00:00 2001 From: AndreiSchuch Date: Mon, 28 Aug 2023 17:06:22 -0300 Subject: [PATCH 16/20] Adding line breaks into README file --- LICENSE | 42 +++++++++++++++++++++--------------------- README.md | 16 ++++++++-------- 2 files changed, 29 insertions(+), 29 deletions(-) diff --git a/LICENSE b/LICENSE index fd8d425..8b66f26 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,21 @@ -MIT License - -Copyright (c) 2019 andreirs - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +MIT License + +Copyright (c) 2019 Ândrei Schuch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md index 833754c..f3bb82f 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,8 @@ -# AIEngine -Artificial Intelligence API meant to be used in game development. It contains algorithms and techniques used in game development. - -API Inteligência Artificial para ser usada no desenvolvimento de jogos digitais. A API contém algoritmos e técnicas usadas no desenvolvimento de jogos. - -

- -

+# AIEngine +Artificial Intelligence API meant to be used in game development. It contains algorithms and techniques used in game development. + +API Inteligência Artificial para ser usada no desenvolvimento de jogos digitais. A API contém algoritmos e técnicas usadas no desenvolvimento de jogos. + +

+ +

From c3e8b7fb9fe6943ec038c64978051701b2cf5fe4 Mon Sep 17 00:00:00 2001 From: AndreiSchuch Date: Mon, 28 Aug 2023 17:06:47 -0300 Subject: [PATCH 17/20] Adding line breaks into CMakeLists.txt --- CMakeLists.txt | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index bf29d12..785b802 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,19 +1,19 @@ -# set up cmake version -cmake_minimum_required(VERSION 3.14.5) - -# create the project -project(AIEngine) - -# set project properties -set(CMAKE_CXX_STANDARD 14) -set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build) -set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/Libs) - -# include tests from subdirectories -add_subdirectory(AdversarialSearch/Minimax/Tests) -add_subdirectory(Miscellaneous/Tests) -add_subdirectory(NeuralNetwork/Perceptron/Tests) -add_subdirectory(NeuralNetwork/MLP/Tests) - -# include examples from subdirectories -add_subdirectory(Examples/MathLibrary) +# set up cmake version +cmake_minimum_required(VERSION 3.14.5) + +# create the project +project(AIEngine) + +# set project properties +set(CMAKE_CXX_STANDARD 14) +set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build) +set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/Libs) + +# include tests from subdirectories +add_subdirectory(AdversarialSearch/Minimax/Tests) +add_subdirectory(Miscellaneous/Tests) +add_subdirectory(NeuralNetwork/Perceptron/Tests) +add_subdirectory(NeuralNetwork/MLP/Tests) + +# include examples from subdirectories +add_subdirectory(Examples/MathLibrary) From 59420c6cf31148fd86e50fdf0ebf81c961ee06be Mon Sep 17 00:00:00 2001 From: AndreiSchuch Date: Mon, 28 Aug 2023 17:07:34 -0300 Subject: [PATCH 18/20] Adding line breaks into cmake job file --- .github/workflows/cmake.yml | 74 ++++++++++++++++++------------------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/.github/workflows/cmake.yml b/.github/workflows/cmake.yml index f4ae043..c1b396a 100644 --- a/.github/workflows/cmake.yml +++ b/.github/workflows/cmake.yml @@ -1,37 +1,37 @@ -name: CMake - -on: - push: - branches: [ development ] - pull_request: - branches: [ development ] - -env: - # Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.) - BUILD_TYPE: Release - -jobs: - build: - # The CMake configure and build commands are platform agnostic and should work equally well on Windows or Mac. - # You can convert this to a matrix build if you need cross-platform coverage. - # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v2 - - - name: Configure CMake - # Configure CMake in a 'build' subdirectory. `CMAKE_BUILD_TYPE` is only required if you are using a single-configuration generator such as make. - # See https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html?highlight=cmake_build_type - run: cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} - - - name: Build - # Build your program with the given configuration - run: cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}} - - - name: Test - working-directory: ${{github.workspace}}/build - # Execute tests defined by the CMake configuration. 
- # See https://cmake.org/cmake/help/latest/manual/ctest.1.html for more detail - run: ctest -C ${{env.BUILD_TYPE}} - +name: CMake + +on: + push: + branches: [ development ] + pull_request: + branches: [ development ] + +env: + # Customize the CMake build type here (Release, Debug, RelWithDebInfo, etc.) + BUILD_TYPE: Release + +jobs: + build: + # The CMake configure and build commands are platform agnostic and should work equally well on Windows or Mac. + # You can convert this to a matrix build if you need cross-platform coverage. + # See: https://docs.github.com/en/free-pro-team@latest/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: Configure CMake + # Configure CMake in a 'build' subdirectory. `CMAKE_BUILD_TYPE` is only required if you are using a single-configuration generator such as make. + # See https://cmake.org/cmake/help/latest/variable/CMAKE_BUILD_TYPE.html?highlight=cmake_build_type + run: cmake -B ${{github.workspace}}/build -DCMAKE_BUILD_TYPE=${{env.BUILD_TYPE}} + + - name: Build + # Build your program with the given configuration + run: cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}} + + - name: Test + working-directory: ${{github.workspace}}/build + # Execute tests defined by the CMake configuration. + # See https://cmake.org/cmake/help/latest/manual/ctest.1.html for more detail + run: ctest -C ${{env.BUILD_TYPE}} + From c34e46c3c6e89abeb7c0715f221b1ed6daf69296 Mon Sep 17 00:00:00 2001 From: AndreiSchuch Date: Mon, 28 Aug 2023 18:08:24 -0300 Subject: [PATCH 19/20] Refactoring all project source code in order to apply our code standards all over --- .../Minimax/Tests/MinimaxTestMain.cpp | 4 +- .../Minimax/Tests/MinimaxTester.cpp | 45 +++++---- .../Minimax/Tests/MinimaxTester.h | 12 +-- .../Minimax/TicTacToeMinimax.cpp | 48 ++++----- AdversarialSearch/Minimax/TicTacToeMinimax.h | 6 +- Examples/MathLibrary/MathLibrary.cpp | 4 +- Examples/MathLibrary/MathLibrary.h | 4 +- Miscellaneous/BitMath.h | 6 +- Miscellaneous/ISerializable.cpp | 6 +- Miscellaneous/ISerializable.h | 6 +- Miscellaneous/Matrix.cpp | 78 +++++++-------- Miscellaneous/Matrix.h | 26 ++--- Miscellaneous/Tests/MatrixTestMain.cpp | 98 +++++++++---------- NeuralNetwork/Common/ActivationFunctions.cpp | 32 +++--- NeuralNetwork/Common/ActivationFunctions.h | 14 +-- NeuralNetwork/Common/Neuron.cpp | 8 +- NeuralNetwork/Common/Neuron.h | 10 +- NeuralNetwork/MLP/Layer.cpp | 10 +- NeuralNetwork/MLP/Layer.h | 8 +- NeuralNetwork/MLP/MLP.cpp | 20 ++-- NeuralNetwork/MLP/MLP.h | 2 +- NeuralNetwork/MLP/Tests/MLPTestMain.cpp | 30 +++--- NeuralNetwork/Perceptron/Perceptron.cpp | 16 +-- NeuralNetwork/Perceptron/Perceptron.h | 8 +- .../Perceptron/Tests/PerceptronTestMain.cpp | 22 ++--- 25 files changed, 267 insertions(+), 256 deletions(-) diff --git a/AdversarialSearch/Minimax/Tests/MinimaxTestMain.cpp b/AdversarialSearch/Minimax/Tests/MinimaxTestMain.cpp index 2a8535a..7514c90 100644 --- a/AdversarialSearch/Minimax/Tests/MinimaxTestMain.cpp +++ b/AdversarialSearch/Minimax/Tests/MinimaxTestMain.cpp @@ -5,12 +5,12 @@ int main(int argc, char *argv[]) if (argc > 1) { int boardState = atoi(argv[1]); - TicTacToeMinimax::benchmarkMinimax(boardState, true); + TicTacToeMinimax::BenchmarkMinimax(boardState, true); } else { TicTacToeMinimax::RunMinimaxTests(); - TicTacToeMinimax::benchmarkMinimaxVsMinimax(0, true); + TicTacToeMinimax::BenchmarkMinimaxVsMinimax(0, true); } return 0; } diff --git 
a/AdversarialSearch/Minimax/Tests/MinimaxTester.cpp b/AdversarialSearch/Minimax/Tests/MinimaxTester.cpp index 68b26a9..23a33ff 100644 --- a/AdversarialSearch/Minimax/Tests/MinimaxTester.cpp +++ b/AdversarialSearch/Minimax/Tests/MinimaxTester.cpp @@ -1,6 +1,9 @@ #include "MinimaxTester.h" -string getStateText(int state) +/// @brief Get a string representation for a given 'TicTacToeMinimax' flag. +/// @param state A 'TicTacToeMinimax' flag. +/// @return An string representing the 'state' parameter. +string GetStateText(int state) { if (state == TicTacToeMinimax::PLAYING) return "Playing"; @@ -50,7 +53,7 @@ void TicTacToeMinimax::RunMinimaxTests() std::cout << "Board=" << testCase.CurrentBoard << " / "; std::cout << "Expected="; bool isResultExpected = false; - int result = TicTacToeMinimax::predict(testCase.CurrentBoard, testCase.IsMaxTurn, 6); + int result = TicTacToeMinimax::Predict(testCase.CurrentBoard, testCase.IsMaxTurn, 6); for(unsigned int x = 0; x < testCase.ExpectedBoards.size(); x++) { if (i > 0U && i < testCase.ExpectedBoards.size() - 1U) @@ -70,45 +73,45 @@ void TicTacToeMinimax::RunMinimaxTests() { std::cout << "[FAILED] Result was=" << result; std::cout << "CurrentBoard:" << std::endl; - printBoard(testCase.CurrentBoard); + PrintBoard(testCase.CurrentBoard); std::cout << "Predicted board state:" << std::endl; - printBoard(result); + PrintBoard(result); } std::cout << std::endl; } } -void TicTacToeMinimax::benchmarkMinimax(int board, bool isMaxTurn) +void TicTacToeMinimax::BenchmarkMinimax(int board, bool isMaxTurn) { long long int totalDuration = 0ll; - printBoard(board); - int state = TicTacToeMinimax::getState(board); + PrintBoard(board); + int state = TicTacToeMinimax::GetState(board); if (state == PLAYING) { auto begin = std::chrono::steady_clock::now(); - board = TicTacToeMinimax::predict(board, isMaxTurn, 6); + board = TicTacToeMinimax::Predict(board, isMaxTurn, 6); auto end = std::chrono::steady_clock::now(); totalDuration += (end - begin).count(); } - printBoard(board); - printState(state); + PrintBoard(board); + PrintState(state); cout << "benchmarkMinimax: " << totalDuration << " ns"; cout << endl << endl; } -void TicTacToeMinimax::benchmarkMinimaxVsMinimax(int board, bool isMaxTurn) +void TicTacToeMinimax::BenchmarkMinimaxVsMinimax(int board, bool isMaxTurn) { std::vector durationByMove; long long int totalDuration = 0ll; int firstBoard = board; int currentBoard = firstBoard; - int state = TicTacToeMinimax::getState(currentBoard); + int state = TicTacToeMinimax::GetState(currentBoard); while (state == PLAYING) { auto begin = std::chrono::steady_clock::now(); // actually run the minimax algorithm - int bestMove = TicTacToeMinimax::predict(currentBoard, isMaxTurn, 6); + int bestMove = TicTacToeMinimax::Predict(currentBoard, isMaxTurn, 6); auto end = std::chrono::steady_clock::now(); // calculate the algorithm's duration in nanoseconds @@ -119,17 +122,17 @@ void TicTacToeMinimax::benchmarkMinimaxVsMinimax(int board, bool isMaxTurn) isMaxTurn = !isMaxTurn; currentBoard = bestMove; - state = TicTacToeMinimax::getState(currentBoard); + state = TicTacToeMinimax::GetState(currentBoard); // print an error in case the algorithm ends up doing something completely wrong if (state != PLAYING && state != DRAW) { - std::cerr << "'" << getStateText(state) << "' state must never happen during a Minimax vs Minimax battle!" << std::endl; + std::cerr << "'" << GetStateText(state) << "' state must never happen during a Minimax vs Minimax battle!" 
<< std::endl; return; } } - printBoard(firstBoard); - printBoard(currentBoard); - printState(state); + PrintBoard(firstBoard); + PrintBoard(currentBoard); + PrintState(state); for (int i = 0; i < durationByMove.size(); ++i) { long long int nanoseconds = durationByMove[i]; @@ -139,7 +142,7 @@ void TicTacToeMinimax::benchmarkMinimaxVsMinimax(int board, bool isMaxTurn) cout << endl << endl; } -void TicTacToeMinimax::printBoard(int board) +void TicTacToeMinimax::PrintBoard(int board) { int crossMask = 3; cout << endl; @@ -160,8 +163,8 @@ void TicTacToeMinimax::printBoard(int board) cout << endl; } -void TicTacToeMinimax::printState(int state) +void TicTacToeMinimax::PrintState(int state) { - string stateText = getStateText(state); + string stateText = GetStateText(state); cout << stateText << endl; } diff --git a/AdversarialSearch/Minimax/Tests/MinimaxTester.h b/AdversarialSearch/Minimax/Tests/MinimaxTester.h index be5e7d5..edee851 100644 --- a/AdversarialSearch/Minimax/Tests/MinimaxTester.h +++ b/AdversarialSearch/Minimax/Tests/MinimaxTester.h @@ -16,10 +16,10 @@ using namespace std; namespace TicTacToeMinimax { void RunMinimaxTests(); - void benchmarkMinimax(int board, bool isMaxTurn); - void benchmarkMinimaxVsMinimax(int board, bool isMaxTurn); - void benchmarkEvaluate(int board, bool isMaxTurn); - void benchmarkEvaluateAll(int board, bool isMaxTurn); - void printBoard(int board); - void printState(int state); + void BenchmarkMinimax(int board, bool isMaxTurn); + void BenchmarkMinimaxVsMinimax(int board, bool isMaxTurn); + void BenchmarkEvaluate(int board, bool isMaxTurn); + void BenchmarkEvaluateAll(int board, bool isMaxTurn); + void PrintBoard(int board); + void PrintState(int state); } diff --git a/AdversarialSearch/Minimax/TicTacToeMinimax.cpp b/AdversarialSearch/Minimax/TicTacToeMinimax.cpp index 1921d1e..99b66ba 100644 --- a/AdversarialSearch/Minimax/TicTacToeMinimax.cpp +++ b/AdversarialSearch/Minimax/TicTacToeMinimax.cpp @@ -17,11 +17,11 @@ int _maxDepth; namespace TicTacToeMinimax { - int predictMinTreeScore(const int board, int alpha, int beta, int depth); + int PredictMinTreeScore(const int board, int alpha, int beta, int depth); - int predictMaxTreeScore(const int board, int alpha, int beta, int depth) + int PredictMaxTreeScore(const int board, int alpha, int beta, int depth) { - const int state = getState(board); + const int state = GetState(board); if (state != PLAYING || depth >= _maxDepth) { return state; @@ -34,7 +34,7 @@ namespace TicTacToeMinimax if ((board & mask) == 0) { const int newBoard = board | mask; - const int score = predictMinTreeScore(newBoard, alpha, beta, depth); + const int score = PredictMinTreeScore(newBoard, alpha, beta, depth); if (score >= beta) return score; if (score > alpha) @@ -47,9 +47,9 @@ namespace TicTacToeMinimax return bestScore; } - int predictMinTreeScore(int board, int alpha, int beta, int depth) + int PredictMinTreeScore(int board, int alpha, int beta, int depth) { - const int state = getState(board); + const int state = GetState(board); if (state != PLAYING || depth >= _maxDepth) { return state; @@ -62,7 +62,7 @@ namespace TicTacToeMinimax if ((board & mask) == 0) { const int newBoard = board | mask; - const int score = predictMaxTreeScore(newBoard, alpha, beta, depth); + const int score = PredictMaxTreeScore(newBoard, alpha, beta, depth); if (score <= alpha) return score; if (score < beta) @@ -75,7 +75,7 @@ namespace TicTacToeMinimax return bestScore; } - int predictMaxDecision(int board) + int PredictMaxDecision(int board) { int 
bestMove = 0;
         int bestMoveScore = INT_MIN;
@@ -85,7 +85,7 @@ namespace TicTacToeMinimax
             if ((board & mask) == 0)
             {
                 const int newBoard = board | mask;
-                const int score = predictMinTreeScore(newBoard, INT_MIN, INT_MAX, 1);
+                const int score = PredictMinTreeScore(newBoard, INT_MIN, INT_MAX, 1);
                 if (score > bestMoveScore)
                 {
                     bestMoveScore = score;
@@ -97,7 +97,7 @@ namespace TicTacToeMinimax
         return bestMove;
     }
 
-    int predictMinDecision(int board)
+    int PredictMinDecision(int board)
     {
         int bestMove = 0;
         int bestMoveScore = INT_MAX;
@@ -107,7 +107,7 @@ namespace TicTacToeMinimax
             if ((board & mask) == 0)
             {
                 const int newBoard = board | mask;
-                const int score = predictMaxTreeScore(newBoard, INT_MIN, INT_MAX, 1);
+                const int score = PredictMaxTreeScore(newBoard, INT_MIN, INT_MAX, 1);
                 if (score < bestMoveScore)
                 {
                     bestMoveScore = score;
@@ -119,7 +119,7 @@ namespace TicTacToeMinimax
         return bestMove;
     }
 
-    std::vector<int> predictMaxDecisions(int board)
+    std::vector<int> PredictMaxDecisions(int board)
     {
         std::vector<int> bestMoves;
         int bestMoveScore = INT_MIN;
@@ -129,7 +129,7 @@ namespace TicTacToeMinimax
             if ((board & mask) == 0)
             {
                 const int newBoard = (board | mask);
-                const int score = predictMinTreeScore(newBoard, INT_MIN, INT_MAX, 1);
+                const int score = PredictMinTreeScore(newBoard, INT_MIN, INT_MAX, 1);
                 if (score > bestMoveScore)
                 {
                     bestMoveScore = score;
@@ -146,7 +146,7 @@ namespace TicTacToeMinimax
         return bestMoves;
     }
 
-    std::vector<int> predictMinDecisions(int board)
+    std::vector<int> PredictMinDecisions(int board)
     {
         std::vector<int> bestMoves;
         int bestMoveScore = INT_MAX;
@@ -156,7 +156,7 @@ namespace TicTacToeMinimax
             if ((board & mask) == 0)
             {
                 const int newBoard = (board | mask);
-                const int score = predictMaxTreeScore(newBoard, INT_MIN, INT_MAX, 1);
+                const int score = PredictMaxTreeScore(newBoard, INT_MIN, INT_MAX, 1);
                 if (score < bestMoveScore)
                 {
                     bestMoveScore = score;
@@ -173,32 +173,34 @@ namespace TicTacToeMinimax
         return bestMoves;
     }
 
-    int predict(int board, bool isMaxTurn, int maxDepth)
+    int Predict(int board, bool isMaxTurn, int maxDepth)
     {
         _maxDepth = maxDepth;
         if (isMaxTurn)
         {
-            return predictMaxDecision(board);
+            return PredictMaxDecision(board);
         }
-        return predictMinDecision(board);
+        return PredictMinDecision(board);
     }
 
-    std::vector<int> predictAll(int board, bool isMaxTurn, int maxDepth)
+    std::vector<int> PredictAll(int board, bool isMaxTurn, int maxDepth)
     {
         _maxDepth = maxDepth;
         if (isMaxTurn)
         {
-            return predictMaxDecisions(board);
+            return PredictMaxDecisions(board);
        }
-        return predictMinDecisions(board);
+        return PredictMinDecisions(board);
    }
 
-    int getState(int board)
+    int GetState(int board)
    {
+        // Find out if there is a winner at this board state
        for(int winnerMask : winnerMasks)
        {
            if ((board & winnerMask) == winnerMask)
            {
+                // Find out who the winner is
                winnerMask = winnerMask >> 1;
                int piecesXor = (winnerMask ^ board) & winnerMask;
                if (piecesXor == 0)
@@ -207,8 +209,10 @@ namespace TicTacToeMinimax
                    return CIRCLE_WINS;
                }
            }
+        // No winning line was found: check whether every cell is occupied (a draw)
        if ((board & DRAW_BOARD_STATE) == DRAW_BOARD_STATE)
            return DRAW;
+        // If there is no winner and the board is not yet full, the match is still in progress
        return PLAYING;
    }
 }
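GetState above works on a bitboard: all nine cells live in a single int, each entry of winnerMasks flags the bits of three cells in a line, and shifting the mask right by one moves from a cell's line bit to its piece bit. The mask values themselves are defined elsewhere in this file and are not shown in this hunk; as a rough standalone sketch of the technique, under a hypothetical encoding where cell i stores "occupied" at bit 2*i and "cross" at bit 2*i+1:

// Sketch only: these masks are illustrative assumptions, not the project's winnerMasks.
const int TOP_ROW_OCCUPIED = (1 << 0) | (1 << 2) | (1 << 4); // cells 0, 1, 2

int ClassifyTopRowExample(int board)
{
    if ((board & TOP_ROW_OCCUPIED) != TOP_ROW_OCCUPIED)
        return 2;                         // row not fully occupied, still playing
    int pieceBits = (board >> 1) & TOP_ROW_OCCUPIED;
    if (pieceBits == TOP_ROW_OCCUPIED)
        return 1;                         // three crosses in a row
    if (pieceBits == 0)
        return -1;                        // three circles in a row (compare CIRCLE_WINS)
    return 2;                             // mixed row, no winner on this line
}

diff --git a/AdversarialSearch/Minimax/TicTacToeMinimax.h b/AdversarialSearch/Minimax/TicTacToeMinimax.h
index bad8eb0..b0cb253 100644
--- a/AdversarialSearch/Minimax/TicTacToeMinimax.h
+++ b/AdversarialSearch/Minimax/TicTacToeMinimax.h
@@ -10,7 +10,7 @@ namespace TicTacToeMinimax
     const int CIRCLE_WINS = -1;
     const int PLAYING = 2;
 
-    int predict(int board, bool isMaxTurn, int maxDepth);
-    std::vector<int> predictAll(int board, bool isMaxTurn, int maxDepth);
-    int getState(int 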
board);
+    int Predict(int board, bool isMaxTurn, int maxDepth);
+    std::vector<int> PredictAll(int board, bool isMaxTurn, int maxDepth);
+    int GetState(int board);
 }
diff --git a/Examples/MathLibrary/MathLibrary.cpp b/Examples/MathLibrary/MathLibrary.cpp
index 24f887c..f7c7744 100644
--- a/Examples/MathLibrary/MathLibrary.cpp
+++ b/Examples/MathLibrary/MathLibrary.cpp
@@ -1,11 +1,11 @@
 #include "MathLibrary.h"
 
-int sum(int lhs, int rhs)
+int Sum(int lhs, int rhs)
 {
     return lhs + rhs;
 }
 
-int getOne()
+int GetOne()
 {
     return 1;
 }
diff --git a/Examples/MathLibrary/MathLibrary.h b/Examples/MathLibrary/MathLibrary.h
index 09a9c7e..ded6e3c 100644
--- a/Examples/MathLibrary/MathLibrary.h
+++ b/Examples/MathLibrary/MathLibrary.h
@@ -11,6 +11,6 @@
 # define MATHLIBRARY_API
 #endif
 
-extern "C" MATHLIBRARY_API int sum(int lhs, int rhs);
+extern "C" MATHLIBRARY_API int Sum(int lhs, int rhs);
 
-extern "C" MATHLIBRARY_API int getOne();
+extern "C" MATHLIBRARY_API int GetOne();
diff --git a/Miscellaneous/BitMath.h b/Miscellaneous/BitMath.h
index 54d4c45..f37298f 100644
--- a/Miscellaneous/BitMath.h
+++ b/Miscellaneous/BitMath.h
@@ -2,7 +2,11 @@
 namespace BitMath
 {
-    int reverseBits(const int value, int maxBits)
+    /// @brief Reverses the bits of an integer value.
+    /// @param value The value whose bits will be reversed.
+    /// @param maxBits How many low-order bits to reverse (an odd count is rounded down to even).
+    /// @return The bit-reversed value.
+    int ReverseBits(const int value, int maxBits)
     {
         int reversedBits = 0;
         maxBits = (maxBits % 2 == 0) ? maxBits : maxBits - 1;
diff --git a/Miscellaneous/ISerializable.cpp b/Miscellaneous/ISerializable.cpp
index b44b111..1a67b96 100644
--- a/Miscellaneous/ISerializable.cpp
+++ b/Miscellaneous/ISerializable.cpp
@@ -1,8 +1,8 @@
 #include "ISerializable.h"
 
-namespace serialization
+namespace Serialization
 {
-    void loadWeightsFromFile(const char* filePath, ISerializable& target)
+    void LoadWeightsFromFile(const char* filePath, ISerializable& target)
     {
         std::fstream stream;
         stream.open (filePath, std::fstream::in | std::fstream::binary);
@@ -13,7 +13,7 @@
         stream.close();
     }
 
-    void serializeToFile(const char* filePath, const ISerializable& target)
+    void SerializeToFile(const char* filePath, const ISerializable& target)
     {
         std::fstream stream;
         stream.open (filePath, std::fstream::out | std::fstream::binary | std::fstream::trunc);
diff --git a/Miscellaneous/ISerializable.h b/Miscellaneous/ISerializable.h
index 25cda09..8518a6c 100644
--- a/Miscellaneous/ISerializable.h
+++ b/Miscellaneous/ISerializable.h
@@ -3,7 +3,7 @@
 #include <iostream>
 #include <fstream>
 
-namespace serialization
+namespace Serialization
 {
     class ISerializable
     {
@@ -12,6 +12,6 @@
         virtual void Deserialize(std::istream& stream) = 0;
     };
 
-    void loadWeightsFromFile(const char* filePath, ISerializable& target);
-    void serializeToFile(const char* filePath, const ISerializable& target);
+    void LoadWeightsFromFile(const char* filePath, ISerializable& target);
+    void SerializeToFile(const char* filePath, const ISerializable& target);
 }
diff --git a/Miscellaneous/Matrix.cpp b/Miscellaneous/Matrix.cpp
index 8219ddd..9218ad6 100644
--- a/Miscellaneous/Matrix.cpp
+++ b/Miscellaneous/Matrix.cpp
@@ -15,7 +15,7 @@ Matrix::Matrix(const Matrix& source)
     _columns = source._columns;
 }
 
-double Matrix::get(const int row, const int column) const
+double Matrix::Get(const int row, const int column) const
 {
     int index = row * _columns + column;
     assert(index < (_rows * _columns));
     double value = _values[index];
     return value;
 }
 
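Get above maps a (row, column) pair onto a single flat, row-major array: index = row * _columns + column. Note that its assert only bounds the flattened index, so an out-of-range column that still lands inside the array would silently alias a cell of the next row; Set's per-coordinate check just below is the stricter of the two. A tiny standalone illustration of the layout (not part of the patch):

#include <cassert>
#include <vector>

// Row-major layout: a 2x3 matrix stored as one flat array of 6 doubles.
// Element (r, c) lives at r * columns + c, exactly as Matrix::Get computes.
void RowMajorLayoutExample()
{
    const int columns = 3;
    std::vector<double> values = { 1.0, 2.0, 3.0,    // row 0
                                   4.0, 5.0, 6.0 };  // row 1
    assert(values[1 * columns + 2] == 6.0);          // (1, 2) -> index 5
    assert(values[0 * columns + 3] == 4.0);          // column 3 of row 0 aliases (1, 0)!
}

-void Matrix::set(const int row, const int column, const double value)
+void 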
Matrix::Set(const int row, const int column, const double value) { assert((row >= 0 && row < _rows) && (column >= 0 && column < _columns)); const int index = row * _columns + column; _values[index] = value; } -int Matrix::getRows() const +int Matrix::GetRows() const { return _rows; } -int Matrix::getColumns() const +int Matrix::GetColumns() const { return _columns; } -void Matrix::print(std::ostream& stream) const +void Matrix::Print(std::ostream& stream) const { - print(stream, 3); + Print(stream, 3); } -void Matrix::print(std::ostream& stream, int decimalPlace) const +void Matrix::Print(std::ostream& stream, int decimalPlace) const { int decimalFactor = static_cast(pow(10, decimalPlace)); assert(decimalFactor > 0); @@ -53,7 +53,7 @@ void Matrix::print(std::ostream& stream, int decimalPlace) const { for (int c = 0; c < _columns; c++) { - double value = get(r, c); + double value = Get(r, c); double truncatedValue = floor(value * decimalFactor) / decimalFactor; stream << "[" << truncatedValue << "] "; } @@ -62,86 +62,86 @@ void Matrix::print(std::ostream& stream, int decimalPlace) const } //STATIC FUNCTIONS -std::unique_ptr Matrix::multiply(const Matrix &left, const Matrix &right) +std::unique_ptr Matrix::Multiply(const Matrix &left, const Matrix &right) { - std::unique_ptr result = std::make_unique(left.getRows(),right.getColumns()); - multiply(left, right, *result); + std::unique_ptr result = std::make_unique(left.GetRows(),right.GetColumns()); + Multiply(left, right, *result); return result; } -void Matrix::multiply(const Matrix& left, const Matrix& right, Matrix& target) +void Matrix::Multiply(const Matrix& left, const Matrix& right, Matrix& target) { - assert(left.getColumns() == right.getRows()); - for(int row = 0; row < target.getRows(); ++row) + assert(left.GetColumns() == right.GetRows()); + for(int row = 0; row < target.GetRows(); ++row) { - for(int column = 0; column < target.getColumns(); ++column) + for(int column = 0; column < target.GetColumns(); ++column) { double sum = 0.0; - for(int leftColumn = 0; leftColumn < left.getColumns(); ++leftColumn) + for(int leftColumn = 0; leftColumn < left.GetColumns(); ++leftColumn) { - sum += left.get(row, leftColumn) * right.get(leftColumn, column); + sum += left.Get(row, leftColumn) * right.Get(leftColumn, column); } - target.set(row, column, sum); + target.Set(row, column, sum); } } } -void Matrix::multiply(Matrix& target, const double scalar) +void Matrix::Multiply(Matrix& target, const double scalar) { - for (int r = 0; r < target.getRows(); ++r) + for (int r = 0; r < target.GetRows(); ++r) { - for (int c = 0; c < target.getColumns(); ++c) + for (int c = 0; c < target.GetColumns(); ++c) { - double value = target.get(r,c) * scalar; - target.set(r,c,value); + double value = target.Get(r,c) * scalar; + target.Set(r,c,value); } } } -void Matrix::add(Matrix& target, const double value) +void Matrix::Add(Matrix& target, const double value) { - for (int r = 0; r < target.getRows(); ++r) + for (int r = 0; r < target.GetRows(); ++r) { - for (int c = 0; c < target.getColumns(); ++c) + for (int c = 0; c < target.GetColumns(); ++c) { - double sum = target.get(r, c) + value; - target.set(r, c, sum); + double sum = target.Get(r, c) + value; + target.Set(r, c, sum); } } } -std::unique_ptr Matrix::fromVectorRows(const std::vector& vector) +std::unique_ptr Matrix::FromVectorRows(const std::vector& vector) { int size = vector.size(); std::unique_ptr matrix = std::make_unique(size, 1); for (int i = 0; i < size; ++i) { const double number = vector[i]; - 
matrix->set(i, 0, number); + matrix->Set(i, 0, number); } return matrix; } -std::unique_ptr Matrix::fromVectorColumns(const std::vector& vector) +std::unique_ptr Matrix::FromVectorColumns(const std::vector& vector) { int size = vector.size(); std::unique_ptr matrix = std::make_unique(1, size); for (int i = 0; i < size; ++i) { const double number = vector[i]; - matrix->set(0, i, number); + matrix->Set(0, i, number); } return matrix; } -void Matrix::copy(const Matrix& source, Matrix& target) +void Matrix::Copy(const Matrix& source, Matrix& target) { - assert(source.getColumns() == target.getColumns()); - assert(source.getRows() == target.getRows()); - for (int r = 0; r < source.getRows(); ++r) + assert(source.GetColumns() == target.GetColumns()); + assert(source.GetRows() == target.GetRows()); + for (int r = 0; r < source.GetRows(); ++r) { - for (int c = 0; c < source.getRows(); ++c) + for (int c = 0; c < source.GetRows(); ++c) { - double value = source.get(r, c); - target.set(r, c, value); + double value = source.Get(r, c); + target.Set(r, c, value); } } } diff --git a/Miscellaneous/Matrix.h b/Miscellaneous/Matrix.h index 156f9ef..2b8658b 100644 --- a/Miscellaneous/Matrix.h +++ b/Miscellaneous/Matrix.h @@ -17,18 +17,18 @@ class Matrix public: Matrix(int rows, int columns); Matrix(const Matrix& source); - double get(int row, int column) const; - void set(int row, int column, double value); - int getRows() const; - int getColumns() const; - void print(std::ostream& stream) const; - void print(std::ostream& stream, int decimalPlace) const; + double Get(int row, int column) const; + void Set(int row, int column, double value); + int GetRows() const; + int GetColumns() const; + void Print(std::ostream& stream) const; + void Print(std::ostream& stream, int decimalPlace) const; - static std::unique_ptr multiply(const Matrix& left, const Matrix& right); - static void multiply(const Matrix& left, const Matrix& right, Matrix& target); - static void multiply(Matrix& target, double scalar); - static void add(Matrix& target, double value); - static std::unique_ptr fromVectorRows(const std::vector& vector); - static std::unique_ptr fromVectorColumns(const std::vector& vector); - static void copy(const Matrix& source, Matrix& target); + static std::unique_ptr Multiply(const Matrix& left, const Matrix& right); + static void Multiply(const Matrix& left, const Matrix& right, Matrix& target); + static void Multiply(Matrix& target, double scalar); + static void Add(Matrix& target, double value); + static std::unique_ptr FromVectorRows(const std::vector& vector); + static std::unique_ptr FromVectorColumns(const std::vector& vector); + static void Copy(const Matrix& source, Matrix& target); }; diff --git a/Miscellaneous/Tests/MatrixTestMain.cpp b/Miscellaneous/Tests/MatrixTestMain.cpp index edef05f..8148a5a 100644 --- a/Miscellaneous/Tests/MatrixTestMain.cpp +++ b/Miscellaneous/Tests/MatrixTestMain.cpp @@ -7,70 +7,70 @@ int main() { cout << "Matrix * Matrix" << endl; auto left = make_unique(1,3); - left->set(0,0,1); - left->set(0,1,2); - left->set(0,2,3); + left->Set(0,0,1); + left->Set(0,1,2); + left->Set(0,2,3); auto right = make_unique(3,1); - right->set(0,0,4); - right->set(1,0,5); - right->set(2,0,6); - auto dotProduct = Matrix::multiply(*left, *right); - dotProduct->print(cout); + right->Set(0,0,4); + right->Set(1,0,5); + right->Set(2,0,6); + auto dotProduct = Matrix::Multiply(*left, *right); + dotProduct->Print(cout); cout << endl; left = make_unique(3,1); - left->set(0,0,4); - left->set(1,0,5); - 
left->set(2,0,6); + left->Set(0,0,4); + left->Set(1,0,5); + left->Set(2,0,6); right = make_unique(1,3); - right->set(0,0,1); - right->set(0,1,2); - right->set(0,2,3); - dotProduct = Matrix::multiply(*left, *right); - dotProduct->print(cout); + right->Set(0,0,1); + right->Set(0,1,2); + right->Set(0,2,3); + dotProduct = Matrix::Multiply(*left, *right); + dotProduct->Print(cout); cout << endl; left = make_unique(1,3); - left->set(0,0,3); - left->set(0,1,4); - left->set(0,2,2); + left->Set(0,0,3); + left->Set(0,1,4); + left->Set(0,2,2); right = make_unique(3,4); - right->set(0,0,13); - right->set(0,1,9); - right->set(0,2,7); - right->set(0,3,15); - right->set(1,0,8); - right->set(1,1,7); - right->set(1,2,4); - right->set(1,3,6); - right->set(2,0,6); - right->set(2,1,4); - right->set(2,2,0); - right->set(2,3,3); - dotProduct = Matrix::multiply(*left, *right); - dotProduct->print(cout); + right->Set(0,0,13); + right->Set(0,1,9); + right->Set(0,2,7); + right->Set(0,3,15); + right->Set(1,0,8); + right->Set(1,1,7); + right->Set(1,2,4); + right->Set(1,3,6); + right->Set(2,0,6); + right->Set(2,1,4); + right->Set(2,2,0); + right->Set(2,3,3); + dotProduct = Matrix::Multiply(*left, *right); + dotProduct->Print(cout); cout << endl; left = make_unique(3,3); - left->set(0,0,0.9); - left->set(0,1,0.3); - left->set(0,2,0.4); - left->set(1,0,0.2); - left->set(1,1,0.8); - left->set(1,2,0.2); - left->set(2,0,0.1); - left->set(2,1,0.5); - left->set(2,2,0.6); + left->Set(0,0,0.9); + left->Set(0,1,0.3); + left->Set(0,2,0.4); + left->Set(1,0,0.2); + left->Set(1,1,0.8); + left->Set(1,2,0.2); + left->Set(2,0,0.1); + left->Set(2,1,0.5); + left->Set(2,2,0.6); right = make_unique(3, 1); - right->set(0,0,0.9); - right->set(1,0,0.1); - right->set(2,0,0.8); - dotProduct = Matrix::multiply(*left, *right); - dotProduct->print(cout); + right->Set(0,0,0.9); + right->Set(1,0,0.1); + right->Set(2,0,0.8); + dotProduct = Matrix::Multiply(*left, *right); + dotProduct->Print(cout); cout << endl << "Matrix * Scalar" << endl; - Matrix::multiply(*dotProduct, 2.0); - dotProduct->print(cout); + Matrix::Multiply(*dotProduct, 2.0); + dotProduct->Print(cout); return 0; } diff --git a/NeuralNetwork/Common/ActivationFunctions.cpp b/NeuralNetwork/Common/ActivationFunctions.cpp index fd708b6..c31c6aa 100644 --- a/NeuralNetwork/Common/ActivationFunctions.cpp +++ b/NeuralNetwork/Common/ActivationFunctions.cpp @@ -6,21 +6,21 @@ namespace NeuralNetwork { double thresholdValue = 0.5; - double sigmoid(const double& value) + double Sigmoid(const double& value) { double result = 1.0 / (1.0 + std::exp(-value)); return result; } - void sigmoid(std::vector& vector) + void Sigmoid(std::vector& vector) { for (double& i : vector) { - i = sigmoid(i); + i = Sigmoid(i); } } - double sign(const double& value) + double Sign(const double& value) { if (value > 0.0) { @@ -29,15 +29,15 @@ namespace NeuralNetwork return -1.0; } - void sign(std::vector& vector) + void Sign(std::vector& vector) { for (double& i : vector) { - i = sign(i); + i = Sign(i); } } - double threshold(const double& value) + double Threshold(const double& value) { if (value >= thresholdValue) { @@ -46,15 +46,15 @@ namespace NeuralNetwork return 0.0; } - void threshold(std::vector& vector) + void Threshold(std::vector& vector) { for (double& i : vector) { - i = threshold(i); + i = Threshold(i); } } - void setThreshold(const double&& threshold) + void SetThreshold(const double&& threshold) { thresholdValue = threshold; } @@ -66,15 +66,15 @@ namespace NeuralNetwork case EActivationFunctionType::None: 
break; case EActivationFunctionType::Sigmoid: - value = sigmoid(value); + value = Sigmoid(value); break; case EActivationFunctionType::Sign: - value = sign(value); + value = Sign(value); break; case EActivationFunctionType::Threshold: - value = threshold(value); + value = Threshold(value); break; } } @@ -86,15 +86,15 @@ namespace NeuralNetwork case EActivationFunctionType::None: break; case EActivationFunctionType::Sigmoid: - sigmoid(vector); + Sigmoid(vector); break; case EActivationFunctionType::Sign: - sign(vector); + Sign(vector); break; case EActivationFunctionType::Threshold: - threshold(vector); + Threshold(vector); break; } } diff --git a/NeuralNetwork/Common/ActivationFunctions.h b/NeuralNetwork/Common/ActivationFunctions.h index 387dbe5..3befdb4 100644 --- a/NeuralNetwork/Common/ActivationFunctions.h +++ b/NeuralNetwork/Common/ActivationFunctions.h @@ -7,15 +7,15 @@ namespace NeuralNetwork { namespace Activation { - double sigmoid(const double&); - void sigmoid(std::vector&); + double Sigmoid(const double&); + void Sigmoid(std::vector&); - double sign(const double&); - void sign(std::vector&); + double Sign(const double&); + void Sign(std::vector&); - double threshold(const double&); - void threshold(std::vector&); - void setThreshold(const double&&); + double Threshold(const double&); + void Threshold(std::vector&); + void SetThreshold(const double&&); enum class EActivationFunctionType { diff --git a/NeuralNetwork/Common/Neuron.cpp b/NeuralNetwork/Common/Neuron.cpp index 1484469..0ffe4f9 100644 --- a/NeuralNetwork/Common/Neuron.cpp +++ b/NeuralNetwork/Common/Neuron.cpp @@ -9,7 +9,7 @@ Neuron::Neuron(int weights) _weights = std::vector(weights); } -double Neuron::feedforward(const std::vector& inputs, double bias) const +double Neuron::Feedforward(const std::vector& inputs, double bias) const { // calculate Neuron's output double output = bias; @@ -24,7 +24,7 @@ double Neuron::feedforward(const std::vector& inputs, double bias) const return output; } -void Neuron::randomizeWeights() +void Neuron::RandomizeWeights() { for (double& weight : _weights) { @@ -56,12 +56,12 @@ void Neuron::Deserialize(std::istream& stream) } } -const std::vector& Neuron::getWeights() const +const std::vector& Neuron::GetWeights() const { return _weights; } -void Neuron::setWeights(const std::vector& weights) +void Neuron::SetWeights(const std::vector& weights) { _weights = weights; } diff --git a/NeuralNetwork/Common/Neuron.h b/NeuralNetwork/Common/Neuron.h index d7e95e2..ab1c0fe 100644 --- a/NeuralNetwork/Common/Neuron.h +++ b/NeuralNetwork/Common/Neuron.h @@ -7,7 +7,7 @@ namespace NeuralNetwork { - class Neuron final : public serialization::ISerializable + class Neuron final : public Serialization::ISerializable { private: std::vector _weights; @@ -16,11 +16,11 @@ namespace NeuralNetwork Activation::EActivationFunctionType ActivationFunction = Activation::EActivationFunctionType::None; explicit Neuron(int weightsLength); - void randomizeWeights(); + void RandomizeWeights(); - double feedforward(const std::vector& inputs, double bias) const; - void setWeights(const std::vector& weights); - const std::vector& getWeights() const; + double Feedforward(const std::vector& inputs, double bias) const; + void SetWeights(const std::vector& weights); + const std::vector& GetWeights() const; // serialization void Serialize(std::ostream &stream) const override; diff --git a/NeuralNetwork/MLP/Layer.cpp b/NeuralNetwork/MLP/Layer.cpp index 2b416b0..f86e6a8 100644 --- a/NeuralNetwork/MLP/Layer.cpp +++ 
b/NeuralNetwork/MLP/Layer.cpp @@ -8,7 +8,7 @@ Layer::Layer(size_t neurons, size_t weights) { std::unique_ptr neuron = std::make_unique(weights); neuron->ActivationFunction = Activation::EActivationFunctionType::Sigmoid; - neuron->randomizeWeights(); + neuron->RandomizeWeights(); _neurons.push_back(std::move(neuron)); } } @@ -31,23 +31,23 @@ void Layer::Deserialize(std::istream &stream) } } -std::vector Layer::feedforward(const std::vector& inputs) +std::vector Layer::Feedforward(const std::vector& inputs) { std::vector outputs(_neurons.size()); for(size_t n = 0; n < _neurons.size(); n++) { Neuron& neuron = *_neurons[n]; - outputs[n] = neuron.feedforward(inputs, 0.0); + outputs[n] = neuron.Feedforward(inputs, 0.0); } return outputs; } -size_t Layer::getNeuronsLength() const +size_t Layer::GetNeuronsLength() const { return _neurons.size(); } -Neuron& Layer::getNeuron(int index) const +Neuron& Layer::GetNeuron(int index) const { assert(index < _neurons.size()); Neuron& neuron = *_neurons[index]; diff --git a/NeuralNetwork/MLP/Layer.h b/NeuralNetwork/MLP/Layer.h index e299d4d..cdd4b5d 100644 --- a/NeuralNetwork/MLP/Layer.h +++ b/NeuralNetwork/MLP/Layer.h @@ -7,17 +7,17 @@ namespace NeuralNetwork { - class Layer final : public serialization::ISerializable + class Layer final : public Serialization::ISerializable { private: std::vector> _neurons; public: Layer(size_t neurons, size_t weights); - std::vector feedforward(const std::vector& inputs); + std::vector Feedforward(const std::vector& inputs); void Serialize(std::ostream &stream) const override; void Deserialize(std::istream &stream) override; - Neuron& getNeuron(int index) const; - size_t getNeuronsLength() const; + Neuron& GetNeuron(int index) const; + size_t GetNeuronsLength() const; }; } diff --git a/NeuralNetwork/MLP/MLP.cpp b/NeuralNetwork/MLP/MLP.cpp index 82ef109..9fe9f6b 100644 --- a/NeuralNetwork/MLP/MLP.cpp +++ b/NeuralNetwork/MLP/MLP.cpp @@ -29,7 +29,7 @@ std::vector MultiLayerPerceptron::Feedforward(const std::vector& const std::unique_ptr &layer = _layers[l]; outputs.emplace_back(1.0); // insert bias _inputsByLayer[l] = outputs; - outputs = layer->feedforward(outputs); + outputs = layer->Feedforward(outputs); } // apply activation function @@ -40,7 +40,7 @@ std::vector MultiLayerPerceptron::Feedforward(const std::vector& void GetPreviousLayerErrors(const Neuron& neuron, double error, std::vector& previousLayerErrors) { - const std::vector& weights = neuron.getWeights(); + const std::vector& weights = neuron.GetWeights(); previousLayerErrors.resize(weights.size()); double weightsSum = 0.0; @@ -58,7 +58,7 @@ void GetPreviousLayerErrors(const Neuron& neuron, double error, std::vector &inputs, const double output, const double error, const double learningRate) { - auto& weights = const_cast&>(neuron.getWeights()); + auto& weights = const_cast&>(neuron.GetWeights()); for (size_t w = 0; w < weights.size(); ++w) { double weightDifference = -(error * (output * (1.0 - output)) * inputs[w]); @@ -83,17 +83,17 @@ void MultiLayerPerceptron::BackPropagate(const std::vector& inputs, cons if (l > 0) { const std::unique_ptr &layer = _layers[l - 1]; - size = layer->getNeuronsLength(); + size = layer->GetNeuronsLength(); } std::vector previousLayerErrors(size); //BackPropagate errors Layer &layer = GetLayer(l); std::vector layerInputs = _inputsByLayer[l]; - std::vector layerOutputs = layer.feedforward(layerInputs); - for (int n = 0; n < layer.getNeuronsLength(); ++n) + std::vector layerOutputs = layer.Feedforward(layerInputs); + for (int n = 0; 
diff --git a/NeuralNetwork/MLP/MLP.cpp b/NeuralNetwork/MLP/MLP.cpp
index 82ef109..9fe9f6b 100644
--- a/NeuralNetwork/MLP/MLP.cpp
+++ b/NeuralNetwork/MLP/MLP.cpp
@@ -29,7 +29,7 @@ std::vector<double> MultiLayerPerceptron::Feedforward(const std::vector<double>&
         const std::unique_ptr<Layer> &layer = _layers[l];
         outputs.emplace_back(1.0); // insert bias
         _inputsByLayer[l] = outputs;
-        outputs = layer->feedforward(outputs);
+        outputs = layer->Feedforward(outputs);
     }
 
     // apply activation function
@@ -40,7 +40,7 @@ std::vector<double> MultiLayerPerceptron::Feedforward(const std::vector<double>&
 
 void GetPreviousLayerErrors(const Neuron& neuron, double error, std::vector<double>& previousLayerErrors)
 {
-    const std::vector<double>& weights = neuron.getWeights();
+    const std::vector<double>& weights = neuron.GetWeights();
     previousLayerErrors.resize(weights.size());
 
     double weightsSum = 0.0;
@@ -58,7 +58,7 @@ void GetPreviousLayerErrors(const Neuron& neuron, double error, std::vector<dou
 void UpdateNeuronWeights(Neuron& neuron, const std::vector<double> &inputs, const double output, const double error, const double learningRate)
 {
-    auto& weights = const_cast<std::vector<double>&>(neuron.getWeights());
+    auto& weights = const_cast<std::vector<double>&>(neuron.GetWeights());
     for (size_t w = 0; w < weights.size(); ++w)
     {
         double weightDifference = -(error * (output * (1.0 - output)) * inputs[w]);
@@ -83,17 +83,17 @@ void MultiLayerPerceptron::BackPropagate(const std::vector<double>& inputs, cons
         if (l > 0)
         {
             const std::unique_ptr<Layer> &layer = _layers[l - 1];
-            size = layer->getNeuronsLength();
+            size = layer->GetNeuronsLength();
         }
         std::vector<double> previousLayerErrors(size);
 
         //BackPropagate errors
         Layer &layer = GetLayer(l);
         std::vector<double> layerInputs = _inputsByLayer[l];
-        std::vector<double> layerOutputs = layer.feedforward(layerInputs);
-        for (int n = 0; n < layer.getNeuronsLength(); ++n)
+        std::vector<double> layerOutputs = layer.Feedforward(layerInputs);
+        for (int n = 0; n < layer.GetNeuronsLength(); ++n)
         {
-            Neuron &neuron = layer.getNeuron(n);
+            Neuron &neuron = layer.GetNeuron(n);
             GetPreviousLayerErrors(neuron, errors[n], previousLayerErrors);
             UpdateNeuronWeights(neuron, layerInputs, layerOutputs[n], errors[n], learningRate);
         }
@@ -134,10 +134,10 @@ void MultiLayerPerceptron::RandomizeWeights() const
     for (int l = 0; l < GetLayersLength(); ++l)
     {
         Layer &layer = GetLayer(l);
-        for (int n = 0; n < layer.getNeuronsLength(); ++n)
+        for (int n = 0; n < layer.GetNeuronsLength(); ++n)
         {
-            Neuron &neuron = layer.getNeuron(n);
-            neuron.randomizeWeights();
+            Neuron &neuron = layer.GetNeuron(n);
+            neuron.RandomizeWeights();
         }
     }
 }
diff --git a/NeuralNetwork/MLP/MLP.h b/NeuralNetwork/MLP/MLP.h
index 4171154..25eda6f 100644
--- a/NeuralNetwork/MLP/MLP.h
+++ b/NeuralNetwork/MLP/MLP.h
@@ -5,7 +5,7 @@
 
 namespace NeuralNetwork
 {
-    class MultiLayerPerceptron final : public serialization::ISerializable
+    class MultiLayerPerceptron final : public Serialization::ISerializable
     {
     private:
         std::vector<std::unique_ptr<Layer>> _layers;
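UpdateNeuronWeights above computes the classic delta-rule gradient with a sigmoid derivative, and one worked step makes the sign convention concrete. The scaling by learningRate happens below the lines shown in the hunk, so that final update is an assumption:

    // Worked example of the gradient term from UpdateNeuronWeights.
    // With error = 0.5, output = 0.8 and inputs[w] = 1.0:
    double sigmoidDerivative = 0.8 * (1.0 - 0.8);                // 0.16
    double weightDifference  = -(0.5 * sigmoidDerivative * 1.0); // -0.08
    // Assumed continuation (not shown in the hunk above):
    // weights[w] -= weightDifference * learningRate;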
"; std::string weightsFilePath = ".\\" + std::string(__FUNCTION__); - Activation::setThreshold(0.7); + Activation::SetThreshold(0.7); RandomUtils::SetRandomSeed(); std::vector neuronsByLayer({3, 3, 3, 1}); MultiLayerPerceptron mlp(2, neuronsByLayer); @@ -103,7 +103,7 @@ void TEST_MLP_XOR_2_3_3_3_1() mlp.RandomizeWeights(); #ifdef PERSIST_WEIGHTS - loadWeightsFromFile(weightsFilePath.c_str(), mlp); + Serialization::LoadWeightsFromFile(weightsFilePath.c_str(), mlp); #endif std::vector> trainingInputs = { { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } }; @@ -111,7 +111,7 @@ void TEST_MLP_XOR_2_3_3_3_1() ApplySupervisedLearning(mlp, trainingInputs, trainingOutputs); #ifdef PERSIST_WEIGHTS - serializeToFile(weightsFilePath.c_str(), mlp); + Serialization::SerializeToFile(weightsFilePath.c_str(), mlp); #endif } @@ -119,7 +119,7 @@ void TEST_MLP_XOR_2_3_2() { std::cout << __FUNCTION__ << "... "; std::string weightsFilePath = ".\\" + std::string(__FUNCTION__); - Activation::setThreshold(0.7); + Activation::SetThreshold(0.7); RandomUtils::SetRandomSeed(); std::vector neuronsByLayer({3, 2}); MultiLayerPerceptron mlp(2, neuronsByLayer); @@ -127,7 +127,7 @@ void TEST_MLP_XOR_2_3_2() mlp.RandomizeWeights(); #ifdef PERSIST_WEIGHTS - loadWeightsFromFile(weightsFilePath.c_str(), mlp); + Serialization::LoadWeightsFromFile(weightsFilePath.c_str(), mlp); #endif std::vector> trainingInputs = { { 0.0, 0.0 }, { 0.0, 1.0 }, { 1.0, 0.0 }, { 1.0, 1.0 } }; @@ -135,7 +135,7 @@ void TEST_MLP_XOR_2_3_2() ApplySupervisedLearning(mlp, trainingInputs, trainingOutputs); #ifdef PERSIST_WEIGHTS - serializeToFile(weightsFilePath.c_str(), mlp); + Serialization::SerializeToFile(weightsFilePath.c_str(), mlp); #endif } @@ -143,7 +143,7 @@ void TEST_MLP_number_recognition_digital_clock_0_to_9() { std::cout << __FUNCTION__ << "... 
"; std::string weightsFilePath = ".\\" + std::string(__FUNCTION__); - Activation::setThreshold(0.7); + Activation::SetThreshold(0.7); RandomUtils::SetRandomSeed(); std::vector neuronsByLayer({10, 10}); MultiLayerPerceptron mlp(7, neuronsByLayer); @@ -151,7 +151,7 @@ void TEST_MLP_number_recognition_digital_clock_0_to_9() mlp.RandomizeWeights(); #ifdef PERSIST_WEIGHTS - loadWeightsFromFile(weightsFilePath.c_str(), mlp); + Serialization::LoadWeightsFromFile(weightsFilePath.c_str(), mlp); #endif std::vector> trainingInputs = @@ -183,7 +183,7 @@ void TEST_MLP_number_recognition_digital_clock_0_to_9() ApplySupervisedLearning(mlp, trainingInputs, trainingOutputs); #ifdef PERSIST_WEIGHTS - serialization::serializeToFile(weightsFilePath.c_str(), mlp); + Serialization::SerializeToFile(weightsFilePath.c_str(), mlp); #endif } diff --git a/NeuralNetwork/Perceptron/Perceptron.cpp b/NeuralNetwork/Perceptron/Perceptron.cpp index 9227be8..fadc55b 100644 --- a/NeuralNetwork/Perceptron/Perceptron.cpp +++ b/NeuralNetwork/Perceptron/Perceptron.cpp @@ -7,22 +7,22 @@ Perceptron::Perceptron(int weights) : _bias(0.0) _neuron = std::make_unique(weights); } -double Perceptron::feedforward(const std::vector& inputs) +double Perceptron::Feedforward(const std::vector& inputs) { - double output = _neuron->feedforward(inputs, _bias); + double output = _neuron->Feedforward(inputs, _bias); return output; } -void Perceptron::train(const std::vector& inputs, double target, double learningRate) +void Perceptron::Train(const std::vector& inputs, double target, double learningRate) { - double output = feedforward(inputs); + double output = Feedforward(inputs); double error = (target - output); - std::vector weights = _neuron->getWeights(); + std::vector weights = _neuron->GetWeights(); for (int c = 0; c < weights.size(); ++c) { weights[c] += inputs[c] * error * learningRate; } - _neuron->setWeights(weights); + _neuron->SetWeights(weights); _bias += error; } @@ -39,9 +39,9 @@ void Perceptron::Deserialize(std::istream &stream) _neuron->Deserialize(stream); } -void Perceptron::randomizeWeights() +void Perceptron::RandomizeWeights() { - _neuron->randomizeWeights(); + _neuron->RandomizeWeights(); _bias = RandomUtils::Range(-0.1, 0.1); } diff --git a/NeuralNetwork/Perceptron/Perceptron.h b/NeuralNetwork/Perceptron/Perceptron.h index 40c1c8a..b135b9b 100644 --- a/NeuralNetwork/Perceptron/Perceptron.h +++ b/NeuralNetwork/Perceptron/Perceptron.h @@ -6,7 +6,7 @@ namespace NeuralNetwork { - class Perceptron final : public serialization::ISerializable + class Perceptron final : public Serialization::ISerializable { private: double _bias; @@ -15,9 +15,9 @@ namespace NeuralNetwork public: explicit Perceptron(int weights); Neuron* GetNeuron(); - double feedforward(const std::vector& inputs); - void train(const std::vector& inputs, double target, double learningRate); - void randomizeWeights(); + double Feedforward(const std::vector& inputs); + void Train(const std::vector& inputs, double target, double learningRate); + void RandomizeWeights(); void Serialize(std::ostream &stream) const override; void Deserialize(std::istream &stream) override; }; diff --git a/NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp b/NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp index 50b3cd6..6d89d07 100644 --- a/NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp +++ b/NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp @@ -8,10 +8,10 @@ void Test_Neuron_W1_greater_than_W2() RandomUtils::SetRandomSeed(); std::unique_ptr perceptron = 
diff --git a/NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp b/NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp
index 50b3cd6..6d89d07 100644
--- a/NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp
+++ b/NeuralNetwork/Perceptron/Tests/PerceptronTestMain.cpp
@@ -8,10 +8,10 @@ void Test_Neuron_W1_greater_than_W2()
     RandomUtils::SetRandomSeed();
     std::unique_ptr<Perceptron> perceptron = std::make_unique<Perceptron>(2);
     perceptron->GetNeuron()->ActivationFunction = NeuralNetwork::Activation::EActivationFunctionType::Sign;
-    perceptron->randomizeWeights();
+    perceptron->RandomizeWeights();
 #ifdef PERSIST_WEIGHTS
     std::string weightsFilePath = ".\\" + std::string(__FUNCTION__);
-    loadWeightsFromFile(weightsFilePath.c_str(), *perceptron);
+    Serialization::LoadWeightsFromFile(weightsFilePath.c_str(), *perceptron);
 #endif
 
     int iterations = 0;
@@ -34,17 +34,17 @@ void Test_Neuron_W1_greater_than_W2()
         1.0, -1.0, 1.0, -1.0, 1.0, -1.0};
     for (int i = 0; i < inputs.size(); ++i)
     {
-        double guess = perceptron->feedforward(inputs[i]);
+        double guess = perceptron->Feedforward(inputs[i]);
         if (guess != expectedOutputs[i])
         {
             iterations++;
-            perceptron->train(inputs[i], expectedOutputs[i], 0.1);
+            perceptron->Train(inputs[i], expectedOutputs[i], 0.1);
             i = -1;
         }
     }
     std::cout << "The network has been trained! (iterations: " << iterations << ")" << std::endl;
 
 #ifdef PERSIST_WEIGHTS
-    serializeToFile(weightsFilePath.c_str(), *perceptron);
+    Serialization::SerializeToFile(weightsFilePath.c_str(), *perceptron);
 #endif
 }
@@ -54,10 +54,10 @@ void Test_Neuron_W2_greater_than_W1()
     RandomUtils::SetRandomSeed();
     std::unique_ptr<Perceptron> perceptron = std::make_unique<Perceptron>(2);
     perceptron->GetNeuron()->ActivationFunction = NeuralNetwork::Activation::EActivationFunctionType::Sign;
-    perceptron->randomizeWeights();
+    perceptron->RandomizeWeights();
 #ifdef PERSIST_WEIGHTS
     std::string weightsFilePath = ".\\" + std::string(__FUNCTION__);
-    loadWeightsFromFile(weightsFilePath.c_str(), *perceptron);
+    Serialization::LoadWeightsFromFile(weightsFilePath.c_str(), *perceptron);
 #endif
     int iterations = 0;
     std::vector<std::vector<double>> inputs =
@@ -76,22 +76,22 @@ void Test_Neuron_W2_greater_than_W1()
     // apply sigmoid on inputs
     for (std::vector<double>& input : inputs)
     {
-        NeuralNetwork::Activation::sigmoid(input);
+        NeuralNetwork::Activation::Sigmoid(input);
     }
 
     for (int i = 0; i < inputs.size(); ++i)
     {
-        double guess = perceptron->feedforward(inputs[i]);
+        double guess = perceptron->Feedforward(inputs[i]);
         if (guess != expectedOutputs[i])
         {
             iterations++;
-            perceptron->train(inputs[i], expectedOutputs[i], 0.1);
+            perceptron->Train(inputs[i], expectedOutputs[i], 0.1);
             i = -1;
         }
     }
     std::cout << "The network has been trained! (iterations: " << iterations << ")" << std::endl;
 
 #ifdef PERSIST_WEIGHTS
-    serializeToFile(weightsFilePath.c_str(), *perceptron);
+    Serialization::SerializeToFile(weightsFilePath.c_str(), *perceptron);
 #endif
 }
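Taken together, the PERSIST_WEIGHTS blocks in this patch amount to a save/load round trip through the new Serialization namespace. A sketch of that round trip, assuming the (path, serializable) call shapes used in the hunks above; the file name is hypothetical:

    std::vector<size_t> neuronsByLayer({2, 1});
    MultiLayerPerceptron mlp(2, neuronsByLayer);
    mlp.RandomizeWeights();

    // Persist the weights, then restore them into a second network.
    Serialization::SerializeToFile("xor_2_2_1.weights", mlp);

    MultiLayerPerceptron restored(2, neuronsByLayer);
    Serialization::LoadWeightsFromFile("xor_2_2_1.weights", restored);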
(iterations: " << iterations << ")" << std::endl; #ifdef PERSIST_WEIGHTS - serializeToFile(weightsFilePath.c_str(), *perceptron); + Serialization::SerializeToFile(weightsFilePath.c_str(), *perceptron); #endif } From 42bd69bace3f58a43d71448fecfc00a58a094f01 Mon Sep 17 00:00:00 2001 From: AndreiSchuch Date: Mon, 28 Aug 2023 18:36:05 -0300 Subject: [PATCH 20/20] Creating tests for using the static library from MathLibrary --- CMakeLists.txt | 2 ++ Examples/MathLibrary/ApiExports.h | 11 ++++++++ Examples/MathLibrary/MathLibrary.cpp | 9 +++++-- Examples/MathLibrary/MathLibrary.h | 21 +++++++-------- Examples/MathLibrary/Tests/CMakeLists.txt | 9 +++++++ .../MathLibrary/Tests/MathLibraryTestMain.cpp | 22 ++++++++++++++++ Miscellaneous/BitMath.cpp | 24 +++++++++++++++++ Miscellaneous/BitMath.h | 26 +++---------------- 8 files changed, 89 insertions(+), 35 deletions(-) create mode 100644 Examples/MathLibrary/ApiExports.h create mode 100644 Examples/MathLibrary/Tests/CMakeLists.txt create mode 100644 Examples/MathLibrary/Tests/MathLibraryTestMain.cpp create mode 100644 Miscellaneous/BitMath.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 785b802..7f08ec6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -17,3 +17,5 @@ add_subdirectory(NeuralNetwork/MLP/Tests) # include examples from subdirectories add_subdirectory(Examples/MathLibrary) +# include MathLibrary tests +add_subdirectory(Examples/MathLibrary/Tests) diff --git a/Examples/MathLibrary/ApiExports.h b/Examples/MathLibrary/ApiExports.h new file mode 100644 index 0000000..45a6fd5 --- /dev/null +++ b/Examples/MathLibrary/ApiExports.h @@ -0,0 +1,11 @@ +#pragma once + +#if defined(_WIN32) || defined(WIN32) +# ifdef MATHLIBRARY_EXPORTS +# define MATHLIBRARY_API __declspec(dllexport) +# else +# define MATHLIBRARY_API __declspec(dllimport) +# endif +#else +# define MATHLIBRARY_API +#endif diff --git a/Examples/MathLibrary/MathLibrary.cpp b/Examples/MathLibrary/MathLibrary.cpp index f7c7744..8ef07a7 100644 --- a/Examples/MathLibrary/MathLibrary.cpp +++ b/Examples/MathLibrary/MathLibrary.cpp @@ -10,7 +10,12 @@ int GetOne() return 1; } -int main(int argc, char *argv[]) +int MathFacade::Sum(int lhs, int rhs) { - return 0; + return ::Sum(lhs, rhs); +} + +int MathFacade::GetOne() +{ + return ::GetOne(); } diff --git a/Examples/MathLibrary/MathLibrary.h b/Examples/MathLibrary/MathLibrary.h index ded6e3c..080d83c 100644 --- a/Examples/MathLibrary/MathLibrary.h +++ b/Examples/MathLibrary/MathLibrary.h @@ -1,16 +1,15 @@ // MathLibrary.h - Contains declarations of math functions #pragma once -#if defined(_WIN32) || defined(WIN32) -# ifdef MATHLIBRARY_EXPORTS -# define MATHLIBRARY_API __declspec(dllexport) -# else -# define MATHLIBRARY_API __declspec(dllimport) -# endif -#else -# define MATHLIBRARY_API -#endif +#include "ApiExports.h" -extern "C" MATHLIBRARY_API int Sum(int lhs, int rhs); +MATHLIBRARY_API class MathFacade { +public: + static int Sum(int lhs, int rhs); + static int GetOne(); +}; -extern "C" MATHLIBRARY_API int GetOne(); +extern "C" { + MATHLIBRARY_API int Sum(int lhs, int rhs); + MATHLIBRARY_API int GetOne(); +} diff --git a/Examples/MathLibrary/Tests/CMakeLists.txt b/Examples/MathLibrary/Tests/CMakeLists.txt new file mode 100644 index 0000000..6601cd5 --- /dev/null +++ b/Examples/MathLibrary/Tests/CMakeLists.txt @@ -0,0 +1,9 @@ +set(FILES + MathLibraryTestMain.cpp +) +add_executable(MathLibrary ${FILES}) + +# link static library into executable +add_library(MathLibraryLib STATIC IMPORTED) +set_target_properties(MathLibraryLib PROPERTIES 
diff --git a/Examples/MathLibrary/Tests/MathLibraryTestMain.cpp b/Examples/MathLibrary/Tests/MathLibraryTestMain.cpp
new file mode 100644
index 0000000..828a1a0
--- /dev/null
+++ b/Examples/MathLibrary/Tests/MathLibraryTestMain.cpp
@@ -0,0 +1,22 @@
+#include <iostream>
+
+#include "../MathLibrary.h"
+
+using namespace std;
+
+int main()
+{
+    cout << "MathFacade::Sum" << endl;
+    cout << "  (1, 3): " << MathFacade::Sum(1, 3) << " " << endl;
+
+    cout << "::Sum" << endl;
+    cout << "  (1, 3): " << Sum(1, 3) << " " << endl;
+
+    cout << "MathFacade::GetOne" << endl;
+    cout << "  (): " << MathFacade::GetOne() << " " << endl;
+
+    cout << "::GetOne" << endl;
+    cout << "  (): " << GetOne() << " " << endl;
+
+    return 0;
+}
diff --git a/Miscellaneous/BitMath.cpp b/Miscellaneous/BitMath.cpp
new file mode 100644
index 0000000..dcae02f
--- /dev/null
+++ b/Miscellaneous/BitMath.cpp
@@ -0,0 +1,24 @@
+#include <cmath>
+
+namespace BitMath
+{
+    int ReverseBits(const int value, int maxBits)
+    {
+        int reversedBits = 0;
+        maxBits = (maxBits % 2 == 0) ? maxBits : maxBits - 1;
+        int halfBits = (int) floor(maxBits / 2);
+        for (int bit = 0; bit < halfBits; bit++)
+        {
+            int transposeDifference = (maxBits - (bit * 2) - 1);
+
+            int rBit = value & (1 << bit);
+            int lBit = rBit << transposeDifference;
+            reversedBits |= lBit;
+
+            lBit = value & (1 << (maxBits - 1 - bit));
+            rBit = lBit >> transposeDifference;
+            reversedBits |= rBit;
+        }
+        return reversedBits;
+    }
+}
diff --git a/Miscellaneous/BitMath.h b/Miscellaneous/BitMath.h
index f37298f..b5caa97 100644
--- a/Miscellaneous/BitMath.h
+++ b/Miscellaneous/BitMath.h
@@ -3,26 +3,8 @@
 namespace BitMath
 {
     /// @brief Reverse the bits of an integer value.
-    /// @param value
-    /// @param maxBits
-    /// @return
-    int ReverseBits(const int value, int maxBits)
-    {
-        int reversedBits = 0;
-        maxBits = (maxBits % 2 == 0) ? maxBits : maxBits - 1;
-        int halfBits = (int) floor(maxBits / 2);
-        for (int bit = 0; bit < halfBits; bit++)
-        {
-            int transposeDifference = (maxBits - (bit * 2) - 1);
-
-            int rBit = value & (1 << bit);
-            int lBit = rBit << transposeDifference;
-            reversedBits |= lBit;
-
-            lBit = value & (1 << (maxBits - 1 - bit));
-            rBit = lBit >> transposeDifference;
-            reversedBits |= rBit;
-        }
-        return reversedBits;
-    }
+    /// @param value The value to be reversed.
+    /// @param maxBits The maximum number of bits to reverse.
+    /// @return The reversed value.
+    int ReverseBits(const int value, int maxBits);
 }
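A quick way to sanity-check ReverseBits is to assert a few 4-bit cases; the expected values below follow directly from the loop in BitMath.cpp. Note that for an odd maxBits the implementation rounds down and only mirrors the low maxBits - 1 bits:

    #include <cassert>
    #include "Miscellaneous/BitMath.h" // path assumed relative to the repository root

    int main()
    {
        assert(BitMath::ReverseBits(0b0001, 4) == 0b1000); // lowest bit -> highest
        assert(BitMath::ReverseBits(0b0010, 4) == 0b0100);
        assert(BitMath::ReverseBits(0b0110, 4) == 0b0110); // palindromes are unchanged
        return 0;
    }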