Skip to content

Commit

Permalink
Merge pull request #1 from CrystSw/dev
Browse files Browse the repository at this point in the history
Merge dev-branch.
  • Loading branch information
CrystSw authored Jun 8, 2019
2 parents 27ccc97 + 2cb4f6e commit 74c4cf8
Show file tree
Hide file tree
Showing 11 changed files with 567 additions and 379 deletions.
19 changes: 16 additions & 3 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
# Simple-Neural-Network
# Simple-Neural-Network
## 概要
A sample 2-layer categorization neural network (CNN).
This network consists of 2 layers (an input layer and an output layer); it has no hidden layer.
Expand All @@ -16,7 +16,9 @@ This CNN consists of 2-layer(input-layer, output-layer). not exist hidden-layer.

の4つのファイルをダウンロードおよび解凍し,src/mnistの中へ入れてください.

Windows10 64bit(gcc 6.3.0)とCentOS6.10(gcc 4.4.7)で動作確認を行っています.
Windows10 64bit(gcc 6.3.0)とCentOS6.10(gcc 4.4.7)で動作確認を行っています.

**◎ver1.1以前(旧バージョン)**
コンパイルを行うには次のコマンドを実行します.
```gcc snnet.c main.c -o snnet -lm```
最適化オプションを付けたほうが,動作は早くなります.
Expand All @@ -25,7 +27,18 @@ Windows10 64bit(gcc 6.3.0)とCentOS6.10(gcc 4.4.7)で動作確認を行ってい
```./snnet learn```

テストデータを基に識別を行うには次のコマンドを実行します.
```./snnet test```
```./snnet test```

**◎ver1.2以降(新バージョン)**
コンパイルを行うには次のコマンドを実行します.
```gcc nnet.c util.c mnist.c main.c -o snnet -lm```
最適化オプションを付けたほうが,動作は早くなります.

教師データを基に学習を行うには次のコマンドを実行します.
```./snnet -l```

テストデータを基に識別を行うには次のコマンドを実行します.
```./snnet -t```

---
## 注意事項
Expand Down
10 changes: 10 additions & 0 deletions src/header/mnist.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
/*
 * mnist.h — loaders for the MNIST dataset files.
 *
 * Each function takes the path of an MNIST data file and returns the
 * parsed contents in one of the structures declared in nnet_types.h.
 * (Implementations live in mnist.c; allocation/ownership semantics are
 * defined there — presumably the caller owns the returned buffers;
 * TODO confirm against mnist.c.)
 *
 * NOTE: the original guard name __SNN_MNIST_H_INCLUDED__ used a leading
 * double underscore, which is reserved for the implementation (C11
 * 7.1.3); renamed to a non-reserved identifier.
 */
#ifndef SNN_MNIST_H_INCLUDED
#define SNN_MNIST_H_INCLUDED

#include "nnet_types.h"

/* Read an MNIST image file into feature-value form (FVALUE). */
extern FVALUE read_mnistimg(char *filename);
/* Read an MNIST label file as one-hot vectors (LABELOHV) — used for training. */
extern LABELOHV read_mnistlbl_ohv(char *filename);
/* Read an MNIST label file as plain integer labels (LABEL) — used for testing. */
extern LABEL read_mnistlbl(char *filename);

#endif /* SNN_MNIST_H_INCLUDED */
10 changes: 3 additions & 7 deletions src/snnet.h → src/header/nnet.h
Original file line number Diff line number Diff line change
@@ -1,17 +1,13 @@
#ifndef __SIMPLE_NNET_H__
#define __SIMPLE_NNET_H__
#ifndef __SNN_NNET_H_INCLUDED__
#define __SNN_NNET_H_INCLUDED__

#include "snnet-types.h"
#include "nnet_types.h"

extern FVALUE read_mnistimg(char *filename);
extern LABEL read_mnistlbl(char *filename);
extern LABELOHV read_mnistlbl_ohv(char *filename);
extern double softmax(const double *output, const int id, const int size);
extern double cross_entropy(const double *rpp, const double *wpp, const int size);
extern void nncout_train(const TRAINDATA td, double **weight, const double *bias, const int id, double *catout);
extern void nncout_test(const TESTDATA td, double **weight, const double *bias, const int id, double *catout);
extern void nncpp(const double *catout, const int size, double *catpp);
extern double xentrloss(const TRAINDATA td, double **weight, const double *bias);
extern int getmax(const double *p, int size);

#endif
5 changes: 5 additions & 0 deletions src/snnet-types.h → src/header/nnet_types.h
Original file line number Diff line number Diff line change
@@ -1,3 +1,6 @@
#ifndef __SNN_NNET_TYPES_H_INCLUDED__
#define __SNN_NNET_TYPES_H_INCLUDED__

typedef unsigned char u_char;

typedef struct {
Expand Down Expand Up @@ -29,3 +32,5 @@ typedef struct{
FVALUE fval;
LABEL label;
} TESTDATA;

#endif
16 changes: 16 additions & 0 deletions src/header/util.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
/*
 * util.h — helpers for 1-D/2-D double arrays: allocation, release,
 * argmax, and whole-array file I/O.
 *
 * Naming convention: the _d1 suffix means a 1-D array (double *),
 * _d2 means a 2-D array (double **, size1 rows of size2 columns).
 * (Implementations live in util.c.)
 *
 * NOTE: the original guard name __SNN_UTIL_H_INCLUDED__ used a leading
 * double underscore, which is reserved for the implementation (C11
 * 7.1.3); renamed to a non-reserved identifier.
 */
#ifndef SNN_UTIL_H_INCLUDED
#define SNN_UTIL_H_INCLUDED

/* Allocate a 1-D array of size1 doubles (malloc: uninitialized). */
extern double *malloc_d1(const int size1);
/* Allocate a 1-D array of size1 doubles (calloc: zero-initialized). */
extern double *calloc_d1(const int size1);
/* Allocate a 2-D array: size1 rows x size2 columns (uninitialized). */
extern double **malloc_d2(const int size1, const int size2);
/* Allocate a 2-D array: size1 rows x size2 columns (zero-initialized). */
extern double **calloc_d2(const int size1, const int size2);
/* Free a 1-D array obtained from malloc_d1/calloc_d1. */
extern void free_d1(double *array);
/* Free a 2-D array of size1 rows obtained from malloc_d2/calloc_d2. */
extern void free_d2(double **array, const int size1);
/* Return the index of the maximum element in array[0..size1-1]. */
extern int getmax(const double *array, int size1);
/* Write/read size1 doubles to/from the named file (1-D array). */
extern void writefile_d1(const char *filename, double *array_d1, const int size1);
extern void readfile_d1(const char *filename, double *array_d1, const int size1);
/* Write/read a size1 x size2 block of doubles to/from the named file (2-D array). */
extern void writefile_d2(const char *filename, double **array_d2, const int size1, const int size2);
extern void readfile_d2(const char *filename, double **array_d2, const int size1, const int size2);

#endif /* SNN_UTIL_H_INCLUDED */
143 changes: 44 additions & 99 deletions src/main.c
Original file line number Diff line number Diff line change
Expand Up @@ -2,19 +2,20 @@
#include <string.h>
#include <stdlib.h>

#include "snnet.h"
#include "header/nnet.h"
#include "header/util.h"
#include "header/mnist.h"

#define LOSS_CALC_RATE 5000
//#define OUTPUT_IMAGE

void learn(void);
void test(void);

int main(int argc, char *argv[]){
if(argc > 1){
if(!strcmp(argv[1],"learn")){
if(!strcmp(argv[1],"-l")){
learn();
}else if(!strcmp(argv[1],"test")){
}else if(!strcmp(argv[1],"-t")){
test();
}
}
Expand All @@ -39,20 +40,15 @@ void learn(void){
double alpha = 0.0000001;

//重みとバイアスの宣言(callocで確保します)
double **weight, *bias;
weight = (double**)calloc(td.label.size, sizeof(double*));
for(j = 0; j < td.label.size; ++j){
weight[j] = (double*)calloc(td.fval.size, sizeof(double));
}
bias = (double*)calloc(td.label.size, sizeof(double));
double **weight = calloc_d2(td.label.size, td.fval.size);
double *bias = calloc_d1(td.label.size);

//学習前のクロスエントロピーの総和
printf("Data: %6d, Loss: %.8f\n", 0, xentrloss(td, weight, bias));

//カテゴリ出力とカテゴリ事後確率の宣言
double *catout, *catpp;
catout = (double*)malloc(sizeof(double)*td.label.size);
catpp = (double*)malloc(sizeof(double)*td.label.size);
double *catout = malloc_d1(td.label.size);
double *catpp = malloc_d1(td.label.size);

//学習
for(i = 0; i < td.fval.num; ++i){
Expand All @@ -69,43 +65,25 @@ void learn(void){
//バイアスの更新
bias[j] += alpha*(td.label.data[i][j]-catpp[j]);
}
//クロスエントロピーの総和
//クロスエントロピー誤差(計算負荷の高い処理なので,実行回数は少なめ)
if((i+1) % LOSS_CALC_RATE == 0) printf("Data: %6d, Loss: %.8f\n", i+1, xentrloss(td, weight, bias));
}
free(catout);
free(catpp);
free_d1(catout);
free_d1(catpp);

//重みの書き出し
FILE *wwp;
if((wwp = fopen("weight-value","w")) != NULL){
for(i = 0; i < td.label.size; ++i){
for(j = 0; j < td.fval.size; ++j){
fprintf(wwp, "%.20f\n", weight[i][j]);
}
}
}else{
fprintf(stderr, "Error: Cannot open file.");
exit(1);
}
fclose(wwp);

writefile_d2("./result/weight-value", weight, td.label.size, td.fval.size);
//バイアスの書き出し
FILE *wbp;
if((wbp = fopen("bias-value","w")) != NULL){
for(i = 0; i < td.label.size; ++i){
fprintf(wbp, "%.20f\n", bias[i]);
}
}else{
fprintf(stderr, "Error: Cannot open file.");
exit(1);
}
fclose(wbp);
writefile_d1("./result/bias-value", bias, td.label.size);

for(i = 0; i < td.label.size; ++i) free(weight[i]);
free(weight);
free(bias);
free_d2(weight, td.label.size);
free_d1(bias);
}

/**
* 学習したニューラルネットワークを用いてテストデータの分類を行う.
* 結果は混同行列として出力される.
*/
void test(void){
int i, j, k;

Expand All @@ -117,50 +95,20 @@ void test(void){
puts("Label data has loaded.");

//重みとバイアスの宣言(callocで確保します)
double **weight, *bias;
weight = (double**)calloc(td.label.size, sizeof(double*));
for(j = 0; j < td.label.size; ++j){
weight[j] = (double*)calloc(td.fval.size, sizeof(double));
}
bias = (double*)calloc(td.label.size, sizeof(double));
double **weight = calloc_d2(td.label.size, td.fval.size);
double *bias = calloc_d1(td.label.size);

//重みの読み込み
FILE *rwp;
if((rwp = fopen("weight-value","r")) != NULL){
for(i = 0; i < td.label.size; ++i){
for(j = 0; j < td.fval.size; ++j){
fscanf(rwp, "%lf", &weight[i][j]);
}
}
}else{
fprintf(stderr, "Error: Cannot open file.");
exit(1);
}
fclose(rwp);

readfile_d2("./result/weight-value", weight, td.label.size, td.fval.size);
//バイアスの読み込み
FILE *rbp;
if((rbp = fopen("bias-value","r")) != NULL){
for(i = 0; i < td.label.size; ++i){
fscanf(rbp, "%lf", &bias[i]);
}
}else{
fprintf(stderr, "Error: Cannot open file.");
exit(1);
}
fclose(rbp);
readfile_d1("./result/bias-value", bias, td.label.size);

//混同行列
double **confmat;
confmat = (double**)calloc(td.label.size, sizeof(double*));
for(j = 0; j < td.label.size; ++j){
confmat[j] = (double*)calloc(td.fval.size, sizeof(double));
}
double **confmat = calloc_d2(td.label.size, td.fval.size);

//カテゴリ出力とカテゴリ事後確率の宣言
double *catout, *catpp;
catout = (double*)malloc(sizeof(double)*td.label.size);
catpp = (double*)malloc(sizeof(double)*td.label.size);
double *catout = malloc_d1(td.label.size);
double *catpp = malloc_d1(td.label.size);

//カテゴリ分類
int m_category;
Expand All @@ -171,21 +119,13 @@ void test(void){
nncpp(catout, td.label.size, catpp);
//事後確率が最大であるカテゴリを導出
m_category = getmax(catpp, td.label.size);
//分類結果を格納(混同行列の計算に利用)
confmat[td.label.data[i]][m_category] += 1;
printf("[Data:%d]right:%d predict:%d(pp:%.1f)\n", i+1, td.label.data[i], m_category, catpp[m_category]*100);
#ifdef OUTPUT_IMAGE
//誤った画像の表示
if(td.label.data[i] != m_category){
puts("-----Image-----");
for(j = 0; j < td.fval.size; ++j){
putchar(td.fval.data[i][j] < 128 ? '-' : '#');
if((j+1) % td.fval.c_size == 0) putchar('\n');
}
}
#endif
//分類結果を出力
printf("[Data:%d]truth:%d predict:%d(pp:%.1f)\n", i+1, td.label.data[i], m_category, catpp[m_category]*100);
}
free(catout);
free(catpp);
free_d1(catout);
free_d1(catpp);

//混同行列の計算
int cat_num;
Expand All @@ -194,7 +134,6 @@ void test(void){
for(j = 0; j < td.label.size; ++j) cat_num += confmat[i][j];
for(j = 0; j < td.label.size; ++j) confmat[i][j] /= cat_num;
}

//混同行列の出力
puts("-----Confusion Matrix-----");
printf(" ");
Expand All @@ -208,10 +147,16 @@ void test(void){
putchar('\n');
}

for(i = 0; i < td.label.size; ++i) free(weight[i]);
free(weight);
free(bias);
//正解率の出力
puts("-----Accuracy Rate-----");
double arate = 0;
for(i = 0; i < td.label.size; ++i){
arate += confmat[i][i];
}
arate /= td.label.size;
printf("%3.1f\n", arate*100);

for(i = 0; i < td.label.size; ++i) free(confmat[i]);
free(confmat);
}
free_d2(weight, td.label.size);
free_d1(bias);
free_d2(confmat, td.label.size);
}
Loading

0 comments on commit 74c4cf8

Please sign in to comment.