-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathlin_reg.c
107 lines (96 loc) · 3.42 KB
/
lin_reg.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
#include "../lib/clear_net.h"
#include <stdio.h>
// function to learn
// y = 2 + 4a - 3b + 5c + 6d - 2e + 7f - 8g + 9h
#define data cn.data
// Return a pseudo-random value uniformly distributed in [lower, upper].
// Uses rand(), so reproducibility is controlled by the caller's srand() seed.
scalar rand_range(scalar lower, scalar upper) {
    scalar unit = (scalar)rand() / RAND_MAX; // in [0, 1]
    return lower + unit * (upper - lower);
}
// Ground-truth linear map the network is trained to learn:
//   y = 2 + 4a - 3b + 5c + 6d - 2e + 7f - 8g + 9h
scalar do_func(scalar a, scalar b, scalar c, scalar d, scalar e, scalar f,
               scalar g, scalar h) {
    scalar y = 2;
    y += 4 * a;
    y -= 3 * b;
    y += 5 * c;
    y += 6 * d;
    y -= 2 * e;
    y += 7 * f;
    y -= 8 * g;
    y += 9 * h;
    return y;
}
int main(void) {
srand(0);
ulong num_train = 100;
scalar lower = -1;
scalar upper = 1;
ulong dim_input = 8;
ulong dim_output = 1;
Vector *inputs = data.allocVectors(num_train, dim_input);
Vector *val_inputs = data.allocVectors(num_train, dim_input);
Vector *targets = data.allocVectors(num_train, dim_output);
Vector *val_targets = data.allocVectors(num_train, dim_output);
scalar max =
do_func(upper, upper, upper, upper, upper, upper, upper, upper);
scalar a;
scalar b;
scalar c;
scalar d;
scalar e;
scalar f;
scalar g;
scalar h;
for (ulong i = 0; i < num_train; ++i) {
for (ulong j = 0; j < dim_input; ++j) {
VEC_AT(inputs[i], j) = rand_range(lower, upper);
VEC_AT(val_inputs[i], j) = rand_range(lower, upper);
}
}
for (ulong i = 0; i < num_train; ++i) {
a = VEC_AT(inputs[i], 0);
b = VEC_AT(inputs[i], 1);
c = VEC_AT(inputs[i], 2);
d = VEC_AT(inputs[i], 3);
e = VEC_AT(inputs[i], 4);
f = VEC_AT(inputs[i], 5);
g = VEC_AT(inputs[i], 6);
h = VEC_AT(inputs[i], 7);
VEC_AT(targets[i], 0) = do_func(a, b, c, d, e, f, g, h);
VEC_AT(targets[i], 0) /= max;
a = VEC_AT(val_inputs[i], 0);
b = VEC_AT(val_inputs[i], 1);
c = VEC_AT(val_inputs[i], 2);
d = VEC_AT(val_inputs[i], 3);
e = VEC_AT(val_inputs[i], 4);
f = VEC_AT(val_inputs[i], 5);
g = VEC_AT(val_inputs[i], 6);
h = VEC_AT(val_inputs[i], 7);
VEC_AT(val_targets[i], 0) = do_func(a, b, c, d, e, f, g, h);
VEC_AT(val_targets[i], 0) /= max;
}
CNData *io_ins = data.allocDataFromVectors(inputs, num_train);
CNData *io_tars = data.allocDataFromVectors(targets, num_train);
CNData *io_val_ins = data.allocDataFromVectors(val_inputs, num_train);
CNData *io_val_tars = data.allocDataFromVectors(val_targets, num_train);
HParams *hp = cn.allocDefaultHParams();
cn.setRate(hp, 0.01);
Net *net = cn.allocVanillaNet(hp, 8);
cn.allocDenseLayer(net, TANH, 1);
cn.randomizeNet(net, -1, 1);
ulong num_epochs = 200000;
scalar error_break = 0.01f;
scalar loss;
for (ulong i = 0; i < num_epochs; ++i) {
loss = cn.lossVanilla(net, io_ins, io_tars);
cn.backprop(net);
if (i % (num_epochs / 20) == 0) {
printf("Cost at %zu: %f\n", i, loss);
}
if (loss < error_break) {
printf("Less than: %f error at epoch %zu\n", error_break, i);
break;
}
}
printf("Final output: %f\n", cn.lossVanilla(net, io_ins, io_tars));
cn.printVanillaPredictions(net, io_val_ins, io_val_tars);
char *file_name = "model";
cn.saveNet(net, file_name);
cn.deallocNet(net);
net = cn.allocNetFromFile(file_name);
printf("After Loading From File\n");
cn.printVanillaPredictions(net, io_val_ins, io_val_tars);
cn.deallocNet(net);
return 0;
}