-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy path: server.js
executable file
·115 lines (91 loc) · 3.18 KB
/
server.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
// Filesystem access for reading local image files.
const fs = require('fs');
// AWS credentials are kept out of the repo in keys.js.
const credentials = require('./keys.js');
// Don't forget to install with NPM
const AWS = require('aws-sdk');
// Rekognition client.
// Replace accessKeyId and secretAccessKey with your values.
const rekognition = new AWS.Rekognition({
  apiVersion: '2016-06-27',
  accessKeyId: credentials.ACCESS_KEY_ID,
  secretAccessKey: credentials.SECRET_ACCESS_KEY,
  region: 'us-east-1'
});
// Have to proxy the stream from the camera for p5.js
// https://github.com/legege/node-mjpeg-proxy
// var MjpegProxy = require('mjpeg-proxy').MjpegProxy;
const express = require('express');
const app = express();
app.use(express.static('public'));
// Put in the actual camera IP
const CAMERA_IP = 'CAMERA IP';
// Credentials don't work yet
// var USERNAME = "root";
// var PASSWORD = "enter";
const CAMERA_URL = `http://${CAMERA_IP}/mjpg/video.mjpg`;
// Root route: simple greeting / liveness check.
app.get('/', (req, res) => {
  res.send('Hello World!');
});
// Proxy the mjpeg stream
// app.get('/stream', new MjpegProxy(CAMERA_URL).proxyRequest);
// Just an example showing how to submit an image from node
// NOTE(review): this searches collection 'veillance' while /prime builds
// 'tiatiaitp' — confirm which collection id is intended.
app.get('/test', function (req, res) {
  // Search for face match
  fs.readFile('bikes.jpg', function (err, data) {
    if (err) {
      // Previously unhandled: a missing file would have crashed on Buffer creation.
      console.log(err, err.stack);
      res.status(500).send('Could not read bikes.jpg');
      return;
    }
    var params = {
      CollectionId: 'veillance', /* required */
      Image: { /* required */
        // fs.readFile already yields a Buffer — avoids the deprecated new Buffer().
        Bytes: data
      },
      // The Rekognition API expects numbers here, not strings.
      FaceMatchThreshold: 90,
      MaxFaces: 1
    };
    rekognition.searchFacesByImage(params, function (err, data) {
      if (err) {
        console.log(err, err.stack); // an error occurred
        res.status(500).send('searchFacesByImage failed');
        return;
      }
      console.log(data); // successful response
      // Guard: FaceMatches is empty when no face in the collection matches.
      if (data.FaceMatches && data.FaceMatches.length > 0) {
        console.log(data.FaceMatches[0].Face);
      }
      // Respond so the HTTP request no longer hangs with no reply.
      res.json(data);
    });
  });
});
// Prime the pump: rebuild the face collection from the images in ./prime.
app.get('/prime', function (req, res) {
  // Single source of truth for the collection id within this route
  // (was duplicated as two string literals).
  var collectionId = 'tiatiaitp';
  var cparams = {
    CollectionId: collectionId
  };
  // Drop any existing collection first. On a fresh run the delete is expected
  // to fail (collection doesn't exist yet), so we log and continue regardless.
  rekognition.deleteCollection(cparams, function (err, data) {
    if (err) console.log(err, err.stack); // an error occurred
    else console.log('delete', data); // successful response
    rekognition.createCollection(cparams, function (err, data) {
      if (err) {
        console.log(err, err.stack); // an error occurred
        res.status(500).send('createCollection failed');
        return;
      }
      console.log('create', data); // successful response
      // Create the index // read all of the images in the directory
      fs.readdir('prime', function (err, list) {
        if (err) {
          // Previously unhandled: a missing directory would have crashed forEach.
          console.log(err, err.stack);
          res.status(500).send('could not read prime directory');
          return;
        }
        list.forEach(function (filename) {
          if (filename.includes('.jpg') || filename.includes('.png')) {
            fs.readFile('prime/' + filename, function (err, data) {
              if (err) {
                console.log(err, err.stack);
                return;
              }
              console.log(filename);
              var params = {
                CollectionId: collectionId,
                DetectionAttributes: ['DEFAULT'],
                ExternalImageId: filename,
                Image: {
                  // fs.readFile already yields a Buffer — avoids deprecated new Buffer().
                  Bytes: data
                }
              };
              rekognition.indexFaces(params, function (err, data) {
                if (err) console.log(err, err.stack); // an error occurred
                else console.log('index', data); // successful response
              });
            });
          }
        });
        // Respond now; indexing continues asynchronously in the background.
        res.send('Priming collection from ' + list.length + ' directory entries');
      });
    });
  });
});
// Start the HTTP server on port 8080.
app.listen(8080, () => {
  console.log('Example app listening on port 8080!');
});