script.js
// Grab the <video id="video"> element that will display the webcam feed
const video = document.querySelector("video#video")

// Start streaming the webcam into the video element.
// navigator.mediaDevices.getUserMedia is the modern, promise-based API;
// the old callback-style navigator.getUserMedia is deprecated.
const startVideo = () => {
  navigator.mediaDevices.getUserMedia({ video: {} })
    .then(stream => video.srcObject = stream)
    .catch(err => console.error(err))
}
// NOTE: adjust the `modelPath` prefix to match where the project is hosted
const modelPath = "/FaceDetectionJS/models"; // Use this when hosting on GitHub Pages
// const modelPath = "/models"; // Use this when hosting on your own (local) server
// Load the pretrained face-api.js models, then start the webcam
Promise.all([
  faceapi.nets.tinyFaceDetector.loadFromUri(modelPath),
  faceapi.nets.faceLandmark68Net.loadFromUri(modelPath),
  faceapi.nets.faceRecognitionNet.loadFromUri(modelPath),
  faceapi.nets.faceExpressionNet.loadFromUri(modelPath)
]).then(startVideo)
// Once the video starts playing, run detection on a fixed interval
video.addEventListener("play", () => {
  // Overlay a canvas on the page, sized to match the video
  const canvas = faceapi.createCanvasFromMedia(video)
  document.body.append(canvas)

  const displaySize = {
    width: video.width,
    height: video.height
  }
  faceapi.matchDimensions(canvas, displaySize)

  setInterval(async () => {
    // Detect all faces plus their landmarks and expressions with the Tiny Face Detector
    const detections = await faceapi.detectAllFaces(video, new faceapi.TinyFaceDetectorOptions()).withFaceLandmarks().withFaceExpressions()
    console.log(detections, canvas.width, canvas.height) // debug output

    // Scale the results to the displayed video size and redraw the overlay
    const resizedDetections = faceapi.resizeResults(detections, displaySize)
    const ctx = canvas.getContext("2d")
    ctx.clearRect(0, 0, canvas.width, canvas.height)
    faceapi.draw.drawDetections(canvas, resizedDetections)
    faceapi.draw.drawFaceLandmarks(canvas, resizedDetections)
    faceapi.draw.drawFaceExpressions(canvas, resizedDetections)
  }, 100)
})
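
For context, this script assumes a host page that provides the video#video element, loads face-api.js before script.js (so the global faceapi exists), and serves the pretrained models from the folder referenced by modelPath. A minimal sketch of such a page is shown below; the file name face-api.min.js and the video dimensions are illustrative assumptions, not something this file verifies.

<!-- index.html: illustrative sketch only -->
<!DOCTYPE html>
<html>
  <head>
    <!-- face-api.js must load before script.js so `faceapi` is defined -->
    <script defer src="face-api.min.js"></script>
    <script defer src="script.js"></script>
  </head>
  <body>
    <!-- script.js queries video#video and reads its width/height for the overlay canvas -->
    <video id="video" width="720" height="560" autoplay muted></video>
  </body>
</html>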