First, the HTML file needs a few changes. Adjust the dimensions of the video tag so there is enough room to display the drawn results:
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8"/>
    <title>face-api.js demo</title>
    <script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
    <script src="face-api.js"></script>
</head>
<body>
    <h1>Emotion, age and gender detection with face-api.js</h1>
    <video autoplay muted id="video" width="400" height="400" style="margin: auto;"></video>
    <div id="prediction">Loading...</div>
    <script defer src="index.js"></script>
</body>
</html>
Next, load one more model in index.js (as the full listing below shows, this call belongs in the same Promise.all that loads the other models):
faceapi.nets.ageGenderNet.loadFromUri('/models')
Then add age and gender to the detection chain:
const detections = await faceapi
    .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
    .withFaceLandmarks()
    .withFaceExpressions()
    .withAgeAndGender();
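For reference, each element of detections now carries the fields contributed by the chained tasks. A minimal sketch of inspecting one result (the commented values are illustrative, not output from this walkthrough):
// Inspect the first detected face, if any
const [first] = detections;
if (first) {
    console.log(first.age);               // estimated age in years, e.g. 27.3
    console.log(first.gender);            // 'male' or 'female'
    console.log(first.genderProbability); // confidence in [0, 1]
    console.log(first.expressions);       // per-expression scores, e.g. { happy: 0.98, ... }
}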
face-api.js also ships with drawing helpers. Let's draw the results onto a canvas (the canvas and displaySize used here are created in the full listing below):
const resizedDetections = faceapi.resizeResults(detections, displaySize);
faceapi.draw.drawDetections(canvas, resizedDetections);
faceapi.draw.drawFaceLandmarks(canvas, resizedDetections);
faceapi.draw.drawFaceExpressions(canvas, resizedDetections);
Now the age and gender predictions can be read from each result and drawn as a text label:
resizedDetections.forEach(result => {
    const { age, gender, genderProbability } = result;
    new faceapi.draw.DrawTextField([
        `${faceapi.round(age, 0)} years`,
        `${gender} (${faceapi.round(genderProbability)})`
    ], result.detection.box.bottomRight).draw(canvas);
});
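The prediction div in the HTML is never updated by the code above. As a hedged sketch, a small hypothetical helper (not part of face-api.js) can pick the highest-scoring emotion from a result and display it there:
// Hypothetical helper: return the highest-scoring expression, e.g. 'happy'
function dominantExpression(result) {
    return Object.entries(result.expressions)
        .reduce((best, cur) => (cur[1] > best[1] ? cur : best))[0];
}
// Possible use inside the forEach above:
// document.getElementById('prediction').innerText = dominantExpression(result);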
Putting everything together, the complete index.js looks like this:
const video = document.getElementById('video');
Promise.all([
    faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
    faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
    faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
    faceapi.nets.faceExpressionNet.loadFromUri('/models'),
    faceapi.nets.ageGenderNet.loadFromUri('/models')
]).then(startVideo);
function startVideo() {
    // adapter.js normalizes getUserMedia across browsers, so the
    // promise-based navigator.mediaDevices API can be used directly
    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
        navigator.mediaDevices.getUserMedia({ video: true })
            .then(stream => {
                video.srcObject = stream;
                video.onloadedmetadata = () => video.play();
            })
            .catch(err => console.log(err.name));
    } else {
        document.body.innerText = "getUserMedia not supported";
        console.log("getUserMedia not supported");
    }
}
video.addEventListener('play', () => {
    // Overlay a canvas on the video and keep their dimensions in sync
    const canvas = faceapi.createCanvasFromMedia(video);
    document.body.append(canvas);
    const displaySize = { width: video.width, height: video.height };
    faceapi.matchDimensions(canvas, displaySize);
    // Run the full detection pipeline roughly ten times per second
    setInterval(async () => {
        const predictions = await faceapi
            .detectAllFaces(video, new faceapi.TinyFaceDetectorOptions())
            .withFaceLandmarks()
            .withFaceExpressions()
            .withAgeAndGender();
        const resizedDetections = faceapi.resizeResults(predictions, displaySize);
        // Clear the previous frame before redrawing
        canvas.getContext('2d').clearRect(0, 0, canvas.width, canvas.height);
        faceapi.draw.drawDetections(canvas, resizedDetections);
        faceapi.draw.drawFaceLandmarks(canvas, resizedDetections);
        faceapi.draw.drawFaceExpressions(canvas, resizedDetections);
        // Label each face with its estimated age and gender
        resizedDetections.forEach(result => {
            const { age, gender, genderProbability } = result;
            new faceapi.draw.DrawTextField([
                `${faceapi.round(age, 0)} years`,
                `${gender} (${faceapi.round(genderProbability)})`
            ], result.detection.box.bottomRight).draw(canvas);
        });
    }, 100);
});
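One optional refinement, offered as an assumption rather than part of the original walkthrough: keep the id returned by setInterval (i.e. write detectionTimer = setInterval(...) in the play handler) so the detection loop stops while the video is paused:
// Hypothetical cleanup: assumes the play handler assigns its setInterval id
// to detectionTimer instead of discarding it
let detectionTimer = null;
video.addEventListener('pause', () => clearInterval(detectionTimer));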