Webcam face detection in the browser
Important: Please make sure to allow access to your webcam for this page to work. No video data will be recorded or uploaded.
Note: If you are on a mobile phone, turn it sideways into landscape orientation for better results. This demo is optimized for desktop browsers.
This demo is pretty amazing. With just a few lines of code, you can enable your webcam and start doing face detection. The program detects your face in the video stream and draws a bounding box and facial landmarks on the screen. This is a great demonstration of how easy it is to get started with face detection in the browser.
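Stripped of all the drawing code, the detection itself boils down to two calls: load the BlazeFace model once, then pass the video element to estimateFaces. Here is a minimal sketch, assuming the tfjs and blazeface script tags from the full listing below are already on the page and the #video element is already playing the webcam stream:

// Minimal sketch of the core detection calls (see the full listing in the next section).
// Assumes the tfjs/blazeface scripts are loaded and #video is showing the webcam.
const video = document.getElementById("video");

blazeface.load().then(async (model) => {
  // returnTensors = false gives plain JavaScript arrays instead of tensors
  const predictions = await model.estimateFaces(video, false);
  predictions.forEach((pred) => {
    console.log("Face from", pred.topLeft, "to", pred.bottomRight);
  });
});

The full listing in the next section adds the camera setup, the canvas drawing, and a timer so detection runs continuously.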
The source code
Here’s all the code it took to make this happen (credit):
<div style="text-align:center">
  <video id="video" autoplay style="display:none"></video>
  <canvas id="canvas" width="300px" height="200px" style="margin:0 auto"></canvas>
</div>

<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/blazeface"></script>

<script>
  let video = document.getElementById("video");
  let model;

  // declare a canvas variable and get its context
  let canvas = document.getElementById("canvas");
  let ctx = canvas.getContext("2d");

  const setupCamera = () => {
    navigator.mediaDevices
      .getUserMedia({
        video: { width: 300, height: 200 },
        audio: false,
      })
      .then((stream) => {
        video.srcObject = stream;
      });
  };

  let shown = false;

  const detectFaces = async () => {
    const prediction = await model.estimateFaces(video, false);

    if (shown == false) {
      // log the prediction once to the browser console
      console.log(prediction);
      shown = true;
    }

    // draw the video first
    ctx.drawImage(video, 0, 0, 300, 200);

    prediction.forEach((pred) => {
      // draw the rectangle enclosing the face
      ctx.beginPath();
      ctx.lineWidth = "5";
      ctx.strokeStyle = "white";
      // the last two arguments are width and height
      // since blazeface returned only the coordinates,
      // we can find the width and height by subtracting them.
      ctx.rect(
        pred.topLeft[0],
        pred.topLeft[1],
        pred.bottomRight[0] - pred.topLeft[0],
        pred.bottomRight[1] - pred.topLeft[1]
      );
      ctx.stroke();

      // drawing small rectangles for the face landmarks
      ctx.fillStyle = "white";
      pred.landmarks.forEach((landmark) => {
        ctx.fillRect(landmark[0], landmark[1], 5, 5);
      });
    });
  };

  setupCamera();

  video.addEventListener("loadeddata", async () => {
    model = await blazeface.load();
    // run detectFaces every 500 milliseconds, i.e. twice per second
    setInterval(detectFaces, 500);
  });
</script>
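For reference, when estimateFaces is called with returnTensors set to false (as above), each prediction it returns is a plain object. The numbers below are made up; only the shape matters:

// Hypothetical example of one prediction from model.estimateFaces(video, false).
// The values are invented for illustration.
const pred = {
  topLeft: [50, 40],        // [x, y] of the upper-left corner of the face box
  bottomRight: [210, 200],  // [x, y] of the lower-right corner
  landmarks: [              // six [x, y] points (eyes, ears, nose, mouth)
    [90, 100], [160, 100], [125, 140],
    [125, 170], [70, 110], [180, 110],
  ],
  probability: [0.99],      // detection confidence
};

This is why the drawing code above can compute the box width and height simply by subtracting topLeft from bottomRight.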
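One thing the listing does not handle is the situation mentioned in the note at the top: the user declining webcam access. getUserMedia returns a promise that rejects in that case, so a slightly friendlier setupCamera could report the failure instead of silently doing nothing. This is only a sketch, not part of the original demo:

// Sketch of setupCamera with basic error handling (not in the original demo).
// If the user blocks the camera, getUserMedia rejects (e.g. with NotAllowedError).
const setupCamera = () => {
  navigator.mediaDevices
    .getUserMedia({ video: { width: 300, height: 200 }, audio: false })
    .then((stream) => {
      video.srcObject = stream;
    })
    .catch((err) => {
      console.error("Webcam access failed:", err.name, err.message);
    });
};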