I am trying to run this HTML example https://codepen.io/mediapipe/details/KKgVaPJ from https://google.github.io/mediapipe/solutions/face_mesh#javascript-solution-api in a Create React App. I don't know how to replace this jsDelivr call; maybe it is what's breaking things:
const faceMesh = new FaceMesh({
  locateFile: (file) => {
    return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
  }
});
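In case it matters: if I did need to self-host those files, I assume I could copy them out of node_modules/@mediapipe/face_mesh into my public folder and point locateFile there. Something like the sketch below, though the /mediapipe path is just my guess and I haven't confirmed which files are needed:

// Hypothetical alternative (not verified): serve the model/WASM assets from
// my own public/ folder instead of the CDN. Assumes the contents of
// node_modules/@mediapipe/face_mesh have been copied to public/mediapipe/.
const faceMesh = new FaceMesh({
  locateFile: (file) => {
    // `file` is the asset name MediaPipe asks for at runtime
    return `${process.env.PUBLIC_URL}/mediapipe/${file}`;
  }
});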
So the question is: how do I get this example working inside a React component? This is my App.js code (sorry for the debugging scaffolding):
import './App.css';
import React, { useState, useEffect } from "react";
import Webcam from "react-webcam";
import { Camera, CameraOptions } from '@mediapipe/camera_utils'
import {
  FaceMesh,
  FACEMESH_TESSELATION,
  FACEMESH_RIGHT_EYE,
  FACEMESH_LEFT_EYE,
  FACEMESH_RIGHT_EYEBROW,
  FACEMESH_LEFT_EYEBROW,
  FACEMESH_FACE_OVAL,
  FACEMESH_LIPS
} from '@mediapipe/face_mesh'
import { drawConnectors } from '@mediapipe/drawing_utils'

const videoConstraints = {
  width: 1280,
  height: 720,
  facingMode: "user"
};

function App() {
  const webcamRef = React.useRef(null);
  const canvasReference = React.useRef(null);
  const [cameraReady, setCameraReady] = useState(false);
  let canvasCtx
  let camera

  const videoElement = document.getElementsByClassName('input_video')[0];
  // const canvasElement = document.getElementsByClassName('output_canvas')[0];
  const canvasElement = document.createElement('canvas');

  console.log('canvasElement', canvasElement)
  console.log('canvasCtx', canvasCtx)

  useEffect(() => {
    camera = new Camera(webcamRef.current, {
      onFrame: async () => {
        console.log('{send}', await faceMesh.send({ image: webcamRef.current.video }));
      },
      width: 1280,
      height: 720
    });
    canvasCtx = canvasReference.current.getContext('2d');
    camera.start();
    console.log('canvasReference', canvasReference)
  }, [cameraReady]);

  function onResults(results) {
    console.log('results')
    canvasCtx.save();
    canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
    canvasCtx.drawImage(
      results.image, 0, 0, canvasElement.width, canvasElement.height);
    if (results.multiFaceLandmarks) {
      for (const landmarks of results.multiFaceLandmarks) {
        drawConnectors(canvasCtx, landmarks, FACEMESH_TESSELATION, { color: '#C0C0C070', lineWidth: 1 });
        drawConnectors(canvasCtx, landmarks, FACEMESH_RIGHT_EYE, { color: '#FF3030' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_RIGHT_EYEBROW, { color: '#FF3030' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_LEFT_EYE, { color: '#30FF30' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_LEFT_EYEBROW, { color: '#30FF30' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_FACE_OVAL, { color: '#E0E0E0' });
        drawConnectors(canvasCtx, landmarks, FACEMESH_LIPS, { color: '#E0E0E0' });
      }
    }
    canvasCtx.restore();
  }

  const faceMesh = new FaceMesh({
    locateFile: (file) => {
      return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
    }
  });

  faceMesh.setOptions({
    selfieMode: true,
    maxNumFaces: 1,
    minDetectionConfidence: 0.5,
    minTrackingConfidence: 0.5
  });
  faceMesh.onResults(onResults);

  // const camera = new Camera(webcamRef.current, {
  //   onFrame: async () => {
  //     await faceMesh.send({ image: videoElement });
  //   },
  //   width: 1280,
  //   height: 720
  // });
  // camera.start();

  return (
    <div className="App">
      <Webcam
        audio={false}
        height={720}
        ref={webcamRef}
        screenshotFormat="image/jpeg"
        width={1280}
        videoConstraints={videoConstraints}
        onUserMedia={() => {
          console.log('webcamRef.current', webcamRef.current);
          // navigator.mediaDevices
          //   .getUserMedia({ video: true })
          //   .then(stream => webcamRef.current.srcObject = stream)
          //   .catch(console.log);
          setCameraReady(true)
        }}
      />
      <canvas
        ref={canvasReference}
        style={{
          position: "absolute",
          marginLeft: "auto",
          marginRight: "auto",
          left: 0,
          right: 0,
          textAlign: "center",
          zIndex: 9,
          width: 1280,
          height: 720,
        }}
      />
    </div>
  );
}

export default App;
You don't have to replace the jsDelivr URL; that piece of code is fine. I do think you need to reorder your code a little bit, though: create the FaceMesh instance inside the useEffect, and hand the Camera the underlying video element (webcamRef.current.video), not the Webcam component ref itself. An example:
useEffect(() => {
  const faceMesh = new FaceMesh({
    locateFile: (file) => {
      return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
    },
  });

  faceMesh.setOptions({
    maxNumFaces: 1,
    minDetectionConfidence: 0.5,
    minTrackingConfidence: 0.5,
  });
  faceMesh.onResults(onResults);

  if (
    typeof webcamRef.current !== "undefined" &&
    webcamRef.current !== null
  ) {
    camera = new Camera(webcamRef.current.video, {
      onFrame: async () => {
        await faceMesh.send({ image: webcamRef.current.video });
      },
      width: 1280,
      height: 720,
    });
    camera.start();
  }
}, []);
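Since all of this lives in a useEffect, it's also worth stopping the capture loop when the component unmounts. A minimal cleanup sketch, assuming the camera.stop() and faceMesh.close() methods from the @mediapipe typings behave as documented (I haven't verified every package version exposes both):

useEffect(() => {
  // ...create faceMesh and camera exactly as above...
  camera.start();

  // Cleanup on unmount: stop pulling frames and free the WASM solution.
  return () => {
    camera.stop();
    faceMesh.close();
  };
}, []);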
Finally, in the onResults callback I would suggest printing the results first, just to check that the MediaPipe implementation is working at all. And don't forget to set the canvas size to match the video before drawing anything:
function onResults(results) {
  console.log(results);
  canvasCtx = canvasReference.current.getContext('2d');
  canvasReference.current.width = webcamRef.current.video.videoWidth;
  canvasReference.current.height = webcamRef.current.video.videoHeight;
  // ...
}
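Putting that together with the drawing code you already have, onResults could look something like this (a sketch reusing drawConnectors and the FACEMESH_* constants from your imports, not a definitive implementation):

function onResults(results) {
  const canvas = canvasReference.current;
  const canvasCtx = canvas.getContext('2d');

  // Match the canvas resolution to the incoming video frame before drawing.
  canvas.width = webcamRef.current.video.videoWidth;
  canvas.height = webcamRef.current.video.videoHeight;

  canvasCtx.save();
  canvasCtx.clearRect(0, 0, canvas.width, canvas.height);
  canvasCtx.drawImage(results.image, 0, 0, canvas.width, canvas.height);

  if (results.multiFaceLandmarks) {
    for (const landmarks of results.multiFaceLandmarks) {
      drawConnectors(canvasCtx, landmarks, FACEMESH_TESSELATION,
        { color: '#C0C0C070', lineWidth: 1 });
      drawConnectors(canvasCtx, landmarks, FACEMESH_FACE_OVAL, { color: '#E0E0E0' });
      // ...remaining FACEMESH_* groups as in your original code...
    }
  }
  canvasCtx.restore();
}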
Good luck! :)