cleanup of CV examples

Parent: ff450c985f
Commit: e3067d17bc

@@ -114,25 +114,7 @@ function createCVMat2(rotation, buffer, pixelFormat) {
             cv.cvtColor(img_rgba, img_gray, cv.COLOR_RGBA2GRAY, 0);
             break;
     }
-
-    // switch(rotation) {
-    //     case -90:
-    //         cv.rotate(img_gray, rotatedImage, cv.ROTATE_90_CLOCKWISE);
-    //         return rotatedImage;
-    //         break;
-    //     case 90:
-    //         cv.rotate(img_gray, rotatedImage, cv.ROTATE_90_COUNTERCLOCKWISE);
-    //         return rotatedImage;
-    //         break;
-    //     case 180:
-    //         cv.rotate(img_gray, rotatedImage, cv.ROTATE_180);
-    //         return rotatedImage;
-    //         break;
-    //     default:
-    //         return img_gray;
-    // }
     return img_gray;
 }

 ///////////

@@ -205,22 +187,14 @@ self.addEventListener('message', function(event){
     var width = buffer.size.width;
     var height = buffer.size.height;

-    // // let's pick a size such that the video is below 256 in size in both dimensions
-    // // since face detection is really expensive on large images
-    // while (width > 256 || height > 256) {
-    //     width = width / 2
-    //     height = height / 2
-    //     scale = scale / 2;
-    // }
-
-    // set up a camera coefficient matrix
+    // set up the camera coefficient matrices, for distortion and projection
     if (camDistortion == null) {
         // we assume we're getting no distortion for now, since our cameras appear rectilinear
         // we probably want to add distortion coefficients to the camera definition since I assume some
         // cameras will have it
         camDistortion = cv.matFromArray(5, 1, cv.CV_64F, [0, 0, 0, 0, 0]);
     }
-    videoFrame.camera.cameraIntrinsics[8] = 1.0
+    //videoFrame.camera.cameraIntrinsics[8] = 1.0  // was broken in the app, should be fixed now
     camIntrinsics.create(3, 3, cv.CV_64F)
     camIntrinsics.data64F.set(videoFrame.camera.cameraIntrinsics);

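Note: `cameraIntrinsics` here is the 3x3 camera projection matrix flattened to 9 doubles, and element [8] is its constant lower-right 1, which the deleted line used to patch by hand. A minimal sketch of the same matrix built explicitly, with hypothetical stand-in values for the focal lengths and principal point (none of these numbers come from this app):

    // fx, fy = focal lengths in pixels; cx, cy = principal point (hypothetical values)
    var fx = 1000, fy = 1000, cx = width / 2, cy = height / 2;
    var camIntrinsics = cv.matFromArray(3, 3, cv.CV_64F, [
        fx,  0, cx,
         0, fy, cy,
         0,  0,  1    // element [8], the one the removed line forced to 1.0
    ]);
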
@@ -239,12 +213,16 @@ self.addEventListener('message', function(event){
         }
     }

+    // detect the aruco markers
     cv.detectMarkers(image, dictionary, markerCorners, markerIds, parameter);
     postMessage({type: "cvAfterDetect", time: ( performance || Date ).now()});

+    // did we find any?
     if (markerIds.rows > 0) {
-        // need to adjust the corners to be around 0,0 at the center
+        // Need to adjust the corners to be around 0,0 at the center.
+        // Weirdly, the aruco detector returns the values in image coordinates,
+        // even though pose estimation expects them relative to the center of the
+        // screen.
         for(let i=0; i < markerIds.rows; ++i) {
             let cornervec = markerCorners.get(i).data32F
             cornervec[0] -= width/2;

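Note: each detected marker's four corners are packed into `data32F` as [x0,y0, x1,y1, x2,y2, x3,y3], so the re-centering that begins with `cornervec[0] -= width/2` presumably continues over all eight entries. A sketch of the full adjustment under that assumption:

    // shift every corner so (0,0) is the center of the image
    for (let j = 0; j < 8; j += 2) {
        cornervec[j]     -= width / 2;    // x
        cornervec[j + 1] -= height / 2;   // y
    }
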
@@ -265,6 +243,7 @@ self.addEventListener('message', function(event){
             // cornervec[7] *= -1
         }

+        // estimate the poses of the found markers
         cv.estimatePoseSingleMarkers(markerCorners, 0.053, camIntrinsics, camDistortion, rvecs, tvecs);

         for(let i=0; i < markerIds.rows; ++i) {

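Note: in `cv.estimatePoseSingleMarkers(markerCorners, 0.053, ...)` the 0.053 is the physical side length of the printed marker in meters; the call fills `rvecs` and `tvecs` with one Rodrigues rotation vector and one translation vector per marker. A sketch of expanding the i-th rotation vector into the 3x3 matrix the next hunk reads from `rotMat`:

    // Rodrigues vector -> 3x3 rotation matrix (opencv.js Mats must be freed by hand)
    let rvec = cv.matFromArray(3, 1, cv.CV_64F,
        [rvecs.doublePtr(0, i)[0], rvecs.doublePtr(0, i)[1], rvecs.doublePtr(0, i)[2]]);
    cv.Rodrigues(rvec, rotMat);
    rvec.delete();
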
@@ -281,20 +260,18 @@ self.addEventListener('message', function(event){
                 tm = Array.from(rotMat.data64F)
             }

-            //let tvec = [tvecs.doublePtr(0, i)[0], tvecs.doublePtr(0, i)[1], tvecs.doublePtr(0, i)[2]];
-            //var returnMat = [tm[0], tm[1], tm[2], 0, tm[3], tm[4], tm[5], 0, tm[6], tm[7], tm[8], 0, -tvec[0], -tvec[1], -tvec[2], 1]
-            //var returnMat = [tm[0], tm[1], tm[2], 0, tm[3], tm[4], tm[5], 0, tm[6], tm[7], tm[8], 0, tvecs.doublePtr(0, i)[0], tvecs.doublePtr(0, i)[1], -tvecs.doublePtr(0, i)[2], 1]
-            // var returnMat = [tm[0], tm[3], tm[6], 0, tm[1], tm[4], tm[7], 0, tm[2], tm[5], tm[8], 0, tvecs.doublePtr(0, i)[0], tvecs.doublePtr(0, i)[1], -tvecs.doublePtr(0, i)[2], 1]
-            //var returnMat = [tm[0], tm[3], tm[6], 0, tm[1], tm[4], tm[7], 0, tm[2], tm[5], tm[8], 0, 0,0,0, 1]
+            // construct a pose matrix from rotation and position
             var returnMat = [tm[0], tm[1], tm[2], 0, tm[3], tm[4], tm[5], 0, tm[6], tm[7], tm[8], 0,0,0,0, 1]
             mat4.translate(returnMat, returnMat, [tvecs.doublePtr(0, i)[0], tvecs.doublePtr(0, i)[1], tvecs.doublePtr(0, i)[2]]);

+            // invert it so it's marker relative to camera, not the usual camera relative to marker
             mat4.invert(returnMat,returnMat)

             // account for camera rotation relative to screen, which happens in video mixed handhelds
             mat4.fromZRotation(antiRotation, rotation * Math.PI/180);
             mat4.multiply(returnMat, antiRotation, returnMat)

+            // save the marker info!
             markers.push({
                 id: id,
                 corners: [{x: cornervec[0], y: cornervec[1]}, {x: cornervec[2], y: cornervec[3]}, {x: cornervec[4], y: cornervec[5]}, {x: cornervec[6], y: cornervec[7]}],

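Note: `returnMat` embeds the 3x3 rotation `tm` into a column-major 4x4 (the layout gl-matrix expects) before the translate/invert/re-rotate steps. The same construction with the layout spelled out, `tx, ty, tz` standing in for the three `tvecs.doublePtr(0, i)` reads:

    var pose = [
        tm[0], tm[1], tm[2], 0,   // column 0 of the rotation
        tm[3], tm[4], tm[5], 0,   // column 1
        tm[6], tm[7], tm[8], 0,   // column 2
        0,     0,     0,     1    // column 3: translation, filled in next
    ];
    mat4.translate(pose, pose, [tx, ty, tz]);   // the pose as estimated
    mat4.invert(pose, pose);                    // flipped, per the comment in the hunk
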
@@ -303,6 +280,8 @@ self.addEventListener('message', function(event){
                 }
             }
         }

+    // reply, even if opencv isn't ready.
     endTime = ( performance || Date ).now()
     videoFrame.postReplyMessage({type: "cvFrame", markers: markers, time: endTime})


@@ -75,12 +75,13 @@
 <script>
     var cvStatusTxt = "";
     // set up for collecting different stats
     var beginTime = ( performance || Date ).now(), prevTime = beginTime, frames = 0;

+    // set up some stats, including a new pane for CV fps
     var stats = new Stats();
     stats.domElement.style.cssText = 'position:fixed;top:2%;right:2%;cursor:pointer;opacity:0.9;z-index:10000';
     var cvPanel = stats.addPanel( new Stats.Panel( 'CV fps', '#ff8', '#221' ) );
     stats.showPanel( 2 ); // 0: fps, 1: ms, 2: mb, 3+: custom

     // a method to update a new panel for displaying CV FPS
     var updateCVFPS = function () {
         frames ++;

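Note: `Stats.Panel` / `addPanel` is stats.js's custom-panel API, and a custom panel has to be fed by hand. A sketch of the update half of `updateCVFPS` (the 100 is an assumed scale maximum, not a value from this page):

    // once a second, push the measured CV frame rate into the custom panel
    var time = ( performance || Date ).now();
    if (time >= prevTime + 1000) {
        cvPanel.update( ( frames * 1000 ) / ( time - prevTime ), 100 );
        prevTime = time;
        frames = 0;
    }
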
@@ -92,6 +93,7 @@
         }
         beginTime = time;
     }
+    document.body.appendChild( stats.dom );

     // keep track of what we think the view size is so we can adjust the
     // overlay used to render the 2D detection results

@@ -122,7 +124,6 @@
     var cvImageDiv = document.getElementById("video_canvas");
     var cvImageCtx = cvImageDiv.getContext('2d');

-    document.body.appendChild( stats.dom );

     class ARAnchorExample extends XRExampleBase {
         constructor(domElement){

@@ -136,30 +137,34 @@
             this.lightEstimate = 0;
             this.el.appendChild(this.textBox)

+            // some temp variables
             this.rotation = -1;
-            this.rotatedImage = null;
-            this.face_cascade = null;
-            this.eye_cascade = null;

             this.triggerResize = true;

+            // try to notice all resizes of the screen
             window.addEventListener('resize', () => {
                 console.log("resize, trigger adjust canvas size")
                 this.triggerResize = true;
             })
-        }

-        newSession() {
+            // new worker
             this.worker = new Worker ("worker.js")

             this.worker.onmessage = (ev) => {
                 switch (ev.data.type) {
                     case "cvFrame":
+                        // finished with a frame!
                         var videoFrame = XRVideoFrame.createFromMessage(ev)

+                        // get the face rectangles
                         this.faceRects = ev.data.faceRects;

+                        // timing
                         cvEndTime = ev.data.time;
                         cvFaceTime = cvEndTime - cvAfterResizeTime;

+                        // see if the video frame sizes have changed, since
+                        // we'll need to adjust the 2D overlay
                         var rotation = videoFrame.camera.cameraOrientation;
                         var buffer = videoFrame.buffer(0)

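Note: the `cvFrame` branch implies the shape of the worker's reply. A sketch of the matching worker-side send; the `faceRects` array of `{x, y, width, height}` objects is inferred from how the rectangles are drawn in the next hunk:

    // worker side (sketch): post the results back along with the frame's buffers
    videoFrame.postReplyMessage({
        type: "cvFrame",
        faceRects: faceRects,
        time: ( performance || Date ).now()
    });
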
@@ -171,24 +176,32 @@
                             this.adjustRenderCanvasSize(rotation, width, height)
                         }

+                        // are we showing the image (for debugging) or just the rectangles?
                         if (showCVImage) {
                             this.showVideoFrame(videoFrame)
                         } else {
                             cvImageCtx.clearRect(0, 0, cvImageDiv.width, cvImageDiv.height);
                         }

+                        // show the rectangles
                         for (let i = 0; i < this.faceRects.length; i++) {
                             let rect = this.faceRects[i];
                             cvImageCtx.strokeRect(rect.x, rect.y, rect.width , rect.height);
                         }

+                        // update CV fps, and release the image
                         updateCVFPS();
                         videoFrame.release();

                         break;

                     case "cvStart":
-                        // request the next one when the old one finishes
+                        // request the next frame when the worker starts working on the current
+                        // one, to pipeline a bit
                         this.requestVideoFrame();

+                        // has there been a delay since the last frame was finished processing?
+                        // this shouldn't happen, but is a sign that the background tasks are
+                        // being throttled
                         cvStartTime = ev.data.time;
                         if (cvEndTime > 0) {
                             cvIdleTime = cvStartTime - cvEndTime;

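Note: requesting the next frame on `cvStart` rather than on `cvFrame` is what buys the pipelining: capture of frame N+1 overlaps processing of frame N. Reduced to a sketch (`drawResults` is a hypothetical stand-in for the drawing code above):

    this.worker.onmessage = (ev) => {
        if (ev.data.type === "cvStart") {
            this.requestVideoFrame();   // overlap: ask for N+1 while N is processed
        } else if (ev.data.type === "cvFrame") {
            drawResults(ev.data);       // hypothetical: rectangles, fps, release
        }
    };
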
@@ -206,11 +219,13 @@
                         break;

                     case "cvReady":
+                        // opencv is loaded and ready!
                         console.log('OpenCV.js is ready');
                         openCVready = true
                         break;

                     case "cvStatus":
+                        // opencv sends status messages sometimes
                         cvStatusTxt = ev.data.msg;
                         break;
                 }

@@ -219,24 +234,15 @@
             this.worker.addEventListener('error', (e) => {
                 console.log("worker error:" + e)
             })

-            this.setVideoWorker(this.worker);
-            // this.setVideoWorker(ev => {
-            //     var videoFrame = ev.detail
-            //     if (openCVready) {
-            //         try {
-            //             this.handleVideoFrame(ev)
-            //         } catch(e) {
-            //             console.error('CV worker error', e)
-            //         }
-            //         updateCVFPS();
-            //     }
-            //     // pass the buffers back or they will be garbage collected
-            //     videoFrame.release();
-            //     this.requestVideoFrame();
-            // })
         }

+        // new session, tell it about our worker
+        newSession() {
+            this.setVideoWorker(this.worker);
+        }
+
+        // when the window or video frame resizes, we need to resize and reposition
+        // the 2D overlay
         adjustRenderCanvasSize (rotation, width, height) {
             var cameraAspect;
             console.log("adjustRenderCanvasSize: " + rotation + " degrees, " + width + " by " + height)

@@ -325,6 +331,7 @@
                 this.textBox.innerHTML = this.messageText;
             }

+            // do another check on frame size, to make sure we don't miss one
             var viewport = frame.views[0].getViewport(this.session.baseLayer);
             if (viewport.width != viewWidth || viewport.height != viewHeight) {
                 viewHeight = viewport.height;

@@ -335,6 +342,7 @@
             }
         }

+        // show the video frame for debugging
         showVideoFrame(videoFrame) {
             var camera = videoFrame.camera
             var buffer = videoFrame.buffer(0)


@@ -3,7 +3,8 @@ console.log("loaded webxr-worker.js")

 var openCVready = false;

-// need to load up the files for tracking the face and eyes
+// need to load up the files for tracking the face and eyes. WASM module has hooks to pass in
+// files to be loaded by opencv
 var Module = {
     preRun: [function() {
         console.log("CV preRun")

@@ -76,6 +77,7 @@ console.log("loaded opencv.js:" + cv);

 */

+// faces and eyes
 var face_cascade;
 var eye_cascade;

@@ -213,6 +215,10 @@ function createCVMat2(rotation, buffer, pixelFormat) {
             break;
     }

+    // face tracker only works if image is upright.
+    // on mobile, the camera is fixed, even though the display rotates. So, we need
+    // to rotate the image so it's in the right orientation (upright, relative to how the
+    // user sees it)
     switch(rotation) {
         case -90:
             cv.rotate(img_gray, rotatedImage, cv.ROTATE_90_CLOCKWISE);


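Note: the cases that follow are the same mapping that was deleted, as commented-out dead code, from the first hunk of this commit; the complete rotation-to-upright table, for reference:

    switch(rotation) {
        case -90:
            cv.rotate(img_gray, rotatedImage, cv.ROTATE_90_CLOCKWISE);
            return rotatedImage;
        case 90:
            cv.rotate(img_gray, rotatedImage, cv.ROTATE_90_COUNTERCLOCKWISE);
            return rotatedImage;
        case 180:
            cv.rotate(img_gray, rotatedImage, cv.ROTATE_180);
            return rotatedImage;
        default:
            return img_gray;   // already upright
    }
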
@@ -63,19 +63,23 @@
     // need to figure out how to make loadScripts() work in BlobURLs
     </script>
     <script>
-    // RAINBOW Candy of a Certain Name colors
+    // We'll try to guess if the color matches one of the colors of a certain
+    // RAINBOW Candy
     var colors = [
         { cr: 250, cg: 25, cb: 25, name: "RED" },
         { cr: 25, cg: 250, cb: 25, name: "GREEN" },
         { cr: 250, cg: 250, cb: 50, name: "YELLOW" }
     ]

+    // do some time stamping, just to show the performance numbers
     var beginTime = ( performance || Date ).now(), prevTime = beginTime, frames = 0;

     var stats = new Stats();

+    // create a new pane for the stats panel to show the fps of the vision processing
     stats.domElement.style.cssText = 'position:fixed;top:2%;right:2%;cursor:pointer;opacity:0.9;z-index:10000';
     var cvPanel = stats.addPanel( new Stats.Panel( 'CV fps', '#ff8', '#221' ) );
     stats.showPanel( 2 ); // 0: fps, 1: ms, 2: mb, 3+: custom

     var updateCVFPS = function () {
         frames ++;
         var time = ( performance || Date ).now();

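Note: none of the hunks show the actual matching against `colors`; one hypothetical way to do it, by nearest squared RGB distance:

    // hypothetical helper, not part of this commit
    function nearestColor(cr, cg, cb) {
        var best = colors[0], bestD = Infinity;
        for (var i = 0; i < colors.length; i++) {
            var c = colors[i];
            var d = (c.cr - cr)*(c.cr - cr) + (c.cg - cg)*(c.cg - cg) + (c.cb - cb)*(c.cb - cb);
            if (d < bestD) { bestD = d; best = c; }
        }
        return best.name;
    }
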
@@ -86,34 +90,30 @@
         }
         beginTime = time;
     }

-    // flag to set true if you want to construct a texture from the UV image
-    var makeTexUV = false;
-
     document.body.appendChild( stats.dom );

+    // flag to set true if you want to construct a texture from the UV image and show it
+    var makeTexUV = false;

     class ARAnchorExample extends XRExampleBase {
         constructor(domElement){
             super(domElement, false, true, true)

             this.textBox = document.createElement('span')
             this.textBox.setAttribute('class', 'text-box')
             this.textBox.innerText = '0.0'
+            this.el.appendChild(this.textBox)

+            // to store the returned values
             this.intensity = 0;
             this.cr = 0;
             this.cg = 0;
             this.cb = 0;

+            // the light estimate from WebXR, if there is one
             this.lightEstimate = 0;
-            this.el.appendChild(this.textBox)
-        }
-
-        newSession() {
-            // var blob = new Blob([
-            //     document.querySelector('#worker1').textContent
-            // ], { type: "text/javascript" })
-            // this.worker = new Worker(window.URL.createObjectURL(blob));

+            // start a background worker
             this.worker = new Worker("worker.js")

             var self = this;

@@ -129,11 +129,12 @@
             this.worker.addEventListener('error', (e) => {
                 console.log("worker error:" + e)
             })
+        }

+        // called when there is a new session
+        newSession() {
+            // this can only be done inside the session
             this.setVideoWorker(this.worker);
-
-            // use this instead to not use worker.
-            //this.setVideoWorker(ev => { this.handleVideoFrame(ev) })
         }

         // Called during construction

@@ -160,7 +161,8 @@
             this.camera.add( plane );
         }

-        // Add a box at the scene origin
+        // Add a box at the scene origin, so that we can see if things are working when
+        // we look down
         let box = new THREE.Mesh(
             new THREE.BoxBufferGeometry(0.1, 0.1, 0.1),
             new THREE.MeshPhongMaterial({ color: '#DDFFDD' })

@@ -174,16 +176,10 @@
             this.scene.add(directionalLight)
         }

+        // called each frame
         updateScene(frame){
             this.lightEstimate = frame.lightEstimate || 0;
-            stats.update()
-            if (this.messageText != this.textBox.innerHTML) {
-                this.textBox.innerHTML = this.messageText;
-            }
-        }
-
-        handleVisionDone(videoFrame) {
             var txt = "ARKit Light Estimate: " + this.lightEstimate.toFixed(2) + "<br>CV Average Intensity: " + this.intensity.toFixed(2)
                 + "<br>Center RGB: " + this.cr.toFixed(0) + " / " + this.cg.toFixed(0) + " / " + this.cb.toFixed(0) + "<br><center>";

@@ -203,7 +199,20 @@

             this.messageText = txt;

-            // in the worker case, if we don't access the buffer in the worker, it won't be pulled back
+            if (this.messageText != this.textBox.innerHTML) {
+                this.textBox.innerHTML = this.messageText;
+            }
+            stats.update()
+        }
+
+        handleVisionDone(videoFrame) {
+            // ask for the next frame (before we construct that debugging frame, if we're doing that)
+            this.requestVideoFrame();
+
+            // update CV fps
+            updateCVFPS();
+
+            // check to make sure the 2nd plane exists; that will be UV
             if (makeTexUV && videoFrame.buffer(1).buffer) {
                 var buffer = videoFrame.buffer(1);
                 var pixels = buffer.buffer;

@@ -211,7 +220,8 @@
                 this.texSize = pixels.length /2 * 3
                 this.texBuff = new Uint8Array( this.texSize ); // convert each pixel from 2 to 3 bytes
             }

+            // convert it to a simple RGB image. Super slow, but ok for debugging
             var j = 0;
             for ( var i = 0; i < this.texSize; i ++ ) {
                 this.texBuff[i] = pixels[j++];

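Note: the CbCr plane is two bytes per pixel and the debug texture three, hence `texSize = pixels.length / 2 * 3`. Only the first line of the copy loop survives in the hunk; one plausible completion (a sketch, not necessarily the committed code):

    var j = 0;
    for (var i = 0; i < this.texSize; i += 3) {
        this.texBuff[i]     = pixels[j++];   // Cb
        this.texBuff[i + 1] = pixels[j++];   // Cr
        this.texBuff[i + 2] = 0;             // third byte just pads the texel
    }
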
@@ -222,62 +232,11 @@
                 }
                 this.uvTexture.image = { data: this.texBuff, width: buffer.size.width, height: buffer.size.height };
                 this.uvTexture.needsUpdate = true;
             }
-            updateCVFPS();
+            // must do this to clean up the internal buffer caches, let system know we're done
             videoFrame.release();
-            this.requestVideoFrame();
         }

-        //////
-        ////// NOT USED with worker above, but can switch to callback model using these
-        //////
-        averageIntensity(buffer) {
-            var w = buffer.size.width;
-            var h = buffer.size.height;
-            var pad = buffer.size.bytesPerRow - w;
-            var pixels = buffer.buffer;
-
-            var intensity = 0.0;
-            var p = 0;
-            for (var r = 0; r < h; r++) {
-                var v = 0;
-                for (var i = 0; i < w; i++) {
-                    if (p < pixels.length) {
-                        v += pixels[p++]
-                    } else {
-                        console.error("overflow pixel buffer")
-                    }
-                }
-                intensity += v / w;
-                p += pad;
-            }
-            this.intensity = (intensity / h) / 255.0;
-        }
-
-        colorAtCenter(buffer) {
-            var w = buffer.size.width;
-            var h = buffer.size.height;
-            var pixels = buffer.buffer;
-
-            var cx = Math.floor(w / 2) * buffer.size.bytesPerPixel
-            var cy = Math.floor(h / 2)
-            var p = cy * buffer.size.bytesPerRow + cx;
-            this.cb = pixels[p++];
-            this.cr = pixels[p];
-        }
-
-        handleVideoFrame(ev) {
-            var videoFrame = ev.detail
-            switch (videoFrame.pixelFormat) {
-                case XRVideoFrame.IMAGEFORMAT_YUV420P:
-                    this.averageIntensity(videoFrame.buffer(0))
-                    this.colorAtCenter(videoFrame.buffer(1))
-            }
-
-            this.handleVisionDone(videoFrame);
-        }
     }

     window.addEventListener('DOMContentLoaded', () => {


@@ -54,11 +54,13 @@ importScripts('../../dist/webxr-worker.js')
 */


+// some globals to hold the -- silly -- values we compute
 var intensity = 0.0;
 var cr = -1;
 var cg = -1;
 var cb = -1;

+// a silly simple function to compute something based on 'all the pixels' in an RGBA image
 averageIntensityRGBA = function (buffer) {
     var w = buffer.size.width;
     var h = buffer.size.height;

@@ -79,6 +81,7 @@ averageIntensityRGBA = function (buffer) {
     intensity = (intensity / h) / 255.0;
 }

+// a silly simple function to compute something based on 'all the pixels' in a grayscale image
 averageIntensityLum = function (buffer) {
     var w = buffer.size.width;
     var h = buffer.size.height;

@@ -90,11 +93,7 @@ averageIntensityLum = function (buffer) {
     for (var r = 0; r < h; r++) {
         var v = 0;
         for (var i = 0; i < w; i++) {
-            if (p < pixels.length) {
-                v += pixels[p++]
-            } else {
-                console.error("overflow pixel buffer")
-            }
+            v += pixels[p++]
         }
         intensity += v / w;
         p += pad;

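Note: with the redundant bounds check gone, the whole function reads as below (assembled from this and the surrounding hunks); `pad` accounts for `bytesPerRow` being wider than the visible row:

    averageIntensityLum = function (buffer) {
        var w = buffer.size.width;
        var h = buffer.size.height;
        var pad = buffer.size.bytesPerRow - w;   // row padding in bytes
        var pixels = buffer.buffer;

        intensity = 0.0;
        var p = 0;
        for (var r = 0; r < h; r++) {
            var v = 0;
            for (var i = 0; i < w; i++) {
                v += pixels[p++]
            }
            intensity += v / w;
            p += pad;                            // skip to the start of the next row
        }
        intensity = (intensity / h) / 255.0;
    }
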
@@ -102,6 +101,7 @@ averageIntensityLum = function (buffer) {
     intensity = (intensity / h) / 255.0;
 }

+// sample a single color, just for variety
 colorAtCenterRGB = function(buffer0) {
     var w = buffer0.size.width;
     var h = buffer0.size.height;

@@ -115,6 +115,8 @@ colorAtCenterRGB = function(buffer0) {
     cb = pixels[p];
 }

+// Make an attempt to convert a UV color to RGB
+//
 // LUV == LuCbCr
 //
 // Y = 0.299R + 0.587G + 0.114B

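Note: the comment block gestures at the standard YCbCr transform (confusingly, `cr`/`cg`/`cb` in this file name the output R/G/B). The usual BT.601-style inverse, with `u = Cb - 128` and `v = Cr - 128`, is:

    // reference sketch of YCbCr -> RGB
    var u = CbByte - 128, v = CrByte - 128;
    cr = y + 1.402 * v;                     // red
    cg = y - 0.344136 * u - 0.714136 * v;   // green
    cb = y + 1.772 * u;                     // blue (the commented-out line above)
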
@@ -169,28 +171,39 @@ colorAtCenterLUV = function(buffer0, buffer1) {
     // cb=y+1.772*u;
 }

+// The listener.
+//
+// We can ignore the message type field, since we are only receiving one message from
+// the main thread, a new video frame
 self.addEventListener('message', function(event){
     try {
+        // a utility function to receive the message. Takes care of managing the
+        // internal ArrayBuffers that are being passed around
         var videoFrame = XRVideoFrame.createFromMessage(event);

+        // The video frames will come in different formats on different platforms.
+        // The call to videoFrame.buffer(i) retrieves the i-th plane for the frame
+        // (in the case of the WebXR Viewer, it also converts the base64 encoded message
+        // into an ArrayBuffer, which we don't do until the plane is used)
         switch (videoFrame.pixelFormat) {
+            // the WebXR Viewer uses iOS native YCbCr, which is two buffers, one for Y and one for CbCr
             case XRVideoFrame.IMAGEFORMAT_YUV420P:
                 this.averageIntensityLum(videoFrame.buffer(0))
                 this.colorAtCenterLUV(videoFrame.buffer(0),videoFrame.buffer(1))
                 break;

+            // WebRTC uses web-standard RGBA
             case XRVideoFrame.IMAGEFORMAT_RGBA32:
                 this.averageIntensityRGBA(videoFrame.buffer(0))
                 this.colorAtCenterRGB(videoFrame.buffer(0))
                 break;
         }

+        // utility function to send the video frame and additional parameters back.
+        // Want to use this so we pass ArrayBuffers back and forth to avoid having to
+        // reallocate them every frame.
         videoFrame.postReplyMessage({intensity: intensity, cr: cr, cg: cg, cb: cb})
         videoFrame.release();
     } catch(e) {
         console.error('page error', e)
     }
 });

-// setInterval( function(){
-//     console.log("Help me!")
-//     self.postMessage (Math.random() * 255.0);
-//}, 500);

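Note: the listener above is the whole worker contract; stripped to the three required XRVideoFrame calls it is:

    self.addEventListener('message', function (event) {
        var videoFrame = XRVideoFrame.createFromMessage(event);   // adopt the frame's buffers
        // ... inspect videoFrame.pixelFormat and videoFrame.buffer(i) here ...
        videoFrame.postReplyMessage({ /* computed results */ });  // hand the buffers back
        videoFrame.release();
    });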