TF PoseNet + THREEjs Application

Author: T_K_233 | Published 2018-12-29 11:03

<!DOCTYPE html>
<html>
<head>

<meta name="viewport" content="width=device-width, initial-scale=1">
<script src="js/jquery.min.js"></script>
<script src="js/three.min.js"></script>
<script src="js/mmdparser.min.js"></script>
<script src="js/MMDLoader.js"></script>
<script src="js/TGALoader.js"></script>

<script src="js/tfjs@0.13.3.js"></script>

<script src="js/posenet@0.2.3.js"></script>
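<!-- tfjs and posenet are pinned local copies (tfjs@0.13.3, posenet@0.2.3).
     The estimateSinglePose() call below uses the 0.2.x API, whose signature
     changed in later releases. jQuery is loaded but not actually used here. -->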
<style>
body {
margin: 0;
}
/* size the three.js renderer canvas, which has no id;
   [id!=canvas] is jQuery-only syntax, not valid CSS */
canvas:not(#canvas) {
width: 100%; height: 100%;
}

canvas, #video {
opacity: 0.5;
position: absolute;
right: 0;
}

canvas {
bottom: 0;
}
</style>
</head>

<body>
<video id="video" playsinline style=" -moz-transform: scaleX(-1);
-o-transform: scaleX(-1);
-webkit-transform: scaleX(-1);
transform: scaleX(-1);"></video>
<canvas id="canvas"></canvas>
<!-- status message target used by run()'s error handler -->
<div id="info" style="display: none;"></div>
<script>
/* demo.util */

const color = 'aqua';
const boundingBoxColor = 'red';
const lineWidth = 2;

var toTuple = function({y, x}) {
return [y, x];
}

var drawPoint = function(ctx, y, x, r, color) {
ctx.beginPath();
ctx.arc(x, y, r, 0, 2 * Math.PI);
ctx.fillStyle = color;
ctx.fill();
}

/**
 * Draws a line on a canvas, i.e. a joint
 */
var drawSegment = function([ay, ax], [by, bx], color, scale, ctx) {
  ctx.beginPath();
  ctx.moveTo(ax * scale, ay * scale);
  ctx.lineTo(bx * scale, by * scale);
  ctx.lineWidth = lineWidth;
  ctx.strokeStyle = color;
  ctx.stroke();
}

/**
 * Draws a pose skeleton by looking up all adjacent keypoints/joints
 */
var drawSkeleton = function(keypoints, minConfidence, ctx, scale = 1) {
  const adjacentKeyPoints = posenet.getAdjacentKeyPoints(keypoints, minConfidence);
  adjacentKeyPoints.forEach((keypoints) => {
    drawSegment(toTuple(keypoints[0].position),
        toTuple(keypoints[1].position), color,
        scale, ctx);
  });
}

/**
 * Draw pose keypoints onto a canvas
 */
var drawKeypoints = function(keypoints, minConfidence, ctx, scale = 1) {
  for (let i = 0; i < keypoints.length; i++) {
    const keypoint = keypoints[i];
    if (keypoint.score < minConfidence) {
      continue;
    }
    var {y, x} = keypoint.position;
    drawPoint(ctx, y * scale, x * scale, 3, color);
  }
}

/**
 * Draw the bounding box of a pose. For example, for a whole person standing
 * in an image, the bounding box will begin at the nose and extend to one of
 * the ankles
 */
var drawBoundingBox = function(keypoints, ctx) {
  const boundingBox = posenet.getBoundingBox(keypoints);
  // Start a fresh path so boxes from previous frames do not accumulate.
  ctx.beginPath();
  ctx.rect(
      boundingBox.minX, boundingBox.minY, boundingBox.maxX - boundingBox.minX,
      boundingBox.maxY - boundingBox.minY);
  ctx.strokeStyle = boundingBoxColor;
  ctx.stroke();
}

</script>
<script>
/**

0 nose
1 leftEye
2 rightEye
3 leftEar
4 rightEar
5 leftShoulder
6 rightShoulder
7 leftElbow
8 rightElbow
9 leftWrist
10 rightWrist
11 leftHip
12 rightHip
13 leftKnee
14 rightKnee
15 leftAnkle
16 rightAnkle

*/
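// animate() below indexes pose.keypoints with these numbers directly
// (e.g. 0 = nose, 9/10 = wrists), so the table above doubles as its legend.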
const videoWidth = 400;
const videoHeight = 300;
var isAndroid = function() {
return /Android/i.test(navigator.userAgent);
}

var isiOS = function() {
return /iPhone|iPad|iPod/i.test(navigator.userAgent);
}

var isMobile = function() {
return isAndroid() || isiOS();
}
var setupCamera = async function() {
if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
throw new Error(
'Browser API navigator.mediaDevices.getUserMedia not available');
}

const video = document.getElementById('video');
video.width = videoWidth;
video.height = videoHeight;

const mobile = isMobile();
const stream = await navigator.mediaDevices.getUserMedia({
  'audio': false,
  'video': {
    facingMode: 'user',
    width: mobile ? undefined : videoWidth,
    height: mobile ? undefined : videoHeight,
  },
});
video.srcObject = stream;
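// Resolve only after metadata arrives, so the video's dimensions are known
// before frames are fed to PoseNet.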

return new Promise((resolve) => {
  video.onloadedmetadata = () => {
    resolve(video);
  };
});

}

async function loadVideo() {
const video = await setupCamera();
video.play();

return video;

}

function detectPoseInRealTime(video, net) {
const canvas = document.getElementById('canvas');
canvas.width = videoWidth;
canvas.height = videoHeight;

const ctx = canvas.getContext('2d');
// since images are being fed from a webcam
const flipHorizontal = true;

async function poseDetectionFrame() {
  // Scale an image down to a certain factor. Too large of an image will slow
  // down the GPU
  const imageScaleFactor = 0.5;
  const outputStride = 16;
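  // An outputStride of 16 trades accuracy for speed; 8 is more accurate but slower.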

  let poses = [];
  let minPoseConfidence;
  let minPartConfidence;
  const pose = await net.estimateSinglePose(
      video, imageScaleFactor, flipHorizontal, outputStride);
  poses.push(pose);

  minPoseConfidence = 0.1;
  minPartConfidence = 0.5;
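  // Poses scoring below minPoseConfidence are skipped entirely; individual
  // keypoints below minPartConfidence are not drawn.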
     

  ctx.clearRect(0, 0, videoWidth, videoHeight);

  ctx.save();
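  // Mirror the frame horizontally so the drawn video matches the flipped keypoints.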
  ctx.scale(-1, 1);
  ctx.translate(-videoWidth, 0);
  ctx.drawImage(video, 0, 0, videoWidth, videoHeight);
  ctx.restore();

  // For each pose (i.e. person) detected in an image, loop through the poses
  // and draw the resulting skeleton and keypoints if over certain confidence
  // scores
  animate(poses[0]);
  
  // Draw flags, declared as constants rather than implicit globals.
  const showPoints = true;
  const showSkeleton = true;
  const showBoundingBox = true;
  poses.forEach(({score, keypoints}) => {
    if (score >= minPoseConfidence) {
      if (showPoints) {
        drawKeypoints(keypoints, minPartConfidence, ctx);
      }
      if (showSkeleton) {
        drawSkeleton(keypoints, minPartConfidence, ctx);
      }
      if (showBoundingBox) {
        drawBoundingBox(keypoints, ctx);
      }
    }
  });

  requestAnimationFrame(poseDetectionFrame);
}

poseDetectionFrame();

}

async function run() {
// Load the PoseNet model weights with multiplier 0.75 (a smaller, faster
// MobileNet than multiplier 1.01, at some cost in accuracy)
const net = await posenet.load(0.75);

let video;

try {
  video = await loadVideo();
} catch (e) {
  let info = document.getElementById('info');
  info.textContent = 'this browser does not support video capture, ' +
      'or this device does not have a camera';
  info.style.display = 'block';
  throw e;
}
detectPoseInRealTime(video, net);

}

navigator.getUserMedia = navigator.getUserMedia ||
navigator.webkitGetUserMedia || navigator.mozGetUserMedia;

var animate = function(pose) {
// The MMD model loads asynchronously; skip bone updates until it is ready.
if (!mesh || !mesh.skeleton) {
  renderer.render(scene, camera);
  return;
}

var vert_scale = 8;

// Keypoint 0 is the nose; dividing by 60 maps video pixels into scene units.
var x = pose.keypoints[0].position.x / 60;
var y = pose.keypoints[0].position.y / 60;

// Head roll: slope angle of the line between the eyes (keypoints 1 and 2).
var rot = Math.atan((pose.keypoints[2].position.y - pose.keypoints[1].position.y)
                / (pose.keypoints[2].position.x - pose.keypoints[1].position.x));

head.position.x = x - 3;
head.position.y = vert_scale - y;
head.rotation.z = -rot;

// Note the swap: keypoint 9 (leftWrist) drives the r_hand marker and
// keypoint 10 (rightWrist) drives l_hand, which matches the mirrored video.
x = pose.keypoints[9].position.x / 60;
y = pose.keypoints[9].position.y / 60;
r_hand.position.x = x - 3;
r_hand.position.y = vert_scale - y;

x = pose.keypoints[10].position.x / 60;
y = pose.keypoints[10].position.y / 60;
l_hand.position.x = x - 3;
l_hand.position.y = vert_scale - y;

// Approximate the arm swing angle from the horizontal head-to-hand distance.
var diff = head.position.x - l_hand.position.x;
var theta = Math.asin((diff / 2) / 2);

// Bone indices are specific to the TDA sailor-suit Miku PMX model.
var mesh_center = mesh.skeleton.bones[12];
var left_arm = mesh.skeleton.bones[34];
var left_waist = mesh.skeleton.bones[39];
var right_arm = mesh.skeleton.bones[49]; // looked up but not driven yet

mesh_center.rotation.z = rot;
left_arm.rotation.z = theta - 0.785; // offset ~45 deg (0.785 rad) from rest pose
left_waist.rotation.z = Math.PI - 2 * theta;

renderer.render(scene, camera);

}

// kick off the demo
run();

</script>
<script>
var mesh;

var scene = new THREE.Scene();
var camera = new THREE.PerspectiveCamera(75, window.innerWidth/window.innerHeight, 0.1, 1000 );

var renderer = new THREE.WebGLRenderer();
renderer.setSize( window.innerWidth, window.innerHeight );
document.body.appendChild( renderer.domElement );
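// The renderer's canvas has no id, so the canvas:not(#canvas) rule in the
// stylesheet stretches it to fill the window; #canvas stays the 2D overlay.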
// AmbientLight takes (color, intensity) positional arguments, not an options object
var light = new THREE.AmbientLight( 0x404040, 8 ); // soft white light
scene.add( light );

var loader = new THREE.MMDLoader();
// Load a MMD model
loader.load(
// path to PMD/PMX file
'sailor/TDA_sailorsuit_Miku.pmx',
// called when the resource is loaded
function ( m_mesh ) {
mesh = m_mesh;
mesh.scale.set(0.4, 0.4, 0.4);
mesh.position.z = -2;
mesh.rotation.y = Math.PI;
scene.add( mesh );
},
// called when loading is in progress
function ( xhr ) {
console.log( ( xhr.loaded / xhr.total * 100 ) + '% loaded' );
},
// called when loading has errors
function ( error ) {
console.error( 'An error happened', error );
}
);
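// The three plain cubes below are debug markers: they track the nose (cyan)
// and the two wrists (green/blue) directly, independent of the MMD skeleton.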

var head = new THREE.Mesh(new THREE.BoxGeometry(0.8, 0.8, 0.8), new THREE.MeshBasicMaterial({color: 0x00ffff}));
scene.add(head);
var r_hand = new THREE.Mesh(new THREE.BoxGeometry(0.4, 0.4, 0.4), new THREE.MeshBasicMaterial({color: 0x00ff66}));
scene.add(r_hand);
var l_hand = new THREE.Mesh(new THREE.BoxGeometry(0.4, 0.4, 0.4), new THREE.MeshBasicMaterial({color: 0x0066ff}));
scene.add(l_hand);

camera.position.z = 6;
camera.position.y = 4;
/*
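// The standalone render loop below is intentionally disabled: rendering is
// driven from poseDetectionFrame() via animate(pose) instead.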
var animate = function () {
requestAnimationFrame( animate );

renderer.render( scene, camera );

};

animate();
*/
</script>
</body>
</html>

Source: https://www.haomeiwen.com/subject/tajulqtx.html