<!doctype html>
<!--
Copyright 2020 The Immersive Web Community Group

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-->
<html>

<head>
  <meta charset='utf-8'>
  <meta name='viewport' content='width=device-width, initial-scale=1, user-scalable=no'>
  <meta name='mobile-web-app-capable' content='yes'>
  <meta name='apple-mobile-web-app-capable' content='yes'>
  <link rel='icon' type='image/png' sizes='32x32' href='./favicon-32x32.png'>
  <link rel='icon' type='image/png' sizes='96x96' href='./favicon-96x96.png'>
  <link rel='stylesheet' href='./css/common.css'>

  <title>Immersive Session with hands</title>
</head>

<body>
  <header>
    <details open>
      <summary>Immersive VR Session with hands</summary>
      <p>
        This sample demonstrates a simple VR session that tracks and renders
        the user's hands. If your device supports hand tracking, each hand is
        drawn as a set of cubes, one cube per tracked joint.
        <a class="back" href="./">Back</a>
      </p>
    </details>
  </header>
  <main style='text-align: center;'>
    <p>Click 'Enter XR' to see content</p>
  </main>
  <script type="module">
    import { WebXRButton } from './js/util/webxr-button.js';
    import { Scene } from './js/render/scenes/scene.js';
    import { Node } from './js/render/core/node.js';
    import { Renderer, createWebGLContext } from './js/render/core/renderer.js';
    import { Gltf2Node } from './js/render/nodes/gltf2.js';
    import { SkyboxNode } from './js/render/nodes/skybox.js';
    import { BoxBuilder } from './js/render/geometry/box-builder.js';
    import { PbrMaterial } from './js/render/materials/pbr.js';
    import { QueryArgs } from './js/util/query-args.js';
    import { mat4, vec3 } from './js/render/math/gl-matrix.js';
    import { Ray } from './js/render/math/ray.js';

    // This library matches XRInputSource profiles to available controller models for us.
    import { fetchProfile } from 'https://cdn.jsdelivr.net/npm/@webxr-input-profiles/motion-controllers@1.0/dist/motion-controllers.module.js';

    // The path of the CDN the sample will fetch controller models from.
    const DEFAULT_PROFILES_PATH = 'https://cdn.jsdelivr.net/npm/@webxr-input-profiles/assets@1.0/dist/profiles';

    // XR globals.
    let xrButton = null;
    let xrRefSpace = null;
    let isAR = false;
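
    // A tracked hand exposes 25 joints. fillJointRadii() fills one float per
    // joint and fillPoses() fills one 4x4 (16-float) matrix per joint, so the
    // scratch buffers are sized to match.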
    let radii = new Float32Array(25);
    let positions = new Float32Array(16 * 25);

    // Boxes
    let boxes_left = [];
    let boxes_right = [];
    let tracked_boxes_left = [];
    let tracked_boxes_right = [];
    let boxes = { input_left: boxes_left, input_right: boxes_right, tracked_left: tracked_boxes_left, tracked_right: tracked_boxes_right };
    let indexFingerBoxes = { input_left: null, input_right: null, tracked_left: null, tracked_right: null };
    const defaultBoxColor = { r: 0.5, g: 0.5, b: 0.5 };
    const leftBoxColor = { r: 1, g: 0, b: 1 };
    const rightBoxColor = { r: 0, g: 1, b: 1 };
    let interactionBox = null;
    let leftInteractionBox = null;
    let rightInteractionBox = null;

    // WebGL scene globals.
    let gl = null;
    let renderer = null;
    let scene = new Scene();
    scene.addNode(new Gltf2Node({ url: './media/gltf/space/space.gltf' }));
    scene.addNode(new SkyboxNode({ url: './media/textures/milky-way-4k.png' }));

    function createBoxPrimitive(r, g, b) {
      let boxBuilder = new BoxBuilder();
      boxBuilder.pushCube([0, 0, 0], 1);
      let boxPrimitive = boxBuilder.finishPrimitive(renderer);
      let boxMaterial = new PbrMaterial();
      boxMaterial.baseColorFactor.value = [r, g, b, 1];
      return renderer.createRenderPrimitive(boxPrimitive, boxMaterial);
    }

    function addBox(x, y, z, r, g, b) {
      let boxRenderPrimitive = createBoxPrimitive(r, g, b);
      let boxNode = new Node();
      boxNode.addRenderPrimitive(boxRenderPrimitive);
      boxNode.translation = [x, y, z];
      // Marks the node as one that needs to be checked when hit testing.
      boxNode.selectable = true;
      return boxNode;
    }

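    // (Re)creates the joint cubes for each hand type. Called on session start
    // so that cubes left over from a previous session don't linger in the scene.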
    function initHands() {
      for (const box of boxes_left) {
        scene.removeNode(box);
      }
      for (const box of boxes_right) {
        scene.removeNode(box);
      }
      for (const box of tracked_boxes_left) {
        scene.removeNode(box);
      }
      for (const box of tracked_boxes_right) {
        scene.removeNode(box);
      }
      boxes_left = [];
      boxes_right = [];
      tracked_boxes_left = [];
      tracked_boxes_right = [];
      boxes = { input_left: boxes_left, input_right: boxes_right, tracked_left: tracked_boxes_left, tracked_right: tracked_boxes_right };
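      // XRHand is only defined when the UA implements the WebXR Hand Input
      // module, so its presence doubles as feature detection.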
      if (typeof XRHand !== 'undefined') {
        for (let i = 0; i < 25; i++) {
          const r = .6 + Math.random() * .4;
          const g = .6 + Math.random() * .4;
          const b = .6 + Math.random() * .4;
          boxes_left.push(addBox(0, 0, 0, r, g, b));
          boxes_right.push(addBox(0, 0, 0, r, g, b));
          tracked_boxes_left.push(addBox(0, 0, 0, r, g, b));
          tracked_boxes_right.push(addBox(0, 0, 0, r, g, b));
        }
      }
      for (const hand in indexFingerBoxes) {
        if (indexFingerBoxes[hand]) {
          scene.removeNode(indexFingerBoxes[hand]);
        }
      }
      indexFingerBoxes.input_left = addBox(0, 0, 0, leftBoxColor.r, leftBoxColor.g, leftBoxColor.b);
      indexFingerBoxes.input_right = addBox(0, 0, 0, rightBoxColor.r, rightBoxColor.g, rightBoxColor.b);
      indexFingerBoxes.tracked_left = addBox(0, 0, 0, leftBoxColor.r, leftBoxColor.g, leftBoxColor.b);
      indexFingerBoxes.tracked_right = addBox(0, 0, 0, rightBoxColor.r, rightBoxColor.g, rightBoxColor.b);
    }

    // Checks to see if WebXR is available and, if so, queries a list of
    // XRDevices that are connected to the system.
    function initXR() {
      // Adds a helper button to the page that indicates if any XRDevices are
      // available and lets the user pick between them if there are multiple.
      xrButton = new WebXRButton({
        onRequestSession: onRequestSession,
        onEndSession: onEndSession
      });
      document.querySelector('header').appendChild(xrButton.domElement);

      // Is WebXR available on this UA?
      if (navigator.xr) {
        // If the device allows creation of immersive sessions, set it as the
        // target of the 'Enter XR' button.
        navigator.xr.isSessionSupported('immersive-vr').then((supported) => {
          if (supported) {
            xrButton.enabled = true;
          } else {
            // Fall back to an AR session if immersive VR isn't supported.
            navigator.xr.isSessionSupported('immersive-ar').then((supported) => {
              isAR = supported;
              xrButton.enabled = supported;
            });
          }
        });
      }
    }

    // Called when the user selects a device to present to. In response we
    // will request an immersive session from that device.
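    // Note that 'hand-tracking' must be included in the requested features
    // for the UA to expose hand joint data on the session's input sources.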
    function onRequestSession() {
      return navigator.xr.requestSession(isAR ? 'immersive-ar' : 'immersive-vr', {
        optionalFeatures: ['local-floor', 'bounded-floor', 'hand-tracking']
      }).then(onSessionStarted);
    }

    // Called when we've successfully acquired an XRSession. In response we
    // will set up the necessary session state and kick off the frame loop.
    function onSessionStarted(session) {
      // This informs the 'Enter XR' button that the session has started and
      // that it should display 'Exit XR' instead.
      xrButton.setSession(session);

      // Listen for the session's 'end' event so we can respond if the user
      // or UA ends the session for any reason.
      session.addEventListener('end', onSessionEnded);
      session.addEventListener('inputsourceschange', onInputSourcesChange);
      session.addEventListener('trackedsourceschange', onTrackedSourcesChange);
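
      // Note: 'trackedsourceschange' and session.trackedSources (consumed in
      // updateTrackedSources() below) are not part of the core WebXR spec;
      // they come from an experimental proposal for exposing tracked but
      // non-input sources, which is why the code feature-detects them.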

      session.addEventListener('visibilitychange', e => {
        // Remove the hand joint cubes while the session is blurred.
        if (e.session.visibilityState === 'visible-blurred') {
          for (const hand in boxes) {
            for (const box of boxes[hand]) {
              scene.removeNode(box);
            }
          }
        }
      });

      // Create a WebGL context to render with, initialized to be compatible
      // with the XRDisplay we're presenting to.
      gl = createWebGLContext({
        xrCompatible: true
      });

      // Create a renderer with that GL context (this is just for the samples
      // framework and has nothing to do with WebXR specifically).
      renderer = new Renderer(gl);

      initHands();

      // Set the scene's renderer, which creates the necessary GPU resources.
      scene.setRenderer(renderer);

      // Use the new WebGL context to create an XRWebGLLayer and set it as the
      // session's baseLayer. This allows any content rendered to the layer to
      // be displayed on the XRDevice.
      session.updateRenderState({ baseLayer: new XRWebGLLayer(session, gl) });

      // Get a frame of reference, which is required for querying poses. In
      // this case a 'local' frame of reference means that all poses will
      // be relative to the location where the XRDevice was first detected.
      session.requestReferenceSpace('local').then((refSpace) => {
        // The identity offset is a no-op here, but shows where a fixed offset
        // from the reference space origin could be applied.
        xrRefSpace = refSpace.getOffsetReferenceSpace(new XRRigidTransform({ x: 0, y: 0, z: 0 }));

        // Inform the session that we're ready to begin drawing.
        session.requestAnimationFrame(onXRFrame);
      });
    }

    // Called when the user clicks the 'Exit XR' button. In response we end
    // the session.
    function onEndSession(session) {
      session.end();
    }

    // Called either when the user has explicitly ended the session (like in
    // onEndSession()) or when the UA has ended the session for any reason.
    // At this point the session object is no longer usable and should be
    // discarded.
    function onSessionEnded(event) {
      xrButton.setSession(null);

      // In this simple case discard the WebGL context too, since we're not
      // rendering anything else to the screen with it.
      renderer = null;
    }

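    // Both change events funnel into onSourcesChange(); the type prefix
    // ("input_" vs. "tracked_") selects which set of joint cubes to update.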
    function onInputSourcesChange(event) {
      onSourcesChange(event, "input_");
    }

    function onTrackedSourcesChange(event) {
      onSourcesChange(event, "tracked_");
    }

    function onSourcesChange(event, type) {
      // As input sources are connected, if they are tracked-pointer devices,
      // look up which meshes should be associated with their profile and load
      // them as the controller model for that hand.
      for (let inputSource of event.added) {
        if (inputSource.targetRayMode == 'tracked-pointer') {
          // Use the fetchProfile method from the motionControllers library
          // to find the appropriate glTF mesh path for this controller.
          fetchProfile(inputSource, DEFAULT_PROFILES_PATH).then(({ profile, assetPath }) => {
            // Typically if you wanted to animate the controllers in response
            // to device inputs you'd create a new MotionController() instance
            // here to handle the animation, but this sample will skip that
            // and only display a static mesh for simplicity.
            scene.inputRenderer.setControllerMesh(new Gltf2Node({ url: assetPath }),
                                                  inputSource.handedness,
                                                  inputSource.profiles[0]);
          });
        }
      }
    }

    function updateInputSources(session, frame, refSpace) {
      updateSources(session, frame, refSpace, session.inputSources, "input_");
    }

    function updateTrackedSources(session, frame, refSpace) {
      // session.trackedSources is experimental; only use it where available.
      if (session.trackedSources) {
        updateSources(session, frame, refSpace, session.trackedSources, "tracked_");
      }
    }

    function updateSources(session, frame, refSpace, sources, type) {
      if (session.visibilityState === 'visible-blurred') {
        return;
      }
      for (let inputSource of sources) {
        let hand_type = type + inputSource.handedness;
        if (type == "input_") {
          let targetRayPose = frame.getPose(inputSource.targetRaySpace, refSpace);
          if (targetRayPose) {
            if (inputSource.targetRayMode == 'tracked-pointer') {
              scene.inputRenderer.addLaserPointer(targetRayPose.transform);
            }

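            // Place the cursor a fixed distance out along the target ray so
            // the user can see where the ray is pointing.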
            let targetRay = new Ray(targetRayPose.transform);
            let cursorDistance = 2.0;
            let cursorPos = vec3.fromValues(
              targetRay.origin.x,
              targetRay.origin.y,
              targetRay.origin.z
            );
            vec3.add(cursorPos, cursorPos, [
              targetRay.direction.x * cursorDistance,
              targetRay.direction.y * cursorDistance,
              targetRay.direction.z * cursorDistance,
            ]);

            scene.inputRenderer.addCursor(cursorPos);
          }
        }

        if (!inputSource.hand && inputSource.gripSpace) {
          let gripPose = frame.getPose(inputSource.gripSpace, refSpace);
          if (gripPose) {
            scene.inputRenderer.addController(gripPose.transform.matrix, inputSource.handedness, inputSource.profiles[0]);
          } else {
            scene.inputRenderer.hideController(hand_type);
          }
        }

        let offset = 0;
        if (!inputSource.hand) {
          // Not a hand: make sure its joint cubes aren't in the scene.
          for (const box of boxes[hand_type]) {
            scene.removeNode(box);
          }
          scene.removeNode(indexFingerBoxes[hand_type]);
          continue;
        } else {
          let pose = frame.getPose(inputSource.targetRaySpace, refSpace);
          if (!pose) {
            console.log("no pose");
          }

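          // The whole hand can be read in one batch: fillJointRadii() and
          // fillPoses() populate the preallocated arrays with all 25 joints'
          // radii and 4x4 pose matrices, returning false when the data isn't
          // available this frame. The per-joint equivalent (one call per
          // joint) would look roughly like:
          //
          //   for (const joint of inputSource.hand.values()) {
          //     const jointPose = frame.getJointPose(joint, refSpace);
          //     // jointPose.transform.matrix, jointPose.radius, ...
          //   }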
          if (!frame.fillJointRadii(inputSource.hand.values(), radii)) {
            console.log("no fillJointRadii");
            continue;
          }
          if (!frame.fillPoses(inputSource.hand.values(), refSpace, positions)) {
            console.log("no fillPoses");
            continue;
          }
          for (const box of boxes[hand_type]) {
            scene.addNode(box);
            let matrix = positions.slice(offset * 16, (offset + 1) * 16);
            let jointRadius = radii[offset];
            offset++;
            mat4.getTranslation(box.translation, matrix);
            mat4.getRotation(box.rotation, matrix);
            box.scale = [jointRadius, jointRadius, jointRadius];
          }

          // Render a special box for each index finger on each hand
          const indexFingerBox = indexFingerBoxes[hand_type];
          scene.addNode(indexFingerBox);
          let joint = inputSource.hand.get('index-finger-tip');
          let jointPose = frame.getJointPose(joint, refSpace);
          if (jointPose) {
            let matrix = jointPose.transform.matrix;
            mat4.getTranslation(indexFingerBox.translation, matrix);
            mat4.getRotation(indexFingerBox.rotation, matrix);
            indexFingerBox.scale = [0.02, 0.02, 0.02];
          }
        }
      }
    }

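    // Spins a demo cube in front of the viewer and, when an index fingertip
    // comes within the cube's radius, highlights it in that hand's color.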
    function UpdateInteractables(time) {
      // Add scene objects if not present
      if (!interactionBox) {
        // Add box to demonstrate hand interaction
        function AddInteractionBox(r, g, b) {
          let box = new Node();
          box.addRenderPrimitive(createBoxPrimitive(r, g, b));
          box.translation = [0, 0, -0.65];
          box.scale = [0.25, 0.25, 0.25];
          return box;
        }
        interactionBox = AddInteractionBox(defaultBoxColor.r, defaultBoxColor.g, defaultBoxColor.b);
        leftInteractionBox = AddInteractionBox(leftBoxColor.r, leftBoxColor.g, leftBoxColor.b);
        rightInteractionBox = AddInteractionBox(rightBoxColor.r, rightBoxColor.g, rightBoxColor.b);
        scene.addNode(interactionBox);
        scene.addNode(leftInteractionBox);
        scene.addNode(rightInteractionBox);
      }

      function Distance(nodeA, nodeB) {
        return Math.hypot(
          nodeA.translation[0] - nodeB.translation[0],
          nodeA.translation[1] - nodeB.translation[1],
          nodeA.translation[2] - nodeB.translation[2]);
      }

      // Perform distance check on interactable elements
      const interactionDistance = interactionBox.scale[0];
      leftInteractionBox.visible = false;
      rightInteractionBox.visible = false;
      if (Distance(indexFingerBoxes.input_left, interactionBox) < interactionDistance) {
        leftInteractionBox.visible = true;
      } else if (Distance(indexFingerBoxes.input_right, interactionBox) < interactionDistance) {
        rightInteractionBox.visible = true;
      }
      interactionBox.visible = !(leftInteractionBox.visible || rightInteractionBox.visible);

      mat4.rotateX(interactionBox.matrix, interactionBox.matrix, time / 1000);
      mat4.rotateY(interactionBox.matrix, interactionBox.matrix, time / 1500);
      leftInteractionBox.matrix = interactionBox.matrix;
      rightInteractionBox.matrix = interactionBox.matrix;
    }

    // Called every time the XRSession requests that a new frame be drawn.
    function onXRFrame(t, frame) {
      let session = frame.session;

      // Per-frame scene setup. Nothing WebXR specific here.
      scene.startFrame();

      // Inform the session that we're ready for the next frame.
      session.requestAnimationFrame(onXRFrame);

      updateInputSources(session, frame, xrRefSpace);
      updateTrackedSources(session, frame, xrRefSpace);
      UpdateInteractables(t);

      // Get the XRDevice pose relative to the Frame of Reference we created
      // earlier.
      let pose = frame.getViewerPose(xrRefSpace);

      // Getting the pose may fail if, for example, tracking is lost. So we
      // have to check to make sure that we got a valid pose before attempting
      // to render with it. If we didn't, we just leave the framebuffer
      // cleared, so tracking loss means the scene will simply disappear.
      if (pose) {
        let glLayer = session.renderState.baseLayer;

        // If we do have a valid pose, bind the WebGL layer's framebuffer,
        // which is where any content to be displayed on the XRDevice must be
        // rendered.
        gl.bindFramebuffer(gl.FRAMEBUFFER, glLayer.framebuffer);

        // Clear the framebuffer.
        gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);

        // Loop through each of the views reported by the frame and draw them
        // into the corresponding viewport.
        for (let view of pose.views) {
          let viewport = glLayer.getViewport(view);
          gl.viewport(viewport.x, viewport.y,
                      viewport.width, viewport.height);

          // Draw this view of the scene. What happens in this function really
          // isn't all that important. What is important is that it renders
          // into the XRWebGLLayer's framebuffer, using the viewport into that
          // framebuffer reported by the current view, and using the
          // projection matrix and view transform from the current view.
          // We bound the framebuffer and viewport up above, and are passing
          // in the appropriate matrices here to be used when rendering.
          scene.draw(view.projectionMatrix, view.transform);
        }
      } else {
        // There are several options for handling cases where no pose is given.
        // The simplest, which these samples opt for, is to simply not draw
        // anything. That way the device will continue to show the last frame
        // drawn, possibly even with reprojection. Alternately you could
        // re-draw the scene again with the last known good pose (which is now
        // likely to be wrong), clear to black, or draw a head-locked message
        // for the user indicating that they should try to get back to an area
        // with better tracking. In all cases it's possible that the device
        // may override what is drawn here to show the user its own error
        // message, so it should not be anything critical to the application's
        // use.
      }

      // Per-frame scene teardown. Nothing WebXR specific here.
      scene.endFrame();
    }

    // Start the XR application.
    initXR();
  </script>
</body>

</html>