AI/AR: Foot Recognition (Based on three.js and microapp-ar-three)

Prerequisites

Update your Douyin developer tools to version 3.1.1 or later (earlier versions do not support npm packages).

Using the microapp-ar-three Package

microapp-ar-three itself depends on three.js, and @douyin-microapp/microapp-ar-three/libs provides a compatibility build of three.js that can be used directly in ByteDance mini programs; we will come back to it later. For now, create a package.json file and add a dependency on @douyin-microapp/microapp-ar-three:
{ "dependencies": { "@douyin-microapp/microapp-ar-three": "0.0.5" } }
Then run npm install in the directory containing package.json.
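Roughly, the project root should now contain the following (a sketch showing only the entries relevant to this tutorial):

```
├── node_modules/
│   └── @douyin-microapp/
│       └── microapp-ar-three/
├── package.json
└── pages/
```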
If node_modules does not show up, refresh the file tree.
Then open the npm panel in the developer tools.
The installed package should be listed under installed; if it is not, refresh the panel.
Then click Build npm.
Once a build-success message appears in the lower-right corner, click the Explorer button on the left to see the resulting directory structure.
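After the build, the directory should roughly look like this (a sketch; the layout under miniprogram_npm is inferred from the import paths used later in this tutorial):

```
├── miniprogram_npm/
│   └── @douyin-microapp/
│       └── microapp-ar-three/
│           ├── index.js
│           └── libs/
│               └── three.js
├── node_modules/
├── package.json
└── pages/
```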
The three.js file under libs resolves the compatibility issues between ByteDance mini programs and the open-source three.js library. Copy it into the miniprogram_npm/@douyin-microapp/microapp-ar-three directory. Now we can import the npm package to check that it works. Create pages/index/ShoeRenderModule.js with the following code:
```js
import { getThree } from "../../miniprogram_npm/@douyin-microapp/microapp-ar-three/three";
import {
  ArMixer,
  ArFootProcessor,
} from "../../miniprogram_npm/@douyin-microapp/microapp-ar-three/index";

export function initModule() {
  console.log(getThree);
  console.log(ArMixer);
  console.log(ArFootProcessor);
}
```
getThree is used to obtain THREE; it takes a webgl canvas object as its argument. We will use it later.
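As a quick sketch of the call shape (here webglCanvas is a placeholder; querying the real canvas node from index.ttml is covered in a later step):

```js
// `webglCanvas` is a placeholder for the canvas node obtained later.
const THREE = getThree(webglCanvas);
// THREE then behaves like the regular three.js namespace:
const scene = new THREE.Scene();
```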
Call this function in index.js:
```js
const app = getApp();

Page({
  data: {},
  onLoad: function () {
    console.log("Welcome to Mini Code");
    this.shoeRenderModule = require("./ShoeRenderModule");
    this.shoeRenderModule.initModule();
  },
});
```
If the three values are logged correctly, the npm package is working.
Next, we will build an AR shoe try-on mini program in five steps (a sketch of the per-frame data flow follows the list):
    Get the camera feed
    Get the algorithm data
    Draw the camera feed on a canvas
    Load and render models with three.js
    Complete the AR scene rendering with ArMixer
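For orientation, here is a rough sketch of how data flows through these pieces on every camera frame once everything is in place; the names match the modules built later in this tutorial, so treat it as a preview rather than working code:

```js
// Per-frame data flow of the finished demo (sketch only; built up step by step below).
cameraContext.onCameraFrame((frame) => {
  canvas.requestAnimationFrame(() => {
    shoeAlgorithmModule.onFrame(frame.data); // step 2: run foot detection on the frame
    shoeRenderModule.onFrame(frame);         // steps 3-5: draw the camera feed, the
                                             // models, and the ArMixer-blended result
  });
});
```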

Getting the Camera Feed

An AR application needs the camera feed, so we first create a camera. Here we use the camera tag, which provides a feed with a 4:3 aspect ratio. Following the camera component documentation, add a camera tag to index.ttml:
```html
<view class="intro">
  <view class="try_shoes">
    <camera
      class="camera"
      device-position="back"
      flash="off"
      binderror="onError"
      style="width: 100%; height: 600px;"
    />
  </view>
</view>
```
Now the camera feed is visible.
However, we still need to hand the camera frame data to the algorithm module, so modify index.js and index.ttml to capture the frames:
index.ttml:
```html
<view class="intro">
  <view class="try_shoes">
    <camera
      class="camera"
      device-position="back"
      flash="off"
      binderror="onError"
      style="width: 100%; height: 600px;"
    />
    <button bindtouchstart="getCameraData">Get camera data</button>
  </view>
</view>
```
index.js:
```js
const app = getApp();

Page({
  data: {},
  onLoad: function () {
    this.cameraContext = tt.createCameraContext();
  },
  getCameraData() {
    this.cameraListener = this.cameraContext.onCameraFrame((frame) => {
      console.log(frame);
    });
    this.cameraListener.start();
  },
  onError(e) {
    tt.showModal({
      content: "Camera error: " + e.detail.errMsg,
    });
  },
});
```
Run the mini program; the debug console shows that frame contains three fields: width, height, and data.
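For reference, a logged frame looks roughly like this (a sketch; the exact dimensions vary by device, and the RGBA layout is inferred from how the data is consumed later in this tutorial):

```js
// Rough shape of one camera frame (values illustrative only):
// {
//   width: 480,          // frame width in pixels
//   height: 640,         // frame height in pixels
//   data: ArrayBuffer,   // RGBA pixel data, width * height * 4 bytes
// }
```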

Getting the Algorithm Data

Next we pass the camera frames to the algorithm module to obtain the data that drives the shoe models; for the API, see the algorithm capability overview. For convenience, we put all algorithm-related handling in ShoeAlgorithmModule.js:
```js
let algorithmManager = null;
let width = null;
let height = null;
let cb = null;

export function initShoeAlgorithm(_width, _height, onResultFallback) {
  cb = onResultFallback;
  width = _width;
  height = _height;
  tt.getAlgorithmManager({
    width: _width,
    height: _height,
    useSyncMode: true,
    requirements: ["foot"],
    success: (algMgr) => {
      console.log("get algorithm Manager ~");
      console.log(algMgr);
      algorithmManager = algMgr.algorithmManager;
    },
    fail: (errMsg) => {
      console.log(errMsg);
    },
    complete: () => {
      console.log("get alg mgr complete");
    },
  });
}

export function onFrame(cameraData) {
  if (algorithmManager != null) {
    algorithmManager.doExecute({
      input: cameraData,
      width: width,
      height: height,
      timeStamp: Date.now() / 1e9,
      success: (algMgr) => {
        cb(algMgr);
      },
      fail: (errMsg) => {
        console.log(errMsg);
      },
    });
  }
}
```
Because the AlgorithmManager implementation relies on canvas capabilities internally, we need to add a canvas tag to index.ttml to initialize the helium engine, with type set to webgl. We will also use this canvas later. The modified index.ttml:
```html
<view class="intro">
  <view class="try_shoes">
    <camera
      class="camera"
      device-position="back"
      flash="off"
      binderror="onError"
      style="width: 100%; height: 600px;"
    />
    <canvas id="canvas_type_webgl" type="webgl" style="width: 1px; height: 1px" />
    <button bindtouchstart="getCameraData">get Camera Data</button>
  </view>
</view>
```
Also modify index.js to feed the camera data to the AlgorithmManager and print the algorithm results:
```js
const app = getApp();

Page({
  data: {},
  onLoad: function () {
    this.cameraContext = tt.createCameraContext();
  },
  getCameraData() {
    this.cameraListener = this.cameraContext.onCameraFrame((frame) => {
      if (this.shoeAlgorithmModule == null) {
        this.shoeAlgorithmModule = require("./ShoeAlgorithmModule");
        this.shoeAlgorithmModule.initShoeAlgorithm(
          frame.width,
          frame.height,
          this.onAlgorithmResult
        );
      }
      this.shoeAlgorithmModule.onFrame(frame.data);
    });
    this.cameraListener.start();
  },
  onAlgorithmResult(algorithmResult) {
    if (algorithmResult.left.length != 0 || algorithmResult.right.length != 0)
      console.log("algorithmResult = ", algorithmResult);
  },
  onError(e) {
    tt.showModal({
      content: "Camera error: " + e.detail.errMsg,
    });
  },
});
```
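Based only on the check in onAlgorithmResult above, the result exposes per-foot arrays that are empty when the corresponding foot is not detected. The sketch below is an inference; the per-entry fields are consumed by ArFootProcessor in a later step, so we never read them directly:

```js
// Inferred shape (sketch):
// algorithmResult = {
//   left:  [ /* entries while the left foot is detected, otherwise empty */ ],
//   right: [ /* entries while the right foot is detected, otherwise empty */ ],
// };
```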
Run the mini program and tap the get Camera Data button. When a foot appears in the camera frame, the algorithm results are printed. The complete project code up to this stage can be previewed in the developer tools.
In the next stage, we will load and render models with three.js, and use the microapp-ar-three library to drive and draw the models based on the algorithm data returned by AlgorithmManager.

Drawing the Camera Feed on the Canvas

Earlier we imported the compatibility build of three.js; now we will use three.js to load and draw models. For the three.js documentation, see here.
As with the algorithm module, we create a ShoeRenderModule.js to hold the rendering code. Since three.js needs a webgl canvas, we must obtain the canvas node declared in index.ttml; see here for how to get it.
In the first two stages we saw how to get camera frames and algorithm data. Now we start building the actual AR mini program, so let's reorganize index.js: first get the canvas, then start receiving camera frames:
```js
const app = getApp();

Page({
  data: {},
  onLoad: function () {
    this.cameraContext = tt.createCameraContext();
  },
  initCanvasConfig() {
    tt.createSelectorQuery()
      .select("#canvas_type_webgl")
      .node()
      .exec((res) => {
        this.canvas = res[0].node;
        this.data.canvas2d = this.canvas;
        this.ctx = this.canvas.getContext("webgl");
        this.ctx.canvas.width = 480;
        this.ctx.canvas.height = 640;
        this.cameraContext = tt.createCameraContext();
        this.cameraListener = this.cameraContext.onCameraFrame((frame) => {
          console.log(frame);
        });
        this.cameraListener.start();
      });
  },
  onError(e) {
    tt.showModal({
      content: "Camera error: " + e.detail.errMsg,
    });
  },
});
```
We also need to modify index.ttml:
```html
<view class="intro">
  <view class="try_shoes">
    <camera
      class="camera"
      device-position="back"
      flash="off"
      binderror="onError"
      style="width: 100%; height: 600px;"
    />
    <canvas id="canvas_type_webgl" type="webgl" style="width: 1px; height: 1px" />
    <button bindtouchstart="initCanvasConfig">Initialize webgl canvas</button>
  </view>
</view>
```
Once frame data is printed correctly, we need to pass each frame to the AlgorithmManager for algorithm data. We also need to pass the camera output to the render module: the camera tag itself cannot be drawn to, and in the end both the models and the camera feed must be drawn on the webgl canvas. Modify the code as follows:
index.js:
```js
const app = getApp();

Page({
  data: {},
  onLoad: function () {
    this.cameraContext = tt.createCameraContext();
  },
  initCanvasConfig() {
    tt.createSelectorQuery()
      .select("#canvas_type_webgl")
      .node()
      .exec((res) => {
        this.canvas = res[0].node;
        this.data.canvas2d = this.canvas;
        this.ctx = this.canvas.getContext("webgl");
        this.ctx.canvas.width = 480;
        this.ctx.canvas.height = 640;
        this.cameraContext = tt.createCameraContext();
        this.cameraListener = this.cameraContext.onCameraFrame((frame) => {
          this.canvas.requestAnimationFrame(() => {
            this.onFrame(frame);
          });
        });
        this.cameraListener.start();
      });
  },
  initModules() {
    if (this.shoeRenderModule == null) {
      this.shoeRenderModule = require("./ShoeRenderModule");
      this.shoeRenderModule.initModule(this.ctx.canvas);
    }
    if (this.shoeAlgorithmModule == null) {
      this.shoeAlgorithmModule = require("./ShoeAlgorithmModule");
      this.shoeAlgorithmModule.initShoeAlgorithm(
        this.ctx.canvas.width,
        this.ctx.canvas.height,
        this.shoeRenderModule.onAlgorithmResult
      );
    }
  },
  onFrame(cameraFrame) {
    if (this.shoeAlgorithmModule != null) {
      this.shoeAlgorithmModule.onFrame(cameraFrame.data);
    }
    if (this.shoeRenderModule != null) {
      this.shoeRenderModule.onFrame(cameraFrame);
    }
  },
  onError(e) {
    tt.showModal({
      content: "Camera error: " + e.detail.errMsg,
    });
  },
});
```
index.ttml:
```html
<view class="intro"> </view>
<view class="try_shoes">
  <camera
    class="camera"
    device-position="back"
    flash="off"
    binderror="onError"
    style="width: 1px; height: 1px;"
  />
  <canvas id="canvas_type_webgl" type="webgl" style="width: 480px; height: 640px" />
  <button bindtouchstart="initCanvasConfig">Initialize webgl canvas</button>
  <button bindtouchstart="initModules">AR try-on</button>
</view>
```
ShoeRenderModule.js:
```js
import { getThree } from "../../miniprogram_npm/@douyin-microapp/microapp-ar-three/three";
import {
  ArMixer,
  ArFootProcessor,
} from "../../miniprogram_npm/@douyin-microapp/microapp-ar-three/index";

export function initModule(_canvas) {
  console.log(getThree);
  console.log(ArMixer);
  console.log(ArFootProcessor);
}

export function onAlgorithmResult(algorithmResult) {
  console.log(
    "in shoeRenderModule onAlgorithmResult :: algorithmResult = ",
    algorithmResult
  );
}

export function onFrame(cameraFrame) {
  console.log("in shoeRenderModule onFrame :: cameraFrame = ", cameraFrame);
}
```
If algorithmResult and cameraFrame are both logged correctly, we can continue.
Because microapp-ar-three's ArMixer blends a texture containing all rendered models with a texture containing the camera feed, we first need three.js to draw the camera feed onto a three.js texture.
We already imported the @douyin-microapp/microapp-ar-three library and the three.js file in ShoeRenderModule.js. Next, initialize three.js in initModule and draw the camera feed in onFrame.
First, get THREE:
```js
let THREE = null;

export function initModule(_canvas) {
  THREE = getThree(_canvas);
  console.log("THREE = ", THREE);
}
```
Once THREE is logged correctly, we can use it to render the camera feed.
We store the camera frame data in a three.js texture and apply it, via a shader material, to a THREE.PlaneGeometry:
```js
let THREE = null;
let renderer = null;
let mixedPlane = null;
let mixedScene = null;
let mixedCamera = null;
let cameraTexture = null;

const vertexShaderSource = `
  varying vec2 vUv;
  void main() {
    vUv = uv;
    vec4 mvPosition = modelViewMatrix * vec4( position, 1.0 );
    gl_Position = projectionMatrix * mvPosition;
  }
`;

const fragmentShaderSource = `
  uniform sampler2D mixedTexture;
  varying vec2 vUv;
  void main(void) {
    vec4 mix = texture2D(mixedTexture, vUv);
    gl_FragColor = vec4(mix);
  }
`;

export function initModule(_canvas) {
  THREE = getThree(_canvas);

  cameraTexture = new THREE.DataTexture();

  let planeGeometry = new THREE.PlaneGeometry(_canvas.width, _canvas.height);
  let planeMaterial = new THREE.ShaderMaterial({
    uniforms: {
      mixedTexture: { value: cameraTexture },
    },
    vertexShader: vertexShaderSource,
    fragmentShader: fragmentShaderSource,
  });
  mixedPlane = new THREE.Mesh(planeGeometry, planeMaterial);
  mixedPlane.position.z = -10;
}
```
Then initialize mixedScene, mixedCamera, and the renderer:
```js
mixedScene = new THREE.Scene();
mixedScene.add(mixedPlane);

mixedCamera = new THREE.OrthographicCamera(
  _canvas.width / -2,
  _canvas.width / 2,
  _canvas.height / 2,
  _canvas.height / -2,
  1,
  1000
);

renderer = new THREE.WebGLRenderer({
  canvas: _canvas,
  alpha: true,
  premultipliedAlpha: false,
  stencil: false,
  preserveDrawingBuffer: true,
});
```
Next, fill cameraTexture with the frame data in ShoeRenderModule.js's onFrame:
```js
export function onFrame(cameraData) {
  if (cameraTexture.image.data == null) {
    // First frame: initialize the texture from the camera data...
    cameraTexture.copy(
      new THREE.DataTexture(
        cameraData.data,
        cameraData.width,
        cameraData.height,
        THREE.RGBAFormat
      )
    );
    cameraTexture.flipY = true;
  } else {
    // ...afterwards, only the pixel data needs to be swapped in.
    cameraTexture.image.data = cameraData.data;
  }
  mixedPlane.material.uniformsNeedUpdate = true;
  cameraTexture.needsUpdate = true;
  renderer.render(mixedScene, mixedCamera);
}
```
Run the mini program and you should see the camera feed. The complete project for this stage can be previewed in the developer tools.

Loading and Rendering Models with three.js

Because microapp-ar-three consumes a texture onto which all models have been rendered, we next use three.js to load and render the models. The documentation provides two sample models:
the left-foot and right-foot shoe models, in OBJ format.
In the three.js file mentioned earlier, OBJLoader is already attached to THREE, so we can use it directly through THREE. Load the models from the two model URLs (see the constants in the code below):
```js
let modelGroup = null;
let shoe_left = null;
let shoe_right = null;

const leftShoePath =
  "https://lf3-developer.bytemastatic.com/obj/developer/misc/AI_AR_demo/shoe_L.obj";
const rightShoePath =
  "https://lf3-developer.bytemastatic.com/obj/developer/misc/AI_AR_demo/shoe_R.obj";

function loadShoeModels() {
  const loader = new THREE.OBJLoader();
  loader.load(
    leftShoePath,
    function (loadedMesh) {
      tt.showToast({
        title: "Model loaded",
        duration: 2000,
        success(res) {},
        fail(res) {
          console.log(`showToast failed`);
        },
      });
      shoe_left = loadedMesh;
      var material = new THREE.MeshLambertMaterial({ color: "white" });
      shoe_left.children.forEach(function (child) {
        child.material = material;
      });
      modelGroup.add(shoe_left);
    },
    undefined,
    function (e) {
      tt.showToast({
        title: "Failed to load model",
        duration: 2000,
        success(res) {
          console.log(`${res}`);
        },
        fail(res) {
          console.log(`showToast failed`);
        },
      });
      console.error("load model error :: ", e);
    }
  );
  loader.load(
    rightShoePath,
    function (loadedMesh) {
      tt.showToast({
        title: "Model loaded",
        duration: 2000,
        success(res) {},
        fail(res) {
          console.log(`showToast failed`);
        },
      });
      shoe_right = loadedMesh;
      var material = new THREE.MeshLambertMaterial({ color: "white" });
      shoe_right.children.forEach(function (child) {
        child.material = material;
      });
      modelGroup.add(shoe_right);
    },
    undefined,
    function (e) {
      tt.showToast({
        title: "Failed to load model",
        duration: 2000,
        success(res) {
          console.log(`${res}`);
        },
        fail(res) {
          console.log(`showToast failed`);
        },
      });
      console.error("load model error :: ", e);
    }
  );
}
```
Then call this function in initModule.
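A minimal sketch of that placement (assumed; the constraints are that getThree has already run, since loadShoeModels uses THREE.OBJLoader, and that modelGroup exists before the asynchronous load callbacks call modelGroup.add):

```js
export function initModule(_canvas) {
  THREE = getThree(_canvas); // must run before new THREE.OBJLoader()
  // ... texture / scene / renderer setup from the previous steps ...
  loadShoeModels(); // async: callbacks add the loaded meshes to modelGroup
}
```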
Next we try to render the models. For that, we create a modelScene and modelCamera dedicated to drawing the models:
```js
let modelScene = null;
let modelCamera = null;

export function initModule(_canvas) {
  ...
  modelScene = new THREE.Scene();
  modelCamera = new THREE.PerspectiveCamera(
    5,
    _canvas.width / _canvas.height,
    0.1,
    2500
  );
  modelGroup = new THREE.Group();
  modelScene.add(modelGroup);

  var modelLight = new THREE.HemisphereLight(0xffffff, 0x444444);
  modelLight.position.set(0, 20, 0);
  modelScene.add(modelLight);
}
```
Now let's use ArFootProcessor to update the models from the algorithm data. Note the modelCamera defined above: we recommend using the same parameters as shown. First, create the ArFootProcessor in initModule:
```js
let arFootProcessor = null;

export function initModule(_canvas) {
  arFootProcessor = new ArFootProcessor({
    three: THREE,
  });
}
```
Then, in onAlgorithmResult, hand the loaded models and the algorithm result to arFootProcessor:
```js
export function onAlgorithmResult(algorithmResult) {
  if (shoe_left == null || shoe_right == null) return;
  const models = {
    left: shoe_left,
    right: shoe_right,
  };
  arFootProcessor.updateModels({
    algResult: algorithmResult,
    models: models,
  });
}
```
Finally, render the models in onFrame:
```js
export function onFrame(cameraData) {
  if (cameraTexture.image.data == null) {
    cameraTexture.copy(
      new THREE.DataTexture(
        cameraData.data,
        cameraData.width,
        cameraData.height,
        THREE.RGBAFormat
      )
    );
    cameraTexture.flipY = true;
  } else {
    cameraTexture.image.data = cameraData.data;
  }
  mixedPlane.material.uniformsNeedUpdate = true;
  cameraTexture.needsUpdate = true;
  renderer.render(mixedScene, mixedCamera);
  renderer.render(modelScene, modelCamera);
}
```
Run the mini program and tap the Initialize webgl canvas button, then the AR try-on button. Once the models finish loading, the shoes will follow your feet.
Untextured shoes look a bit plain, so let's load textures to make them nicer; the texture URLs appear in the ShoeRenderModule.js code below. See Canvas.createImage for how to read images; here we create the shoe textures with THREE.Texture from the loaded images. Modify index.js and ShoeRenderModule.js:
index.js:
```js
const app = getApp();

Page({
  data: {
    page_canvas: null,
  },
  onLoad: function () {
    this.cameraContext = tt.createCameraContext();
  },
  initCanvasConfig() {
    tt.createSelectorQuery()
      .select("#canvas_type_webgl")
      .node()
      .exec((res) => {
        this.canvas = res[0].node;
        this.data.page_canvas = this.canvas;
        this.ctx = this.canvas.getContext("webgl");
        ...
      });
  },
  ...
});
```
ShoeRenderModule.js:
```js
const leftShoeModelPath =
  "https://lf3-developer.bytemastatic.com/obj/developer/misc/AI_AR_demo/shoe_L.obj";
const rightShoeModelPath =
  "https://lf3-developer.bytemastatic.com/obj/developer/misc/AI_AR_demo/shoe_R.obj";
const leftShoeTexturePath =
  "https://lf3-developer.bytemastatic.com/obj/developer/misc/AI_AR_demo/shoe_L.png";
const rightShoeTexturePath =
  "https://lf3-developer.bytemastatic.com/obj/developer/misc/AI_AR_demo/shoe_R.png";

function loadShoeModels() {
  const currentPage = getCurrentPages()[0];
  const leftShoeTextureImage = currentPage.data.page_canvas.createImage();
  leftShoeTextureImage.src = leftShoeTexturePath;
  console.log("leftShoeTextureImage.data = ", leftShoeTextureImage.data);
  const rightShoeTextureImage = currentPage.data.page_canvas.createImage();
  rightShoeTextureImage.src = rightShoeTexturePath;
  console.log("rightShoeTextureImage.data = ", rightShoeTextureImage.data);

  const loader = new THREE.OBJLoader();

  leftShoeTextureImage.onload = () => {
    loader.load(
      leftShoeModelPath,
      function (loadedMesh) {
        tt.showToast({
          title: "Model loaded",
          duration: 2000,
          success(res) {},
          fail(res) {
            console.log(`showToast failed`);
          },
        });
        shoe_left = loadedMesh;
        console.log(
          "leftShoeTextureImage.data(in load) = ",
          leftShoeTextureImage.data
        );
        const texture = new THREE.Texture(leftShoeTextureImage);
        texture.format = THREE.RGBAFormat;
        texture.flipY = false;
        texture.needsUpdate = true;
        shoe_left.children.forEach(function (child) {
          child.material.map = texture;
        });
        modelGroup.add(shoe_left);
      },
      undefined,
      function (e) {
        tt.showToast({
          title: "Failed to load model",
          duration: 2000,
          success(res) {
            console.log(`${res}`);
          },
          fail(res) {
            console.log(`showToast failed`);
          },
        });
        console.error("load model error :: ", e);
      }
    );
  };

  rightShoeTextureImage.onload = () => {
    loader.load(
      rightShoeModelPath,
      function (loadedMesh) {
        tt.showToast({
          title: "Model loaded",
          duration: 2000,
          success(res) {},
          fail(res) {
            console.log(`showToast failed`);
          },
        });
        shoe_right = loadedMesh;
        console.log(
          "rightShoeTextureImage.data(in load) = ",
          rightShoeTextureImage.data
        );
        const texture = new THREE.Texture(rightShoeTextureImage);
        texture.format = THREE.RGBAFormat;
        texture.flipY = false;
        texture.needsUpdate = true;
        shoe_right.children.forEach(function (child) {
          child.material.map = texture;
        });
        modelGroup.add(shoe_right);
      },
      undefined,
      function (e) {
        tt.showToast({
          title: "Failed to load model",
          duration: 2000,
          success(res) {
            console.log(`${res}`);
          },
          fail(res) {
            console.log(`showToast failed`);
          },
        });
        console.error("load model error :: ", e);
      }
    );
  };
}
```
Once that is done, run the mini program, tap Initialize webgl canvas and then AR try-on; after the models load, you will see textured shoes following the feet:
The project code for this stage can be previewed in the developer tools.

Completing the AR Scene with ArMixer

At this point, we have nearly finished the AR project! We have drawn the camera feed and the shoe models, but we have not yet combined the two. ArMixer does this for us; its parameters are as follows:
| Property | Type | Default | Description |
| --- | --- | --- | --- |
| three | type of THREE | - | The THREE namespace currently in use |
| processors | ArProcessor[] | - | Array of arProcessor instances |
| cameraTexture | THREE.Texture | - | A THREE.Texture holding one frame of camera data |
| modelTexture | THREE.Texture | - | The THREE.Texture onto which all models have been drawn |
| mixedTextureWidth | number | - | Width of the mixedTexture that stores the blended result |
| mixedTextureHeight | number | - | Height of the mixedTexture that stores the blended result |
In this example we need to draw the result onto the canvas, so the mixedTexture dimensions match the canvas. We are still missing a modelTexture. To obtain one, point the renderer at a THREE.WebGLRenderTarget via renderer.setRenderTarget, render the models into that target, and read the texture back from THREE.WebGLRenderTarget.texture:
```js
let modelRenderTarget = null;

export function initModule(_canvas) {
  ...
  modelRenderTarget = new THREE.WebGLRenderTarget(
    _canvas.width,
    _canvas.height
  );
  ...
}

export function onFrame(cameraData) {
  if (cameraTexture.image.data == null) {
    cameraTexture.copy(
      new THREE.DataTexture(
        cameraData.data,
        cameraData.width,
        cameraData.height,
        THREE.RGBAFormat
      )
    );
    cameraTexture.flipY = true;
  } else {
    cameraTexture.image.data = cameraData.data;
  }
  mixedPlane.material.uniformsNeedUpdate = true;
  cameraTexture.needsUpdate = true;

  // Render the models into the render target; its .texture becomes modelTexture.
  renderer.setRenderTarget(modelRenderTarget);
  renderer.render(modelScene, modelCamera);
  // Switch back to the canvas and draw the mixed scene.
  renderer.setRenderTarget(null);
  renderer.render(mixedScene, mixedCamera);
}
```
Now we can create the ArMixer object:
```js
let arMixer = null;

export function initModule(_canvas) {
  ...
  arMixer = new ArMixer({
    three: THREE,
    processors: [arFootProcessor],
    cameraTexture: cameraTexture,
    modelTexture: modelRenderTarget.texture,
    mixedTextureWidth: _canvas.width,
    mixedTextureHeight: _canvas.height,
  });
  let planeGeometry = new THREE.PlaneGeometry(_canvas.width, _canvas.height);
  ...
}
```
Next, replace the planeMaterial's mixedTexture with arMixer.mixedTexture:
```js
export function initModule(_canvas) {
  ...
  arMixer = new ArMixer({
    three: THREE,
    processors: [arFootProcessor],
    cameraTexture: cameraTexture,
    modelTexture: modelRenderTarget.texture,
    mixedTextureWidth: _canvas.width,
    mixedTextureHeight: _canvas.height,
  });
  let planeGeometry = new THREE.PlaneGeometry(_canvas.width, _canvas.height);
  let planeMaterial = new THREE.ShaderMaterial({
    uniforms: {
      mixedTexture: { value: arMixer.mixedTexture },
    },
    vertexShader: vertexShaderSource,
    fragmentShader: fragmentShaderSource,
  });
  ...
}
```
Finally, call arMixer.render() in onFrame:
```js
export function onFrame(cameraData) {
  if (cameraTexture.image.data == null) {
    cameraTexture.copy(
      new THREE.DataTexture(
        cameraData.data,
        cameraData.width,
        cameraData.height,
        THREE.RGBAFormat
      )
    );
    cameraTexture.flipY = true;
  } else {
    cameraTexture.image.data = cameraData.data;
  }
  mixedPlane.material.uniformsNeedUpdate = true;
  cameraTexture.needsUpdate = true;

  // Draw the models into the render target, ...
  renderer.setRenderTarget(modelRenderTarget);
  renderer.render(modelScene, modelCamera);
  renderer.setRenderTarget(null);
  // ...blend the model texture with the camera texture into mixedTexture, ...
  arMixer.render({
    renderer: renderer,
  });
  // ...and finally draw the blended plane to the canvas.
  renderer.render(mixedScene, mixedCamera);
}
```
When that is done, run the mini program and tap Initialize webgl canvas, then AR try-on. If everything works, it runs as follows:
The complete project can be previewed in the developer tools.
That completes the AR shoe try-on demo. We hope this tutorial helps you get started with the microapp-ar-three library.