new Image() in Next.js?

Can you use new Image() inside Next.js? I am getting an error saying it is not defined.
I know I can simply use <Image /> inside JSX, but in this case I am not sure it will work, as I must reference the image outside JSX, as shown below.
If there is no way to make new Image() work with Next.js, please show a different way to make this work. The new Image() is for making a GSAP scroll animation work.
Here is my code:
import Link from 'next/link'
import Head from 'next/head'
import Image from 'next/image'
import { useEffect, useRef, useState } from 'react'
import styles from '../styles/Home.module.css'
import { gsap } from 'gsap'
import { ScrollTrigger } from "gsap/dist/ScrollTrigger";

const Home = () => {
  const viewer = useRef(null)
  const image = new Image();

  if (typeof window !== "undefined") {
    gsap.registerPlugin(ScrollTrigger);
  }

  useEffect(() => {
    const rows = 5
    const columns = 10
    const frame_count = rows * columns - 1
    // const imageWidth = 6336
    // const imageHeight = 1782
    const imageWidth = 4049
    const imageHeight = 3000
    const horizDiff = imageWidth / columns
    const vertDiff = imageHeight / rows

    const ctx = viewer.current.getContext("2d");
    viewer.current.width = horizDiff;
    viewer.current.height = vertDiff;

    const image = new Image()
    image.src = "./spriteDesk.jpg";
    // image.src = "./spriteMobile.jpg";
    image.onload = () => onUpdate();

    const setPos = gsap.quickSetter(viewer.current, "background-position");
    const obj = { num: 0 };

    gsap.to(obj, {
      num: frame_count,
      ease: "steps(" + frame_count + ")",
      scrollTrigger: {
        trigger: ".section-one",
        start: "top top",
        end: "+=" + imageHeight,
        pin: true,
        anticipatePin: 1,
        scrub: 1
      },
      onUpdate
    });

    function onUpdate() {
      ctx.clearRect(0, 0, horizDiff, vertDiff);
      const x = Math.round((obj.num % columns) * horizDiff);
      const y = Math.round(Math.floor(obj.num / columns) * vertDiff);
      ctx.drawImage(image, x, y, horizDiff, vertDiff, 0, 0, horizDiff, vertDiff);
    }
  }, [])

  return (
    <>
      <Head>
        <title>TalkingSkunk | Home</title>
        <meta name='keywords' content='talkingskunk' />
        <link rel="icon" href="/favicon.ico" />
        {/* <script src="https://cdnjs.cloudflare.com/ajax/libs/gsap/3.6.1/gsap.min.js"></script>
        <script src="https://cdnjs.cloudflare.com/ajax/libs/gsap/3.6.1/ScrollTrigger.min.js"></script> */}
      </Head>
      {/* <div className={styles.bgWrap}>
        <Image
          src="/spriteDesk.png"
          className='cityscape'
          data-speed="0.2"
          layout="fill"
          objectFit="cover"
          quality={100}
        /> */}
      {/* <Image src='/spriteDesk.jpg' alt="spriteDesk" width={4049} height={3000} /> */}
      {/* <p className={styles.bgText}>
        Discover
      </p> */}
      <section className="styles.scene styles.section section-one">
        <canvas ref={viewer} className="styles.viewer"></canvas>
      </section>
    </>
  )
}

export default Home;

If anyone still needs this, it can be accomplished with the following steps (a sketch of the result follows this list):
Replace import Image from 'next/image' with import NextImage from 'next/image' (or anything similar),
Replace all <Image ... tags with <NextImage ... (or, if you don't have any, just remove the import),
Use the Image() constructor to create a new HTMLImageElement instance, as described on the MDN page.
In case you use Next.js version 11 or later, use one of the solutions described on this question to disable the no-img-element lint rule in order to prevent the error.
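A minimal sketch of those steps applied to the component from the question (only the renamed import and the constructor call inside useEffect matter; the rest is trimmed down):

import NextImage from 'next/image' // renamed so it no longer shadows the browser's Image constructor
import { useEffect, useRef } from 'react'

const Home = () => {
  const viewer = useRef(null)

  useEffect(() => {
    // window.Image exists only in the browser, and useEffect never runs
    // during server-side rendering, so the constructor is safe here
    const image = new Image()
    image.src = './spriteDesk.jpg'
    image.onload = () => {
      const ctx = viewer.current.getContext('2d')
      ctx.drawImage(image, 0, 0)
    }
  }, [])

  // use <NextImage /> anywhere you previously rendered <Image /> in JSX
  return <canvas ref={viewer} />
}

export default Home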

Related

How do you correctly include three.js and GLTFLoader.js?

I'm rewriting my question because Stack Overflow thought my post was spam (because I included 6000+ lines of code). I'm trying to make a web app that tracks the user's face and puts a 3D object over the face like a "filter". The thing is, I don't want this app to have any external dependencies except one (at least for scripts/packages/modules/whatever). Therefore, I copied the minified three.js (from https://cdn.jsdelivr.net/npm/three@0.120.1/build/three.min.js) inline between <script> tags in the HTML file, as well as the GLTFLoader.js script from the GitHub repository under examples/jsm/loaders/.
I started with Jeeliz's face filter repository, and I'm trying to implement GLTFLoader.js, but when I use
const loader = new GLTFLoader();
it gives me a "GLTFLoader is not defined" error, as well as console messages that an ES6 module was not imported.
When I use
const loader = new THREE.GLTFLoader();
it says it's not a constructor, so I lean towards the former being the correct way to construct the loader.
I appreciate any help in advance! I mostly code in Python or C++ and I'm still a beginner, but I've tinkered with JavaScript a few times, so I thought I could handle this. I posted this question once, but Stack Overflow crashed after saying the post was spam, so I'm including a link to the HTML file:
https://www.dropbox.com/s/p86xchlldr7j2vf/index-mine.html?dl=0
Code:
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<meta http-equiv="content-language" content="en-EN" />
<title>Filter Web App</title>
<!-- INCLUDE FACEFILTER SCRIPT -->
<script src='https://raw.githubusercontent.com/jeeliz/jeelizFaceFilter/master/dist/jeelizFaceFilter.js'>
</script>
<!-- INCLUDE THREE.JS -->
<script src='https://cdn.jsdelivr.net/npm/three@0.120.1/build/three.min.js'>
</script>
<!-- INCLUDE RESIZER -->
<script src='https://raw.githubusercontent.com/jeeliz/jeelizFaceFilter/master/helpers/JeelizResizer.js'>
</script>
<!-- INCLUDE GLTFLOADER.JS -->
<script src='https://raw.githubusercontent.com/mrdoob/three.js/dev/examples/jsm/loaders/GLTFLoader.js'>
</script>
<!-- INCLUDE JEELIZ THREE.JS HELPER -->
<script>
/*
Helper for Three.js
*/
const punkThreeHelper = (function(){
// internal settings:
const _settings = {
rotationOffsetX: 0.0, // negative -> look upper. in radians
pivotOffsetYZ: [0.2, 0.6],// YZ of the distance between the center of the cube and the pivot
detectionThreshold: 0.8, // sensibility, between 0 and 1. Less -> more sensitive
detectionHysteresis: 0.02,
//tweakMoveYRotateX: 0,//0.5, // tweak value: move detection window along Y axis when rotate the face around X (look up <-> down)
cameraMinVideoDimFov: 35 // Field of View for the smallest dimension of the video in degrees
};
// private vars:
let _threeRenderer = null,
_threeScene = null,
_threeVideoMesh = null,
_threeVideoTexture = null,
_threeTranslation = null;
let _maxFaces = -1,
_isMultiFaces = false,
_detectCallback = null,
_isVideoTextureReady = false,
_isSeparateThreeCanvas = false,
_faceFilterCv = null,
_videoElement = null,
_isDetected = false,
_scaleW = 1,
_canvasAspectRatio = -1;
const _threeCompositeObjects = [];
let _gl = null,
_glVideoTexture = null,
_glShpCopyCut = null,
_glShpCopyCutVideoMatUniformPointer = null;
let _videoTransformMat2 = null;
// private funcs:
function destroy(){
_isVideoTextureReady = false;
_threeCompositeObjects.splice(0);
if (_threeVideoTexture){
_threeVideoTexture.dispose();
_threeVideoTexture = null;
}
}
function create_threeCompositeObjects(){
for (let i=0; i<_maxFaces; ++i){
// COMPOSITE OBJECT WHICH WILL TRACK A DETECTED FACE
const threeCompositeObject = new THREE.Object3D();
threeCompositeObject.frustumCulled = false;
threeCompositeObject.visible = false;
_threeCompositeObjects.push(threeCompositeObject);
_threeScene.add(threeCompositeObject);
}
}
function create_videoScreen(){
const videoScreenVertexShaderSource = "attribute vec2 position;\n\
uniform mat2 videoTransformMat2;\n\
varying vec2 vUV;\n\
void main(void){\n\
gl_Position = vec4(position, 0., 1.);\n\
vUV = 0.5 + videoTransformMat2 * position;\n\
}";
const videoScreenFragmentShaderSource = "precision lowp float;\n\
uniform sampler2D samplerVideo;\n\
varying vec2 vUV;\n\
void main(void){\n\
gl_FragColor = texture2D(samplerVideo, vUV);\n\
}";
if (_isSeparateThreeCanvas){
const compile_shader = function(source, type, typeString) {
const glShader = _gl.createShader(type);
_gl.shaderSource(glShader, source);
_gl.compileShader(glShader);
if (!_gl.getShaderParameter(glShader, _gl.COMPILE_STATUS)) {
alert("ERROR IN " + typeString + " SHADER: " + _gl.getShaderInfoLog(glShader));
return null;
}
return glShader;
};
const glShaderVertex = compile_shader(videoScreenVertexShaderSource, _gl.VERTEX_SHADER, 'VERTEX');
const glShaderFragment = compile_shader(videoScreenFragmentShaderSource, _gl.FRAGMENT_SHADER, 'FRAGMENT');
_glShpCopyCut = _gl.createProgram();
_gl.attachShader(_glShpCopyCut, glShaderVertex);
_gl.attachShader(_glShpCopyCut, glShaderFragment);
_gl.linkProgram(_glShpCopyCut);
const samplerVideo = _gl.getUniformLocation(_glShpCopyCut, 'samplerVideo');
_glShpCopyCutVideoMatUniformPointer = _gl.getUniformLocation(_glShpCopyCut, 'videoTransformMat2');
return;
}
// init video texture with red:
_threeVideoTexture = new THREE.DataTexture( new Uint8Array([255,0,0]), 1, 1, THREE.RGBFormat);
_threeVideoTexture.needsUpdate = true;
// CREATE THE VIDEO BACKGROUND:
const videoMaterial = new THREE.RawShaderMaterial({
depthWrite: false,
depthTest: false,
vertexShader: videoScreenVertexShaderSource,
fragmentShader: videoScreenFragmentShaderSource,
uniforms:{
samplerVideo: {value: _threeVideoTexture},
videoTransformMat2: {
value: _videoTransformMat2
}
}
});
const videoGeometry = new THREE.BufferGeometry()
const videoScreenCorners = new Float32Array([-1,-1, 1,-1, 1,1, -1,1]);
// handle both new and old THREE.js versions:
const setVideoGeomAttribute = (videoGeometry.setAttribute || videoGeometry.addAttribute).bind(videoGeometry);
setVideoGeomAttribute( 'position', new THREE.BufferAttribute( videoScreenCorners, 2 ) );
videoGeometry.setIndex(new THREE.BufferAttribute(new Uint16Array([0,1,2, 0,2,3]), 1));
_threeVideoMesh = new THREE.Mesh(videoGeometry, videoMaterial);
that.apply_videoTexture(_threeVideoMesh);
_threeVideoMesh.renderOrder = -1000; // render first
_threeVideoMesh.frustumCulled = false;
_threeScene.add(_threeVideoMesh);
} //end create_videoScreen()
function detect(detectState){
_threeCompositeObjects.forEach(function(threeCompositeObject, i){
_isDetected = threeCompositeObject.visible;
const ds = detectState[i];
if (_isDetected && ds.detected < _settings.detectionThreshold-_settings.detectionHysteresis){
// DETECTION LOST
if (_detectCallback) _detectCallback(i, false);
threeCompositeObject.visible = false;
} else if (!_isDetected && ds.detected > _settings.detectionThreshold+_settings.detectionHysteresis){
// FACE DETECTED
if (_detectCallback) _detectCallback(i, true);
threeCompositeObject.visible = true;
}
}); //end loop on all detection slots
}
function update_poses(ds, threeCamera){
// tan( <horizontal FoV> / 2 ):
const halfTanFOVX = Math.tan(threeCamera.aspect * threeCamera.fov * Math.PI/360); //tan(<horizontal FoV>/2), in radians (threeCamera.fov is vertical FoV)
_threeCompositeObjects.forEach(function(threeCompositeObject, i){
if (!threeCompositeObject.visible) return;
const detectState = ds[i];
// tweak Y position depending on rx:
//const tweak = _settings.tweakMoveYRotateX * Math.tan(detectState.rx);
const cz = Math.cos(detectState.rz), sz = Math.sin(detectState.rz);
// relative width of the detection window (1-> whole width of the detection window):
const W = detectState.s * _scaleW;
// distance between the front face of the cube and the camera:
const DFront = 1 / ( 2 * W * halfTanFOVX );
// D is the distance between the center of the unit cube and the camera:
const D = DFront + 0.5;
// coords in 2D of the center of the detection window in the viewport:
const xv = detectState.x * _scaleW;
const yv = detectState.y * _scaleW;
// coords in 3D of the center of the cube (in the view coordinates system):
const z = -D; // minus because view coordinate system Z goes backward
const x = xv * D * halfTanFOVX;
const y = yv * D * halfTanFOVX / _canvasAspectRatio;
// set position before pivot:
threeCompositeObject.position.set(-sz*_settings.pivotOffsetYZ[0], -cz*_settings.pivotOffsetYZ[0], -_settings.pivotOffsetYZ[1]);
// set rotation and apply it to position:
threeCompositeObject.rotation.set(detectState.rx+_settings.rotationOffsetX, detectState.ry, detectState.rz, "ZYX");
threeCompositeObject.position.applyEuler(threeCompositeObject.rotation);
// add translation part:
_threeTranslation.set(x, y+_settings.pivotOffsetYZ[0], z+_settings.pivotOffsetYZ[1]);
threeCompositeObject.position.add(_threeTranslation);
}); //end loop on composite objects
}
//public methods:
const that = {
// launched with the same spec object than callbackReady. set spec.threeCanvasId to the ID of the threeCanvas to be in 2 canvas mode:
init: function(spec, detectCallback){
destroy();
_maxFaces = spec.maxFacesDetected;
_glVideoTexture = spec.videoTexture;
_videoTransformMat2 = spec.videoTransformMat2;
_gl = spec.GL;
_faceFilterCv = spec.canvasElement;
_isMultiFaces = (_maxFaces>1);
_videoElement = spec.videoElement;
// enable 2 canvas mode if necessary:
let threeCanvas = null;
if (spec.threeCanvasId){
_isSeparateThreeCanvas = true;
// adjust the threejs canvas size to the threejs canvas:
threeCanvas = document.getElementById(spec.threeCanvasId);
threeCanvas.setAttribute('width', _faceFilterCv.width);
threeCanvas.setAttribute('height', _faceFilterCv.height);
} else {
threeCanvas = _faceFilterCv;
}
if (typeof(detectCallback) !== 'undefined'){
_detectCallback = detectCallback;
}
// init THREE.JS context:
_threeRenderer = new THREE.WebGLRenderer({
context: (_isSeparateThreeCanvas) ? null : _gl,
canvas: threeCanvas,
alpha: (_isSeparateThreeCanvas || spec.alpha) ? true : false,
preserveDrawingBuffer: true // to make image capture possible
});
_threeScene = new THREE.Scene();
_threeTranslation = new THREE.Vector3();
create_threeCompositeObjects();
create_videoScreen();
// handle device orientation change:
window.addEventListener('orientationchange', function(){
setTimeout(punkfacefilter.resize, 1000);
}, false);
const returnedDict = {
videoMesh: _threeVideoMesh,
renderer: _threeRenderer,
scene: _threeScene
};
if (_isMultiFaces){
returnedDict.faceObjects = _threeCompositeObjects
} else {
returnedDict.faceObject = _threeCompositeObjects[0];
}
return returnedDict;
}, //end that.init()
detect: function(detectState){
const ds = (_isMultiFaces) ? detectState : [detectState];
// update detection states:
detect(ds);
},
get_isDetected: function() {
return _isDetected;
},
render: function(detectState, threeCamera){
const ds = (_isMultiFaces) ? detectState : [detectState];
// update detection states then poses:
detect(ds);
update_poses(ds, threeCamera);
if (_isSeparateThreeCanvas){
// render the video texture on the faceFilter canvas:
_gl.viewport(0, 0, _faceFilterCv.width, _faceFilterCv.height);
_gl.useProgram(_glShpCopyCut);
_gl.uniformMatrix2fv(_glShpCopyCutVideoMatUniformPointer, false, _videoTransformMat2);
_gl.activeTexture(_gl.TEXTURE0);
_gl.bindTexture(_gl.TEXTURE_2D, _glVideoTexture);
_gl.drawElements(_gl.TRIANGLES, 3, _gl.UNSIGNED_SHORT, 0);
} else {
// reinitialize the state of THREE.JS because JEEFACEFILTER have changed stuffs:
// -> can be VERY costly !
_threeRenderer.state.reset();
}
// trigger the render of the THREE.JS SCENE:
_threeRenderer.render(_threeScene, threeCamera);
},
sortFaces: function(bufferGeometry, axis, isInv){ // sort faces long an axis
// Useful when a bufferGeometry has alpha: we should render the last faces first
const axisOffset = {X:0, Y:1, Z:2}[axis.toUpperCase()];
const sortWay = (isInv) ? -1 : 1;
// fill the faces array:
const nFaces = bufferGeometry.index.count/3;
const faces = new Array(nFaces);
for (let i=0; i<nFaces; ++i){
faces[i] = [bufferGeometry.index.array[3*i], bufferGeometry.index.array[3*i+1], bufferGeometry.index.array[3*i+2]];
}
// compute centroids:
const aPos = bufferGeometry.attributes.position.array;
const centroids = faces.map(function(face, faceIndex){
return [
(aPos[3*face[0]]+aPos[3*face[1]]+aPos[3*face[2]])/3, // X
(aPos[3*face[0]+1]+aPos[3*face[1]+1]+aPos[3*face[2]+1])/3, // Y
(aPos[3*face[0]+2]+aPos[3*face[1]+2]+aPos[3*face[2]+2])/3, // Z
face
];
});
// sort centroids:
centroids.sort(function(ca, cb){
return (ca[axisOffset]-cb[axisOffset]) * sortWay;
});
// reorder bufferGeometry faces:
centroids.forEach(function(centroid, centroidIndex){
const face = centroid[3];
bufferGeometry.index.array[3*centroidIndex] = face[0];
bufferGeometry.index.array[3*centroidIndex+1] = face[1];
bufferGeometry.index.array[3*centroidIndex+2] = face[2];
});
}, //end sortFaces
get_threeVideoTexture: function(){
return _threeVideoTexture;
},
apply_videoTexture: function(threeMesh){
if (_isVideoTextureReady){
return;
}
threeMesh.onAfterRender = function(){
// Replace _threeVideoTexture.__webglTexture by the real video texture:
try {
_threeRenderer.properties.update(_threeVideoTexture, '__webglTexture', _glVideoTexture);
_threeVideoTexture.magFilter = THREE.LinearFilter;
_threeVideoTexture.minFilter = THREE.LinearFilter;
_isVideoTextureReady = true;
} catch(e){
console.log('WARNING in punkThreeHelper: the glVideoTexture is not fully initialized');
}
delete(threeMesh.onAfterRender);
};
},
// create an occluder, IE a transparent object which writes on the depth buffer:
create_threejsOccluder: function(occluderURL, callback){
const occluderMesh = new THREE.Mesh();
new THREE.BufferGeometryLoader().load(occluderURL, function(occluderGeometry){
const mat = new THREE.ShaderMaterial({
vertexShader: THREE.ShaderLib.basic.vertexShader,
fragmentShader: "precision lowp float;\n void main(void){\n gl_FragColor=vec4(1.,0.,0.,1.);\n }",
uniforms: THREE.ShaderLib.basic.uniforms,
colorWrite: false
});
occluderMesh.renderOrder = -1; //render first
occluderMesh.material = mat;
occluderMesh.geometry = occluderGeometry;
if (typeof(callback)!=='undefined' && callback) callback(occluderMesh);
});
return occluderMesh;
},
set_pivotOffsetYZ: function(pivotOffset) {
_settings.pivotOffsetYZ = pivotOffset;
},
create_camera: function(zNear, zFar){
const threeCamera = new THREE.PerspectiveCamera(1, 1, (zNear) ? zNear : 0.1, (zFar) ? zFar : 100);
that.update_camera(threeCamera);
return threeCamera;
},
update_camera: function(threeCamera){
// compute aspectRatio:
const canvasElement = _threeRenderer.domElement;
const cvw = canvasElement.width;
const cvh = canvasElement.height;
_canvasAspectRatio = cvw / cvh;
// compute vertical field of view:
const vw = _videoElement.videoWidth;
const vh = _videoElement.videoHeight;
const videoAspectRatio = vw / vh;
const fovFactor = (vh > vw) ? (1.0 / videoAspectRatio) : 1.0;
const fov = _settings.cameraMinVideoDimFov * fovFactor;
console.log('INFO in punkThreeHelper - update_camera(): Estimated vertical video FoV is', fov);
// compute X and Y offsets in pixels:
let scale = 1.0;
if (_canvasAspectRatio > videoAspectRatio) {
// the canvas is more in landscape format than the video, so we crop top and bottom margins:
scale = cvw / vw;
} else {
// the canvas is more in portrait format than the video, so we crop right and left margins:
scale = cvh / vh;
}
const cvws = vw * scale, cvhs = vh * scale;
const offsetX = (cvws - cvw) / 2.0;
const offsetY = (cvhs - cvh) / 2.0;
_scaleW = cvw / cvws;
// apply parameters:
threeCamera.aspect = _canvasAspectRatio;
threeCamera.fov = fov;
console.log('INFO in punkThreeHelper.update_camera(): camera vertical estimated FoV is', fov, 'deg');
threeCamera.setViewOffset(cvws, cvhs, offsetX, offsetY, cvw, cvh);
threeCamera.updateProjectionMatrix();
// update drawing area:
_threeRenderer.setSize(cvw, cvh, false);
_threeRenderer.setViewport(0, 0, cvw, cvh);
}, //end update_camera()
resize: function(w, h, threeCamera){
_threeRenderer.domElement.width = w;
_threeRenderer.domElement.height = h;
punkfacefilter.resize();
if (threeCamera){
that.update_camera(threeCamera);
}
}
}
return that;
})();
// Export ES6 module:
try {
module.exports = punkThreeHelper;
} catch(e){
console.log('punkThreeHelper ES6 Module not exported');
window.punkThreeHelper = punkThreeHelper;
}
</script>
<!-- INCLUDE DEMO SCRIPT -->
<script>
let THREECAMERA = null;
// callback: launched if a face is detected or lost.
function detect_callback(faceIndex, isDetected) {
if (isDetected) {
console.log('INFO in detect_callback(): DETECTED');
} else {
console.log('INFO in detect_callback(): LOST');
}
}
// build the 3D. called once when punk Face Filter is OK
function init_threeScene(spec) {
const threeStuffs = punkThreeHelper.init(spec, detect_callback);
// CREATE A CUBE
const loader = new GLTFLoader();
loader.load( '/your-glb-file2.glb', function ( gltf ) {
threeStuffs.faceObject.add( gltf.scene );
} );
//CREATE THE CAMERA
THREECAMERA = punkThreeHelper.create_camera();
}
// entry point:
function main(){
punkResizer.size_canvas({
canvasId: 'jeeFaceFilterCanvas',
callback: function(isError, bestVideoSettings){
init_faceFilter(bestVideoSettings);
}
})
}
function init_faceFilter(videoSettings){
punkfacefilter.init({
followZRot: true,
canvasId: 'jeeFaceFilterCanvas',
NNCPath: '/', // root of NN_DEFAULT.json file
maxFacesDetected: 1,
callbackReady: function(errCode, spec){
if (errCode){
console.log('AN ERROR HAPPENS. ERR =', errCode);
return;
}
console.log('INFO: punkfacefilter IS READY');
init_threeScene(spec);
},
// called at each render iteration (drawing loop):
callbackTrack: function(detectState){
punkThreeHelper.render(detectState, THREECAMERA);
}
}); //end punkfacefilter.init call
}
window.addEventListener('load', main);
</script>
<style>
a {color: #eee; text-decoration: none}
a:hover {color: blue;}
body {overflow: auto; overflow-y: auto;
background-color: white;
background-attachment: fixed;
background-position: center;
background-size: contain;
margin: 0px;}
#jeeFaceFilterCanvas {
z-index: 10;
position: absolute;
max-height: 100%;
max-width: 100%;
left: 50%;
top: 50%;
width: 100vmin;
transform: translate(-50%, -50%) rotateY(180deg);
}
#threeCanvas{
z-index: 11;
position: absolute;
max-height: 100%;
max-width: 100%;
left: 50%;
top: 50%;
width: 100vmin;
transform: translate(-50%, -50%) rotateY(180deg);
}
@media (max-width: 787px) {
#jeeFaceFilterCanvas {
right: 0px;
top: 60px;
transform: rotateY(180deg);
}
}
</style>
</head>
<body>
<canvas width="600" height="600" id='jeeFaceFilterCanvas'></canvas>
</body>
</html>
/your-glb-file2.glb is a correct 3D file in the same directory as this file. I made this code from the HTML file I linked, but loaded the scripts from URLs via src attributes, unlike the real HTML file I linked.
The
// CREATE A CUBE
const loader = new GLTFLoader();
loader.load( '/your-glb-file2.glb', function ( gltf ) {
threeStuffs.faceObject.add( gltf.scene );
} );
near the end is the problem. The app correctly puts a cube over my face like I want it to when it’s
// CREATE A CUBE
const cubeGeometry = new THREE.BoxGeometry(1,1,1);
const cubeMaterial = new THREE.MeshNormalMaterial();
const threeCube = new THREE.Mesh(cubeGeometry, cubeMaterial);
threeCube.frustumCulled = false;
threeStuffs.faceObject.add(threeCube);
I tried constructing the loader both as a "sub"-function of three.js (THREE.GLTFLoader) and on its own, but it isn't working either way, even though the app worked when the cube was created and added with three.js's native functions.
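For what it's worth, GLTFLoader.js under examples/jsm/ is an ES module, so a classic script tag cannot define it, and raw.githubusercontent.com serves scripts as text/plain, which browsers refuse to execute. A hedged sketch of one way it might be pulled in instead, assuming the jsdelivr path below resolves for three 0.120.1:

<!-- sketch: load GLTFLoader as an ES module and expose it to the classic scripts -->
<script type="module">
  import { GLTFLoader } from 'https://cdn.jsdelivr.net/npm/three@0.120.1/examples/jsm/loaders/GLTFLoader.js';
  // module scripts finish before the window 'load' event, so main() will see this
  window.GLTFLoader = GLTFLoader;
</script>

This does pull in the module build of three alongside three.min.js, which is wasteful but leaves the rest of the page unchanged.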

Three JS GLTF Loader Not Working with Nuxt 3

I'm facing an error when implementing the three.js GLTFLoader with Nuxt 3.
Error message:
"Uncaught (in promise) TypeError: Class constructor Loader cannot be invoked without 'new' .."
Versions:
"three": "^0.148.0",
"three-gltf-loader": "^1.111.0"
<template>
  <div ref="container"></div>
</template>
<script>
import { ref, onMounted } from "vue";
import * as THREE from "three";
import GLTFLoader from "three-gltf-loader";

export default {
  setup() {
    const container = ref(null);
    const scene = ref(new THREE.Scene());
    const renderer = ref(new THREE.WebGLRenderer({ antialias: true }));
    const width = 700;
    const height = 700;
    const camera = ref(
      new THREE.PerspectiveCamera(75, width / height, 0.1, 1000)
    );
    const loader = ref(new GLTFLoader());

    onMounted(async () => {
      renderer.value.setSize(
        container.value.clientWidth,
        container.value.clientHeight
      );
      container.value.appendChild(renderer.value.domElement);
      camera.value.position.z = 5;

      const response = await fetch("logo.gltf");
      const gltf = await response.json();
      loader.value.parse(
        gltf,
        "",
        (gltf) => {
          scene.value.add(gltf.scene);
          renderer.value.render(scene.value, camera.value);
        },
        undefined,
        (error) => {
          console.error(error);
        }
      );
    });

    return { container };
  },
};
</script>
"three": "^0.148.0", "three-gltf-loader": "^1.111.0"
This kind of setup isn't recommended since you can import the latest GLTFLoader module from the three repository. Try it again with these imports:
import * as THREE from "three";
import { GLTFLoader } from "three/addons/loaders/GLTFLoader.js";
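A short usage sketch with those imports (the file name comes from the question; loader.load fetches and parses in one step, so the manual fetch + parse pair is unnecessary):

const loader = new GLTFLoader();
loader.load("/logo.gltf", (gltf) => {
  scene.value.add(gltf.scene);
  renderer.value.render(scene.value, camera.value);
});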
I found a solution on Alvaro Dev Labs' YT channel; he used TroisJS instead of plain three.js (in fact, it's Vite + Three).
For Nuxt 3 you just need to add ".client.vue" to the file name of your component to run it on the client side (Model.client.vue).
<script lang="ts">
import { defineComponent, ref, onMounted } from "vue";
import {
Renderer,
Scene,
Camera,
PointLight,
AmbientLight,
GltfModel,
} from "troisjs";
export default defineComponent({
components: {
Renderer,
Scene,
Camera,
PointLight,
AmbientLight,
GltfModel,
},
setup() {
const renderer = ref(null);
const model = ref(null);
function onReady(model) {
console.log("Ready", model);
}
onMounted(() => {
renderer?.value?.onBeforeRender(() => {
model.value.rotation.x += 0.01;
});
});
return {
renderer,
model,
onReady,
};
},
});
</script>
<template>
<div>
<Renderer ref="renderer" antialias orbit-ctrl resize="window">
<Camera :position="{ x: -10, z: 20 }" />
<Scene background="#fff">
<AmbientLight />
<PointLight
color="white"
:position="{ x: 100, y: 1000, z: 40 }"
:intensity="1"
/>
<GltfModel ref="model" src="/Models/logo.gltf" #load="onReady" />
</Scene>
</Renderer>
</div>
</template>
Import it like this:
import { GLTFLoader } from 'three/examples/jsm/loaders/GLTFLoader.js'
Then use it like this:
const loader = new GLTFLoader()

How can I add more vertices to a three.js TextGeometry?

I use these dependencies:
"@react-three/drei": "7.5.1",
"@react-three/fiber": "6.2.3",
"@types/three": "0.128.0",
/* eslint-disable new-cap */
/* eslint-disable no-nested-ternary */
import React, { useEffect, useLayoutEffect, useMemo, useRef } from 'react';
import * as THREE from 'three';
import { useThree } from '@react-three/fiber';
import BMJUA from './BMJUA.json';
import GeometryUtils from './GeometryUtils';

const Particle = ({
  children,
  vAlign = 'center',
  hAlign = 'center',
  size = 40,
  color = '#0000ef',
  ref,
}: any) => {
  const font = new THREE.FontLoader().parse(BMJUA);
  const config = useMemo(
    () => ({ font, size: 40, height: 3, bevelSegments: 5, step: 10 }),
    [font]
  );
  const textGeoRef = useRef<THREE.TextGeometry>();
  return (
    <group ref={ref}>
      <points>
        <textGeometry
          ref={textGeoRef}
          attach="geometry"
          args={[children, config]}
        />
        <pointsMaterial size={1} sizeAttenuation vertexColors color="lime" />
      </points>
    </group>
  );
};
export default Particle;
I tried rendering the text with a points mesh, but I got missing points.
(screenshot: my program screen)
Why are there no intermediate points inside the glyphs?
I want to add a function that adjusts the positions of the text points.
Please help me.
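A points material only draws the vertices the geometry actually contains, and TextGeometry creates vertices only where the glyph outlines and bevels are triangulated, so nothing lands in the middle of each face. A hedged sketch of one way to scatter extra points over the text surface, assuming plain three.js r128, where MeshSurfaceSampler ships under examples/jsm/math/:

import * as THREE from 'three';
import { MeshSurfaceSampler } from 'three/examples/jsm/math/MeshSurfaceSampler.js';

// sample `count` random points spread over a text mesh's surface
function sampleTextPoints(textMesh, count = 5000) {
  const sampler = new MeshSurfaceSampler(textMesh).build();
  const positions = new Float32Array(count * 3);
  const p = new THREE.Vector3();
  for (let i = 0; i < count; i += 1) {
    sampler.sample(p); // writes a random surface point into p
    positions.set([p.x, p.y, p.z], i * 3);
  }
  const geometry = new THREE.BufferGeometry();
  geometry.setAttribute('position', new THREE.BufferAttribute(positions, 3));
  return new THREE.Points(
    geometry,
    new THREE.PointsMaterial({ size: 1, color: 'lime' })
  );
}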

Background image not showing on canvas in a strange way

I'm coding a greeting card generator to practice VueJS 3. Everything is working correctly apart from one thing; look at my code:
<template>
  <div>
    <h1>greeting card generator</h1>
    <div class="board">
      <canvas id='myCanvas' :width="size.w" :height="size.h" tabindex='0'
        style="border:1px solid #000000;"
      ></canvas>
    </div>
    <textarea
      :style="'width:' + size.w + 'px; resize:none;'"
      v-model="texte"
      placeholder="Write your text here">
    </textarea>
  </div>
</template>
<script>
import {
  defineComponent, onMounted, ref, reactive, watch,
} from 'vue';

export default defineComponent({
  setup() {
    const myCanvas = ref(null);
    const texte = ref('');
    const rapport = ref(0);
    const size = reactive({
      w: window.innerWidth * 0.8,
      h: (window.innerWidth * 0.8) / 1.8083832335329342,
    });

    function drawText() {
      const fontSize = 0.05 * window.innerWidth - 10;
      myCanvas.value.font = `${fontSize}px Adrip`;
      myCanvas.value.textAlign = 'center';
      const x = size.w / 2;
      const lineHeight = fontSize;
      const lines = texte.value.split('\n');
      for (let i = 0; i < lines.length; i += 1) {
        myCanvas.value.fillText(
          lines[lines.length - i - 1],
          x,
          (size.h * 0.98) - (i * lineHeight),
        );
      }
    }

    function initCarte() {
      const background = new Image();
      background.src = '/img/fond.jpeg';
      background.onload = function () {
        rapport.value = background.naturalWidth / background.naturalHeight;
        size.h = size.w / rapport.value;
        try {
          myCanvas.value.drawImage(background, 0, 0, size.w, size.h);
        } catch (e) {
          console.log(`ERREUR DE CHARGEMENT D'IMAGE: ${e}`);
        }
        drawText();
      };
    }

    function handleResize() {
      size.w = window.innerWidth * 0.8;
      size.h = size.w / rapport.value;
      initCarte();
    }
    window.addEventListener('resize', handleResize);

    onMounted(() => {
      const c = document.getElementById('myCanvas');
      const ctx = c.getContext('2d');
      myCanvas.value = ctx;
      initCarte();
    });

    watch(texte, () => {
      initCarte();
    });

    return {
      myCanvas,
      size,
      texte,
    };
  },
});
</script>
<!-- Add "scoped" attribute to limit CSS to this component only -->
<style scoped>
@font-face {
  font-family: 'Adrip';
  src: local('Adrip'), url('/fonts/adrip1.ttf') format('truetype');
}
#myCanvas {
  border: 1px solid grey;
}
</style>
Look at this line:
h: (window.innerWidth * 0.8) / 1.8083832335329342,
If I don't hardcode this and only put the canonical value window.innerWidth * 0.8, the image doesn't display, although the size.h = size.w / rapport.value; line executes correctly.
I really don't understand this behaviour, could somebody explain it to me?
Also, if anybody has a clue on how it would be possible to load the image once and for all so that I don't have to load it at every refresh, it would be better :)
Thanks in advance!
Your problem is that you change the size of the canvas after drawing the image, due to how templating magic works. If you put a debugger statement after drawText(); in the background.onload function, you will see that it actually draws the image. However, in this same function you set size.h. size is reactive and is thus marked as "dirty"; size is also used in the template, so the template is marked dirty too. After the onload function has executed, Vue will rerender your template... and erase your image.
I think your best bet here is to use nextTick. You need to use it sparingly, but I think this is one of the instances where you have no choice but to wait for the DOM to settle. To do this, import nextTick from vue:
import { nextTick } from 'vue';
Then surround your drawImage try-catch block with that.
background.onload = function () {
  rapport.value = background.naturalWidth / background.naturalHeight;
  size.h = size.w / rapport.value;
  nextTick(() => {
    try {
      myCanvas.value.drawImage(background, 0, 0, size.w, size.h);
    } catch (e) {
      console.log(`ERREUR DE CHARGEMENT D'IMAGE: ${e}`);
    }
    drawText();
  });
};
As for your last question, how to load the image once... the short answer is: you can't. Whenever the canvas changes, you need to redraw it. At least the image should be cached by the browser, so redrawing uses the cache rather than making another HTTP request.
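That said, you can at least reuse the decoded Image object between redraws instead of constructing a new one on every initCarte() call. A small sketch (the module-level cache variable and the helper name are assumptions, not part of the original code):

// cache the background image once; later calls reuse the decoded element
let background = null;
function withBackground(onReady) {
  if (background && background.complete) {
    onReady(background); // already decoded: redraw immediately
    return;
  }
  background = new Image();
  background.onload = () => onReady(background);
  background.src = '/img/fond.jpeg';
}

initCarte() would then call withBackground((img) => { /* drawImage + drawText */ }) instead of creating its own Image.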

Editing part sizes of a 3d model

I want to develop a web app for entering the measurements of a man and displaying a 3D model with those measurements. I have chosen three.js to start it, and I downloaded a 3D model named standard-male-figure from clara.io. Here is my code to display the human model:
import React, { Component } from "react";
import PropTypes from "prop-types";
import withStyles from "@material-ui/core/styles/withStyles";
import * as THREE from "three-full";

const styles = (/*theme*/) => ({
});

class ThreeDView extends Component {
  constructor(props) {
    super(props);
    this.start = this.start.bind(this);
    this.stop = this.stop.bind(this);
    this.renderScene = this.renderScene.bind(this);
    this.animate = this.animate.bind(this);
  }
  componentDidMount() {
    const width = this.mount.clientWidth;
    const height = this.mount.clientHeight;
    //ADD SCENE
    this.scene = new THREE.Scene();
    //ADD CAMERA
    this.camera = new THREE.PerspectiveCamera(100, 1);
    this.camera.position.z = 12;
    this.camera.position.y = 0;
    this.camera.position.x = 0;
    //ADD RENDERER
    this.renderer = new THREE.WebGLRenderer({ antialias: true });
    this.renderer.setClearColor("#f0f0f0");
    this.renderer.setSize(width, height);
    this.mount.appendChild(this.renderer.domElement);
    // MOUSE ROTATION
    this.orbit = new THREE.OrbitControls(this.camera, this.renderer.domElement);
    this.orbit.update();
    //ADD LIGHTS
    this.light = new THREE.PointLight(0xffffff, 1.3);
    this.light.position.z = 10;
    this.light.position.y = 20;
    this.scene.add(this.light);
    // ADD MAN FIGURE
    const loader = new THREE.ColladaLoader();
    loader.load("/models/standard-male-figure.dae", (manFigure) => {
      this.man = manFigure;
      this.man.name = "man-figure";
      this.man.scene.position.y = -10;
      this.scene.add(this.man.scene);
    }, undefined, () => alert("Loading failed"));
    this.start();
  }
  componentWillUnmount() {
    this.stop();
    this.mount.removeChild(this.renderer.domElement);
  }
  start() {
    if (!this.frameId) {
      this.frameId = requestAnimationFrame(this.animate);
    }
  }
  stop() {
    cancelAnimationFrame(this.frameId);
  }
  animate() {
    this.renderScene();
    this.frameId = window.requestAnimationFrame(this.animate);
  }
  renderScene() {
    this.orbit.update();
    this.light.position.z = this.camera.position.z;
    this.light.position.y = this.camera.position.y + 20;
    this.light.position.x = this.camera.position.x;
    this.renderer.render(this.scene, this.camera);
  }
  render() {
    return (
      <div style={{ height: "640px" }} ref={(mount) => { this.mount = mount; }} >
      </div>
    );
  }
}
ThreeDView.propTypes = {
  values: PropTypes.object
};
/*
all values in inches
values = {
  heightOfHand: 10,
  , etc..
}
*/
export default withStyles(styles)(ThreeDView);
values is the measurements object that the user enters. I have no idea how to start updating the 3D model with these measurements. Please give me a starting point or any advice on how to complete this. Thank you!
Firstly, you can get the current size and scale of your man:
const box = new THREE.Box3().setFromObject(this.man)
const currentObjectSize = box.getSize(new THREE.Vector3()) // newer three versions require a target vector
const currentObjectScale = this.man.scale
Then, when a user updates the size value (newSize), you can calculate a new scale for your man:
const newScale = new THREE.Vector3(
  currentObjectScale.x * newSize.x / currentObjectSize.x,
  currentObjectScale.y * newSize.y / currentObjectSize.y,
  currentObjectScale.z * newSize.z / currentObjectSize.z
)
and update your man with this scale:
this.man.scale.set(newScale.x, newScale.y, newScale.z)
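A hedged sketch tying those three snippets into the component above; updateManSize is a hypothetical method name, and it assumes the values prop arrives in the same units as the model's bounding box:

// hypothetical helper: scale the loaded figure so its bounding box matches newSize
updateManSize(newSize) {
  if (!this.man) return; // the Collada model may still be loading
  const target = this.man.scene;
  const box = new THREE.Box3().setFromObject(target);
  const currentSize = box.getSize(new THREE.Vector3());
  target.scale.set(
    target.scale.x * newSize.x / currentSize.x,
    target.scale.y * newSize.y / currentSize.y,
    target.scale.z * newSize.z / currentSize.z
  );
}

Note that per-axis scaling stretches the whole mesh; matching individual measurements (hand height, etc.) would need a rigged model with bones or morph targets.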
