import { Canvas, extend, useThree } from "@react-three/fiber";
import { OrbitControls } from "three/examples/jsm/controls/OrbitControls";

extend({ OrbitControls });
const Orbit = () => {
const { camera, gl } = useThree();
return <OrbitControls args={[camera, gl.domElement]} />;
};
function App() {
return (
<div style={{ height: "100vh", width: "100vw" }}>
<Canvas style={{ background: "black" }} camera={{ position: [3, 3, 3] }}>
<Orbit />
<axesHelper args={[5]} />
</Canvas>
</div>
);
}
This is the error I'm getting in the console:
Uncaught TypeError: Class constructor OrbitControls cannot be invoked without 'new'
OrbitControls from three/examples/jsm/controls/OrbitControls is not a React component, so it cannot be rendered directly from JSX. After extend({ OrbitControls }) it is registered as the lowercase element <orbitControls />; the capitalized <OrbitControls /> makes React invoke the class as a plain function, which is exactly the error above. @react-three/drei can help you.
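A minimal sketch using drei's ready-made wrapper (it wires the camera and DOM element up for you; the Scene component name is just for illustration):

import { OrbitControls } from "@react-three/drei";

const Scene = () => (
  <Canvas style={{ background: "black" }} camera={{ position: [3, 3, 3] }}>
    {/* drei's OrbitControls is a real React component, no extend() needed */}
    <OrbitControls />
    <axesHelper args={[5]} />
  </Canvas>
);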
or, wire the controls up manually inside a useEffect:
const { camera, gl } = useThree();

useEffect(() => {
  // Create the controls imperatively and dispose of them on unmount.
  const controls = new OrbitControls(camera, gl.domElement);
  return () => {
    controls.dispose();
  };
}, [camera, gl]);
I'm trying to get the current mouse position on the canvas when HTML contents are clicked. With useFrame I can successfully print the mouse position on any part of the canvas, except when the mouse is over the Html content (it just shows the last mouse position from before it entered the Html part and stops updating there).
Is there any way I can get the mouse position while it hovers over the Html content?
Here is my code:
app.js
import React, { useRef, useState } from 'react';
import { Canvas, useFrame, useThree } from '@react-three/fiber';
import { Stars, Html } from '@react-three/drei';
function Sphere(props){
const mesh = useRef(null);
const { viewport, mouse } = useThree()
useFrame(({ mouse }) => {
const x_viewport = (mouse.x * viewport.height) / 2
console.log(x_viewport)
});
return(
<mesh ref={mesh}>
  <sphereGeometry args={[1, 16, 16]} />
<meshStandardMaterial />
</mesh>
);
};
function Blocks (props) {
const {handlePost } = props;
let rows = [];
for (let i = 0; i < 30; i++) {
rows.push(<div onClick={handlePost} key={i} ></div>);
}
return (
<>
<div> { rows } </div>
</>
)
}
export default function Meshes() {
const [ hasRender, setRender ] = useState(false);
const handlePost = (e) => {
setRender(true);
}
return (
<>
<Canvas
orthographic
dpr={[1, 1.5]}
mode="concurrent"
camera={{ zoom: 10, position: [0, 0, 500] }}
style={{ width: c_width, color: "black" }}
>
<color attach="background" args={["black"]} />
<Html >
<Blocks handlePost={handlePost} />
</Html>
<ambientLight intensity={0.1} />
<directionalLight position={[0, 1, 1]} />
<Sphere days={days}/>
</Canvas>
{ hasRender && ( <PostItems /> )}
</>
);
}
function App() {
return (
<>
<Meshes />
</>
)
}
I would like to animate my ringBufferGeometry like in the example below, but using GSAP, for a certain amount of time, and then stop the animation once thetaLength equals some value. Something like the example below, but without the use of Math.sin().
three js RingBufferGeometry, update thetaLength
This is what I want to achieve (link below), but I want the animation to stop when it reaches the full triangle form.
https://jsfiddle.net/02oyunbj/1/
My code:
import React from 'react'
import ReactDOM from 'react-dom'
import App from './App'
function Container(props) {
return <div style={{ position: 'absolute', inset: 0 }} {...props} />
}
ReactDOM.render(
<Container>
<App />
</Container>,
document.getElementById('root')
)
import { useRef, useEffect } from 'react'
import { Canvas } from '@react-three/fiber'
import { OrbitControls } from '@react-three/drei'
import * as THREE from 'three'
import gsap from 'gsap'
export default function App() {
return (
<Canvas camera={{ position: [0, 0, 15] }}>
<color attach="background" args={['black']} />
<OrbitControls />
<spotLight position={[15, 15, 15]} angle={0.3} color={'white'} />
<TriangleRing />
</Canvas>
)
}
const TriangleRing = () => {
const triangleRingRef = useRef()
const parametersObj = {
innerRadius: 4,
outerRadius: 8,
thetaSegments: 1,
phiSegments: 30,
thetaStart: 1.55,
thetaLength: 0,
}
const parameters = [
parametersObj.innerRadius,
parametersObj.outerRadius,
parametersObj.thetaSegments,
parametersObj.phiSegments,
parametersObj.thetaStart,
parametersObj.thetaLength,
]
useEffect(() => {
console.log(triangleRingRef.current)
const triangleGeometry = triangleRingRef.current.geometry
gsap.set(triangleGeometry, {
parameters: { ...parametersObj, thetaLength: 0 },
})
const tl = gsap.timeline({ defaults: { ease: 'power3.inOut' } })
tl.to(triangleGeometry, {
parameters: { ...parametersObj, thetaLength: 6.3 },
duration: 4,
})
}, []) // run the tween once on mount, not on every render
return (
<mesh ref={triangleRingRef}>
<ringBufferGeometry args={parameters} name="TriangleRing" />
<meshToonMaterial side={THREE.DoubleSide} color={0xffffff} />
</mesh>
)
}
Link to sandbox:
https://codesandbox.io/s/affectionate-lichterman-edpfr?file=/src/App.jsx:0-1517
By default GSAP works in terms of seconds, not velocity. So you'll need to calculate how long the tween(s) should run, from the difference in value and the rate at which you want it to move, so that it lands on the end value you need.
Without a minimal, complete, and verifiable example it's hard for us to help more at this point.
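As a concrete sketch of that calculation (my own illustration, not part of the original answer; the rate value is an arbitrary assumption): tween a plain proxy object and rebuild the geometry on every update, since RingBufferGeometry's parameters are not live after construction.

const target = Math.PI * 2 // stop at the full ring
const rate = 1.5 // assumed speed, radians per second
const proxy = { thetaLength: 0 }

gsap.to(proxy, {
  thetaLength: target,
  duration: (target - proxy.thetaLength) / rate, // seconds = distance / speed
  ease: 'none',
  onUpdate: () => {
    // thetaLength can't be mutated in place, so swap in a fresh geometry.
    const mesh = triangleRingRef.current
    mesh.geometry.dispose()
    mesh.geometry = new THREE.RingBufferGeometry(4, 8, 1, 30, 1.55, proxy.thetaLength)
  },
})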
When I try to load an image by using require, the image does not load, but when I load the same image from a URL, it loads fine. Here is the snippet of code that calls the image:
class Home extends React.Component {
render() {
return (
<ScrollView
noSpacer={true}
noScroll={true}
style={styles.container}
showVerticalSCrollIndicator = {false}
showHorizontalScrollIndicator = {false}
>
{this.state.loading ? (
<ActivityIndicator
style={[styles.centering, styles.gray]}
color="#5d38aa"
size="large"
/>
) : (
<div>
<Header title={this.state.user.name} />
<div id='image'>
<Image
source={require('./arrow.png')}
style={styles.image}
/>
</div>
</div>
)}
</ScrollView>
);
}
}
The image is loaded here:
<Image
source={require('./arrow.png')}
style={styles.image}
/>
Please make sure that you give the right path to your image.
You can use the source as an object:
<Image source={{ uri: 'something.jpg' }} />
But what you have should work, so check your path (see the note below).
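One general React Native gotcha worth checking here (standard packager behavior, not specific to this thread): require paths must be static string literals, because the bundler resolves them at build time.

// Works: a static literal path, resolved by the packager at build time.
<Image source={require('./arrow.png')} style={styles.image} />

// Fails: the packager cannot resolve a path built at runtime.
const name = './arrow' + '.png';
<Image source={require(name)} style={styles.image} />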
There were a few errors here and there. I think you were trying to port ReactJS code to React Native, and not surprisingly there were a few slip-ups, like using div instead of View and other small things; boxShadow was also not working, so I removed it.
After a few tweaks the code is working and the images are loading.
As I stated earlier, I have omitted the User component and sanityClient; you can implement them later.
Here is the working home.js after the changes.
import React from "react";
import {
ScrollView,
ActivityIndicator,
StyleSheet,
Image,
ImageBackground,
View,
} from "react-native";
// import UserList from "./user-list";
import Header from "./header";
// import sanityClient from "";
// import BackButton from "./back-button";
// import User from "./user";
// import {Asset} from 'expo-asset';
// const imageURI = Asset.fromModule(require('./arrow.png')).uri;
// const image = require("./assets/aoeu.jpg");
class Home extends React.Component {
state = {
user: {},
loading: true,
};
componentDidMount() {
// TODO: get users
this.getUser();
}
async getUser() {
// sanityClient
// .fetch(
// `*[ _type == "user" && emailAddress.current == "dwight@viamaven.com"]`
// )
// .then((data) => {
// console.log(data);
// this.setState({ user: data[0], loading: false });
// console.log(this.state.user);
// })
// .catch((err) => console.error(err));
// const res = await fetch("https://randomuser.me/api/?results=20");
// const { results} = await res.json();
// // console.log(results)
// this.setState({users: [...results], loading: false});
}
render() {
return (
<ScrollView
noSpacer={true}
noScroll={true}
style={styles.container}
showsVerticalScrollIndicator={false}
showsHorizontalScrollIndicator={false}
>
{/* getUser is stubbed out, so loading stays true; the condition is inverted
    here so the content branch renders in this demo. */}
{!this.state.loading ? (
<ActivityIndicator
style={[styles.centering, styles.gray]}
color="#5d38aa"
size="large"
/>
) : (
<View>
<Header title={"Spidy"} />
<View id="image">
<Image source={require("./arrow.png")} style={styles.image} />
</View>
{/* <User /> */}
</View>
)}
</ScrollView>
);
}
}
var styles = StyleSheet.create({
container: {
backgroundColor: "white",
width: 375,
height: 812,
// top: '50px',
},
centering: {
alignItems: "center",
justifyContent: "center",
padding: 8,
height: "100vh", // note: vh is a web unit; use a number or "100%" in plain React Native
},
image: {
width: 50,
height: 50,
marginRight: 20,
// boxShadow: "0px 1px 2px 0px rgba(0,0,0,0.1)",
// boxShadow: "10px 10px 17px -12px rgba(0,0,0,0.75)",
},
});
export default Home;
Zip file containing all the changes: src
Output: (screenshot of the running app omitted)
A stack navigator that lives in a bottom tab contains 2 screens. One screen implements react-native-camera and the other a modal. I'm trying to make the modal transparent, but it fails (it has a white background). That's happening because of the useIsFocused hook, which mounts and unmounts my camera component. Do you have any suggestions on how I can solve the issue?
function Scanner(){
return(
<Scan.Navigator headerMode='none' mode='modal'
screenOptions={{
cardStyle: { backgroundColor: 'transparent'},
cardOverlayEnabled: true,
}}
>
<Scan.Screen name='Camera' component={Camera}/>
<Scan.Screen name='ValidationModal' component= {Validation} />
</Scan.Navigator>
)
}
const Camera = ({ navigation }) => {
const [flash, setFlash] = React.useState(false)
const cameraRef = React.useRef(null) // replaces this.camera; `this` is undefined in a function component
const isFocused = useIsFocused();
const flashOn = ()=> {
setFlash(prevFlash => !prevFlash)
}
const barcodeRecognized = ({ barcodes }) => {
  barcodes.forEach(barcode => {
    const kappa = JSON.parse(barcode.data)
    navigation.navigate('ValidationModal') // must match the Screen name 'ValidationModal'
  })
};
if(isFocused){
return(
<RNCamera
ref={ref => {
  cameraRef.current = ref;
}}
type={RNCamera.Constants.Type.back}
captureAudio={false}
flashMode={flash?RNCamera.Constants.FlashMode.torch:RNCamera.Constants.FlashMode.off}
androidCameraPermissionOptions={{
title: 'Permission to use camera',
message: 'We need your permission to use your camera',
buttonPositive: 'Ok',
buttonNegative: 'Cancel',
}}
style={{ flex: 1,width: '100%'}}
onGoogleVisionBarcodesDetected={barcodeRecognized}>
<TouchableHighlight style={{position:'absolute', top:10, right:10, borderRadius:50, zIndex:100, backgroundColor:'rgba(255,255,255,0.7)'}} onPress={flashOn} >
<Image source={flash?require("../../images/_Active.png"):require("../../images/_Idle.png")} />
</TouchableHighlight>
</RNCamera>
)
} else {
  return null
}
}
const Validation = ({ navigation }) => {
return(
<View style={styles.container}>
<Image style={{flex:1}} source={require('../../green-tick.png')} resizeMode={'contain'} />
<TouchableHighlight title='Dismiss' onPress={() => navigation.goBack()}>
<Text>OK</Text>
</TouchableHighlight>
</View>
)
}
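One direction to try (my sketch, not a confirmed fix): instead of returning null when the screen loses focus, which unmounts RNCamera and leaves a white surface behind the transparent modal, keep the camera mounted and only gate the barcode callback on focus:

// Sketch: keep RNCamera mounted; just stop reacting to barcodes while unfocused,
// reusing the barcodeRecognized handler defined above.
return (
  <RNCamera
    ref={ref => { cameraRef.current = ref; }}
    type={RNCamera.Constants.Type.back}
    captureAudio={false}
    style={{ flex: 1, width: '100%' }}
    onGoogleVisionBarcodesDetected={isFocused ? barcodeRecognized : undefined}
  />
)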
I am currently using React Native Audio to record audio and store the sound. I am wondering if there is any way to get the progress of this recording in real time, to map the microphone input to an animation and give visual feedback that the microphone is working.
From what I've found, the onProgress() function of the package only sends the current timecode.
Thanks for any help!
I am currently working on something similar. This is how I did it.
import React, { Component } from 'react';
import {
Platform,
StyleSheet,
Text,
View,
TouchableOpacity,
Button,
LayoutAnimation,
Image,
ScrollView,
Animated
} from 'react-native';
export default class App extends Component {
state = {
isPressed: false,
animated: new Animated.Value(0),
opacityA: new Animated.Value(1),
}
constructor(props) {
super(props);
this._onPress = this._onPress.bind(this);
}
_runAnimation() {
  const { animated, opacityA } = this.state;
  // Keep a reference to the loop so _stopAnimation can stop the running instance.
  this.loop = Animated.loop(
    Animated.parallel([
      Animated.timing(animated, {
        toValue: 1,
        duration: 1000,
        useNativeDriver: true,
      }),
      Animated.timing(opacityA, {
        toValue: 0,
        duration: 1000,
        useNativeDriver: true,
      })
    ])
  );
  this.loop.start();
}
_stopAnimation() {
  // Stopping a freshly built loop does nothing; stop the one started in _runAnimation.
  if (this.loop) this.loop.stop();
}
_onPress() {
this.setState(
state => ({ isPressed: !state.isPressed }),
)
}
_micButton() {
const { isPressed, animated, opacityA, } = this.state;
if (isPressed) {
//some function
this._runAnimation();
return (
<Animated.View style={{
width: 100,
height: 100,
borderRadius: 50,
backgroundColor: 'rgba(153,0,0,0.4)',
opacity: opacityA,
transform: [
{
scale: animated
}
]
}}>
{/* icon or image */}
</Animated.View>
);
} else {
//some function
return (
<View style={{
width: 100,
height: 100,
borderRadius: 50,
backgroundColor: 'rgba(153,0,0,0.4)',
}}>
{/* icon or image */}
</View>
);
}
}
render() {
return (
<View style={styles.container}>
<TouchableOpacity onPress={this._onPress}>
{this._micButton()}
</TouchableOpacity>
</View>
);
}
}
const styles = StyleSheet.create({
container: {
flex: 1,
justifyContent: 'center',
alignItems: 'center',
backgroundColor: '#F5FCFF',
},
});
I hope that solves your problem.
I use expo-av. There is an API, setOnRecordingStatusUpdate, that lets you set a function which is called regularly with the status of the Recording. For example:
async startRecording(callback: onRecordingStatusUpdated) {
this.isLoading = true
await Audio.setAudioModeAsync({
allowsRecordingIOS: true,
interruptionModeIOS: Audio.INTERRUPTION_MODE_IOS_DO_NOT_MIX,
playsInSilentModeIOS: true,
shouldDuckAndroid: true,
interruptionModeAndroid: Audio.INTERRUPTION_MODE_ANDROID_DO_NOT_MIX,
playThroughEarpieceAndroid: true
});
if (this.recording !== null) {
// only one recorder is allowed to exist
this.recording.setOnRecordingStatusUpdate(null);
this.recording = null;
}
const recording = new Audio.Recording();
await recording.prepareToRecordAsync(this.recordingSettings);
// ✨✨✨set the callback
recording.setOnRecordingStatusUpdate(callback);
this.recording = recording;
await this.recording.startAsync(); // Will call this._updateScreenForRecordingStatus to update the screen.
this.isLoading = false
}
// 🌟🌟🌟 usage
startRecording(status => console.log('[onRecording]', status))
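If the goal from the question is a mic-level animation, here is a sketch building on the same API (my assumption; check the preset and field names against your expo-av version): enable metering when preparing the recording, then read status.metering, a negative dBFS value, in the callback and drive an Animated.Value with it.

import { Animated } from 'react-native';
import { Audio } from 'expo-av';

const level = new Animated.Value(0);

// Inside an async function:
const recording = new Audio.Recording();
await recording.prepareToRecordAsync({
  ...Audio.RECORDING_OPTIONS_PRESET_HIGH_QUALITY,
  isMeteringEnabled: true, // ask for mic levels in status updates
});
recording.setOnRecordingStatusUpdate((status) => {
  if (status.isRecording && typeof status.metering === 'number') {
    // metering is roughly -160..0 dBFS; map it to a 0..1 scale value.
    const normalized = Math.max(0, 1 + status.metering / 60);
    Animated.timing(level, {
      toValue: normalized,
      duration: 100,
      useNativeDriver: true,
    }).start();
  }
});
await recording.startAsync();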