I am trying to give my application a backdrop by using an image, but I can't seem to get it to work. I'm following this example on loading images: https://github.com/emilk/egui/blob/c69fe941afdea5ef6f3f84ed063554500b6262e8/eframe/examples/image.rs
Here is my code:
use eframe::{run_native, epi::App, egui, NativeOptions};
use image::GenericImageView;

struct Tasks {
    texture: Option<(egui::Vec2, egui::TextureId)>,
}

impl App for Tasks {
    fn name(&self) -> &str {
        "CheckIt"
    }

    fn update(&mut self, ctx: &eframe::egui::CtxRef, frame: &mut eframe::epi::Frame<'_>) {
        // background color
        let frame = egui::containers::Frame {
            fill: egui::Color32::from_rgb(241, 233, 218),
            ..Default::default()
        };
        if self.texture.is_none() {
            // Load the image:
            let image_data = include_bytes!("date_backdrop.png");
            let image = image::load_from_memory(image_data).expect("Failed to load image");
            let image_buffer = image.to_rgba8();
            let size = (image.width() as usize, image.height() as usize);
            let pixels = image_buffer.into_vec();
            assert_eq!(size.0 * size.1 * 4, pixels.len());
            let pixels: Vec<_> = pixels
                .chunks_exact(4)
                .map(|p| egui::Color32::from_rgba_unmultiplied(p[0], p[1], p[2], p[3]))
                .collect();
            // Allocate a texture:
            let texture = ctx.tex_allocator().alloc_srgba_premultiplied(size, &pixels);
            let size = egui::Vec2::new(size.0 as f32, size.1 as f32);
            self.texture = Some((size, texture));
        }
        // main window
        egui::CentralPanel::default().frame(frame).show(ctx, |ui| {
            if let Some((size, texture)) = self.texture {
                ui.heading("This is an image:");
                ui.image(texture, size);
                ui.heading("This is an image you can click:");
                ui.add(egui::ImageButton::new(texture, size));
            }
        });
    }
}

fn main() {
    let app: Tasks = Tasks { texture: None };
    let win_options = eframe::NativeOptions {
        initial_window_size: Some(egui::Vec2::new(386.0, 636.0)),
        always_on_top: true,
        resizable: false,
        ..Default::default()
    };
    run_native(Box::new(app), win_options);
}
I'm getting an error on this line of code:
let texture = ctx.tex_allocator().alloc_srgba_premultiplied(size, &pixels);
Here is the error message:
no method named tex_allocator found for reference &CtxRef in the current scope
method not found in &CtxRef
I'm still new to Rust and to building UIs with it. I have tried asking ChatGPT and googling the problem, but without success.
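For context: in the example linked above, the allocator is reached through the epi::Frame parameter (frame.tex_allocator()), not through CtxRef, and the code above also shadows the frame parameter with the background Frame before that point. Newer egui releases removed the epi texture allocator entirely in favour of Context::load_texture. A minimal sketch of the newer approach, assuming a roughly 0.20-era egui API:

// Inside update(); assumes egui ~0.20, where Context::load_texture exists.
let image_data = include_bytes!("date_backdrop.png");
let image = image::load_from_memory(image_data).expect("Failed to load image");
let size = [image.width() as usize, image.height() as usize];
let rgba = image.to_rgba8();
let color_image = egui::ColorImage::from_rgba_unmultiplied(size, rgba.as_flat_samples().as_slice());
// load_texture returns a TextureHandle that keeps the texture alive.
let texture = ctx.load_texture("date_backdrop", color_image, Default::default());
// ...then, inside the CentralPanel closure:
// ui.image(&texture, texture.size_vec2());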
Using this GitHub repo as a reference: https://github.com/emilk/egui/blob/master/examples/retained_image/src/main.rs
I'm trying to load an image into my frame using egui_extras::RetainedImage, but it gives me an error that the function RetainedImage::from_image_bytes cannot be found in RetainedImage. I have also checked image.rs to make sure the function is even there, which it is.
Here is my code:
use eframe::{run_native, epi::App, egui, NativeOptions};
use egui_extras::RetainedImage;

struct InitView {
    image: RetainedImage,
    tint: egui::Color32,
}

impl Default for InitView {
    fn default() -> Self {
        Self {
            image: RetainedImage::from_image_bytes(
                "date_backdrop.png",
                include_bytes!("date_backdrop.png"),
            )
            .unwrap(),
            tint: egui::Color32::from_rgb(255, 0, 255),
        }
    }
}

impl App for InitView {
    fn name(&self) -> &str {
        "CheckIt"
    }

    fn update(&mut self, ctx: &eframe::egui::CtxRef, frame: &mut eframe::epi::Frame<'_>) {
        // background color
        let frame = egui::containers::Frame {
            fill: egui::Color32::from_rgb(241, 233, 218),
            ..Default::default()
        };
        // main window
        egui::CentralPanel::default().frame(frame).show(ctx, |ui| {
            ui.label("test");
        });
    }
}

fn main() {
    let app: InitView = InitView { ..Default::default() };
    let win_options = eframe::NativeOptions {
        initial_window_size: Some(egui::Vec2::new(386.0, 636.0)),
        always_on_top: true,
        resizable: false,
        ..Default::default()
    };
    run_native(Box::new(app), win_options);
}
What am I doing wrong? I'm still new to Rust.
You need to enable the image feature.
Edit your Cargo.toml and replace egui_extras with egui_extras = { version = "0.20.0", features = ["image"] }, or run cargo add egui_extras -F "image" in your project root directory.
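With the feature enabled, the image can then be drawn inside update via RetainedImage::show, e.g. (a minimal sketch using the frame variable defined above):

egui::CentralPanel::default().frame(frame).show(ctx, |ui| {
    // Renders the retained image at its native size.
    self.image.show(ui);
});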
I have a working solution for filtering an input Vec of strings against a vector of structs. However, my code seems complicated, so I tried to simplify it using iter::filter (https://doc.rust-lang.org/stable/std/iter/struct.Filter.html). This caused issues because the iterator gave back references that could not be used directly. It seems my understanding of iterators, and of what can be done with a vector inside a struct, needs refreshing. Below is a simplified version of the filtering code that works:
#[derive(Debug)]
pub struct Widget {
    name: String,
    pin: u16,
}

impl Widget {
    pub fn new(widget_name: String, widget_pin: String) -> Widget {
        let widget_pin_u16 = widget_pin.parse::<u16>().expect("Unable to parse");
        let nw = Widget {
            name: widget_name,
            pin: widget_pin_u16,
        };
        return nw;
    }
}

pub struct WidgetHolder {
    widgets: Vec<Widget>,
    widget_holder_name: String,
}

impl WidgetHolder {
    fn add_widgets(&mut self, valid_widgets_found: Vec<String>) {
        let mut widgets_to_add: Vec<String> = Vec::new();
        for widget in valid_widgets_found {
            // The string must be compared to the pin field, so we're converting.
            let widget_offset = widget
                .clone()
                .parse::<u16>()
                .expect("Unable to parse widget base into int.");
            // If it doesn't exist in our WidgetHolder's widgets vector, then let's add it.
            let mut widget_exists = false;
            for existing_widget in &self.widgets {
                if widget_offset == existing_widget.pin {
                    widget_exists = true;
                    break;
                }
            }
            if !widget_exists {
                widgets_to_add.push(widget.clone());
            }
        }
        if widgets_to_add.is_empty() {
            return;
        }
        for widget in widgets_to_add {
            let loaded_widget = Widget::new(self.widget_holder_name.clone(), widget);
            self.widgets.push(loaded_widget);
        }
    }
}

pub fn main() {
    let init_vec = Vec::new();
    let mut wh = WidgetHolder {
        widgets: init_vec,
        widget_holder_name: "MyWidget".to_string(),
    };
    let vec1 = vec!["1".to_string(), "2".to_string(), "3".to_string()];
    wh.add_widgets(vec1);
    println!("{:?}", wh.widgets);
    let vec2 = vec!["2".to_string(), "3".to_string(), "4".to_string()];
    wh.add_widgets(vec2);
    println!("{:?}", wh.widgets);
}
Is there a way I can clean up this code without using so many data structures and loops? The filter API looks clean, but does it work with a vector inside a struct that I am trying to mutate (append to)?
EDIT
After trying to get a stack trace, I actually got the filter to work:
fn add_widgets(&mut self, valid_widgets_found: Vec<String>) {
    let widgets_to_add: Vec<String> = valid_widgets_found
        .into_iter()
        .filter(|widget_pin| {
            let widget_offset = widget_pin
                .clone()
                .parse::<u16>()
                .expect("Unable to parse widget base into int.");
            let mut widget_exists = false;
            for existing_widget in &self.widgets {
                if widget_offset == existing_widget.pin {
                    widget_exists = true;
                    break;
                }
            }
            !widget_exists
        })
        .collect();
    if widgets_to_add.is_empty() {
        return;
    }
    for widget in widgets_to_add {
        let loaded_widget = Widget::new(self.widget_holder_name.clone(), widget);
        self.widgets.push(loaded_widget);
    }
}
I figured out the answer; it seems it was just a syntax error when I initially tried it. For anyone looking for a filter example in the future, see the add_widgets version in the edit above.
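As a further cleanup, the inner existence loop can be replaced with Iterator::any and the final push loop with Vec::extend. A sketch of the same add_widgets using only std APIs (same behaviour as above, including panicking on unparsable input):

fn add_widgets(&mut self, valid_widgets_found: Vec<String>) {
    // Clone the name up front so the map closure does not borrow self.
    let holder_name = self.widget_holder_name.clone();
    let new_widgets: Vec<Widget> = valid_widgets_found
        .into_iter()
        .filter(|widget_pin| {
            let pin = widget_pin
                .parse::<u16>()
                .expect("Unable to parse widget base into int.");
            // Keep only the pins we do not already hold.
            !self.widgets.iter().any(|w| w.pin == pin)
        })
        .map(|widget_pin| Widget::new(holder_name.clone(), widget_pin))
        .collect();
    self.widgets.extend(new_widgets);
}

Collecting into new_widgets first ends the immutable borrow of self.widgets taken by the filter closure, so the mutable borrow for extend is allowed afterwards.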
I am using the image = 0.23.14 crate and am trying to overlay images on top of each other.
On the crate's repository there is an example that concatenates images side by side:
use image::{GenericImage, GenericImageView, ImageBuffer, Pixel, Primitive};

fn h_concat<I, P, S>(images: &[I]) -> ImageBuffer<P, Vec<S>>
where
    I: GenericImageView<Pixel = P>,
    P: Pixel<Subpixel = S> + 'static,
    S: Primitive + 'static,
{
    let mut imgbuf = image::ImageBuffer::new(100, 100);
    for img in images {
        imgbuf.copy_from(img, 0, 0).unwrap();
    }
    imgbuf
}

fn main() {
    h_concat(&[
        image::open("images/img1.png").unwrap(),
        image::open("images/img2.png").unwrap(),
    ])
    .save("random.png")
    .unwrap();
}
I am wondering what to do if I want to append more files together.
Okay, after some fiddling and a bit more research in the documentation, I found that there is a method, image::imageops::overlay, which solves my problem.
use image::{DynamicImage, imageops};

fn h_concat(mut base: DynamicImage, imgs: &[DynamicImage]) -> DynamicImage {
    for img in imgs {
        imageops::overlay(&mut base, img, 0, 0);
    }
    base
}

fn main() {
    let base = image::open("images/img1.png").unwrap();
    h_concat(base, &[
        image::open("images/img2.png").unwrap(),
    ])
    .save("random.png")
    .unwrap();
}
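If the goal is a true side-by-side concatenation rather than stacking everything at the origin, the same overlay call can be driven with an accumulating x offset. A sketch, assuming image 0.23, where overlay takes u32 offsets:

use image::{imageops, DynamicImage, GenericImageView, RgbaImage};

fn h_concat(imgs: &[DynamicImage]) -> RgbaImage {
    // Canvas wide enough for all images, tall enough for the tallest one.
    let width: u32 = imgs.iter().map(|i| i.width()).sum();
    let height: u32 = imgs.iter().map(|i| i.height()).max().unwrap_or(0);
    let mut canvas = RgbaImage::new(width, height);
    let mut x = 0;
    for img in imgs {
        // Place each image to the right of the previous one.
        imageops::overlay(&mut canvas, img, x, 0);
        x += img.width();
    }
    canvas
}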
I'm kinda stuck. I have a program abstraction to locate uniforms for me. I have a fragment shader with a float uniform elapsed, and a vertex shader with a mat4 uniform projection.
The helper function is defined as follows:
pub fn find_uniform(&mut self, name: &str) -> Result<(), UniformError> {
    if let Some(_) = self.uniforms.get(name) {
        return Ok(());
    }
    let target_location = unsafe {
        let location = gl::GetUniformLocation(self.program_id, name.as_bytes().as_ptr() as *const i8);
        let error = gl::GetError();
        if error != gl::NO_ERROR {
            return Err(UniformError::new(true, error));
        }
        location
    };
    if target_location < 0 {
        return Err(UniformError::new(false, UNIFORM_NOT_FOUND));
    }
    self.uniforms.insert(name.to_string(), target_location);
    return Ok(());
}
If I use the helper function to look up only elapsed, or if I look up both with native gl calls, it works:
// No error
if let Err(e) = program.find_uniform("elapsed") {
    eprint!("Failed to find elapsed, probably loading wrong shader. err: {}", e);
    return;
};
// OR
unsafe {
    let location1 = gl::GetUniformLocation(program.program_id, b"elapsed".as_ptr() as *const i8);
    println!("{}", location1); // 0
    let location2 = gl::GetUniformLocation(program.program_id, b"projection".as_ptr() as *const i8);
    println!("{}", location2); // 1
}
But if I use my helper function for both, it fails to find whichever one I look up first:
if let Err(e) = program.find_uniform("elapsed") {
    // Enters the error branch here
    eprint!("Failed to find elapsed, probably loading wrong shader. err: {}", e);
    return;
};
if let Err(e) = program.find_uniform("projection") {
    eprint!("Failed to find projection, probably loading wrong shader. err: {}", e);
    return;
};
Does anyone have an idea of what I'm doing wrong?
Looking at your code, the conversion name.as_bytes().as_ptr() as *const i8 is unsafe and depends on memory layout: Rust strings are not C strings and are not null-terminated by default, so your current program works only if you get lucky and there happens to be a null byte right after your string's bytes. Use std::ffi::CString::new() to ensure the string is null-terminated.
The following code should work:
let target_location = unsafe {
    use std::ffi::CString;
    let c_name = CString::new(name).expect("Convert to c-string");
    let location = gl::GetUniformLocation(self.program_id, c_name.as_ptr());
    let error = gl::GetError();
    if error != gl::NO_ERROR {
        return Err(UniformError::new(true, error));
    }
    location
};
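For uniform names that are string literals, an explicit NUL byte also works and avoids the CString allocation (assuming the literal contains no interior NUL):

// The trailing \0 makes the byte string a valid C string.
let location = unsafe {
    gl::GetUniformLocation(program.program_id, b"elapsed\0".as_ptr() as *const i8)
};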
See also:
How to pass data to openGL functions correctly in Rust
This question was closed as a duplicate of: Very slow framerate with AVFoundation and Metal in MacOS.
I am working on a video filter for iOS and macOS, which captures video input from the default camera, applies a filter (MPSImageGaussianBlur), and renders it using MTKView.
It works fine on iOS (13 on iPhone 6s and iPhone 11), but on macOS (10.15 on a MacBook Pro) I see just a red screen and can't figure out why. The code calls captureOutput() and draw() repeatedly, as expected.
This is VS2CameraSession, which performs the majority of the work. (Note that I added the kCVPixelBufferMetalCompatibilityKey flag to videoSettings, as recommended in CVMetalTextureCacheCreateTextureFromImage returns -6660 on macOS 10.13.)
import AVFoundation
import MetalPerformanceShaders

class VS2CameraSession: NSObject {
    let gpu = MTLCreateSystemDefaultDevice()!
    private let session = AVCaptureSession()
    private let camera = AVCaptureDevice.default(for: .video)
    private var textureCache: CVMetalTextureCache?
    private var texture: MTLTexture?

    func startRunning() {
        CVMetalTextureCacheCreate(nil, nil, gpu, nil, &textureCache)
        guard let camera = camera,
              let input = try? AVCaptureDeviceInput(device: camera) else {
            return
        }
        guard session.canAddInput(input) else {
            return
        }
        session.addInput(input)

        let output = AVCaptureVideoDataOutput()
        output.alwaysDiscardsLateVideoFrames = true
        #if os(macOS)
        // https://stackoverflow.com/questions/46549906/cvmetaltexturecachecreatetexturefromimage-returns-6660-on-macos-10-13
        output.videoSettings = [
            kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA,
            kCVPixelBufferMetalCompatibilityKey as String: true
        ]
        #else
        output.videoSettings = [
            kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA
        ]
        #endif
        output.setSampleBufferDelegate(self, queue: DispatchQueue.main)
        session.addOutput(output)
        session.startRunning()
    }

    func draw(drawable: CAMetalDrawable?) {
        guard let texture = self.texture,
              let drawable = drawable,
              let commandQueue = gpu.makeCommandQueue(),
              let commandBuffer = commandQueue.makeCommandBuffer() else {
            return
        }
        // Apply filter(s)
        let filter = MPSImageGaussianBlur(device: gpu, sigma: 10.0)
        filter.encode(commandBuffer: commandBuffer, sourceTexture: texture, destinationTexture: drawable.texture)
        commandBuffer.present(drawable)
        commandBuffer.commit()
        self.texture = nil // no need to draw it again
    }
}

extension VS2CameraSession: AVCaptureVideoDataOutputSampleBufferDelegate {
    func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
        if let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer),
           let textureCache = self.textureCache {
            let width = CVPixelBufferGetWidth(pixelBuffer)
            let height = CVPixelBufferGetHeight(pixelBuffer)
            var textureRef: CVMetalTexture?
            CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, textureCache, pixelBuffer, nil,
                                                      .bgra8Unorm, width, height, 0, &textureRef)
            texture = CVMetalTextureGetTexture(textureRef!)
        }
    }
}
This is VS2CameraViewController, which uses VS2CameraSession to render its view.
import UIKit
import SwiftUI
import MetalKit

final class VS2CameraViewController: UIViewController {
    let cameraSession = VS2CameraSession()

    override func loadView() {
        let metalView = MTKView()
        metalView.device = self.cameraSession.gpu
        metalView.delegate = self
        metalView.clearColor = MTLClearColorMake(1, 1, 1, 1)
        metalView.colorPixelFormat = MTLPixelFormat.bgra8Unorm
        metalView.framebufferOnly = false
        self.view = metalView
    }

    override func viewDidLoad() {
        cameraSession.startRunning()
    }
}

extension VS2CameraViewController: MTKViewDelegate {
    func mtkView(_ view: MTKView, drawableSizeWillChange size: CGSize) {
    }

    func draw(in view: MTKView) {
        cameraSession.draw(drawable: view.currentDrawable)
    }
}

extension VS2CameraViewController: UIViewControllerRepresentable {
    typealias UIViewControllerType = VS2CameraViewController

    public func makeUIViewController(context: UIViewControllerRepresentableContext<VS2CameraViewController>) -> VS2CameraViewController {
        return VS2CameraViewController()
    }

    public func updateUIViewController(_ uiViewController: VS2CameraViewController, context: UIViewControllerRepresentableContext<VS2CameraViewController>) {
    }
}
The entire source code is available at https://github.com/snakajima/VideoShader2/tree/stack_overflow.
I found the answer here: Very slow framerate with AVFoundation and Metal in MacOS.
I just need to retain the reference to the sampleBuffer along with the texture.
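A minimal sketch of that fix in VS2CameraSession (the stored property name sampleBuffer is my own choice):

// Keep the sample buffer alive as long as the texture created from it;
// the pixel buffer backing the Metal texture is owned by the sample buffer.
private var sampleBuffer: CMSampleBuffer?

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
    // ... create `texture` from the pixel buffer as before ...
    self.sampleBuffer = sampleBuffer // retain until the frame has been drawn
}

func draw(drawable: CAMetalDrawable?) {
    // ... encode the blur and commit as before ...
    self.texture = nil
    self.sampleBuffer = nil // now safe to release
}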