I am getting this type error and I cannot figure out what is wrong. Code below:
use std::{collections::{HashMap, btree_map::Values}, any::TypeId};
#[macro_use]
use lazy_static::lazy_static;

pub fn constraint_eq(x: &f32, y: &f32) -> f32 {
    (x - y).abs()
}

pub fn constraint_neq(x: &f32, y: &f32) -> f32 {
    match x != y {
        true => 0.0,
        false => (x - y).abs(),
    }
}

pub fn constraint_lt(x: &f32, y: &f32) -> f32 {
    match x < y {
        true => 0.0,
        false => (x - y).abs(),
    }
}

pub fn constraint_gt(x: &f32, y: &f32) -> f32 {
    match x > y {
        true => 0.0,
        false => (x - y).abs(),
    }
}

type ConstraintFn = fn(&f32, &f32) -> f32;

lazy_static! {
    pub static ref OPERATORS: HashMap<String, fn(&f32, &f32) -> f32> = {
        let mut m = HashMap::new();
        m.insert("==".to_string(), constraint_eq as ConstraintFn);
        m.insert("!=".to_string(), constraint_neq as ConstraintFn);
        m.insert("<".to_string(), constraint_lt as ConstraintFn);
        m.insert(">".to_string(), constraint_gt as ConstraintFn);
        m
    };
}
pub struct Constraint {
    // EQUALS: &'static str = "==0"
    EQUALS_ZERO: &'static str,
    LESS_THAN_ZERO: &'static str,
    GREATER_THAN_ZERO: &'static str,
    op: &'static str,
    // function: fn(&f32) -> f32,
    function: Box<dyn Fn(&'a f32) -> f32>,
}
impl Constraint {
    pub fn new(op: &'static str, value: Option<f32>) -> Constraint {
        let EQUALS_ZERO: &'static str = "==0";
        let LESS_THAN_ZERO: &'static str = "<0";
        let GREATER_THAN_ZERO: &'static str = ">0";
        // let op: &'static str;
        if value.is_some() {
            let op = &format!("{}{}", &op, &value.unwrap().to_string());
            let function = Box::new(move |x: f32| OPERATORS[op](&x, &value.unwrap()));
            return Constraint {EQUALS_ZERO, LESS_THAN_ZERO, GREATER_THAN_ZERO, function, op}
        } else {
            let op = &format!("{}{}", &op, &0.0.to_string());
            let function = Box::new(move |x: f32| OPERATORS[op](&x, &value.unwrap()));
            return Constraint {EQUALS_ZERO, LESS_THAN_ZERO, GREATER_THAN_ZERO, function, op}
        }
    }

    pub fn call(&self, value: &f32) -> f32 {
        (self.function)(value)
    }
    // Constraint {EQUALS_ZERO, LESS_THAN_ZERO, GREATER_THAN_ZERO, function, op}
}
I am trying to get a partial application of the function by turning it into a closure, but I am not sure how to fix this error.
The error that I am getting is:
| let function = Box::new(move |x:f32| OPERATORS[op](&x, &value.unwrap()));
| ------------ found signature defined here
101 |
102 | return Constraint {EQUALS_ZERO, LEQ_ZERO, GEQ_ZERO, LESS_THAN_ZERO, GREATER_THAN_ZERO, function, op}
| ^^^^^^^^ expected due to this
|
= note: expected closure signature `fn(&'static f32) -> _`
found closure signature `fn(f32) -> _`
= note: required for the cast from `[closure#src/constraint.rs:100:37: 100:49]` to the object type `dyn Fn(&'static f32) -> f32`
What am I doing wrong, and how can this be fixed?
I tried incorporating lifetimes but I have been running into an error. I switched from fn(&f32) -> f32 to Box<dyn Fn(&'a f32) -> f32>.
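For reference, here is one way to reconcile the two signatures, as a minimal sketch rather than a verified fix: make the closure parameter a &f32 so it matches the boxed Fn(&f32) -> f32 field, look the operator up by its bare key (OPERATORS is keyed by "==", not "==5"), and capture the right-hand value by move so the closure is 'static. The op field becomes an owned String, since the formatted key cannot be a &'static str; the unused constant fields are trimmed from the sketch.

type ConstraintFn = fn(&f32, &f32) -> f32;

pub struct Constraint {
    op: String,
    function: Box<dyn Fn(&f32) -> f32>,
}

impl Constraint {
    pub fn new(op: &'static str, value: Option<f32>) -> Constraint {
        let rhs = value.unwrap_or(0.0);
        // fn pointers are Copy, so the closure owns the lookup result and
        // no borrow of OPERATORS or of a temporary String escapes new().
        let f: ConstraintFn = OPERATORS[op];
        Constraint {
            op: format!("{}{}", op, rhs),
            function: Box::new(move |x: &f32| f(x, &rhs)),
        }
    }

    pub fn call(&self, value: &f32) -> f32 {
        (self.function)(value)
    }
}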
Specifically, what is the correct way to initialize the CompositionTarget member in the following Rust windows crate CoreApp code excerpt?
#[implement(Windows::ApplicationModel::Core::IFrameworkView)]
struct AppView {
    target: CompositionTarget,
}

#[allow(non_snake_case)]
impl AppView {
    fn new() -> Self {
        Self {
            //target: std::ptr::null_mut(),
            //target: CompositionTarget(0),
        }
    }

    fn Initialize(&self, _: &Option<CoreApplicationView>) -> Result<()> {
        Ok(())
    }

    fn Load(&self, _: &HSTRING) -> Result<()> {
        Ok(())
    }

    fn Uninitialize(&self) -> Result<()> {
        Ok(())
    }

    fn Run(&self) -> Result<()> {
        let window = CoreWindow::GetForCurrentThread()?;
        window.Activate()?;
        let dispatcher = window.Dispatcher()?;
        dispatcher.ProcessEvents(CoreProcessEventsOption::ProcessUntilQuit)?;
        Ok(())
    }

    fn SetWindow(&self, _: &Option<CoreWindow>) -> Result<()> {
        let compositor = Compositor::new()?;
        let root = compositor.CreateContainerVisual()?;
        //...
        Ok(())
    }
}

fn main() -> Result<()> {
    unsafe {
        CoInitializeEx(std::ptr::null_mut(), COINIT_MULTITHREADED)?;
    }
    let app: IFrameworkViewSource = App().into();
    CoreApplication::Run(app)?;
    Ok(())
}
Rust does not have null. Instead, you should use Option:
struct AppView {
    target: Option<CompositionTarget>,
}

impl AppView {
    fn new() -> Self {
        Self {
            target: None,
        }
    }
    ...
This works well since, in the windows crate, a CompositionTarget cannot be null; any function that would otherwise return a null value returns Option::None or Err(Error::Ok) instead. See this issue.
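Since the IFrameworkView methods shown take &self, actually filling the field in later (for example in SetWindow) needs interior mutability. A minimal sketch of that pattern; the set_target/with_target helper names are made up for illustration, and the windows crate import of CompositionTarget is assumed:

use std::cell::RefCell;

struct AppView {
    target: RefCell<Option<CompositionTarget>>,
}

impl AppView {
    // Hypothetical helper: store the target once it has been created.
    fn set_target(&self, t: CompositionTarget) {
        *self.target.borrow_mut() = Some(t);
    }

    // Hypothetical helper: run a closure against the target if it exists.
    fn with_target<R>(&self, f: impl FnOnce(&CompositionTarget) -> R) -> Option<R> {
        self.target.borrow().as_ref().map(f)
    }
}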
I have a vector of structs and want to initialize it with all zeros.
struct MyStruct {
    v1: u32,
    v2: u64,
}

type MyVector = Vec<MyStruct>;
Because the size of the vector is already known, I can specify the capacity.
My first approach is as below:
impl Default for MyStruct {
    fn default() -> Self {
        Self {
            v1: 0,
            v2: 0,
        }
    }
}

fn init_my_vec() {
    let size = 1000;
    let mut my_vec: MyVector = Vec::with_capacity(size);
    (0..size).for_each(|_| my_vec.push(MyStruct::default()))
}
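(Side note: since u32 and u64 are both zero by default, the manual Default impl above can simply be derived; a minimal equivalent:)

// Generates the same all-zero Default as the manual impl above.
#[derive(Default)]
struct MyStruct {
    v1: u32,
    v2: u64,
}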
As far as I know, vector initialization with 0 is faster than using an iterator, like this:
let usize_vec: Vec<usize> = vec![0; 1000];
// is faster than
let mut usize_vec: Vec<usize> = Vec::with_capacity(1000);
for _ in 0..1000 {
    usize_vec.push(0);
}
Questions
1. Am I right about vector initialization speed? Since filling with 0 can use specialized instructions, is using an iterator slower than using the macro?
2. Is there any method that can initialize the vector of structs with 0 values safely and fast?
3. Or should I use unsafe code, like making zeroed bytes and casting them to a vector?
Speed measurement about Question 1
use std::time::Instant;

const VEC_SIZE: usize = 10_000;

fn init_with_iter() -> u128 {
    let start = Instant::now();
    let mut usize_vec: Vec<usize> = Vec::with_capacity(VEC_SIZE);
    for _ in 0..VEC_SIZE {
        usize_vec.push(0);
    }
    start.elapsed().as_micros()
}

fn init_with_macro() -> u128 {
    let start = Instant::now();
    let _: Vec<usize> = vec![0; VEC_SIZE];
    start.elapsed().as_micros()
}
The average time taken to generate the vector 10,000 times is:
using iter (init_with_iter): 514.6805 ms
using macro (init_with_macro): 2.0361 ms
on my machine
Speed measurement about Question 3
I think using the unsafe function mem::zeroed is slightly faster than the others:
const VEC_SIZE: usize = 10_000;

fn init_with_iter() -> u128 {
    let start = Instant::now();
    let mut my_vec: MyVector = Vec::with_capacity(VEC_SIZE);
    for _ in 0..VEC_SIZE {
        my_vec.push(MyStruct::default());
    }
    start.elapsed().as_micros()
}

fn init_with_macro() -> u128 {
    let start = Instant::now();
    let _: MyVector = vec![MyStruct::default(); VEC_SIZE];
    start.elapsed().as_micros()
}

fn init_with_zeroed() -> u128 {
    let start = Instant::now();
    let _: MyVector = unsafe { vec![std::mem::zeroed(); VEC_SIZE] };
    start.elapsed().as_micros()
}
The average time taken to generate the vector 1,000 times is:
using iter (init_with_iter): 575.572 ms
using macro (init_with_macro): 486.958 ms
using unsafe function (init_with_zeroed): 468.885 ms
on my machine
Here is a criterion benchmark of your three approaches:
use criterion::{black_box, criterion_group, criterion_main, Criterion};

criterion_group!(
    benches,
    init_structs_with_iter,
    init_structs_with_macro,
    init_structs_with_unsafe
);
criterion_main!(benches);

const N_ITEMS: usize = 1000;

#[allow(unused)]
#[derive(Debug, Clone)]
struct MyStruct {
    v1: u32,
    v2: u64,
}

impl Default for MyStruct {
    fn default() -> Self {
        Self { v1: 0, v2: 0 }
    }
}

fn init_structs_with_iter(c: &mut Criterion) {
    c.bench_function("structs: with_iter", |b| {
        b.iter(|| {
            let mut my_vec = Vec::with_capacity(N_ITEMS);
            (0..my_vec.capacity()).for_each(|_| my_vec.push(MyStruct::default()));
            black_box(my_vec);
        })
    });
}

fn init_structs_with_macro(c: &mut Criterion) {
    c.bench_function("structs: with_macro", |b| {
        b.iter(|| {
            let my_vec = vec![MyStruct::default(); N_ITEMS];
            black_box(my_vec);
        })
    });
}

fn init_structs_with_unsafe(c: &mut Criterion) {
    c.bench_function("structs: with_unsafe", |b| {
        b.iter(|| {
            let my_vec: Vec<MyStruct> = vec![unsafe { std::mem::zeroed() }; N_ITEMS];
            black_box(my_vec);
        })
    });
}
And the results:
structs: with_iter time: [1.3857 us 1.3960 us 1.4073 us]
structs: with_macro time: [563.30 ns 565.30 ns 567.32 ns]
structs: with_unsafe time: [568.84 ns 570.09 ns 571.49 ns]
The vec![] macro seems to be the fastest (and also the cleanest and easiest to read).
As you can see, the time is measured in nanoseconds, so although the iterator version is 2-3x slower, it won't matter in practice. Optimizing the zero-initialization of a struct is the least important thing you can do; you can save at most 1 microsecond ;)
PS: those times include the memory allocation and deallocation times.
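If you want the zeroed-allocation path without writing unsafe yourself, one further option (not benchmarked above) is the bytemuck crate; a minimal sketch, assuming bytemuck with its derive and extern_crate_alloc features enabled:

use bytemuck::{zeroed_vec, Zeroable};

// Zeroable asserts "all-zero bytes is a valid value", which is exactly
// what the unsafe mem::zeroed version relied on implicitly.
#[derive(Zeroable)]
struct MyStruct {
    v1: u32,
    v2: u64,
}

fn main() {
    let v: Vec<MyStruct> = zeroed_vec(10_000); // zeroed allocation, no unsafe
    assert_eq!(v.len(), 10_000);
}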
I wish that enums in Rust could be used like Haskell's product types. I want to
access a field's value directly
assign a field's value directly, or make a clone with one value changed.
Directly means not writing long pattern-matching code, but just accessing the field like let a_size = a.size.
In Haskell:
data TypeAB = A {size::Int, name::String} | B {size::Int, switch::Bool} deriving Show

main = do
  let a = A 1 "abc"
  let b = B 1 True
  print (size a)    -- could access a field's value directly
  print (name a)    -- could access a field's value directly
  print (switch b)  -- could access a field's value directly
  let aa = a{size=2}  -- could make a clone directly with the changing value
  print aa
I tried two styles of Rust enum definition.
Style A:
#[derive(Debug)]
enum EntryType {
    A(TypeA),
    B(TypeB),
}

#[derive(Debug)]
struct TypeA {
    size: u32,
    name: String,
}

#[derive(Debug)]
struct TypeB {
    size: u32,
    switch: bool,
}

fn main() {
    let mut ta = TypeA {
        size: 3,
        name: "TAB".to_string(),
    };
    println!("{:?}", &ta);
    ta.size = 2;
    ta.name = "TCD".to_string();
    println!("{:?}", &ta);

    let mut ea = EntryType::A(TypeA {
        size: 1,
        name: "abc".to_string(),
    });
    let mut eb = EntryType::B(TypeB {
        size: 1,
        switch: true,
    });
    let vec_ab = vec![&ea, &eb];
    println!("{:?}", &ea);
    println!("{:?}", &eb);
    println!("{:?}", &vec_ab);
    // Want to do like `ta.size = 2` for ea
    // Want to do like `ta.name = "bcd".to_string()` for ea
    // Want to do like `tb.switch = false` for eb
    // ????
    println!("{:?}", &ea);
    println!("{:?}", &eb);
    println!("{:?}", &vec_ab);
}
Style B:
#[derive(Debug)]
enum TypeCD {
    TypeC { size: u32, name: String },
    TypeD { size: u32, switch: bool },
}

fn main() {
    // NOTE: Rust requires representative struct name before each constructor
    // TODO: Check constructor name can be duplicated
    let mut c = TypeCD::TypeC {
        size: 1,
        name: "abc".to_string(),
    };
    let mut d = TypeCD::TypeD {
        size: 1,
        switch: true,
    };
    let vec_cd = vec![&c, &d];
    println!("{:?}", &c);
    println!("{:?}", &d);
    println!("{:?}", &vec_cd);

    // Can't access a field's value like `let c_size = c.size`:
    let c_size = c.size;     // [ERROR]: No field `size` on `TypeCD`
    let c_name = c.name;     // [ERROR]: No field `name` on `TypeCD`
    let d_switch = d.switch; // [ERROR]: No field `switch` on `TypeCD`

    // Can't change a field's value like:
    // c.size = 2;
    // c.name = "cde".to_string();
    // d.switch = false;
    println!("{:?}", &c);
    println!("{:?}", &d);
    println!("{:?}", &vec_cd);
}
I couldn't access/assign values directly in any style. Do I have to implement functions or a trait just to access a field's value? Is there some way of deriving things to help this situation?
What about style C:
#[derive(Debug)]
enum Color {
    Green { name: String },
    Blue { switch: bool },
}

#[derive(Debug)]
struct Something {
    size: u32,
    color: Color,
}

fn main() {
    let c = Something {
        size: 1,
        color: Color::Green {
            name: "green".to_string(),
        },
    };
    let d = Something {
        size: 2,
        color: Color::Blue { switch: true },
    };
    let vec_cd = vec![&c, &d];
    println!("{:?}", &c);
    println!("{:?}", &d);
    println!("{:?}", &vec_cd);
    let _ = c.size;
}
If all variants have something in common, why separate them?
Of course, I need to access the non-common fields too.
This would require Rust to define what happens when the actual variant at runtime doesn't contain the field you asked for. So I don't think Rust will ever add this.
You could do it yourself. It will require some lines of code, but it matches the behavior of your Haskell code. However, I don't think this is the best thing to do. Haskell is Haskell; you should write Rust as Rust rather than trying to write Haskell in Rust. That is a general rule: some features of Rust come directly from Haskell, but what you want here is, in my opinion, very odd in Rust code.
#[derive(Debug)]
enum Something {
    A { size: u32, name: String },
    B { size: u32, switch: bool },
}

impl Something {
    fn size(&self) -> u32 {
        match self {
            Something::A { size, .. } => *size,
            Something::B { size, .. } => *size,
        }
    }

    fn name(&self) -> &String {
        match self {
            Something::A { name, .. } => name,
            Something::B { .. } => panic!("Something::B doesn't have name field"),
        }
    }

    fn switch(&self) -> bool {
        match self {
            Something::A { .. } => panic!("Something::A doesn't have switch field"),
            Something::B { switch, .. } => *switch,
        }
    }

    fn new_size(&self, size: u32) -> Something {
        match self {
            Something::A { name, .. } => Something::A {
                size,
                name: name.clone(),
            },
            Something::B { switch, .. } => Something::B {
                size,
                switch: *switch,
            },
        }
    }

    // etc...
}

fn main() {
    let a = Something::A {
        size: 1,
        name: "Rust is not haskell".to_string(),
    };
    println!("{:?}", a.size());
    println!("{:?}", a.name());

    let b = Something::B {
        size: 1,
        switch: true,
    };
    println!("{:?}", b.switch());

    let aa = a.new_size(2);
    println!("{:?}", aa);
}
I think there is currently no built-in way of accessing size directly on the enum type. Until then, enum_dispatch or a macro-based solution may help you.
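For completeness, a minimal sketch of the enum_dispatch route for the shared field, following that crate's documented pattern (the HasSize trait name is made up here):

use enum_dispatch::enum_dispatch;

#[enum_dispatch]
trait HasSize {
    fn size(&self) -> u32;
}

struct TypeA { size: u32, name: String }
struct TypeB { size: u32, switch: bool }

impl HasSize for TypeA { fn size(&self) -> u32 { self.size } }
impl HasSize for TypeB { fn size(&self) -> u32 { self.size } }

// Variants named after the wrapped types; enum_dispatch generates
// From impls and forwards HasSize calls to the active variant.
#[enum_dispatch(HasSize)]
enum EntryType {
    TypeA,
    TypeB,
}

fn main() {
    let e: EntryType = TypeA { size: 1, name: "abc".to_string() }.into();
    println!("{}", e.size()); // 1
}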
So I've been trying to implement a library for vector and matrix maths, and I created some functions that worked all right, but I wanted to generalize over all number primitives and hook the functionality into the normal operators.
My thought was that I'd create a container for a Vec<T> that can hold either number types (like i32) or another container, so that matrices were possible. Ergo:
#[derive(Clone, Debug)]
struct Mat<T>(Vec<T>);
Then, to add together two vecs of any number, I implement Add as:
impl<'a, T> Add for &'a Mat<T>
where
    T: PartialEq + PartialOrd + Add<T> + Sub<T> + Mul<T> + Div<T> + Rem<T> + Clone,
{
    type Output = Option<Mat<<T as std::ops::Add>::Output>>;

    fn add(self, other: &Mat<T>) -> Self::Output {
        let a: &Vec<T> = self.pop();
        let b: &Vec<T> = other.pop();
        match a.len() == b.len() {
            true => {
                let mut retvec: Vec<<T as std::ops::Add>::Output> = Vec::new();
                for i in 0..a.len() {
                    retvec.push(a[i].clone() + b[i].clone());
                }
                Some(Mat(retvec))
            },
            false => None,
        }
    }
}
Edit: To further clarify, Mat::pop() is just the unwrap function (it returns a reference to the inner Vec), though it is probably poorly named.
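(For reference, a minimal sketch of what that accessor presumably looks like, matching how it is used above:)

impl<T> Mat<T> {
    // Borrow the inner Vec; "pop" is the asker's name for it.
    fn pop(&self) -> &Vec<T> {
        &self.0
    }
}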
The basic scenario of adding together two vectors of any number seems to work.
#[test]
fn add_override_vectors() {
    let vec: Mat<i32> = Mat(vec![2, 2, 2]);
    let newvec = &vec + &vec;
    assert_eq!(*newvec.unwrap().pop(), vec![4, 4, 4]);
}
But matrices are giving me a headache. For them, the add function looks very similar, except for the let Some(x) statement:
impl<'a, T> Add for &'a Mat<Mat<T>>
where
    T: Add<&'a Mat<T>>,
{
    type Output = Option<Mat<T>>;

    fn add(self, other: &Mat<Mat<T>>) -> Self::Output {
        let a: &Vec<Mat<T>> = self.pop();
        let b: &Vec<Mat<T>> = other.pop();
        match a.len() == b.len() {
            true => {
                let mut retvec: Vec<T> = Vec::new();
                for i in 0..a.len() {
                    if let Some(x) = &a[i] + &b[i] {
                        retvec.push(x);
                    }
                }
                Some(Mat(retvec))
            },
            false => None,
        }
    }
}
The error message I get is:
error[E0369]: binary operation `+` cannot be applied to type `&Mat<T>`
--> src\main.rs:46:38
|
46 | if let Some(x) = &a[i] + &b[i] {
| ^^^^^^^^^^^^^
|
= note: an implementation of `std::ops::Add` might be missing for `&Mat<T>`
So the compiler says that Add might not be implemented for &Mat<T>, but I thought I had specified that requirement with the bound where T: Add<&'a Mat<T>>. To me it seems that whatever is in &a[i] should have the Add trait implemented. What am I doing wrong here?
Just as extra clarification, my idea is that Add for &'a Mat<Mat<T>> should be able to be called recursively until it boils down to the Vec with an actual number type in it. Then the Add for &'a Mat<T> should be called.
There are two problems: the wrong associated Output type, and the type of retvec.
Something like this should work:
impl<'a, T> Add for &'a Mat<Mat<T>>
where
    T: PartialEq + PartialOrd + Add<T> + Clone,
{
    type Output = Option<Mat<Mat<<T as std::ops::Add>::Output>>>;

    fn add(self, other: &Mat<Mat<T>>) -> Self::Output {
        let a: &Vec<Mat<T>> = self.pop();
        let b: &Vec<Mat<T>> = other.pop();
        match a.len() == b.len() {
            true => {
                let mut retvec: Vec<Mat<<T as std::ops::Add>::Output>> = Vec::new();
                for i in 0..a.len() {
                    if let Some(x) = &a[i] + &b[i] {
                        retvec.push(x);
                    }
                }
                Some(Mat(retvec))
            }
            false => None,
        }
    }
}
Apart from the compilation issue, I think it is not correct to implement a trait for a "recursive" struct like Mat<Mat<T>>: if you think of X as type X = Mat<T>, then the impl for Mat<T> suffices:
impl<'a, T> Add for &'a Mat<T>
where
    T: PartialEq + PartialOrd + Add<T> + Clone
with the additional impl for Mat<T> values:
impl<T> Add for Mat<T>
where
    T: PartialEq + PartialOrd + Add<T> + Clone
Below I post full working code. Please note that the Output type is no longer an Option<Mat<T>> but a plain Mat<T> object: this avoids a lot of headaches, although it is probably conceptually wrong if you want to implement some kind of algebra (mismatched lengths silently produce an empty Mat):
use std::ops::*;
use std::vec::Vec;

#[derive(Clone, Debug, PartialEq, PartialOrd)]
struct Mat<T>(Vec<T>);

impl<T> Mat<T> {
    fn pop(&self) -> &Vec<T> {
        &self.0
    }
}

impl<T> Add for Mat<T>
where
    T: PartialEq + PartialOrd + Add<T> + Clone,
{
    type Output = Mat<<T as std::ops::Add>::Output>;

    fn add(self, other: Mat<T>) -> Self::Output {
        let a: &Vec<T> = self.pop();
        let b: &Vec<T> = other.pop();
        match a.len() == b.len() {
            true => {
                let mut retvec: Vec<<T as std::ops::Add>::Output> = Vec::new();
                for i in 0..a.len() {
                    retvec.push(a[i].clone() + b[i].clone());
                }
                Mat(retvec)
            }
            false => Mat(Vec::new()),
        }
    }
}

impl<'a, T> Add for &'a Mat<T>
where
    T: PartialEq + PartialOrd + Add<T> + Clone,
{
    type Output = Mat<<T as std::ops::Add>::Output>;

    fn add(self, other: &Mat<T>) -> Self::Output {
        let a: &Vec<T> = self.pop();
        let b: &Vec<T> = other.pop();
        match a.len() == b.len() {
            true => {
                let mut retvec: Vec<<T as std::ops::Add>::Output> = Vec::new();
                for i in 0..a.len() {
                    retvec.push(a[i].clone() + b[i].clone());
                }
                Mat(retvec)
            }
            false => Mat(Vec::new()),
        }
    }
}

#[test]
fn add_override_vectors() {
    let vec: Mat<Mat<i32>> = Mat(vec![Mat(vec![2, 2, 2]), Mat(vec![3, 3, 3])]);
    let newvec = &vec + &vec;
    assert_eq!(*newvec.pop(), vec![Mat(vec![4, 4, 4]), Mat(vec![6, 6, 6])]);
}

#[test]
fn add_wrong_vectors() {
    let vec1: Mat<Mat<i32>> = Mat(vec![Mat(vec![2, 2, 2]), Mat(vec![4, 4, 4])]);
    let vec2: Mat<Mat<i32>> = Mat(vec![Mat(vec![3, 3, 3]), Mat(vec![3, 3])]);
    let newvec = &vec1 + &vec2;
    assert_eq!(*newvec.pop(), vec![Mat(vec![5, 5, 5]), Mat(vec![])]);
}

fn main() {
    let vec: Mat<Mat<i32>> = Mat(vec![Mat(vec![1, 2, 2]), Mat(vec![3, 3, 3])]);
    let newvec = &vec + &vec;
    println!("Hello, world!: {:?}", newvec);
}
PS: Your Mat<T> type is not a matrix in the classical sense; perhaps another name would be more appropriate to avoid confusion.
My enum looks like this:
#[derive(Clone, Debug)]
pub enum Type {
    GLnull,
    GLenum(GLenum),
    GLboolean(GLboolean),
    GLint(GLint),
    GLbyte(GLbyte),
    GLshort(GLshort),
    GLclampx(GLclampx),
    GLubyte(GLubyte),
    GLushort(GLushort),
    GLuint(GLuint),
    GLsizei(GLsizei),
    GLclampf(GLclampf),
    GLdouble(GLdouble),
    GLclampd(GLclampd),
    GLfloat_4fv((GLfloat, GLfloat, GLfloat, GLfloat)),
    GLfloat(GLfloat),
    GLintptr(GLintptr),
    GLsizeiptr(GLsizeiptr),
    GLbitfield(GLbitfield),
    GLchar_ptr(String),
}
macro_rules! get {
    ($e:expr) => {
        match $e {
            Type::GLsizei(x) => { x }
            Type::GLbitfield(x) => { x }
            _ => { 0 }
        }
    }
}
Now how do I create a macro that gets the value of the enum type?
As #aochagavia says, there is no point in having a macro if you must do specific stuff with your enum.
The following macro could help you; its purpose is to create an enum and generate some methods. This only works if every variant has exactly one field.
macro_rules! foo {
    ($($(#[$meta:meta])* foo $name:ident($ty:ty),)*) => {
        #[derive(Debug, Clone)]
        pub enum Foo {
            $($(#[$meta])* $name($ty),)*
        }

        impl Foo {
            pub fn display(&self) {
                match *self {
                    $(Foo::$name(x) => println!("{}", x),)*
                }
            }
        }
    }
}

foo! {
    foo A(i32),
    foo B(i64),
}

fn main() {
    let a = Foo::A(32);
    let b = Foo::B(64);
    a.display();
    b.display();
}
The original macro is from #koka-el-kiwi; I took it as an example and modified it for your case.
The following method is also available:
use std::ffi::CString;

#[allow(non_camel_case_types)]
pub enum Type<T> {
    gli32(T),
    gli64(T),
    glfloat4fv(T),
    glString(T),
    glVec(T),
}

impl<T> Type<T> {
    pub fn unwrap(&self) -> &T {
        match *self {
            Type::gli32(ref x) => x,
            Type::gli64(ref x) => x,
            Type::glfloat4fv(ref x) => x,
            Type::glString(ref x) => x,
            Type::glVec(ref x) => x,
        }
    }
}

fn main() {
    println!("Hello, world!");
    let f = Type::gli32(32 as i32);
    let ff64 = Type::gli64((64, 32));
    let f4fv = Type::glfloat4fv((0.1, 0.2, 0.0));
    let cstr = Type::glString(CString::new("glstring").unwrap());
    let ve = [1, 2, 3, 5];
    let glve = Type::glVec(ve);
    println!("f = {} {:?} {:?} {:?}",
        f.unwrap(),
        f4fv.unwrap(),
        cstr.unwrap(),
        glve.unwrap());
}
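(Note: this pattern only works because every variant wraps the same generic T, so unwrap can return &T; each binding in main infers its own concrete Type<T>, e.g. Type<i32> for f and Type<(i32, i32)> for ff64. A variant with a different payload type would not fit this shape.)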