How can I verify whether the mouse was clicked inside the area where gimage_1 is drawn?
gScreenSurface is the main surface onto which gimage_1 is drawn.
Basically, I am stuck on how to get the gimage_1 coordinates and compare them with the mouse click position.
Pseudocode:
SDL_Window* gWindow = NULL;
SDL_Surface* gScreenSurface = NULL;
SDL_Surface* gimage_1 = NULL;
SDL_Event e;

while( SDL_PollEvent( &e ) != 0 )
{
    switch( e.type ) {
        case SDL_MOUSEBUTTONDOWN: {
            int x, y;
            SDL_GetMouseState( &x, &y );
            break;
        }
    }
}

SDL_BlitSurface( gimage_1, NULL, gScreenSurface, NULL );
SDL_UpdateWindowSurface( gWindow );
SDL_Surface objects are images without a position or area. You give a surface a position when you render it onto the screen via an SDL_Rect. In your example, gimage_1 does not have any coordinates; the SDL_Rect you use when rendering it does.
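For example, here is a minimal sketch of that idea, assuming you keep the destination rectangle you blit with in a variable (imageRect and clickedOnImage are illustrative names, not SDL API):

#include <SDL.h>

/* Returns 1 if the click at (x, y) falls inside the rectangle
   the image was blitted to, 0 otherwise. */
int clickedOnImage( int x, int y, const SDL_Rect* r )
{
    return x >= r->x && x < r->x + r->w &&
           y >= r->y && y < r->y + r->h;
}

/* Usage sketch:

   SDL_Rect imageRect = { 100, 50, 0, 0 };  // assumed draw position
   imageRect.w = gimage_1->w;               // the surface knows its own size
   imageRect.h = gimage_1->h;

   // render with the rect so the image really is drawn there:
   SDL_BlitSurface( gimage_1, NULL, gScreenSurface, &imageRect );

   // in the SDL_MOUSEBUTTONDOWN handler:
   SDL_GetMouseState( &x, &y );
   if( clickedOnImage( x, y, &imageRect ) ) {
       // the click landed on gimage_1
   }
*/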
I have been working on a project in which I have to select an area of an image and then use CTRL+C and CTRL+V to copy and paste the selected area. I have tried creating a buffer using createGraphics(), but it did not work. I have also tried getting pixels one by one and creating an image, but that is not working either. Can somebody help? Below is my code.
if (this.isCtrlPressed == true && p5.key == 'v') {
  let img = p5.createImage(this.dw, this.dh);
  p5.draw = () => {
    img.loadPixels();
    for (let i = 0; i < parseInt(this.sw); i++) {
      for (let j = 0; j < parseInt(this.sh); j++) {
        img.set(i, j, p5.get(i, j));
      }
    }
    img.updatePixels();
    p5.stroke(255);
    p5.image(img, 10, 10);
    p5.noFill();
    p5.rect(p5.mouseX, p5.mouseY, 10, 10);
  };
  return;
}
You're not too far off.
Bear in mind that you can also call get() with width and height arguments, not just the first two (x, y): this makes it easy to copy a section of the image.
The p5.Graphics you get using createGraphics() is handy indeed.
Remember to store the selection rectangle properties (x, y, width, height) to copy the image in the first place.
Here's a very rough sketch based on the CreateImage p5.js example:
/*
 * @name Create Image
 * @description The createImage() function provides a fresh buffer of pixels to
 * play with. This example creates an image gradient.
 */
let img; // Declare variable 'img'.
// copy/paste selection rectangle
let selection = {x: 0, y: 0, w: 0, h: 0};
// p5.Image cloned from the main image using the selection
let clipboard;
// where to paste the clipboard p5.Image
let pasteBuffer;

function setup() {
  createCanvas(720, 400);
  img = createImage(230, 230);
  img.loadPixels();
  for (let x = 0; x < img.width; x++) {
    for (let y = 0; y < img.height; y++) {
      let a = map(y, 0, img.height, 255, 0);
      img.set(x, y, [0, 153, 204, a]);
    }
  }
  img.updatePixels();
  // selection drawing style
  noFill();
  stroke(255);
  // set up the buffer to paste into
  pasteBuffer = createGraphics(width, height);
}

function draw() {
  background(0);
  // render the original image
  image(img, 0, 0);
  // render the pasted graphics
  image(pasteBuffer, 0, 0);
  // render the selection rectangle
  rect(selection.x, selection.y, selection.w, selection.h);
}

function mousePressed() {
  // store the selection start
  selection.x = mouseX;
  selection.y = mouseY;
}

function mouseDragged() {
  // update the selection dimensions as the difference between
  // the current mouse coordinates and the selection start (x, y)
  selection.w = mouseX - selection.x;
  selection.h = mouseY - selection.y;
}

function keyPressed() {
  if (key == 'c') {
    console.log("copy image", selection);
    // use get() to "clone" a subsection of the main image
    clipboard = img.get(selection.x, selection.y, selection.w, selection.h);
  }
  if (key == 'v') {
    console.log("paste image");
    if (clipboard) {
      // simply render the clipboard image
      pasteBuffer.image(clipboard, mouseX, mouseY);
    }
  }
}
<script src="https://cdnjs.cloudflare.com/ajax/libs/p5.js/1.4.2/p5.min.js"></script>
Note the above isn't perfect (e.g. you need to use abs() to handle selections made from right to left, not just left to right), but hopefully it illustrates the use of a selection rectangle, copying pixels, and "pasting"/rendering them into a buffer.
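For instance, a quick sketch of normalizing the selection before copying, so right-to-left drags work too:

// normalize so (sx, sy) is the top-left corner and sw/sh are positive
let sx = min(selection.x, selection.x + selection.w);
let sy = min(selection.y, selection.y + selection.h);
let sw = abs(selection.w);
let sh = abs(selection.h);
clipboard = img.get(sx, sy, sw, sh);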
Speaking of copying pixels, an alternative option is to use a p5.Image (createImage() instead of createGraphics()) together with copy() (instead of buffer.image()); you would then use the selection rectangle properties to know which pixels to paste, and where.
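A rough sketch of that alternative, assuming the same clipboard as above and a pasteImage buffer created in setup():

// paste target as a p5.Image instead of a p5.Graphics
let pasteImage;

function setup() {
  createCanvas(720, 400);
  pasteImage = createImage(width, height);
}

function keyPressed() {
  if (key == 'v' && clipboard) {
    // copy(srcImage, sx, sy, sw, sh, dx, dy, dw, dh)
    pasteImage.copy(clipboard,
                    0, 0, clipboard.width, clipboard.height,
                    mouseX, mouseY, clipboard.width, clipboard.height);
  }
}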
I am implementing a Tree-View control using the Win32 API, where an insertion mark is displayed during drag operations.
The problem is that the insertion line is drawn over the drag image, as shown in the screenshot below. (Note the line going over the document icon on the right.)
What can I do to prevent that from happening?
This is the code handling the WM_MOUSEMOVE message:
void TreeviewDlg_OnMouseMove(HWND hwndParent, int x, int y, UINT keyFlags) {
    if (TreeViewGlobals::g_fDragging) {
        HWND hwndTV =
            (HWND)GetWindowLongPtr(hwndParent, GWLP_USERDATA); // Tree-View control handle

        // convert the dialog coordinates to control coordinates
        POINT point;
        point.x = x;
        point.y = y;
        ClientToScreen(hwndParent, &point);
        ScreenToClient(hwndTV, &point);

        ImageList_DragShowNolock(FALSE); // turn off the dragged image
                                         // so the background can be refreshed

        // Find out if the pointer is on an item. If it is, put the insert mark there.
        TVHITTESTINFO tvht;
        tvht.pt = point;
        HTREEITEM htiTarget; // Handle to target item.
        if ((htiTarget = TreeView_HitTest(hwndTV, &tvht)) != NULL) {
            TreeView_SetInsertMark(hwndTV, htiTarget, TRUE);
        }

        ImageList_DragShowNolock(TRUE);
        ImageList_DragMove(point.x, point.y);
    }
}
Does anyone know how to convert from pixel coordinates to UI coordinates and vice versa in Unity? For example, I want to click somewhere on the screen with the mouse and have a UI Image appear at that click position. If I do this, it won't work:
Image img = null; // I assign it via the Inspector

void Update()
{
    if (Input.GetMouseButtonDown(0))
    {
        img.rectTransform.anchoredPosition = Input.mousePosition;
    }
}
What works instead is converting the screen point into the local space of the image's parent RectTransform with RectTransformUtility.ScreenPointToLocalPointInRectangle:

Image img = null; // I assign it via the Inspector

void Update()
{
    if (Input.GetMouseButtonDown(0))
    {
        Vector2 point;
        // canvasCamera is the camera rendering the canvas (pass null for a Screen Space - Overlay canvas)
        RectTransformUtility.ScreenPointToLocalPointInRectangle((RectTransform)img.rectTransform.parent, Input.mousePosition, canvasCamera, out point);
        img.rectTransform.anchoredPosition = point;
    }
}
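For the reverse direction (UI to pixel/screen coordinates), a minimal sketch, assuming the same canvasCamera reference as above (again, pass null for a Screen Space - Overlay canvas):

// convert the image's world position back to screen (pixel) coordinates
Vector2 screenPoint = RectTransformUtility.WorldToScreenPoint(canvasCamera, img.rectTransform.position);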
Hello people of Stack Overflow,
I'm new to Processing but fairly familiar with coding.
For a school project, I'm making an interactive installation where visitors can play with their "shadow".
They should be able to draw objects like wings or capes onto the shadow. These objects then need to move along with the skeleton of the player.
For instance, if I draw a big hat on my head, it needs to move along with my head.
I made this simple sketch, which renders a silhouette of the player; the player can then draw on it and save screenshots.
import SimpleOpenNI.*;

SimpleOpenNI context;
PImage userImage;
int[] userMap;
PImage rgbImage;
PGraphics pg;
color pixelColor;
int dikte = 10; // stroke weight

void setup() {
  size(1024, 768);
  context = new SimpleOpenNI(this);
  context.enableRGB();
  context.enableDepth();
  context.enableUser();
  pg = createGraphics(1024, 768);
  background(255);
  userImage = createImage(640, 480, RGB);
}

void draw() {
  pg.beginDraw();
  pg.strokeWeight(dikte);
  if (mousePressed && mouseButton == LEFT) {
    pg.stroke(0);
    pg.line(mouseX, mouseY, pmouseX, pmouseY);
  }
  if (mousePressed && mouseButton == RIGHT) {
    pg.stroke(255);
    pg.line(mouseX, mouseY, pmouseX, pmouseY);
  }
  context.update();
  rgbImage = context.rgbImage();
  userMap = context.userMap();
  userImage.loadPixels();
  for (int y = 0; y < context.depthHeight(); y++) {
    for (int x = 0; x < context.depthWidth(); x++) {
      int index = x + y * 640;
      if (userMap[index] != 0) {
        // a user occupies this pixel: paint it black for the silhouette
        pixelColor = rgbImage.pixels[index];
        userImage.pixels[index] = color(0, 0, 0);
      } else {
        userImage.pixels[index] = color(255);
      }
    }
  }
  userImage.updatePixels();
  pg.endDraw();
  image(userImage, 0, 0);
  image(pg, 0, 0);
}

void keyPressed() {
  if (key == CODED) {
    if (keyCode == UP) {
      // take the screenshot
      saveFrame("line-######.png");
    }
    if (keyCode == DOWN) {
      // clear the drawings
      pg.clear();
      background(255);
    }
    if (keyCode == RIGHT && dikte < 30) {
      // increase the stroke weight
      dikte++;
    }
    if (keyCode == LEFT && dikte > 2) {
      // decrease the stroke weight
      dikte--;
    }
  }
}
Now I have tested and inspected the code below, but I can't work out how to combine it with my sketch so that the drawings attach to the skeleton and move with the limbs.
// NOTE: img is assumed to be a PImage the same size as the depth image, created in setup() (not shown in this snippet)
void draw() {
  // clears the screen with white; this is usually a good idea
  // to avoid color artefacts from previous draw iterations
  background(255);
  // asks the Kinect to send new data
  context.update();
  // retrieves the depth image
  PImage depthImage = context.depthImage();
  depthImage.loadPixels();
  // get user pixels - an array of the same size as depthImage.pixels that gives
  // information about the users in the depth image:
  //   if upix[i] == 0, there is no user at that pixel position
  //   if upix[i] > 0, upix[i] indicates which user id is at that position
  int[] upix = context.userMap();
  // colorize users
  for (int i = 0; i < upix.length; i++) {
    if (upix[i] > 0) {
      // there is a user at that position
      // NOTE: if you need to distinguish between users, check the value of upix[i]
      img.pixels[i] = color(0, 0, 255);
    } else {
      // add depth data to the image
      img.pixels[i] = depthImage.pixels[i];
    }
  }
  img.updatePixels();
  // draws the depth map data as an image to the screen
  // at position 0 (left), 0 (top)
  image(img, 0, 0);
  // draw significant points of users
  // get an array of IDs of all users present
  int[] users = context.getUsers();
  ellipseMode(CENTER);
  // iterate through users
  for (int i = 0; i < users.length; i++) {
    int uid = users[i];
    // draw the center of mass of the user (a simple mean across the positions of all pixels that belong to the given user)
    PVector realCoM = new PVector();
    // get the CoM in real-world (3D) coordinates
    context.getCoM(uid, realCoM);
    PVector projCoM = new PVector();
    // convert real-world coordinates to projective ones (those we can use to draw on our canvas)
    context.convertRealWorldToProjective(realCoM, projCoM);
    fill(255, 0, 0);
    ellipse(projCoM.x, projCoM.y, 10, 10);
    // check if the user has a skeleton
    if (context.isTrackingSkeleton(uid)) {
      // draw the head
      PVector realHead = new PVector();
      // get real-world coordinates of the given joint of the user (in this case Head -> SimpleOpenNI.SKEL_HEAD)
      context.getJointPositionSkeleton(uid, SimpleOpenNI.SKEL_HEAD, realHead);
      PVector projHead = new PVector();
      context.convertRealWorldToProjective(realHead, projHead);
      fill(0, 255, 0);
      ellipse(projHead.x, projHead.y, 10, 10);
      // draw the left hand
      PVector realLHand = new PVector();
      context.getJointPositionSkeleton(uid, SimpleOpenNI.SKEL_LEFT_HAND, realLHand);
      PVector projLHand = new PVector();
      context.convertRealWorldToProjective(realLHand, projLHand);
      fill(255, 255, 0);
      ellipse(projLHand.x, projLHand.y, 10, 10);
    }
  }
}
Can someone please help me out with this?
Kind regards
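As a very rough, hedged sketch of one way to combine the two sketches above (pg is the PGraphics drawing layer from the first sketch; drawAnchor and drawLayerAnchored are illustrative names, not SimpleOpenNI API): record the head position at the moment the drawing is made, then offset the drawing layer by however far the head has moved since.

PVector drawAnchor = null; // head position when the drawing was made

// call inside draw(), after projHead has been computed for a tracked user
void drawLayerAnchored(PVector projHead) {
  if (drawAnchor == null) {
    drawAnchor = projHead.copy(); // remember where the head was
  }
  // shift the whole drawing layer so it follows the head
  image(pg, projHead.x - drawAnchor.x, projHead.y - drawAnchor.y);
}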
I wrote this code to draw a simple rectangle in a dialog, and I also added ON_WM_PAINT() to my message map, but it doesn't show anything on the dialog! I would really appreciate it if anyone could point out the mistakes in my code:
void Ctest4Dlg::OnPaint()
{
    if (IsIconic())
    {
        CPaintDC dc(this); // device context for painting
        // TODO: Add your message handler code here
        SendMessage(WM_ICONERASEBKGND, reinterpret_cast<WPARAM>(dc.GetSafeHdc()), 0);

        // Center icon in client rectangle
        int cxIcon = GetSystemMetrics(SM_CXICON);
        int cyIcon = GetSystemMetrics(SM_CYICON);
        CRect rect;
        GetClientRect(&rect);
        int x = 2;
        int y = 2;

        // Draw the icon
        dc.DrawIcon(x, y, m_hIcon);

        // I want to draw a rectangle
        dc.Rectangle(10, 10, 50, 50);
    }
    else
    {
        CDialogEx::OnPaint();
    }
}
It looks like your paint code only runs when the window is iconic (minimized). Why are you doing that?
Put the rectangle drawing in the else block, after the call to CDialogEx::OnPaint().
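One way to follow that advice is a sketch like this (not the only approach; creating a CPaintDC in the else branch instead of calling CDialogEx::OnPaint() also works):

void Ctest4Dlg::OnPaint()
{
    if (IsIconic())
    {
        // ... default icon-drawing code as above ...
    }
    else
    {
        CDialogEx::OnPaint();   // let the dialog paint itself first
        CClientDC dc(this);     // then draw on top of the client area
        dc.Rectangle(10, 10, 50, 50);
    }
}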
Your first and biggest mistake is trying to draw directly in a dialog. While it is possible to do so, it's almost always a bad idea. A dialog should usually be treated as a container for controls.