How can I convert an Image to a byte array in J2ME?

My requirement is like this: I need to read a file from the mobile phone using a file connection, create a thumbnail of that image, and post it to the server. I am able to read the image using the FileConnection API, and I am also able to create the thumbnail.
After creating the thumbnail, I am not able to find a method to convert that image back to byte[]. Is it possible?
Code for thumbnail conversion:
private Image createThumbnail(Image image) {
    int sourceWidth = image.getWidth();
    int sourceHeight = image.getHeight();
    int thumbWidth = 128;
    // preserve the aspect ratio: e.g. a 640x480 source gives 128 * 480 / 640 = 96
    int thumbHeight = thumbWidth * sourceHeight / sourceWidth;
    Image thumb = Image.createImage(thumbWidth, thumbHeight);
    Graphics g = thumb.getGraphics();
    // nearest-neighbour resample: clip to one destination pixel at a time and
    // draw the source image offset so the wanted source pixel lands on (x, y)
    for (int y = 0; y < thumbHeight; y++) {
        for (int x = 0; x < thumbWidth; x++) {
            g.setClip(x, y, 1, 1);
            int dx = x * sourceWidth / thumbWidth;
            int dy = y * sourceHeight / thumbHeight;
            g.drawImage(image, x - dx, y - dy, Graphics.LEFT | Graphics.TOP);
        }
    }
    // return an immutable copy of the thumbnail
    return Image.createImage(thumb);
}

MIDP 2.0's Image.getRGB() is your friend. You can obtain the ARGB pixel data as an int array as follows:
int w = theImage.getWidth();
int h = theImage.getHeight();
int[] argb = new int[w * h];
theImage.getRGB(argb, 0, w, 0, 0, w, h);
The int array can then be used as a parameter to Image.createRGBImage(), or in desktop Java, BufferedImage can be used as follows:
BufferedImage img = new BufferedImage(w, h, BufferedImage.TYPE_INT_ARGB);
img.setRGB(0, 0, w, h, argb, 0, w);
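To get from the int array to the byte[] the question asks for, one option is to pack each ARGB int into four bytes. This is only a minimal sketch: it assumes the server is prepared to accept raw big-endian ARGB data, since MIDP itself has no built-in PNG/JPEG encoder.
byte[] bytes = new byte[argb.length * 4];
for (int i = 0; i < argb.length; i++) {
    bytes[4 * i]     = (byte) (argb[i] >>> 24); // alpha
    bytes[4 * i + 1] = (byte) (argb[i] >>> 16); // red
    bytes[4 * i + 2] = (byte) (argb[i] >>> 8);  // green
    bytes[4 * i + 3] = (byte) argb[i];          // blue
}
If the server expects a real image format, the pixel data has to be encoded on the device (for example with a third-party PNG encoder library) before posting.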

Related

C# EmguCV Resize Mat but keep bounds/resolution

I've tried many things, but all my attempts fail.
I need to resize a grayscale image (2560x1440) to a lower or higher resolution, then set the bounds back to the original size (2560x1440) while keeping the resized image centered.
I'm using EmguCV 4.3 and Mat. I have tried many approaches, including a ROI in the Mat constructor and a CopyTo, but nothing works; it always produces a new Mat with the resized bounds.
Example of what is required:
Source image: (2560x1440)
50% resized, but keep same bounds as source (2560x1440)
300% resized, but keep same bounds as source (2560x1440)
Use WarpAffine to apply an affine transformation to the image. Using the transformation matrix you can apply scale and translate transformations. Rotation is also supported but not covered in my example. Translation values can also be negative.
The WarpAffine method has some more parameters you can play around with.
public void Test()
{
var img = new Mat("Bmv60.png", ImreadModes.Grayscale);
Mat upscaled = GetContentScaled(img, 2.0, 0.5, 0, 0);
upscaled.Save("scaled1.png");
Mat downscaled = GetContentScaled(img, 0.5, 0.5, 0, 0);
downscaled.Save("scaled2.png");
}
private Mat GetContentScaled(Mat src, double xScale, double yScale, double xTrans, double yTrans, Inter interpolation = Inter.Linear)
{
var dst = new Mat(src.Size, src.Depth, src.NumberOfChannels);
var translateTransform = new Matrix<double>(2, 3)
{
[0, 0] = xScale, // xScale
[1, 1] = yScale, // yScale
[0, 2] = xTrans + (src.Width - src.Width * xScale) / 2.0, //x translation + compensation of x scaling
[1, 2] = yTrans + (src.Height - src.Height * yScale) / 2.0 // y translation + compensation of y scaling
};
CvInvoke.WarpAffine(src, dst, translateTransform, dst.Size, interpolation);
return dst;
}
I feel as if there should be a more elegant way to do this, however, I offer two extension methods:
static void CopyToCenter(this Image<Gray, byte> imgSrc, Image<Gray, byte> imgDst)
{
// source is larger than destination: crop the centered region
int dx = (imgSrc.Cols - imgDst.Cols) / 2;
int dy = (imgSrc.Rows - imgDst.Rows) / 2;
byte[,,] srcData = imgSrc.Data;
byte[,,] dstData = imgDst.Data;
for(int v = 0; v < imgDst.Rows; v++)
{
for (int u = 0; u < imgDst.Cols; u++)
{
dstData[v, u, 0] = srcData[v + dy, u + dx, 0];
}
}
}
static void CopyFromCenter(this Image<Gray, byte> imgDst, Image<Gray, byte> imgSrc)
{
// source is smaller than destination: paste it centered
int dx = (imgDst.Cols - imgSrc.Cols) / 2;
int dy = (imgDst.Rows - imgSrc.Rows) / 2;
byte[,,] srcData = imgSrc.Data;
byte[,,] dstData = imgDst.Data;
for (int v = 0; v < imgSrc.Rows; v++)
{
for (int u = 0; u < imgSrc.Cols; u++)
{
dstData[v + dy, u + dx, 0] = srcData[v, u, 0];
}
}
}
You can use them like this:
static void Main(string[] args)
{
double scaleFactor = 0.8;
Image<Gray, byte> originalImage = new Image<Gray, byte>("Bmv60.png");
Image<Gray, byte> scaledImage = originalImage.Resize(scaleFactor, Inter.Linear);
Image<Gray, byte> outputImage = new Image<Gray, byte>(originalImage.Size);
if(scaleFactor > 1)
{
scaledImage.CopyToCenter(outputImage);
}
else
{
outputImage.CopyFromCenter(scaledImage);
}
}
You didn't request a specific language, so I hope C# is useful.

How do I make the black pixels in a PImage transparent?

The following code makes a rising smoke effect.
PImage buffer1;
PImage buffer2;
PImage cooling;
PImage buffer3;
float yInitial = 0.0;
void setup() {
size(600, 200);
buffer1 = createImage(width, 200, RGB);
buffer2 = createImage(width, 200, RGB);
buffer3 = createImage(width, 200, RGB);
cooling = createImage(width, 200, RGB);
}
void newLine(int rows) {
//create row of white pixels
buffer1.loadPixels();
for (int x = 0; x< buffer1.width; x++){
for(int j = 0; j< rows; j++){
//find location to draw pixels
int y = buffer1.height - (j+1);
int index = x + y * buffer1.width;
buffer1.pixels[index] = color(255);
}
}
buffer1.updatePixels();
}
void cool(){
cooling.loadPixels();
//start x at 0
float xoff = 0.0;
float increment = 0.02;
for (int x = 0; x < cooling.width; x++){
xoff += increment;
//start y at 0
float yoff = yInitial;
for (int y = 0; y < cooling.height; y++){
yoff += increment;
//calculate noise and enlarge
float n = noise(xoff, yoff);
if(n<0.4)
n = 0;
float bright = noise(xoff, yoff) *25;
//set pixel to grayscale value
cooling.pixels[x+y*cooling.width]= color(bright);
}
}
cooling.updatePixels();
yInitial += increment;
}
void draw(){
cool();
newLine(10);
background(0);
buffer1.loadPixels();
buffer2.loadPixels();
//loop through all x and y coordinates and find the neighbouring pixels' colours
for (int x = 1; x < buffer1.width-1; x++) {
for (int y = 1; y < buffer1.height-1; y++) {
int index0 = x + y * buffer1.width;
int index1 = (x+1) + y * buffer1.width;
int index2 = (x-1) + y * buffer1.width;
int index3 = x + (y+1) * buffer1.width;
int index4 = x + (y-1) * buffer1.width;
color c1 = buffer1.pixels[index1];
color c2 = buffer1.pixels[index2];
color c3 = buffer1.pixels[index3];
color c4 = buffer1.pixels[index4];
color c5 = cooling.pixels[index0];
float newC = brightness(c1) + brightness(c2)+ brightness(c3) + brightness(c4);
newC = newC - brightness(c5);
newC = newC / 4;
buffer2.pixels[index4] = color(newC);
}
}
buffer2.updatePixels();
//swap
PImage temp = buffer1;
buffer1 = buffer2;
buffer2 = temp;
image(buffer2, 0, 0);
}
I intend to use this code to draw the moving smoke over an image that will be in the background (I haven't put this in yet). To do so, I attempted to make all the black pixels in the PImage transparent, so that only the smoke would be drawn over the background image. I then changed all the transparent pixels back to black after the PImage had been drawn. However, when I added this, the smoke completely stopped being drawn. What have I done wrong, and is this the correct way to draw the smoke over a background image? Here is my code:
PImage buffer1;
PImage buffer2;
PImage cooling;
PImage buffer3;
float yInitial = 0.0;
void setup() {
size(600, 200);
buffer1 = createImage(width, 200, RGB);
buffer2 = createImage(width, 200, RGB);
buffer3 = createImage(width, 200, RGB);
cooling = createImage(width, 200, RGB);
}
void newLine(int rows) {
//create row of white pixels
buffer1.loadPixels();
for (int x = 0; x< buffer1.width; x++){
for(int j = 0; j< rows; j++){
//find location to draw pixels
int y = buffer1.height - (j+1);
int index = x + y * buffer1.width;
buffer1.pixels[index] = color(255);
}
}
buffer1.updatePixels();
}
void cool(){
cooling.loadPixels();
//start x at 0
float xoff = 0.0;
float increment = 0.02;
for (int x = 0; x < cooling.width; x++){
xoff += increment;
//start y at 0
float yoff = yInitial;
for (int y = 0; y < cooling.height; y++){
yoff += increment;
//calculate noise and enlarge
float n = noise(xoff, yoff);
if(n<0.4)
n = 0;
float bright = noise(xoff, yoff) *25;
//set pixel to grayscale value
cooling.pixels[x+y*cooling.width]= color(bright);
}
}
cooling.updatePixels();
yInitial += increment;
}
void draw(){
cool();
newLine(10);
buffer1.loadPixels();
buffer2.loadPixels();
//loop through all x and y coordinates and find the neighbouring pixels' colours
for (int x = 1; x < buffer1.width-1; x++) {
for (int y = 1; y < buffer1.height-1; y++) {
int index0 = x + y * buffer1.width;
int index1 = (x+1) + y * buffer1.width;
int index2 = (x-1) + y * buffer1.width;
int index3 = x + (y+1) * buffer1.width;
int index4 = x + (y-1) * buffer1.width;
color c1 = buffer1.pixels[index1];
color c2 = buffer1.pixels[index2];
color c3 = buffer1.pixels[index3];
color c4 = buffer1.pixels[index4];
color c5 = cooling.pixels[index0];
float newC = brightness(c1) + brightness(c2)+ brightness(c3) + brightness(c4);
newC = newC - brightness(c5);
newC = newC / 4;
buffer2.pixels[index4] = color(newC);
//make the black pixels transparent
if(color(buffer2.pixels[index0])==color(0));
buffer2.pixels[index0] = color(0,0);
if(color(buffer1.pixels[index0])==color(0));
buffer1.pixels[index0] = color(0,0);
}
}
buffer2.updatePixels();
//swap
PImage temp = buffer1;
buffer1 = buffer2;
buffer2 = temp;
image(buffer2, 0, 0);
//set transparent pixels back to black
for (int x = 1; x < buffer1.width-1; x++) {
for (int y = 1; y < buffer1.height-1; y++) {
int index0 = x + y * buffer1.width;
if(color(buffer2.pixels[index0])==color(0,0))
buffer2.pixels[index0] = color(0);
if(color(buffer1.pixels[index0])==color(0,0));
buffer1.pixels[index0] = color(0);
}
}
}
That's a pretty cool smoke effect.
There's an easier way to composite the smoke effect: blendMode().
What you're after is something like blendMode(SCREEN) or blendMode(ADD), which makes the black pixels of the effect image contribute nothing, as if they were transparent.
PImage buffer1;
PImage buffer2;
PImage cooling;
PImage buffer3;
float yInitial = 0.0;
void setup() {
size(600, 200);
fill(192,0,0);
// change blend mode to treat black pixels as transparent
blendMode(SCREEN);
buffer1 = createImage(width, 200, RGB);
buffer2 = createImage(width, 200, RGB);
buffer3 = createImage(width, 200, RGB);
cooling = createImage(width, 200, RGB);
}
void newLine(int rows) {
//create row of white pixels
buffer1.loadPixels();
for (int x = 0; x< buffer1.width; x++){
for(int j = 0; j< rows; j++){
//find location to draw pixels
int y = buffer1.height - (j+1);
int index = x + y * buffer1.width;
buffer1.pixels[index] = color(255);
}
}
buffer1.updatePixels();
}
void cool(){
cooling.loadPixels();
//start x at 0
float xoff = 0.0;
float increment = 0.02;
for (int x = 0; x < cooling.width; x++){
xoff += increment;
//start y at 0
float yoff = yInitial;
for (int y = 0; y < cooling.height; y++){
yoff += increment;
//calculate noise and enlarge
float n = noise(xoff, yoff);
if(n<0.4)
n = 0;
float bright = noise(xoff, yoff) *25;
//set pixel to grayscale value
cooling.pixels[x+y*cooling.width]= color(bright);
}
}
cooling.updatePixels();
yInitial += increment;
}
void updateSmokeEffect(){
cool();
newLine(10);
buffer1.loadPixels();
buffer2.loadPixels();
//loop through all x and y coordinates and find the neighbouring pixels' colours
for (int x = 1; x < buffer1.width-1; x++) {
for (int y = 1; y < buffer1.height-1; y++) {
int index0 = x + y * buffer1.width;
int index1 = (x+1) + y * buffer1.width;
int index2 = (x-1) + y * buffer1.width;
int index3 = x + (y+1) * buffer1.width;
int index4 = x + (y-1) * buffer1.width;
color c1 = buffer1.pixels[index1];
color c2 = buffer1.pixels[index2];
color c3 = buffer1.pixels[index3];
color c4 = buffer1.pixels[index4];
color c5 = cooling.pixels[index0];
float newC = brightness(c1) + brightness(c2)+ brightness(c3) + brightness(c4);
newC = newC - brightness(c5);
newC = newC / 4;
buffer2.pixels[index4] = color(newC);
}
}
buffer2.updatePixels();
swapBuffers();
}
void swapBuffers(){
//swap
PImage temp = buffer1;
buffer1 = buffer2;
buffer2 = temp;
}
void draw(){
background(0);
// test drawing something in the background
ellipse(mouseX,mouseY,30,30);
updateSmokeEffect();
// render blended image on top
image(buffer2, 0, 0);
}
You can use blendMode(BLEND) to revert to the default blend mode:
void draw(){
background(0);
blendMode(ADD);
// test drawing something in the background
ellipse(mouseX,mouseY,30,30);
updateSmokeEffect();
// render blended image on top
image(buffer2, 0, 0);
blendMode(BLEND);
// test drawing something in the foreground
ellipse(mouseX + 35,mouseY,30,30);
}
Additionally, you can check out PGraphics to mimic layers, as in the sketch below, though for pixel effects like this you can do plenty with PImage (and its set() / copy() / blend() / etc. methods).
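As a rough illustration of the PGraphics idea, here is a minimal standalone sketch (not wired into the smoke code above): content is drawn into an offscreen layer with a transparent background, then composited over the main canvas.
PGraphics layer;
void setup() {
  size(600, 200);
  layer = createGraphics(width, height);
}
void draw() {
  background(32);
  // draw into the offscreen layer
  layer.beginDraw();
  layer.clear(); // transparent background
  layer.noStroke();
  layer.fill(255, 128); // semi-transparent white
  layer.ellipse(mouseX, mouseY, 60, 60);
  layer.endDraw();
  // composite the layer over the main canvas
  image(layer, 0, 0);
}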
You might also be interested in shaders: fragment shaders in particular, for "pixel" manipulation on the GPU.

Processing mirror image over x axis?

I was able to copy the image to the location, but not able to mirror it. What am I missing?
PImage img;
float srcY;
float srcX;
int destX;
int destY;
img = loadImage("http://oldpalmgolfclub.com/wp-content/uploads/2012/02/Palm- Beach-State-College2-e1329949470871.jpg");
size(img.width, img.height * 2);
image(img, 0, 0);
image(img, 0, 330);
int num_pixels = img.width * img.height;
int copiedWidth = 319 - 254;
int copiedHeight = 85 - 22;
int startX = (width / 2) - (copiedWidth / 2);
int startY = (height / 2) - (copiedHeight / 2);
How about simply scaling by -1 on the x axis?
PImage img;
img = loadImage("https://processing.org/img/processing-web.png");
size(img.width, img.height * 2);
image(img,0,0);
scale(-1,1);//flip on X axis
image(img,-img.width,img.height);//draw offset
This can be achieved by manipulating pixels as well, but needs a bit of arithmetic:
PImage img;
img = loadImage("https://processing.org/img/processing-web.png");
size(img.width, img.height * 2);
int t = millis();
PImage flipped = createImage(img.width,img.height,RGB);//create a new image with the same dimensions
img.loadPixels();//make sure both pixels[] arrays are available
flipped.loadPixels();
for(int i = 0 ; i < flipped.pixels.length; i++){ //loop through each pixel
int srcX = i % flipped.width; //calculate source(original) x position
int dstX = flipped.width-srcX-1; //calculate destination(flipped) x position = (maximum-x-1)
int y = i / flipped.width; //calculate y coordinate
flipped.pixels[y*flipped.width+dstX] = img.pixels[i];//write the destination(x flipped) pixel based on the current pixel
}
//y*width+x converts from an (x,y) coordinate to a pixel array index
flipped.updatePixels();
println("done in " + (millis()-t) + "ms");
image(img,0,0);
image(flipped,0,img.height);
The above can be achieved using get() and set(), but using the pixels[] array is faster. A single for loop is generally faster than using 2 nested for loops to traverse the image with x,y counters:
PImage img;
img = loadImage("https://processing.org/img/processing-web.png");
size(img.width, img.height * 2);
int t = millis();
PImage flipped = createImage(img.width,img.height,RGB);//create a new image with the same dimensions
for(int y = 0; y < img.height; y++){
for(int x = 0; x < img.width; x++){
flipped.set(img.width-x-1,y,img.get(x,y));
}
}
println("done in " + (millis()-t) + "ms");
image(img,0,0);
image(flipped,0,img.height);
You can copy a 1px 'slice'/column at a time in a single for loop, which is faster (but still not as fast as direct pixel manipulation):
PImage img;
img = loadImage("https://processing.org/img/processing-web.png");
size(img.width, img.height * 2);
int t = millis();
PImage flipped = createImage(img.width,img.height,RGB);//create a new image with the same dimensions
for(int x = 0 ; x < flipped.width; x++){ //loop through each columns
flipped.set(flipped.width-x-1,0,img.get(x,0,1,img.height)); //copy a column in reverse x order
}
println("done in " + (millis()-t) + "ms");
image(img,0,0);
image(flipped,0,img.height);
There are other alternatives, like accessing the underlying Java BufferedImage (sketched below; although this means the Processing sketch will mostly only work in Java mode) or using a PShader, but these approaches are more complex. It's generally a good idea to keep things simple (especially when getting started).
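For example, here is a rough sketch of the BufferedImage route, in the same static-mode style as the snippets above. It assumes Processing 3's PImage.getNative(), which returns the underlying java.awt.image.BufferedImage when using the default renderer:
import java.awt.geom.AffineTransform;
import java.awt.image.AffineTransformOp;
import java.awt.image.BufferedImage;
PImage img = loadImage("https://processing.org/img/processing-web.png");
size(img.width, img.height * 2);
BufferedImage src = (BufferedImage) img.getNative();
// scale by -1 on x, then translate so the flipped image lands back in view
AffineTransform tx = AffineTransform.getScaleInstance(-1, 1);
tx.translate(-src.getWidth(), 0);
BufferedImage dst = new AffineTransformOp(tx, AffineTransformOp.TYPE_NEAREST_NEIGHBOR).filter(src, null);
image(img, 0, 0);
image(new PImage(dst), 0, img.height);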

image creation fails in midlet

I'm trying to create an image as follows:
try
{
strom = Image.createImage("stromB.png");
map0 = Image.createImage("map0.png");
}
catch (Exception e)
{
System.out.println("image creatino faild");
System.out.println(e.getMessage());
}
strom works fine, but map0 always throws an exception. My guess is that its larger size may be the reason.
How can I use a larger png image in the midlet?
Are there any limitation for the midlet?
What is the maximum size I can use for a midlet?
map0 may throw an exception because of its large size. Use the following method to resize larger images to fit the screen:
/**
* This method resizes an image by resampling its pixels.
* It assumes the screenWidth and screenHeight fields hold the display dimensions.
* @param src The image to be resized
* @return The resized image
*/
private Image resizeImage(Image src) {
int srcWidth = src.getWidth();
int srcHeight = src.getHeight();
Image tmp = Image.createImage(screenWidth, srcHeight);
Graphics g = tmp.getGraphics();
int ratio = (srcWidth << 16) / screenWidth;
int pos = ratio/2;
//Horizontal Resize
for (int x = 0; x < screenWidth; x++) {
g.setClip(x, 0, 1, srcHeight);
g.drawImage(src, x - (pos >> 16), 0, Graphics.LEFT | Graphics.TOP);
pos += ratio;
}
Image resizedImage = Image.createImage(screenWidth, screenHeight);
g = resizedImage.getGraphics();
ratio = (srcHeight << 16) / screenHeight;
pos = ratio/2;
//Vertical resize
for (int y = 0; y < screenHeight; y++) {
g.setClip(0, y, screenWidth, 1);
g.drawImage(tmp, 0, y - (pos >> 16), Graphics.LEFT | Graphics.TOP);
pos += ratio;
}
return resizedImage;
}//resize image
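A hypothetical usage sketch (the field names are illustrative): set screenWidth and screenHeight from the display first, then resize the large image once after loading it.
// inside the existing try/catch, e.g. from a Canvas where
// screenWidth = getWidth() and screenHeight = getHeight():
map0 = resizeImage(Image.createImage("map0.png"));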

Greyscale Image from YUV420p data

From what I have read on the internet, the Y value is the luminance value and can be used to create a greyscale image. The following link: https://web.archive.org/web/20141230145627/http://bobpowell.net/grayscale.aspx has some C# code for working out the luminance of a bitmap image:
public Bitmap ConvertToGrayscale(Bitmap source)
{
    Bitmap bm = new Bitmap(source.Width, source.Height);
    for (int y = 0; y < bm.Height; y++)
    {
        for (int x = 0; x < bm.Width; x++)
        {
            Color c = source.GetPixel(x, y);
            int luma = (int)(c.R * 0.3 + c.G * 0.59 + c.B * 0.11);
            bm.SetPixel(x, y, Color.FromArgb(luma, luma, luma));
        }
    }
    return bm;
}
I have a method that returns the YUV values, and I have the Y data in a byte array. I have the following piece of code, and it is failing on Marshal.Copy with "attempted to read or write protected memory":
public Bitmap ConvertToGrayscale2(byte[] yuvData, int width, int height)
{
Bitmap bmp;
IntPtr blue = IntPtr.Zero;
int inputOffSet = 0;
long[] pixels = new long[width * height];
try
{
for (int y = 0; y < height; y++)
{
int outputOffSet = y * width;
for (int x = 0; x < width; x++)
{
int grey = yuvData[inputOffSet + x] & 0xff;
unchecked
{
pixels[outputOffSet + x] = UINT_Constant | (grey * INT_Constant);
}
}
inputOffSet += width;
}
blue = Marshal.AllocCoTaskMem(pixels.Length);
Marshal.Copy(pixels, 0, blue, pixels.Length); // fails here : Attempted to read or write protected memory
bmp = new Bitmap(width, height, width, PixelFormat.Format24bppRgb, blue);
}
catch (Exception)
{
throw;
}
finally
{
if (blue != IntPtr.Zero)
{
Marshal.FreeHGlobal(blue);
blue = IntPtr.Zero;
}
}
return bmp;
}
Any help would be appreciated.
I think you have allocated pixels.Length bytes, but are copying pixels.Length longs, which is 8 times as much memory (a long is 64 bits or 8 bytes in size).
You could try:
blue = Marshal.AllocCoTaskMem(Marshal.SizeOf(pixels[0]) * pixels.Length);
You might also need to use int[] for pixels and PixelFormat.Format32bppRgb in the Bitmap constructor (as they are both 32 bits). Using long[] gives you 64 bits per pixel which isn't what a 24 bit pixel format is expecting.
You might end up with shades of blue instead of grey though - depends on what your values of UINT_Constant and INT_Constant are.
There is no need to do "& 0xff", as yuvData[] already contains a byte.
Here are another couple of approaches you could try.
public Bitmap ConvertToGrayScale(byte[] yData, int width, int height)
{
// 3 * width bytes per scanline, rounded up to a multiple of 4 bytes
int stride = 4 * (int)Math.Ceiling(3 * width / 4.0);
byte[] pixels = new byte[stride * height];
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
byte grey = yData[y * width + x];
pixels[y * stride + 3 * x] = grey;
pixels[y * stride + 3 * x + 1] = grey;
pixels[y * stride + 3 * x + 2] = grey;
}
}
IntPtr pixelsPtr = Marshal.AllocCoTaskMem(pixels.Length);
try
{
Marshal.Copy(pixels, 0, pixelsPtr, pixels.Length);
Bitmap bitmap = new Bitmap(
width,
height,
stride,
PixelFormat.Format24bppRgb,
pixelsPtr);
return bitmap;
}
finally
{
// use the matching free for AllocCoTaskMem; note that GDI+ does not copy
// the scan0 buffer, so strictly it must outlive the Bitmap (the LockBits
// approach further down avoids this problem)
Marshal.FreeCoTaskMem(pixelsPtr);
}
}
public Bitmap ConvertToGrayScale(byte[] yData, int width, int height)
{
// 3 * width bytes per scanline, rounded up to a multiple of 4 bytes
int stride = 4 * (int)Math.Ceiling(3 * width / 4.0);
IntPtr pixelsPtr = Marshal.AllocCoTaskMem(stride * height);
try
{
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
byte grey = yData[y * width + x];
Marshal.WriteByte(pixelsPtr, y * stride + 3 * x, grey);
Marshal.WriteByte(pixelsPtr, y * stride + 3 * x + 1, grey);
Marshal.WriteByte(pixelsPtr, y * stride + 3 * x + 2, grey);
}
}
Bitmap bitmap = new Bitmap(
width,
height,
stride,
PixelFormat.Format24bppRgb,
pixelsPtr);
return bitmap;
}
finally
{
Marshal.FreeCoTaskMem(pixelsPtr); // matching free for AllocCoTaskMem
}
}
I get a black image with a few pixels in the top left corner if I use this code, and it is stable when running:
public static Bitmap ToGrayscale(byte[] yData, int width, int height)
{
Bitmap bm = new Bitmap(width, height, PixelFormat.Format32bppRgb);
Rectangle dimension = new Rectangle(0, 0, bm.Width, bm.Height);
BitmapData picData = bm.LockBits(dimension, ImageLockMode.ReadWrite, bm.PixelFormat);
IntPtr pixelStateAddress = picData.Scan0;
int stride = 4 * (int)Math.Ceiling(3 * width / 4.0);
byte[] pixels = new byte[stride * height];
try
{
for (int y = 0; y < height; y++)
{
for (int x = 0; x < width; x++)
{
byte grey = yData[y * width + x];
pixels[y * stride + 3 * x] = grey;
pixels[y * stride + 3 * x + 1] = grey;
pixels[y * stride + 3 * x + 2] = grey;
}
}
Marshal.Copy(pixels, 0, pixelStateAddress, pixels.Length);
bm.UnlockBits(picData);
}
catch (Exception)
{
throw;
}
return bm;
}
