Image addition on an already created jsc3d 3D object - jsc3d

Is it possible to add an external image or text to an already created jsc3d 3D object? For example, if canvas image data needs to be stored on the created 3D object, is that possible?

Yes, it's possible.
If you look at the jsc3d implementation of Texture, you will see that a texture already has an underlying canvas.
Let's say you have a canvas called "myTexture" and a Mesh called "myMesh", and, to keep it simple, that you only need a texture with a fixed size of 128x128 px. The following will paint your canvas onto your mesh:
var canvas = document.getElementById('myTexture');
var context = canvas.getContext('2d');
var dim = 128;
var imgData = context.getImageData(0, 0, dim, dim);
var data = imgData.data;
var size = data.length / 4;
var texture = new JSC3D.Texture;
texture.data = new Array(size);
var alpha;
for (var i = 0, j = 0; i < size; i++, j += 4) {
    alpha = data[j + 3];
    // pack the canvas RGBA bytes into jsc3d's 32-bit ARGB integer format
    texture.data[i] = alpha << 24 | data[j] << 16 | data[j + 1] << 8 | data[j + 2];
    if (alpha < 255)
        texture.hasTransparency = true;
}
texture.width = dim;
texture.height = dim;
myMesh.setTexture(texture);
viewer.update();
The .data loop is taken from JSC3D.Texture.prototype.createFromImage (credits humu2009, creator of jsc3d).
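To put external text or an image onto the mesh, draw it onto the canvas first and then rebuild the texture the same way. A minimal sketch of that idea (the loaded image `logo` and the exact drawing calls are my assumptions, not part of the original answer):
var ctx = document.getElementById('myTexture').getContext('2d');
ctx.drawImage(logo, 0, 0, 128, 128);   // any loaded Image or another canvas
ctx.fillStyle = '#ffffff';
ctx.font = '16px sans-serif';
ctx.fillText('Hello mesh', 10, 64);    // overlay some text
// then repack the pixels into a JSC3D.Texture exactly as above
// and call myMesh.setTexture(texture); viewer.update();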

Related

Providing canvas2d image tint for Spritefonts

I'm doing sprite fonts and have already implemented tint for them in WebGL!
But on canvas2d I tried to do it via ctx.globalCompositeOperation; the result shows that black pixels are also filled...
Here is my code:
var size = 32;
var x = 200;
var y = 200;
var spacing = 0;
for (var i = 0; i < txt.length; i++) {
    var q = fonts[0].info[txt[i]];
    ctx.save();
    if (q) ctx.drawImage(fonts[0].src, q.x, q.y, q.w, q.h, x + (spacing || 0) + (i * size), y, size, size);
    ctx.globalCompositeOperation = "source-in";
    ctx.fillStyle = "green";
    ctx.fillRect(0, 0, canvas.width, canvas.height);
    ctx.restore();
}
When trying "darken" mode instead, it fills the letters correctly, but it also fills the background (which I don't want...).
I also tried ctx.getImageData() and ctx.putImageData(), but the letters are not shown:
var size = 32;
var x = 200;
var y = 200;
var spacing = 0;
for (var i = 0; i < txt.length; i++) {
    var q = fonts[0].info[txt[i]];
    if (q) {
        ctx.drawImage(fonts[0].src, q.x, q.y, q.w, q.h, x + (spacing || 0) + (i * size), y, size, size);
        f = ctx.getImageData(x + (spacing || 0) + (i * size), y, size, size);
        // NB: this inner loop reuses `i`, clobbering the outer character index
        for (var i = 0; i < f.data.length; i += 4) {
            f.data[i + 0] = 100;
            f.data[i + 1] = 100;
            f.data[i + 2] = 255;
            f.data[i + 3] = 255;
        }
        ctx.putImageData(f, x + (spacing || 0) + (i * size), y, 0, 0, size, size);
    }
}
The image I'm using is from here.
Fixed by using "lighten" mode to fill the background over the black pixels first, then applying "darken" mode instead of "source-in", and all done!
var size = 32;
var x = 200;
var y = 200;
var spacing = 0;
for (var i = 0; i < txt.length; i++) {
    var q = fonts[0].info[txt[i]];
    ctx.save();
    ctx.globalCompositeOperation = "lighten";
    ctx.fillStyle = ctx.canvas.style.backgroundColor;
    ctx.fillRect(0, 0, ctx.canvas.width, ctx.canvas.height);
    if (q) ctx.drawImage(fonts[0].src, q.x, q.y, q.w, q.h, x + (spacing || 0) + (i * size), y, size, size);
    ctx.globalCompositeOperation = "darken";
    ctx.fillStyle = "green";
    ctx.fillRect(0, 0, ctx.canvas.width, ctx.canvas.height);
    ctx.restore();
}
This is a better way I found:
Create a canvas whose dimensions match the spritefont image dimensions
Save the context state of the created canvas
Set the created canvas context's fillStyle to the spritefont text color (tint)
Set the created canvas context's globalAlpha to the desired opacity
Fill the created canvas background with the spritefont text color (tint)
Apply the "destination-atop" composite mode in the created canvas context
Reset the created canvas context's globalAlpha to 1 (the default)
Draw the spritefont image onto the created canvas
Restore the context state of the created canvas
Then let the default canvas context (not the created one) draw characters from the created canvas instead of the spritefont image (note that the spritefont image fills the whole created canvas).
Done!
var size = 32;
var x = 200;
var y = 200;
var spacing = 0;
var opacity = 0.8;
var color = "green";
for (var i = 0; i < txt.length; i++) {
    var q = fonts[0].info[txt[i]];
    var c = document.createElement("canvas").getContext("2d");
    c.canvas.width = fonts[0].src.width;
    c.canvas.height = fonts[0].src.height;
    c.save();
    c.fillStyle = color;
    c.globalAlpha = opacity || 0.8;
    c.fillRect(0, 0, c.canvas.width, c.canvas.height);
    // keep the tint only where the spritefont pixels land
    c.globalCompositeOperation = "destination-atop";
    c.globalAlpha = 1;
    c.drawImage(fonts[0].src, 0, 0);
    c.restore();
    if (q) ctx.drawImage(c.canvas, q.x, q.y, q.w, q.h, x + (i * (size + spacing)), y, size, size);
}
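A small efficiency note on the snippet above (my suggestion, not part of the original answer): it re-tints the entire sheet once per character, so for longer strings you could build the tinted sheet once and reuse it. A sketch with a hypothetical helper:
// build the tinted sprite sheet once, using the same destination-atop technique
function makeTintedSheet(sheet, color, opacity) {
    var c = document.createElement("canvas").getContext("2d");
    c.canvas.width = sheet.width;
    c.canvas.height = sheet.height;
    c.fillStyle = color;
    c.globalAlpha = opacity;
    c.fillRect(0, 0, c.canvas.width, c.canvas.height);
    c.globalCompositeOperation = "destination-atop";
    c.globalAlpha = 1;
    c.drawImage(sheet, 0, 0);
    return c.canvas;
}
var tinted = makeTintedSheet(fonts[0].src, "green", 0.8);
for (var i = 0; i < txt.length; i++) {
    var q = fonts[0].info[txt[i]];
    if (q) ctx.drawImage(tinted, q.x, q.y, q.w, q.h, x + (i * (size + spacing)), y, size, size);
}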

Three.js issue at edges of tiled texture using MeshFaceMaterial

I am trying to tile the texture from multiple images onto a plane geometry using MeshFaceMaterial. Everything works fine, except for a blurry edge forming in between tiles.
// g (the plane geometry), widthSegments and heightSegments are defined elsewhere
var textureArray = [];
var materialContainer = [];
var tileColumns = 2;
var tileRows = 1;
textureArray[0] = THREE.ImageUtils.loadTexture('./test3.jpg');
textureArray[1] = THREE.ImageUtils.loadTexture('./test4.jpg');
var faceCountPerTileX = 2 * widthSegments / tileColumns;
var faceCountPerTileY = heightSegments / tileRows;
var faceCountX = 2 * widthSegments;
var faceCountY = heightSegments;
for (var tileIndexY = 0; tileIndexY < tileRows; tileIndexY++) {
    for (var tileIndexX = 0; tileIndexX < tileColumns; tileIndexX++) {
        var index = tileIndexY * tileColumns + tileIndexX;
        textureArray[index].wrapS = THREE.RepeatWrapping;
        textureArray[index].wrapT = THREE.RepeatWrapping;
        textureArray[index].repeat.set(tileColumns, tileRows);
        materialContainer[index] = new THREE.MeshBasicMaterial({
            map: textureArray[index],
            overdraw: true,
            ambient: 0xffffff
        });
        for (var faceIndexY = tileIndexY * faceCountPerTileY; faceIndexY < (tileIndexY + 1) * faceCountPerTileY; faceIndexY++) {
            for (var faceIndexX = tileIndexX * faceCountPerTileX; faceIndexX < (tileIndexX + 1) * faceCountPerTileX; faceIndexX++) {
                g.faces[faceIndexY * faceCountX + faceIndexX].materialIndex = index;
            }
        }
    }
}
var mat = new THREE.MeshFaceMaterial(materialContainer);
var obj = new THREE.Mesh(g, mat);
I have tried all known solutions; I have even tried writing a custom shader and using ShaderMaterial. But no luck. Can someone help me out and fix the issue?
By the looks of it, you set the texture mode of the individual textures in your set to repeat.
This seems wrong: the individual textures do not repeat, they are displayed only once. Setting a texture to repeat causes the right side of the texture to "blend through" on the left (and vice versa), causing visible seams like the one in your screenshot.
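A hedged sketch of the corresponding change (my reading of this answer, not code from it): clamp each tile's texture and shift it back into range with an offset, so nothing wraps around at the seams:
// same textureArray and tile indices as in the question
textureArray[index].wrapS = THREE.ClampToEdgeWrapping;
textureArray[index].wrapT = THREE.ClampToEdgeWrapping;
textureArray[index].repeat.set(tileColumns, tileRows);
// tile (tileIndexX, tileIndexY) samples uv * repeat + offset, so shift it back into [0,1]
textureArray[index].offset.set(-tileIndexX, -tileIndexY);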

Resizing a DXGI Resource or Texture2D in SharpDX

I want to resize a screen captured using the Desktop Duplication API in SharpDX. I am using the Screen Capture sample code from the SharpDX Samples repository; the relevant portion follows:
SharpDX.DXGI.Resource screenResource;
OutputDuplicateFrameInformation duplicateFrameInformation;
// Try to get duplicated frame within given time
duplicatedOutput.AcquireNextFrame(10000, out duplicateFrameInformation, out screenResource);
if (i > 0)
{
    // copy resource into memory that can be accessed by the CPU
    using (var screenTexture2D = screenResource.QueryInterface<Texture2D>())
        device.ImmediateContext.CopyResource(screenTexture2D, screenTexture);
    // Get the desktop capture texture
    var mapSource = device.ImmediateContext.MapSubresource(screenTexture, 0, MapMode.Read, MapFlags.None);
    System.Diagnostics.Debug.WriteLine(watch.Elapsed);
    // Create Drawing.Bitmap
    var bitmap = new System.Drawing.Bitmap(width, height, PixelFormat.Format32bppArgb);
    var boundsRect = new System.Drawing.Rectangle(0, 0, width, height);
    // Copy pixels from screen capture Texture to GDI bitmap
    var mapDest = bitmap.LockBits(boundsRect, ImageLockMode.WriteOnly, bitmap.PixelFormat);
    var sourcePtr = mapSource.DataPointer;
    var destPtr = mapDest.Scan0;
    for (int y = 0; y < height; y++)
    {
        // Iterate and write to bitmap...
I would like to resize the image to something much smaller than the actual screen size before processing it as a byte array. I do not need to save the image, just get at the bytes. I would like to do this relatively quickly and efficiently (e.g. leveraging the GPU if possible).
I'm not able to scale during CopyResource, as the output dimensions are required to be the same as the input dimensions. Can I perform another copy from my screenTexture2D to scale? How exactly do I scale the resource - do I use a Swap Chain, Matrix transform, or something else?
If you are fine with a power-of-two downscale of the screen, you can do it like this:
Create a smaller texture with RenderTarget/ShaderResource usage and the GenerateMipMaps option, the same size as the screen, with a mip count > 1 (2 for size /2, 3 for /4, etc.).
Copy the first mipmap of the screen texture to the smaller texture.
Call DeviceContext.GenerateMipMaps on the smaller texture.
Copy the selected mipmap of the smaller texture (1: /2, 2: /4, etc.) to the staging texture (which should also be declared smaller, i.e. the same size as the mipmap that is going to be used).
A quick hack on the original code to generate a /2 texture looks like this:
[STAThread]
private static void Main()
{
    // # of graphics card adapter
    const int numAdapter = 0;
    // # of output device (i.e. monitor)
    const int numOutput = 0;
    const string outputFileName = "ScreenCapture.bmp";
    // Create DXGI Factory1
    var factory = new Factory1();
    var adapter = factory.GetAdapter1(numAdapter);
    // Create device from Adapter
    var device = new Device(adapter);
    // Get DXGI.Output
    var output = adapter.GetOutput(numOutput);
    var output1 = output.QueryInterface<Output1>();
    // Width/Height of desktop to capture
    int width = output.Description.DesktopBounds.Width;
    int height = output.Description.DesktopBounds.Height;
    // Create staging texture, CPU-accessible, at half size
    var textureDesc = new Texture2DDescription
    {
        CpuAccessFlags = CpuAccessFlags.Read,
        BindFlags = BindFlags.None,
        Format = Format.B8G8R8A8_UNorm,
        Width = width / 2,
        Height = height / 2,
        OptionFlags = ResourceOptionFlags.None,
        MipLevels = 1,
        ArraySize = 1,
        SampleDescription = { Count = 1, Quality = 0 },
        Usage = ResourceUsage.Staging
    };
    var stagingTexture = new Texture2D(device, textureDesc);
    // Create the mipmapped render-target texture the screen is copied into
    var smallerTextureDesc = new Texture2DDescription
    {
        CpuAccessFlags = CpuAccessFlags.None,
        BindFlags = BindFlags.RenderTarget | BindFlags.ShaderResource,
        Format = Format.B8G8R8A8_UNorm,
        Width = width,
        Height = height,
        OptionFlags = ResourceOptionFlags.GenerateMipMaps,
        MipLevels = 4,
        ArraySize = 1,
        SampleDescription = { Count = 1, Quality = 0 },
        Usage = ResourceUsage.Default
    };
    var smallerTexture = new Texture2D(device, smallerTextureDesc);
    var smallerTextureView = new ShaderResourceView(device, smallerTexture);
    // Duplicate the output
    var duplicatedOutput = output1.DuplicateOutput(device);
    bool captureDone = false;
    for (int i = 0; !captureDone; i++)
    {
        try
        {
            SharpDX.DXGI.Resource screenResource;
            OutputDuplicateFrameInformation duplicateFrameInformation;
            // Try to get duplicated frame within given time
            duplicatedOutput.AcquireNextFrame(10000, out duplicateFrameInformation, out screenResource);
            if (i > 0)
            {
                // Copy the screen into the first mip level of smallerTexture
                using (var screenTexture2D = screenResource.QueryInterface<Texture2D>())
                    device.ImmediateContext.CopySubresourceRegion(screenTexture2D, 0, null, smallerTexture, 0);
                // Generate the mipmaps of the screen
                device.ImmediateContext.GenerateMips(smallerTextureView);
                // Copy mipmap 1 of smallerTexture (size/2) to the staging texture
                device.ImmediateContext.CopySubresourceRegion(smallerTexture, 1, null, stagingTexture, 0);
                // Get the desktop capture texture
                var mapSource = device.ImmediateContext.MapSubresource(stagingTexture, 0, MapMode.Read, MapFlags.None);
                // Create Drawing.Bitmap
                var bitmap = new System.Drawing.Bitmap(width / 2, height / 2, PixelFormat.Format32bppArgb);
                var boundsRect = new System.Drawing.Rectangle(0, 0, width / 2, height / 2);
                // Copy pixels from screen capture Texture to GDI bitmap
                var mapDest = bitmap.LockBits(boundsRect, ImageLockMode.WriteOnly, bitmap.PixelFormat);
                var sourcePtr = mapSource.DataPointer;
                var destPtr = mapDest.Scan0;
                for (int y = 0; y < height / 2; y++)
                {
                    // Copy a single line
                    Utilities.CopyMemory(destPtr, sourcePtr, width / 2 * 4);
                    // Advance pointers
                    sourcePtr = IntPtr.Add(sourcePtr, mapSource.RowPitch);
                    destPtr = IntPtr.Add(destPtr, mapDest.Stride);
                }
                // Release source and dest locks
                bitmap.UnlockBits(mapDest);
                device.ImmediateContext.UnmapSubresource(stagingTexture, 0);
                // Save the output
                bitmap.Save(outputFileName);
                // Capture done
                captureDone = true;
            }
            screenResource.Dispose();
            duplicatedOutput.ReleaseFrame();
        }
        catch (SharpDXException e)
        {
            if (e.ResultCode.Code != SharpDX.DXGI.ResultCode.WaitTimeout.Result.Code)
            {
                throw;
            }
        }
    }
    // Display the texture using system associated viewer
    System.Diagnostics.Process.Start(Path.GetFullPath(Path.Combine(Environment.CurrentDirectory, outputFileName)));
    // TODO: We should clean up all allocated COM objects here
}
You need to take your original source surface in GPU memory and Draw() it onto a smaller surface. This involves simple vertex/pixel shaders, which some folks with simple needs would rather bypass.
I would look to see if someone has made a sprite lib for SharpDX. It should be a common "thing"... or use Direct2D (which is much more fun). Since D2D is just a user-mode library over D3D, it interops with D3D very easily.
I've never used SharpDX, but from memory you would do something like this:
1.) Create an ID2D1Device, wrapping your existing DXGI Device (make sure your dxgi device creation flag has D3D11_CREATE_DEVICE_BGRA_SUPPORT)
2.) Get the ID2D1DeviceContext from your ID2D1Device
3.) Wrap your source and destination DXGI surfaces into D2D bitmaps with ID2D1DeviceContext::CreateBitmapFromDxgiSurface
4.) Call ID2D1DeviceContext::SetTarget with your destination bitmap
5.) BeginDraw, ID2D1DeviceContext::DrawBitmap, passing your source D2D bitmap. EndDraw
6.) Save your destination
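A rough SharpDX translation of steps 1-5 (untested and entirely my own sketch; the type names are from SharpDX.Direct2D1 in recent SharpDX versions, and the surface variables are assumptions):
// dxgiDevice: SharpDX.DXGI.Device from a D3D11 device created with BgraSupport
// sourceSurface, destSurface: SharpDX.DXGI.Surface (the destination is the smaller one)
var d2dDevice = new SharpDX.Direct2D1.Device(dxgiDevice);
var d2dContext = new SharpDX.Direct2D1.DeviceContext(d2dDevice, SharpDX.Direct2D1.DeviceContextOptions.None);
// wrap both DXGI surfaces as D2D bitmaps (CreateBitmapFromDxgiSurface under the hood);
// the destination may additionally need BitmapProperties1 with BitmapOptions.Target
var srcBitmap = new SharpDX.Direct2D1.Bitmap1(d2dContext, sourceSurface);
var dstBitmap = new SharpDX.Direct2D1.Bitmap1(d2dContext, destSurface);
d2dContext.Target = dstBitmap;
d2dContext.BeginDraw();
// scale the full source into the smaller destination with linear filtering
d2dContext.DrawBitmap(srcBitmap,
    new SharpDX.Mathematics.Interop.RawRectangleF(0, 0, dstBitmap.Size.Width, dstBitmap.Size.Height),
    1.0f, SharpDX.Direct2D1.BitmapInterpolationMode.Linear,
    new SharpDX.Mathematics.Interop.RawRectangleF(0, 0, srcBitmap.Size.Width, srcBitmap.Size.Height));
d2dContext.EndDraw();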
Here is a pixelate example...
d2d_device_context_h()->BeginDraw();
d2d_device_context_h()->SetTarget(mp_ppBitmap1.Get());
D2D1_SIZE_F rtSize = mp_ppBitmap1->GetSize();
rtSize.height *= (1.0f / cbpx.iPixelsize.y);
rtSize.width *= (1.0f / cbpx.iPixelsize.x);
D2D1_RECT_F rtRect = { 0.0f, 0.0f, rtSize.width, rtSize.height };
D2D1_SIZE_F rsSize = mp_ppBitmap0->GetSize();
D2D1_RECT_F rsRect = { 0.0f, 0.0f, rsSize.width, rsSize.height };
d2d_device_context_h()->DrawBitmap(mp_ppBitmap0.Get(), &rtRect, 1.0f,
    D2D1_BITMAP_INTERPOLATION_MODE_LINEAR, &rsRect);
d2d_device_context_h()->SetTarget(mp_ppBitmap0.Get());
d2d_device_context_h()->DrawBitmap(mp_ppBitmap1.Get(), &rsRect, 1.0f,
    D2D1_BITMAP_INTERPOLATION_MODE_NEAREST_NEIGHBOR, &rtRect);
d2d_device_context_h()->EndDraw();
Where iPixelsize.xy is the size of the "pixelated pixel". Note that I just use linear interpolation when shrinking the bitmap and NOT when I re-enlarge it; this is what generates the pixelation effect.

How to create bitmap from Surface (SharpDX)

I am new to DirectX and trying to use SharpDX to capture a screenshot using the Desktop Duplication API.
I am wondering if there is an easy way to create a bitmap that I can use on the CPU (i.e., save to a file, etc.).
I am using the following code to get the desktop screenshot:
var factory = new SharpDX.DXGI.Factory1();
var adapter = factory.Adapters1[0];
var output = adapter.Outputs[0];
var device = new SharpDX.Direct3D11.Device(SharpDX.Direct3D.DriverType.Hardware,
    DeviceCreationFlags.BgraSupport | DeviceCreationFlags.Debug);
var dev1 = device.QueryInterface<SharpDX.DXGI.Device1>();
var output1 = output.QueryInterface<Output1>();
var duplication = output1.DuplicateOutput(dev1);
OutputDuplicateFrameInformation frameInfo;
SharpDX.DXGI.Resource desktopResource;
duplication.AcquireNextFrame(50, out frameInfo, out desktopResource);
var desktopSurface = desktopResource.QueryInterface<Surface>();
Can anyone please give me some idea of how I can create a bitmap object from the desktopSurface (a DXGI.Surface instance)?
I've just completed this myself although I am not going to say much about this code!
public byte[] GetScreenData()
{
    // We want to copy the texture from the back buffer so
    // we don't hog it.
    Texture2DDescription desc = BackBuffer.Description;
    desc.CpuAccessFlags = CpuAccessFlags.Read;
    desc.Usage = ResourceUsage.Staging;
    desc.OptionFlags = ResourceOptionFlags.None;
    desc.BindFlags = BindFlags.None;
    byte[] data = null;
    using (var texture = new Texture2D(DeviceDirect3D, desc))
    {
        DeviceContextDirect3D.CopyResource(BackBuffer, texture);
        using (Surface surface = texture.QueryInterface<Surface>())
        {
            DataStream dataStream;
            var map = surface.Map(SharpDX.DXGI.MapFlags.Read, out dataStream);
            int lines = (int)(dataStream.Length / map.Pitch);
            data = new byte[surface.Description.Width * surface.Description.Height * 4];
            int dataCounter = 0;
            // width of the surface - 4 bytes per pixel.
            int actualWidth = surface.Description.Width * 4;
            for (int y = 0; y < lines; y++)
            {
                for (int x = 0; x < map.Pitch; x++)
                {
                    if (x < actualWidth)
                    {
                        data[dataCounter++] = dataStream.Read<byte>();
                    }
                    else
                    {
                        // skip the row-pitch padding bytes
                        dataStream.Read<byte>();
                    }
                }
            }
            dataStream.Dispose();
            surface.Unmap();
        }
    }
    return data;
}
This will get you a byte[] which can then be used to generate a bitmap.
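For a desktop (GDI+) app, turning that byte[] into a bitmap could look like this (a sketch of mine, not from the original answer; width and height are assumed to come from the surface description):
var bmp = new System.Drawing.Bitmap(width, height, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
var rect = new System.Drawing.Rectangle(0, 0, width, height);
var locked = bmp.LockBits(rect, System.Drawing.Imaging.ImageLockMode.WriteOnly, bmp.PixelFormat);
// copy row by row in case the bitmap stride differs from width * 4
for (int y = 0; y < height; y++)
    System.Runtime.InteropServices.Marshal.Copy(data, y * width * 4,
        IntPtr.Add(locked.Scan0, y * locked.Stride), width * 4);
bmp.UnlockBits(locked);
bmp.Save("capture.png", System.Drawing.Imaging.ImageFormat.Png);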
The following is how I saved it to a PNG image (using the WinRT BitmapEncoder):
using (var stream = await file.OpenAsync(Windows.Storage.FileAccessMode.ReadWrite))
{
    BitmapEncoder encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.PngEncoderId, stream);
    double dpi = DisplayProperties.LogicalDpi;
    encoder.SetPixelData(BitmapPixelFormat.Bgra8, BitmapAlphaMode.Straight,
        (uint)width, (uint)height, dpi, dpi, pixelData);
    encoder.BitmapTransform.ScaledWidth = (uint)newWidth;
    encoder.BitmapTransform.ScaledHeight = (uint)newHeight;
    await encoder.FlushAsync();
    waiter.Set();
}
I know this was answered a while ago, and maybe you figured it out by now :3 but if someone else gets stuck I hope this helps!
The MSDN page for the Desktop Duplication API tells us the format of the image:
DXGI provides a surface that contains a current desktop image through the new IDXGIOutputDuplication::AcquireNextFrame method. The format of the desktop image is always DXGI_FORMAT_B8G8R8A8_UNORM no matter what the current display mode is.
You can use the Surface.Map(MapFlags, out DataStream) method to get access to the data on the CPU.
The code should look like* this:
DataStream dataStream;
desktopSurface.Map(MapFlags.Read, out dataStream);
for (int y = 0; y < desktopSurface.Description.Height; y++) {
    for (int x = 0; x < desktopSurface.Description.Width; x++) {
        // read DXGI_FORMAT_B8G8R8A8_UNORM pixel:
        byte b = dataStream.Read<byte>();
        byte g = dataStream.Read<byte>();
        byte r = dataStream.Read<byte>();
        byte a = dataStream.Read<byte>();
        // color (r, g, b, a) and pixel position (x, y) are available
        // TODO: write to bitmap or process otherwise
    }
}
desktopSurface.Unmap();
*Disclaimer: I don't have a Windows 8 installation at hand, I'm only following the documentation. I hope this works :)
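One caveat worth adding (my note, not the answerer's): mapped surfaces usually have a row pitch larger than width * 4 bytes, so it is safer to use the DataRectangle that Map returns, as the first answer does, and seek to the start of each row:
DataStream dataStream;
var map = desktopSurface.Map(MapFlags.Read, out dataStream);
for (int y = 0; y < desktopSurface.Description.Height; y++) {
    dataStream.Position = (long)y * map.Pitch; // skip the padding at the end of each row
    for (int x = 0; x < desktopSurface.Description.Width; x++) {
        int bgra = dataStream.Read<int>(); // one DXGI_FORMAT_B8G8R8A8_UNORM pixel
        // ... process the pixel ...
    }
}
desktopSurface.Unmap();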

Smart Centering and Scaling after Model Import in three.js

Is there a way to determine the size and position of a model and then auto-center and scale the model so that it is positioned at the origin and within the view of the camera? I find that when I import a Collada model from Sketchup, if the model was not centered at the origin in Sketchup, then it is not centered in three.js. While that makes sense, it would be nice to auto-center to origin after importing.
I've seen some discussion in the different file loaders about getting the bounds of the imported model, but I have been unable to find any references to how to do that.
The scaling issue is less important, but I feel like it relates to a bounds function, which is why I asked it too.
EDIT:
More info after playing around a bit and a few more Google searches...
The code for my callback function on loading the collada file now looks like this:
loader.load(mURL, function colladaReady(collada) {
    dae = collada.scene;
    skin = collada.skins[0];
    dae.scale.x = dae.scale.y = dae.scale.z = 1;
    dae.updateMatrix();
    // set arbitrary min and max for comparison
    // (note: starting the maxima at 0 assumes the model straddles the origin)
    var minX = 100000;
    var minY = 100000;
    var minZ = 100000;
    var maxX = 0;
    var maxY = 0;
    var maxZ = 0;
    var geometries = collada.dae.geometries;
    for (var propName in geometries) {
        if (geometries.hasOwnProperty(propName) && geometries[propName].mesh) {
            dae.geometry = geometries[propName].mesh.geometry3js;
            dae.geometry.computeBoundingBox();
            bBox = dae.geometry.boundingBox;
            if (bBox.min.x < minX) minX = bBox.min.x;
            if (bBox.min.y < minY) minY = bBox.min.y; // original read bBox.min.x here, a copy-paste bug
            if (bBox.min.z < minZ) minZ = bBox.min.z;
            if (bBox.max.x > maxX) maxX = bBox.max.x;
            if (bBox.max.y > maxY) maxY = bBox.max.y; // original read bBox.max.x here, a copy-paste bug
            if (bBox.max.z > maxZ) maxZ = bBox.max.z;
        }
    }
    //rest of function....
This is generating some interesting data about the model. I can get an overall extreme coordinate for the model, which I'm assuming (probably incorrectly) would be close to an overall bounding box for the model. But trying to do anything with those coordinates (like averaging and moving the model to the averages) generates inconsistent results.
Also, it seems inefficient to have to loop through every geometry for a model, is there a better way? If not, can this logic be applied to other loaders?
You can use THREE.Box3#setFromObject to get the bounding box of any Object3D, including an imported model, without having to loop through the geometries yourself. So you could do something like
var bBox = new THREE.Box3().setFromObject(collada.scene);
to get the extreme bounding box of the model; then you could use any of the techniques in the answers that gaitat linked in order to set the camera position correctly. For instance, you could follow this technique (How to Fit Camera to Object): a perspective camera sees a height of 2 * d * tan(fov/2) at distance d, so solving for d gives the distance used below. Something like:
var height = bBox.size().y;
var dist = height / (2 * Math.tan(camera.fov * Math.PI / 360));
var pos = collada.scene.position;
camera.position.set(pos.x, pos.y, dist * 1.1); // fudge factor so you can see the boundaries
camera.lookAt(pos);
Quick fiddle: http://jsfiddle.net/p19r9re2/ .
try geometry.center()
center: function () {
    var offset = new Vector3();
    return function center() {
        this.computeBoundingBox();
        this.boundingBox.getCenter( offset ).negate();
        this.translate( offset.x, offset.y, offset.z );
        return this;
    };
}(),
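For an imported model made of many geometries, the same idea applies at the object level using the Box3 from the accepted answer; a short sketch (getCenter is the newer three.js API, older revisions expose box.center() instead):
var box = new THREE.Box3().setFromObject(collada.scene);
var center = box.getCenter(new THREE.Vector3());
collada.scene.position.sub(center); // the model is now centered at the origin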
