Embree detecting a false contact between a triangle and a segment - computational-geometry

I have a triangle and a segment which, according to Embree, are colliding. However, when I draw these elements, it is clear they don't collide.
In the image, the dots on the triangle and the segment are the intersection points, computed using the barycentric coordinates and the intersection distance given by Embree.
I have also tested with CGAL whether the elements collide, and CGAL doesn't detect any intersection.
I have absolutely no clue what might be happening. I have been able to create a minimal reproducible example which shows the behavior I am describing:
#include <embree3/rtcore_common.h>
#include <embree3/rtcore_device.h>
#include <embree3/rtcore_geometry.h>
#include <embree3/rtcore_ray.h>
#include <embree3/rtcore_scene.h>
#include <iostream>
#include <tuple>

// To get the points exactly as we saw them on the error
float dec(int x) {
  struct U {
    union {
      int i;
      float f;
    };
  };
  U u;
  u.i = x;
  return u.f;
}

int main() {
  // Data to reproduce the issue
  auto [v0x, v0y, v0z] =
      std::make_tuple(dec(0x3e6a2c13), dec(0x3fad24ce), dec(0x3ecc9e88));
  auto [v1x, v1y, v1z] =
      std::make_tuple(dec(0x3e6e35dd), dec(0x3fad7b6a), dec(0x3eca0623));
  auto [v2x, v2y, v2z] =
      std::make_tuple(dec(0x3e692c70), dec(0x3fad7b6a), dec(0x3eca0623));

  RTCRay ray;
  ray.org_x = dec(0x3e708b43);
  ray.org_y = dec(0x3fad42a5);
  ray.org_z = dec(0x3ecbb9a0);
  ray.tfar = dec(0x3b9631f5);
  ray.dir_x = dec(0x3f137064);
  ray.dir_y = dec(0x3ec18586);
  ray.dir_z = dec(0xbf399122);
  ray.tnear = 0.0F;
  ray.mask = 0;
  ray.time = 0.0F;
  ray.flags = 0;

  // Create device and scene
  auto device = rtcNewDevice("");
  auto scene = rtcNewScene(device);
  rtcSetSceneFlags(scene, RTC_SCENE_FLAG_DYNAMIC |
                              RTC_SCENE_FLAG_CONTEXT_FILTER_FUNCTION);
  rtcSetSceneBuildQuality(scene, RTC_BUILD_QUALITY_LOW);

  // Create geometry
  auto geometry = rtcNewGeometry(device, RTC_GEOMETRY_TYPE_TRIANGLE);
  rtcSetGeometryBuildQuality(geometry, RTC_BUILD_QUALITY_REFIT);
  auto geomId = rtcAttachGeometry(scene, geometry);
  auto *vb = static_cast<float *>(
      rtcSetNewGeometryBuffer(geometry, RTC_BUFFER_TYPE_VERTEX, 0,
                              RTC_FORMAT_FLOAT3, 4 * sizeof(float), 12));
  vb[4 * 0 + 0] = v0x;
  vb[4 * 0 + 1] = v0y;
  vb[4 * 0 + 2] = v0z;
  vb[4 * 1 + 0] = v1x;
  vb[4 * 1 + 1] = v1y;
  vb[4 * 1 + 2] = v1z;
  vb[4 * 2 + 0] = v2x;
  vb[4 * 2 + 1] = v2y;
  vb[4 * 2 + 2] = v2z;
  auto *ib = static_cast<int *>(
      rtcSetNewGeometryBuffer(geometry, RTC_BUFFER_TYPE_INDEX, 0,
                              RTC_FORMAT_UINT3, 3 * sizeof(int), 3));
  ib[0] = 0;
  ib[1] = 1;
  ib[2] = 2;
  rtcCommitGeometry(geometry);
  rtcCommitScene(scene);

  // Perform the query
  RTCIntersectContext context{};
  rtcInitIntersectContext(&context);
  context.flags = RTC_INTERSECT_CONTEXT_FLAG_INCOHERENT;
  rtcOccluded1(scene, &context, &ray);
  // Same behavior with rtcIntersect1
  std::cout << ray.tfar << std::endl; // -inf here means an intersection has been found
}
I have uploaded the example to GitLab in case someone wants to test it (https://gitlab.com/juanjo.casafranca/embree_issue).
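For reference, a minimal sketch (not part of the repro, and the helper name is mine) of how the dots in the image can be reconstructed from rtcIntersect1 output, assuming Embree's convention that the hit point equals (1-u-v)*v0 + u*v1 + v*v2:
#include <embree3/rtcore_ray.h>
#include <iostream>

// Prints the hit point computed from the barycentric coordinates next to the
// point obtained from the ray origin plus tfar times the direction.
void printHitPoints(const RTCRayHit &rh, const float v0[3], const float v1[3],
                    const float v2[3]) {
  const float u = rh.hit.u, v = rh.hit.v, w = 1.0F - u - v;
  const float org[3] = {rh.ray.org_x, rh.ray.org_y, rh.ray.org_z};
  const float dir[3] = {rh.ray.dir_x, rh.ray.dir_y, rh.ray.dir_z};
  for (int c = 0; c < 3; ++c) {
    const float onTriangle = w * v0[c] + u * v1[c] + v * v2[c]; // point on the triangle
    const float onSegment = org[c] + rh.ray.tfar * dir[c];      // point on the segment
    std::cout << onTriangle << " " << onSegment << "\n";
  }
}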

Related

Why is chrominance lost when I copy a DXGI_FORMAT_NV12 ID3D11Texture from a d3d11device to a d3d11on12device?

D3D11_TEXTURE2D_DESC texture_desc = {0};
texture_desc.Width = 640;
texture_desc.Height = 480;
texture_desc.MipLevels = 1;
texture_desc.Format = DXGI_FORMAT_NV12;
texture_desc.SampleDesc.Count = 1;
texture_desc.ArraySize = 1;
texture_desc.Usage = D3D11_USAGE_DEFAULT;
texture_desc.MiscFlags = D3D11_RESOURCE_MISC_SHARED;
Microsoft::WRL::ComPtr<ID3D11Texture2D> temp_texture_for_my_device{nullptr};
my_device->CreateTexture2D(&texture_desc, NULL, &temp_texture_for_my_device);
Microsoft::WRL::ComPtr<IDXGIResource> dxgi_resource{nullptr};
temp_texture_for_my_device.As(&dxgi_resource);
HANDLE shared_handle = NULL;
dxgi_resource->GetSharedHandle(&shared_handle);
dxgi_resource->Release();
Microsoft::WRL::ComPtr<ID3D11Texture2D> temp_texture_for_ffmpeg_device {nullptr};
ffmpeg_device->OpenSharedResource(shared_handle, __uuidof(ID3D11Texture2D), (void**)temp_texture_for_ffmpeg_device.GetAddressOf());
ffmpeg_device_context->CopySubresourceRegion(temp_texture_for_ffmpeg_device.Get(), 0, 0, 0, 0, (ID3D11Texture2D*)ffmpeg_avframe->data[0], (int)ffmpeg_avframe->data[1], NULL);
ffmpeg_device_context->Flush();
When I copy temp_texture_for_ffmpeg_device to a D3D11_USAGE_STAGING texture, it looks normal, but when I copy temp_texture_for_my_device to a D3D11_USAGE_STAGING texture, I lose the chrominance data.
When I map the textures to the CPU via D3D11_USAGE_STAGING:
temp_texture_for_ffmpeg_device : RowPitch is 768, DepthPitch is 768 * 720;
temp_texture_for_my_device : RowPitch is 1024, DepthPitch is 1024 * 480;
I think there are some different parameters between the two devices (or device contexts?), but I don't know what parameters would cause such a difference.
my_device and my_device_context are created by D3D11On12CreateDevice.
The DirectX Video formats are planar, meaning that each component is contiguous in memory rather than being interleaved like most formats. For DirectX 12, this is explicitly exposed in the layout information which you can obtain via D3D12GetFormatPlaneCount.
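For instance, a minimal sketch of querying the plane count (this assumes the D3D12GetFormatPlaneCount helper from d3dx12.h, which wraps CheckFeatureSupport with D3D12_FEATURE_FORMAT_INFO); NV12 should report two planes, Y and UV:
#include <d3d12.h>
#include "d3dx12.h"

// Returns the number of planes DXGI_FORMAT_NV12 has on this device (expected: 2).
UINT8 QueryNv12PlaneCount(ID3D12Device* device)
{
    return D3D12GetFormatPlaneCount(device, DXGI_FORMAT_NV12);
}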
Here's a template that works with D3D12_SUBRESOURCE_DATA and D3D12_MEMCPY_DEST. Here the SlicePitch is set to the size of an individual plane.
template<typename T, typename PT> void AdjustPlaneResource(
_In_ DXGI_FORMAT fmt,
_In_ size_t height,
_In_ size_t slicePlane,
_Inout_ T& res) noexcept
{
switch (static_cast<int>(fmt))
{
case DXGI_FORMAT_NV12:
case DXGI_FORMAT_P010:
case DXGI_FORMAT_P016:
if (!slicePlane)
{
// Plane 0
res.SlicePitch = res.RowPitch * static_cast<PT>(height);
}
else
{
// Plane 1
res.pData = const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(res.pData) + res.RowPitch * PT(height));
res.SlicePitch = res.RowPitch * static_cast<PT>((height + 1) >> 1);
}
break;
case DXGI_FORMAT_NV11:
if (!slicePlane)
{
// Plane 0
res.SlicePitch = res.RowPitch * static_cast<PT>(height);
}
else
{
// Plane 1
res.pData = const_cast<uint8_t*>(reinterpret_cast<const uint8_t*>(res.pData) + res.RowPitch * PT(height));
res.RowPitch = (res.RowPitch >> 1);
res.SlicePitch = res.RowPitch * static_cast<PT>(height);
}
break;
}
}
For DirectX 11, the extra planar information has to be assumed as it's not directly exposed by the API as such. You have to compute the extra space required for the additional plane(s). Here's a snippet from DirectXTex. In this case slice is the total size of all the planes in one 'slice' of the resource.
case DXGI_FORMAT_R8G8_B8G8_UNORM:
case DXGI_FORMAT_G8R8_G8B8_UNORM:
case DXGI_FORMAT_YUY2:
assert(IsPacked(fmt));
pitch = ((uint64_t(width) + 1u) >> 1) * 4u;
slice = pitch * uint64_t(height);
break;
case DXGI_FORMAT_Y210:
case DXGI_FORMAT_Y216:
assert(IsPacked(fmt));
pitch = ((uint64_t(width) + 1u) >> 1) * 8u;
slice = pitch * uint64_t(height);
break;
case DXGI_FORMAT_NV12:
case DXGI_FORMAT_420_OPAQUE:
assert(IsPlanar(fmt));
pitch = ((uint64_t(width) + 1u) >> 1) * 2u;
slice = pitch * (uint64_t(height) + ((uint64_t(height) + 1u) >> 1));
break;
case DXGI_FORMAT_P010:
case DXGI_FORMAT_P016:
assert(IsPlanar(fmt));
pitch = ((uint64_t(width) + 1u) >> 1) * 4u;
slice = pitch * (uint64_t(height) + ((uint64_t(height) + 1u) >> 1));
break;
case DXGI_FORMAT_NV11:
assert(IsPlanar(fmt));
pitch = ((uint64_t(width) + 3u) >> 2) * 4u;
slice = pitch * uint64_t(height) * 2u;
break;
case DXGI_FORMAT_P208:
assert(IsPlanar(fmt));
pitch = ((uint64_t(width) + 1u) >> 1) * 2u;
slice = pitch * uint64_t(height) * 2u;
break;
case DXGI_FORMAT_V208:
assert(IsPlanar(fmt));
pitch = uint64_t(width);
slice = pitch * (uint64_t(height) + (((uint64_t(height) + 1u) >> 1) * 2u));
break;
case DXGI_FORMAT_V408:
assert(IsPlanar(fmt));
pitch = uint64_t(width);
slice = pitch * (uint64_t(height) + (uint64_t(height >> 1) * 4u));
break;

Mesh Simplification with Assimp and OpenMesh

A few days ago, I asked a question about how to use edge collapse with Assimp. Smoothing the obj and removing duplicated vertices in the software solved the basic problems that kept edge collapse from working; I mean it works, because the mesh can be simplified by MeshLab like this:
It looks good in MeshLab, but then I do it in my engine, which uses Assimp and OpenMesh. The problem is that with the vertices and indices as Assimp imports them, a half-edge can be left without its opposite pair (is this called non-manifold?).
The result snapshot uses OpenMesh's quadric decimation:
To isolate the problem, I do it without decimation and parse the OpenMesh data structure back directly. Everything works fine as expected (I mean the result without decimation).
The code that I used to decimate the mesh:
Loader::BasicData Loader::TestEdgeCollapse(float vertices[], int vertexLength, int indices[], int indexLength, float texCoords[], int texCoordLength, float normals[], int normalLength)
{
// Mesh type
typedef OpenMesh::TriMesh_ArrayKernelT<> OPMesh;
// Decimater type
typedef OpenMesh::Decimater::DecimaterT< OPMesh > OPDecimater;
// Decimation Module Handle type
typedef OpenMesh::Decimater::ModQuadricT< OPMesh >::Handle HModQuadric;
OPMesh mesh;
std::vector<OPMesh::VertexHandle> vhandles;
int iteration = 0;
for (int i = 0; i < vertexLength; i += 3)
{
vhandles.push_back(mesh.add_vertex(OpenMesh::Vec3f(vertices[i], vertices[i + 1], vertices[i + 2])));
if (texCoords != nullptr)
mesh.set_texcoord2D(vhandles.back(),OpenMesh::Vec2f(texCoords[iteration * 2], texCoords[iteration * 2 + 1]));
if (normals != nullptr)
mesh.set_normal(vhandles.back(), OpenMesh::Vec3f(normals[i], normals[i + 1], normals[i + 2]));
iteration++;
}
for (int i = 0; i < indexLength; i += 3)
mesh.add_face(vhandles[indices[i]], vhandles[indices[i + 1]], vhandles[indices[i + 2]]);
OPDecimater decimater(mesh);
HModQuadric hModQuadric;
decimater.add(hModQuadric);
decimater.module(hModQuadric).unset_max_err();
decimater.initialize();
//decimater.decimate(); // without this, everything is fine as expected.
mesh.garbage_collection();
int verticesSize = mesh.n_vertices() * 3;
float* newVertices = new float[verticesSize];
int indicesSize = mesh.n_faces() * 3;
int* newIndices = new int[indicesSize];
float* newTexCoords = nullptr;
int texCoordSize = mesh.n_vertices() * 2;
if(mesh.has_vertex_texcoords2D())
newTexCoords = new float[texCoordSize];
float* newNormals = nullptr;
int normalSize = mesh.n_vertices() * 3;
if(mesh.has_vertex_normals())
newNormals = new float[normalSize];
Loader::BasicData data;
int index = 0;
for (auto v_it = mesh.vertices_begin(); v_it != mesh.vertices_end(); ++v_it)
{
OpenMesh::Vec3f &point = mesh.point(*v_it);
newVertices[index * 3] = point[0];
newVertices[index * 3 + 1] = point[1];
newVertices[index * 3 + 2] = point[2];
if (mesh.has_vertex_texcoords2D())
{
auto &tex = mesh.texcoord2D(*v_it);
newTexCoords[index * 2] = tex[0];
newTexCoords[index * 2 + 1] = tex[1];
}
if (mesh.has_vertex_normals())
{
auto &normal = mesh.normal(*v_it);
newNormals[index * 3] = normal[0];
newNormals[index * 3 + 1] = normal[1];
newNormals[index * 3 + 2] = normal[2];
}
index++;
}
index = 0;
for (auto f_it = mesh.faces_begin(); f_it != mesh.faces_end(); ++f_it)
for (auto fv_it = mesh.fv_ccwiter(*f_it); fv_it.is_valid(); ++fv_it)
{
int id = fv_it->idx();
newIndices[index] = id;
index++;
}
data.Indices = newIndices;
data.IndicesLength = indicesSize;
data.Vertices = newVertices;
data.VerticesLength = verticesSize;
data.TexCoords = nullptr;
data.TexCoordLength = -1;
data.Normals = nullptr;
data.NormalLength = -1;
if (mesh.has_vertex_texcoords2D())
{
data.TexCoords = newTexCoords;
data.TexCoordLength = texCoordSize;
}
if (mesh.has_vertex_normals())
{
data.Normals = newNormals;
data.NormalLength = normalSize;
}
return data;
}
I also provide the tree obj I tested, and the face data generated by Assimp (fetched from the Visual Studio debugger), which shows the problem that some of the indices cannot find their index pair.
After a few weeks of thinking about this and failing, I thought I wanted some academic/mathematical solution for automatically generating these decimated meshes, but now I'm trying to find a simple way to implement this. The way I am able to do it is to change the structure so that a single custom object (class obj) loads multiple objects (file.obj), and to switch the object when needed. The benefit of this is that I can manage what should be presented and ignore any algorithm problem.
By the way, I list some obstacles that pushed me back to the simple way.
Assimp's unique indices and vertices: there is nothing wrong with them, but for the algorithm there is no way to build the adjacency half-edge structure from them (a vertex-welding sketch follows this list).
OpenMesh reading the object file (*.obj) directly: this can be done with the read_mesh function, but the disadvantage is the lack of documented examples, and it is hard to use in my engine.
Writing a custom 3D model importer for any format is hard.
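For reference, a minimal vertex-welding sketch (my own illustration, not code from the engine): merging positionally identical vertices before filling the OpenMesh kernel restores the shared edges that the half-edge structure needs. The exact-match map (no epsilon) is only for illustration.
#include <array>
#include <map>
#include <vector>

// Remaps duplicated vertices (identical x, y, z) to a single index so that
// neighbouring faces share vertices again and every half-edge finds its
// opposite. 'vertices' is the flat xyz array produced by Assimp, 'indices'
// the original triangle indices; 'indices' is rewritten in place and the
// welded position list is returned.
std::vector<std::array<float, 3>> weldVertices(const float *vertices, int vertexLength,
                                               int *indices, int indexLength) {
    std::map<std::array<float, 3>, int> lookup; // exact match, no tolerance
    std::vector<std::array<float, 3>> welded;
    std::vector<int> remap(vertexLength / 3);
    for (int v = 0; v < vertexLength / 3; ++v) {
        const std::array<float, 3> p{vertices[3 * v], vertices[3 * v + 1], vertices[3 * v + 2]};
        auto it = lookup.find(p);
        if (it == lookup.end()) {
            it = lookup.emplace(p, static_cast<int>(welded.size())).first;
            welded.push_back(p);
        }
        remap[v] = it->second;
    }
    for (int i = 0; i < indexLength; ++i)
        indices[i] = remap[indices[i]];
    return welded;
}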
In conclusion, there are two ways to make level of detail work in an engine: one is using a mesh simplification algorithm plus more testing to ensure quality, and the other is just switching between 3D models made in 3D software. The latter is not automatic, but it is stable. I use the second method, and I show the result here :)
However, this is not a real solution to my question, so I won't accept my own answer.

How can I read/transform the range images of the stanford bunny .ply-files?

I want to read the non-reconstructed data from the Stanford Bunny. The point data is stored as several range images, which have to be transformed and combined into one big point cloud, as described in the README:
These data files were obtained with a Cyberware 3030MS optical
triangulation scanner. They are stored as range images in the "ply"
format. The ".conf" file contains the transformations required to
bring each range image into a single coordinate system.
This is the .conf-file:
camera -0.0172 -0.0936 -0.734 -0.0461723 0.970603 -0.235889 0.0124573
bmesh bun000.ply 0 0 0 0 0 0 1
bmesh bun045.ply -0.0520211 -0.000383981 -0.0109223 0.00548449 -0.294635 -0.0038555 0.955586
bmesh bun090.ply 2.20761e-05 -3.34606e-05 -7.20881e-05 0.000335889 -0.708202 0.000602459 0.706009
bmesh bun180.ply 0.000116991 2.47732e-05 -4.6283e-05 -0.00215148 0.999996 -0.0015001 0.000892527
bmesh bun270.ply 0.000130273 1.58623e-05 0.000406764 0.000462632 0.707006 -0.00333301 0.7072
bmesh top2.ply -0.0530127 0.138516 0.0990356 0.908911 -0.0569874 0.154429 0.383126
bmesh top3.ply -0.0277373 0.0583887 -0.0796939 0.0598923 0.670467 0.68082 -0.28874
bmesh bun315.ply -0.00646017 -1.36122e-05 -0.0129064 0.00449209 0.38422 -0.00976512 0.923179
bmesh chin.ply 0.00435102 0.0882863 -0.108853 -0.441019 0.213083 0.00705734 0.871807
bmesh ear_back.ply -0.0829384 0.0353082 0.0711536 0.111743 0.925689 -0.215443 -0.290169
For each range image seven values are stored, but I do not know what information can be obtained from these values.
I guess that three of them contain some information about the translation and maybe three contain information about the rotation, but I didn't find anything about the order of these values or how to apply them to get one point cloud.
The wiki page doesn't deal with range images, and I found nothing more on the Stanford pages. They just mention that the method of Turk94 was used to scan this data set, but that paper has no information about the transformations needed. (Or I was not able to get the information out of it.)
Does anybody know how to read these values correctly? Why is there a transformation for the camera position? Is this just a good initial value to view the whole point cloud?
Thanks for your help.
EDIT:
OK. At this point, I have already tried to read the data and transform it correctly, but nothing worked. I use the Boost library to handle the quaternions.
Here is my code for it:
boost::math::quaternion<double> translation, quaternionRotation;
//Get Transformation
translation = boost::math::quaternion<double>(0.0, lineData[2].toDouble(), lineData[3].toDouble(), lineData[4].toDouble());
quaternionRotation = boost::math::quaternion<double>(lineData[5].toDouble(),lineData[6].toDouble(),lineData[7].toDouble(),lineData[8].toDouble());
//do some file related stuff
//...
//for each line: read the point data and transform it and store the point in a data array
pointData[j].x = stringPointData[0].toDouble();
pointData[j].y = stringPointData[1].toDouble();
pointData[j].z = stringPointData[2].toDouble();
tmpQuat = boost::math::quaternion<double> (0.0,pointData[j].x,pointData[j].y,pointData[j].z);
//first translation
tmpQuat += translation;
//then quaternion rotation
tmpQuat = (quaternionRotation * (tmpQuat) * boost::math::conj(quaternionRotation));
//read the data from quaternion to a usual type
pointData[j].x = tmpQuat.R_component_2();
pointData[j].y = tmpQuat.R_component_3();
pointData[j].z = tmpQuat.R_component_4();
I assume that the first component of the quaternion is the w component and the others refer to x, y and z, as in equation 2 from here. If necessary, I can provide screenshots of the false transformations.
EDIT: It is written in the source code of zipper, in the file zipper.c, that the 7 values are saved as follows:
transX transY transZ quatX quatY quatZ quatW
The quaternion is then transformed into a rotation matrix and then the rotation is performed with this new matrix. But even with this information, I am not able to transform it correctly. To test it, I implemented the function quat_to_mat() from zipper in my project:
glm::dmat4 cPlyObjectLoader::quat_to_mat(boost::math::quaternion<double> quat) const
{
float s;
float xs,ys,zs;
float wx,wy,wz;
float xx,xy,xz;
float yy,yz,zz;
glm::dmat4 mat(1.0);
s = 2 / (quat.R_component_2()*quat.R_component_2() +
quat.R_component_3()*quat.R_component_3() +
quat.R_component_4()*quat.R_component_4() +
quat.R_component_1()*quat.R_component_1());
xs = quat.R_component_2() * s;
ys = quat.R_component_3() * s;
zs = quat.R_component_4() * s;
wx = quat.R_component_1() * xs;
wy = quat.R_component_1() * ys;
wz = quat.R_component_1() * zs;
xx = quat.R_component_2() * xs;
xy = quat.R_component_2() * ys;
xz = quat.R_component_2() * zs;
yy = quat.R_component_3() * ys;
yz = quat.R_component_3() * zs;
zz = quat.R_component_4() * zs;
mat[0][0] = 1 - (yy + zz);
mat[0][1] = xy - wz;
mat[0][2] = xz + wy;
mat[0][3] = 0;
mat[1][0] = xy + wz;
mat[1][1] = 1 - (xx + zz);
mat[1][2] = yz - wx;
mat[1][3] = 0;
mat[2][0] = xz - wy;
mat[2][1] = yz + wx;
mat[2][2] = 1 - (xx + yy);
mat[2][3] = 0;
mat[3][0] = 0;
mat[3][1] = 0;
mat[3][2] = 0;
mat[3][3] = 1;
return mat;
}
Now I am doing the translation and rotation with a vector and this matrix:
quaternionRotation = boost::math::quaternion<double>(lineData[8].toDouble(),lineData[5].toDouble(),lineData[6].toDouble(),lineData[7].toDouble());
rotationMat = this->quat_to_mat(quaternionRotation);
translationVec = glm::dvec4(lineData[2].toDouble(), lineData[3].toDouble(), lineData[4].toDouble(),0.0);
//same stuff as above
//...
glm::dvec4 curPoint = glm::dvec4(pointData[j].x,pointData[j].y,pointData[j].z,1.0);
curPoint += translationVec;
curPoint = rotationMat*curPoint;
The result is different from my quaternion rotation (why? it should be the same), but still not correct.
Debug information:
the input of all transformations is correct
the input of all points is correct
As I read from the Stanford 3D Scanning Repository:
For all the Stanford models, alignment was done using a modified ICP
algorithm, as described in this paper. These alignments are stored in
".conf" files, which list each range image in the model along with a
translation and a quaternion rotation.
Here is the link to "this paper"
Edit: The two methods are called zippering and volumetric merging.
As Ello mentioned, it is written at the stanford 3D repo:
For all the Stanford models, alignment was done using a modified ICP algorithm, as described in this paper. These alignments are stored in ".conf" files, which list each range image in the model along with a translation and a quaternion rotation.
But that is not enough to understand everything in this data file.
It is correct that the first line:
camera -0.0172 -0.0936 -0.734 -0.0461723 0.970603 -0.235889 0.0124573
stores a good initial camera position, and every other line starting with bmesh refers to a .ply file, which stores a range image.
The transformation values are stored as follows:
transX transY transZ quatX quatY quatZ quatW
where trans... refers to a translation value and quat... refers to a value of the quaternion. Currently, I do not know why it doesn't work with the quaternion rotation by itself, but by transforming it into a rotation matrix with the code from zipper the transformation is correct. Be aware that the translation is stored first, but to get a correct transformation the rotation has to be applied first and the translation afterwards.
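In other words, each point p of a range image is mapped as
p' = R(q) * p + t
where R(q) is the rotation matrix built from (quatX, quatY, quatZ, quatW), as in quat_to_mat() above, and t = (transX, transY, transZ).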
My code snippet to read the files and transform them is the following:
boost::math::quaternion<double> translation, quaternionRotation;
//Get Transformation
translationVec = glm::dvec4(lineData[2].toDouble(), lineData[3].toDouble(), lineData[4].toDouble(),0.0);
quaternionRotation = boost::math::quaternion<double>(lineData[8].toDouble(),lineData[5].toDouble(),lineData[6].toDouble(),lineData[7].toDouble());
//calculate the unit quaternion
double magnitude = std::sqrt(
quaternionRotation.R_component_1()*quaternionRotation.R_component_1()+
quaternionRotation.R_component_2()*quaternionRotation.R_component_2()+
quaternionRotation.R_component_3()*quaternionRotation.R_component_3()+
quaternionRotation.R_component_4()*quaternionRotation.R_component_4());
quaternionRotation /= magnitude;
rotationMat = this->quat_to_mat(quaternionRotation);
//do some file related stuff
//...
//for each line: read the point data and transform it and store the point in a data array
pointData[j].x = stringPointData[0].toDouble();
pointData[j].y = stringPointData[1].toDouble();
pointData[j].z = stringPointData[2].toDouble();
//transform the current point
glm::dvec4 curPoint = glm::dvec4(pointData[j].x,pointData[j].y,pointData[j].z,1.0);
//first rotation
curPoint = rotationMat*curPoint;
//then translation
curPoint += translationVec;
//store the data in a data array
pointData[j].x = curPoint.x;
pointData[j].y = curPoint.y;
pointData[j].z = curPoint.z;
I know that it's not the best code, but it works. Feel free to optimize it yourself.
Here is the file converter that I wrote. It will assemble all the scans into a single file, one point per line. It supports different file formats (including Stanford .conf files).
#include <string>
#include <vector>
#include <sstream>
#include <iostream>
#include <stdio.h>
#include <ctype.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#ifndef M_PI
#define M_PI 3.14159265
#endif
class LineInput {
public:
LineInput(const std::string& filename) {
F_ = fopen(filename.c_str(), "r" ) ;
ok_ = (F_ != 0) ;
}
~LineInput() {
if(F_ != 0) {
fclose(F_); F_ = 0 ;
}
}
bool OK() const { return ok_ ; }
bool eof() const { return feof(F_) ; }
bool get_line() {
line_[0] = '\0' ;
// Skip the empty lines
while(!isprint(line_[0])) {
if(fgets(line_, MAX_LINE_LEN, F_) == 0) {
return false ;
}
}
// If the line ends with a backslash, append
// the next line to the current line.
bool check_multiline = true ;
int total_length = MAX_LINE_LEN ;
char* ptr = line_ ;
while(check_multiline) {
int L = strlen(ptr) ;
total_length -= L ;
ptr = ptr + L - 2;
if(*ptr == '\\' && total_length > 0) {
*ptr = ' ' ;
ptr++ ;
fgets(ptr, total_length, F_) ;
} else {
check_multiline = false ;
}
}
if(total_length < 0) {
std::cerr
<< "MultiLine longer than "
<< MAX_LINE_LEN << " bytes" << std::endl ;
}
return true ;
}
int nb_fields() const { return field_.size() ; }
char* field(int i) { return field_[i] ; }
int field_as_int(int i) {
int result ;
ok_ = ok_ && (sscanf(field(i), "%d", &result) == 1) ;
return result ;
}
double field_as_double(int i) {
double result ;
ok_ = ok_ && (sscanf(field(i), "%lf", &result) == 1) ;
return result ;
}
bool field_matches(int i, const char* s) {
return !strcmp(field(i), s) ;
}
void get_fields(const char* separators=" \t\r\n") {
field_.resize(0) ;
char* tok = strtok(line_,separators) ;
while(tok != 0) {
field_.push_back(tok) ;
tok = strtok(0,separators) ;
}
}
private:
enum { MAX_LINE_LEN = 65535 } ;
FILE* F_ ;
char line_[MAX_LINE_LEN] ;
std::vector<char*> field_ ;
bool ok_ ;
} ;
std::string to_string(int x, int mindigits) {
char buff[100] ;
sprintf(buff, "%03d", x) ;
return std::string(buff) ;
}
double M[4][4] ;
void transform(double* xyz) {
double xyzw[4] ;
for(unsigned int c=0; c<4; c++) {
xyzw[c] = M[3][c] ;
}
for(unsigned int j=0; j<4; j++) {
for(unsigned int i=0; i<3; i++) {
xyzw[j] += M[i][j] * xyz[i] ;
}
}
for(unsigned int c=0; c<3; c++) {
xyz[c] = xyzw[c] / xyzw[3] ;
}
}
bool read_frames_file(int no) {
std::string filename = "scan" + to_string(no,3) + ".frames" ;
std::cerr << "Reading frames from:" << filename << std::endl ;
LineInput in(filename) ;
if(!in.OK()) {
std::cerr << " ... not found" << std::endl ;
return false ;
}
while(!in.eof() && in.get_line()) {
in.get_fields() ;
if(in.nb_fields() == 17) {
int f = 0 ;
for(unsigned int i=0; i<4; i++) {
for(unsigned int j=0; j<4; j++) {
M[i][j] = in.field_as_double(f) ; f++ ;
}
}
}
}
return true ;
}
bool read_pose_file(int no) {
std::string filename = "scan" + to_string(no,3) + ".pose" ;
std::cerr << "Reading pose from:" << filename << std::endl ;
LineInput in(filename) ;
if(!in.OK()) {
std::cerr << " ... not found" << std::endl ;
return false ;
}
double xyz[3] ;
double euler[3] ;
in.get_line() ;
in.get_fields() ;
xyz[0] = in.field_as_double(0) ;
xyz[1] = in.field_as_double(1) ;
xyz[2] = in.field_as_double(2) ;
in.get_line() ;
in.get_fields() ;
euler[0] = in.field_as_double(0) * M_PI / 180.0 ;
euler[1] = in.field_as_double(1) * M_PI / 180.0 ;
euler[2] = in.field_as_double(2) * M_PI / 180.0 ;
double sx = sin(euler[0]);
double cx = cos(euler[0]);
double sy = sin(euler[1]);
double cy = cos(euler[1]);
double sz = sin(euler[2]);
double cz = cos(euler[2]);
M[0][0] = cy*cz;
M[0][1] = sx*sy*cz + cx*sz;
M[0][2] = -cx*sy*cz + sx*sz;
M[0][3] = 0.0;
M[1][0] = -cy*sz;
M[1][1] = -sx*sy*sz + cx*cz;
M[1][2] = cx*sy*sz + sx*cz;
M[1][3] = 0.0;
M[2][0] = sy;
M[2][1] = -sx*cy;
M[2][2] = cx*cy;
M[2][3] = 0.0;
M[3][0] = xyz[0];
M[3][1] = xyz[1];
M[3][2] = xyz[2];
M[3][3] = 1.0;
return true ;
}
void setup_transform_from_translation_and_quaternion(
double Tx, double Ty, double Tz,
double Qx, double Qy, double Qz, double Qw
) {
/* for unit q, just set s = 2 or set xs = Qx + Qx, etc. */
double s = 2.0 / (Qx*Qx + Qy*Qy + Qz*Qz + Qw*Qw);
double xs = Qx * s;
double ys = Qy * s;
double zs = Qz * s;
double wx = Qw * xs;
double wy = Qw * ys;
double wz = Qw * zs;
double xx = Qx * xs;
double xy = Qx * ys;
double xz = Qx * zs;
double yy = Qy * ys;
double yz = Qy * zs;
double zz = Qz * zs;
M[0][0] = 1.0 - (yy + zz);
M[0][1] = xy - wz;
M[0][2] = xz + wy;
M[0][3] = 0.0;
M[1][0] = xy + wz;
M[1][1] = 1 - (xx + zz);
M[1][2] = yz - wx;
M[1][3] = 0.0;
M[2][0] = xz - wy;
M[2][1] = yz + wx;
M[2][2] = 1 - (xx + yy);
M[2][3] = 0.0;
M[3][0] = Tx;
M[3][1] = Ty;
M[3][2] = Tz;
M[3][3] = 1.0;
}
bool read_points_file(int no) {
std::string filename = "scan" + to_string(no,3) + ".3d" ;
std::cerr << "Reading points from:" << filename << std::endl ;
LineInput in(filename) ;
if(!in.OK()) {
std::cerr << " ... not found" << std::endl ;
return false ;
}
while(!in.eof() && in.get_line()) {
in.get_fields() ;
double xyz[3] ;
if(in.nb_fields() >= 3) {
for(unsigned int c=0; c<3; c++) {
xyz[c] = in.field_as_double(c) ;
}
transform(xyz) ;
printf("%f %f %f\n",xyz[0],xyz[1],xyz[2]) ;
}
}
return true ;
}
/* only works for ASCII PLY files */
void read_ply_file(char* filename) {
std::cerr << "Reading points from:" << filename << std::endl;
LineInput in(filename) ;
if(!in.OK()) {
std::cerr << filename << ": could not open" << std::endl ;
return;
}
bool reading_vertices = false;
int nb_vertices = 0 ;
int nb_read_vertices = 0 ;
while(!in.eof() && in.get_line()) {
in.get_fields();
if(reading_vertices) {
double xyz[3] ;
for(unsigned int c=0; c<3; c++) {
xyz[c] = in.field_as_double(c) ;
}
transform(xyz) ;
printf("%f %f %f\n",xyz[0],xyz[1],xyz[2]) ;
++nb_read_vertices;
if(nb_read_vertices == nb_vertices) {
return;
}
} else if(
in.field_matches(0,"element") &&
in.field_matches(1,"vertex")
) {
nb_vertices = in.field_as_int(2);
} else if(in.field_matches(0,"end_header")) {
reading_vertices = true;
}
}
}
/* For Stanford scanning repository */
void read_conf_file(char* filename) {
LineInput in(filename) ;
if(!in.OK()) {
std::cerr << filename << ": could not open" << std::endl ;
return;
}
while(!in.eof() && in.get_line()) {
in.get_fields();
if(in.nb_fields() == 0) { continue ; }
if(in.field_matches(0,"bmesh")) {
char* filename = in.field(1);
// Translation vector
double Tx = in.field_as_double(2);
double Ty = in.field_as_double(3);
double Tz = in.field_as_double(4);
/// Quaternion
double Qx = in.field_as_double(5);
double Qy = in.field_as_double(6);
double Qz = in.field_as_double(7);
double Qw = in.field_as_double(8);
setup_transform_from_translation_and_quaternion(Tx,Ty,Tz,Qx,Qy,Qz,Qw);
read_ply_file(filename);
}
}
}
int main(int argc, char** argv) {
if(argc != 2) { return -1 ; }
if(strstr(argv[1],".conf")) {
read_conf_file(argv[1]);
} else {
int max_i = atoi(argv[1]) ;
for(int i=0; i<=max_i; i++) {
if(!read_frames_file(i)) {
read_pose_file(i) ;
}
read_points_file(i) ;
}
}
return 0 ;
}
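Usage, as far as I can tell from main(): pass a single argument, either a Stanford .conf file or the highest scan index for the .frames/.pose/.3d naming scheme, and redirect standard output to collect the merged points, for example ./converter bun.conf > bunny_points.xyz (the executable name here is just an example).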
Okay, so here is my solution, since none of the above worked for me (note this is in Python using Blender's bpy). It seems that I need to transpose the rotation part of my 4x4 transformation matrix (note I am using a standard way to convert a quaternion to a rotation matrix, not the one from zipper). Also note that because I am using Blender, imported models only store local coordinates relative to the object's world transformation, so the point = objWorld * point step below is Blender specific and you may not need it.
#loop
for meshName, transform in zip(plyFile, transformations):
    #Build Quaternion
    #transform structure [x, y, z, qx, qy, qz, qw]
    Rt = mathutils.Quaternion((transform[6], transform[3], transform[4], transform[5])).to_matrix().to_4x4()
    Rt.normalize()
    Rt.transpose()
    Rt[0][3] = transform[0]
    Rt[1][3] = transform[1]
    Rt[2][3] = transform[2]
    bpy.ops.object.select_all(action='DESELECT')
    #import the ply mesh into blender
    bpy.ops.import_mesh.ply(filepath=baseDir + meshName)
    #get the ply object
    obj = bpy.context.object
    #get objects world matrix
    objWorld = obj.matrix_world
    for index in range(len(obj.data.vertices)):
        #get local point
        point = mathutils.Vector([obj.data.vertices[index].co[0], obj.data.vertices[index].co[1], obj.data.vertices[index].co[2], 1.])
        #convert local point to world
        point = objWorld * point
        #apply ply transformation
        point = Rt * point
        #update the point in the mesh
        obj.data.vertices[index].co[0] = point[0]
        obj.data.vertices[index].co[1] = point[1]
        obj.data.vertices[index].co[2] = point[2]
#all vertex positions should be updated correctly
As mentioned in other answers, the Stanford 3D repository gives some info about the data organization in the '.conf' files, but the transformations for the bunny model were not working properly when using the quaternion data as provided.
I was also stuck on this registration problem for the bunny model, and based on my tests I have some extra considerations to add. When applying the transformation, the rotations to be more specific, I realized that the quaternion values were not rotating the cloud in the correct direction, but when using the corresponding Euler notation and changing the sign of one specific axis of rotation, I got the correct registration. So, back to the quaternion notation used in the '.conf' file: after some tests I noticed that by changing the sign of the 'w' component of the quaternion in every 'bmesh' row except the first (bun000.ply), the rotation by quaternion can be used.
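A short identity may explain why the sign flip works (my own note, assuming unit quaternions): negating only the w component gives
(-w, x, y, z) = -(w, -x, -y, -z) = -conj(q)
and since q and -q encode the same rotation, this is the rotation of conj(q), i.e. the inverse rotation R(q)^T. That is consistent with the transposed matrix used in the Blender answer above.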
Furthermore, for some reason, when registering the dragon (dragon_stand and dragon_side) and armadillo (armadillo_stand) Stanford point clouds, in order to get the correct result I had to use a different order when reading the quaternion data in the '.conf' file. It seems to be stored as:
tx ty tz qw qx qy qz
where 't' refers to a translation value and 'q' refers to a quaternion value. To be clear, I have only tested these three models, so I don't know what the default pattern for the quaternion values is. Also, for these last two point cloud models, I did not need to change the '.conf' file.
I hope this is useful for someone else trying to do the same.
Just in case someone is looking for a full Python implementation on the basis of what #DanceIgel found out, here is some code in Python 3.9.1, also generating a figure in matplotlib:
# Python 3.9.1
import numpy as np
import sys
import math
import glob
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import open3d as o3d

def get_pointcloud_files(path):
    files = list()
    for f in glob.glob(path + '/*.ply'):
        files.append(f)
    return files

def get_pointcloud_from_file(path, filename):
    cloud = o3d.io.read_point_cloud(path + '/' + filename)
    return cloud

def get_transformations_from_file(path, filename):
    with open(path + '/' + filename) as f:
        lines = (line for line in f)
        source = np.loadtxt(lines, delimiter=' ', skiprows=1, dtype='str')
        source = np.delete(source, 0, 1)  # remove the 'bmesh' label column (the camera line is skipped via skiprows)
    filenames = source[:,0]
    source = source[filenames.argsort()]
    filenames = np.sort(filenames)
    translations = list()
    for row in source[:,1:4]:
        translations.append(np.reshape(row, [3,1]).astype(np.float32))
    quaternions = list()
    for row in source[:,4:]:
        quaternions.append(np.reshape(row, [4,1]).astype(np.float32))
    return filenames, translations, quaternions

def quaternion_rotation_matrix(Q):
    # Extract the values from Q
    q0 = Q[3]
    q1 = Q[0]
    q2 = Q[1]
    q3 = Q[2]
    # calculate unit quaternion
    magnitude = math.sqrt(q0*q0 + q1*q1 + q2*q2 + q3*q3)
    q0 = q0 / magnitude
    q1 = q1 / magnitude
    q2 = q2 / magnitude
    q3 = q3 / magnitude
    # First row of the rotation matrix
    r00 = 2 * (q0 * q0 + q1 * q1) - 1
    r01 = 2 * (q1 * q2 - q0 * q3)
    r02 = 2 * (q1 * q3 + q0 * q2)
    # Second row of the rotation matrix
    r10 = 2 * (q1 * q2 + q0 * q3)
    r11 = 2 * (q0 * q0 + q2 * q2) - 1
    r12 = 2 * (q2 * q3 - q0 * q1)
    # Third row of the rotation matrix
    r20 = 2 * (q1 * q3 - q0 * q2)
    r21 = 2 * (q2 * q3 + q0 * q1)
    r22 = 2 * (q0 * q0 + q3 * q3) - 1
    # 3x3 rotation matrix
    rot_matrix = np.array([[r00, r01, r02],
                           [r10, r11, r12],
                           [r20, r21, r22]])
    rot_matrix = np.transpose(rot_matrix)
    return rot_matrix

if __name__ == "__main__":  # $ python visualization_bunny.py bunny/data
    path = sys.argv[1]
    # load transformations and filenames from file
    filenames, translations, quaternions = get_transformations_from_file(path, 'bun.conf')
    curr_transformation = np.zeros([3,4])
    clouds = list()
    for curr_filename, curr_quaternion, curr_translation in zip(filenames, quaternions, translations):  # go through input files
        curr_cloud = get_pointcloud_from_file(path, curr_filename)
        # convert cloud to numpy
        curr_cloud = np.asarray(curr_cloud.points)
        # compute rotation matrix from quaternions
        curr_rotation_matr = quaternion_rotation_matrix(curr_quaternion)
        curr_rotation_matr = np.squeeze(curr_rotation_matr)
        curr_translation = np.squeeze(curr_translation)
        # create transformation matrix
        curr_transformation[:,0:3] = curr_rotation_matr
        curr_transformation[:,3] = curr_translation
        # transform current cloud
        for i in range(curr_cloud.shape[0]):
            # apply rotation
            curr_point = np.matmul(curr_rotation_matr, np.transpose(curr_cloud[i,:]))
            # apply translation
            curr_point = curr_point + curr_translation
            curr_cloud[i,0] = curr_point[0]
            curr_cloud[i,1] = curr_point[1]
            curr_cloud[i,2] = curr_point[2]
        # add current cloud to list of clouds
        clouds.append(curr_cloud)
    # plot separate point clouds in same graph
    ax = plt.axes(projection='3d')
    for cloud in clouds:
        ax.plot(cloud[:,0], cloud[:,1], cloud[:,2], 'bo', markersize=0.005)
    # ax.view_init(elev=90, azim=270)
    ax.view_init(elev=100, azim=270)
    plt.axis('off')
    plt.savefig("ZZZ_Stanford_Bunny_PointCloud.png", bbox_inches='tight')
    plt.show()

Is this part of a real IFFT process really optimal?

When calculating an (I)FFT it is possible to process "N*2 real" data points using an ordinary complex (I)FFT of N data points.
Not sure about my terminology here, but this is how I've read it described.
There are several posts about this on stackoverflow already.
This can speed things up a bit when only dealing with such "real" data, which is often the case when dealing with, for example, sound (re-)synthesis.
This increase in speed is offset by the need for a pre-processing step that somehow... uhh... fidaddles? the data to achieve this. Look, I'm not even going to try to convince anyone I fully understand this, but thanks to the previously mentioned threads I came up with the following routine, which does the job nicely (thank you!).
However, on my microcontroller this costs a bit more than I'd like, even though the trigonometric functions are already optimized with LUTs.
But the routine itself just looks like it should be possible to optimize mathematically to minimize processing. To me it seems similar to a plain 2D rotation. I just can't quite wrap my head around it, but it feels like this could be done with fewer trigonometric calls and fewer arithmetic operations.
I was hoping perhaps someone else might easily see what I don't and provide some insight into how this math may be simplified.
This particular routine is for use with IFFT, before the bit-reversal stage.
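For reference, this appears to be the standard even/odd repacking identity for synthesizing 2N real samples from an N-point complex IFFT (a sketch of my own; sign and normalization conventions may differ from the routine below). Given the target spectrum X_k and its mirrored bin X_{N-k}:
$$E_k = \tfrac{1}{2}\bigl(X_k + \overline{X_{N-k}}\bigr), \qquad O_k = \tfrac{1}{2}\,e^{i\pi k/N}\bigl(X_k - \overline{X_{N-k}}\bigr), \qquad Z_k = E_k + i\,O_k,$$
and a length-N complex IFFT of Z gives the 2N real samples interleaved as x_{2n} = Re(z_n), x_{2n+1} = Im(z_n). MAG_A/PHA_A and MAG_B/PHA_B in the pseudo-code appear to correspond to the polar forms of X_k and X_{N-k}.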
pseudo-version:
INPUT
MAG_A/B = 0 TO 1
PHA_A/B = 0 TO 2PI
INDEX = 0 TO PI/2
r = MAG_A * sin(PHA_A)
i = MAG_B * sin(PHA_B)
rsum = r + i
rdif = r - i
r = MAG_A * cos(PHA_A)
i = MAG_B * cos(PHA_B)
isum = r + i
idif = r - i
r = -cos(INDEX)
i = -sin(INDEX)
rtmp = r * isum + i * rdif
itmp = i * isum - r * rdif
OUTPUT rsum + rtmp
OUTPUT itmp + idif
OUTPUT rsum - rtmp
OUTPUT itmp - idif
original working code, if that's your poison:
void fft_nz_set(fft_complex_t complex[], unsigned bits, unsigned index, int32_t mag_lo, int32_t pha_lo, int32_t mag_hi, int32_t pha_hi) {
unsigned size = 1 << bits;
unsigned shift = SINE_TABLE_BITS - (bits - 1);
unsigned n = index; // index for mag_lo, pha_lo
unsigned z = size - index; // index for mag_hi, pha_hi
int32_t rsum, rdif, isum, idif, r, i;
r = smmulr(mag_lo, sine(pha_lo)); // mag_lo * sin(pha_lo)
i = smmulr(mag_hi, sine(pha_hi)); // mag_hi * sin(pha_hi)
rsum = r + i; rdif = r - i;
r = smmulr(mag_lo, cosine(pha_lo)); // mag_lo * cos(pha_lo)
i = smmulr(mag_hi, cosine(pha_hi)); // mag_hi * cos(pha_hi)
isum = r + i; idif = r - i;
r = -sinetable[(1 << SINE_BITS) - (index << shift)]; // cos(pi_c * (index / size) / 2)
i = -sinetable[index << shift]; // sin(pi_c * (index / size) / 2)
int32_t rtmp = smmlar(r, isum, smmulr(i, rdif)) << 1; // r * isum + i * rdif
int32_t itmp = smmlsr(i, isum, smmulr(r, rdif)) << 1; // i * isum - r * rdif
complex[n].r = rsum + rtmp;
complex[n].i = itmp + idif;
complex[z].r = rsum - rtmp;
complex[z].i = itmp - idif;
}
// For reference, this would be used as follows to generate a sawtooth (after IFFT)
void synth_sawtooth(fft_complex_t *complex, unsigned fft_bits) {
unsigned fft_size = 1 << fft_bits;
fft_sym_dc(complex, 0, 0); // sets dc bin [0]
for(unsigned n = 1, z = fft_size - 1; n <= fft_size >> 1; n++, z--) {
// calculation of amplitude/index (sawtooth) for both n and z
fft_sym_magnitude(complex, fft_bits, n, 0x4000000 / n, 0x4000000 / z);
}
}

Julia Set - CUDA: improving the performance failed

Recently I have been learning from the examples in the book CUDA by Example by Jason Sanders.
The Julia Set example performs badly, taking 7032 ms.
Here is the program:
#include <cuda.h>
#include <cuda_runtime.h>
#include <cpu_bitmap.h>
#include <book.h>
#define DIM 1024
struct cuComplex{
float r;
float i;
__device__ cuComplex(float a, float b) : r(a),i(b){
}
__device__ float magnitude2(void){
return r*r+i*i;
}
__device__ cuComplex operator *(const cuComplex& a){
return cuComplex(r*a.r-i*a.i, i*a.r+r*a.i);
}
__device__ cuComplex operator +(const cuComplex& a){
return cuComplex(r+a.r,i+a.i);
}
};
__device__ int julia(int x,int y){
const float scale = 1.5;
float jx = scale * (float)(DIM/2 - x)/(DIM/2);
float jy = scale * (float)(DIM/2 - y)/(DIM/2);
cuComplex c(-0.8,0.156);
cuComplex a(jx,jy);
int i = 0;
for(i = 0; i<200; i++){
a = a*a + c;
if(a.magnitude2() > 1000){
return 0;
}
}
return 1;
}
__global__ void kernel(unsigned char *ptr){
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y*gridDim.x;
int juliaValue = julia(x,y);
ptr[offset*4 + 0] = 255*juliaValue;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 1;
ptr[offset*4 + 3] = 255;
}
int main(void){
CPUBitmap bitmap(DIM,DIM);
unsigned char * dev_bitmap;
dim3 grid(DIM,DIM);
dim3 blocks(DIM/16,DIM/16);
dim3 threads(16,16);
dim3 thread(DIM,DIM);
cudaEvent_t start,stop;
cudaEvent_t bitmapCpy_start,bitmapCpy_stop;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
HANDLE_ERROR(cudaEventCreate(&bitmapCpy_start));
HANDLE_ERROR(cudaEventCreate(&bitmapCpy_stop));
HANDLE_ERROR(cudaMalloc((void **)&dev_bitmap,bitmap.image_size()));
HANDLE_ERROR(cudaEventRecord(start,0));
kernel<<<grid,1>>>(dev_bitmap);
HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(),dev_bitmap,bitmap.image_size(),cudaMemcpyDeviceToHost));
//HANDLE_ERROR(cudaEventRecord(bitmapCpy_stop,0));
//HANDLE_ERROR(cudaEventSynchronize(bitmapCpy_stop));
// float copyTime;
// HANDLE_ERROR(cudaEventElapsedTime(&copyTime,bitmapCpy_start,bitmapCpy_stop));
HANDLE_ERROR(cudaEventRecord(stop,0));
HANDLE_ERROR(cudaEventSynchronize(stop));
float elapsedTime;
HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime,start,stop));
//printf("Total time is %3.1f ms, time for copying is %3.1f ms \n",elapsedTime,copyTime);
printf("Total time is %3.1f ms\n",elapsedTime);
bitmap.display_and_exit();
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
HANDLE_ERROR(cudaEventDestroy(bitmapCpy_start));
HANDLE_ERROR(cudaEventDestroy(bitmapCpy_stop));
HANDLE_ERROR(cudaFree(dev_bitmap));
}
I think the main factor that influences the performance is that the program above just runs 1 thread in every block:
kernel<<<grid,1>>>(dev_bitmap);
so I changed the kernel as follows:
__global__ void kernel(unsigned char *ptr){
int x = threadIdx.x + blockIdx.x*blockDim.x;
int y = threadIdx.y + blockIdx.y*blockDim.y;
int offset = x + y*gridDim.x*blockIdx.x;
int juliaValue = julia(x,y);
ptr[offset*4 + 0] = 255*juliaValue;
ptr[offset*4 + 1] = 0;
ptr[offset*4 + 2] = 1;
ptr[offset*4 + 3] = 255;
}
and called the kernel:
dim3 blocks(DIM/16,DIM/16);
dim3 threads(16,16);
kernel<<<blocks,threads>>>(dev_bitmap);
I think this change is not a big deal, but when I ran it, it acted as if it had run into an endless loop: no image appeared and I couldn't do anything with my screen, it just blocked there.
toolkit: cuda 5.5
system: ubuntu 12.04
When I run the original code you have posted here, I get a correct display and a time of ~340ms.
When I make your kernel change, I get an "unspecified launch error" on the kernel launch.
In your modified kernel, you have the following which is an incorrect computation:
int offset = x + y*gridDim.x*blockIdx.x;
When I change it to:
int offset = x + y*gridDim.x*blockDim.x;
I get normal execution and results, and an indicated time of ~10ms.
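To see why the original offset goes out of range, here is a small host-side check of my own of the two formulas for the last pixel (DIM = 1024 with 16x16 blocks, so gridDim.x = 64):
#include <cstdio>

int main() {
    const int DIM = 1024, blockDim_x = 16, gridDim_x = DIM / blockDim_x; // 64
    const int x = DIM - 1, y = DIM - 1, blockIdx_x = gridDim_x - 1;      // last thread of the last block
    const int wrong   = x + y * gridDim_x * blockIdx_x; // 4125759 -> far beyond the DIM*DIM bitmap
    const int correct = x + y * gridDim_x * blockDim_x; // 1048575 -> last valid pixel index
    std::printf("wrong=%d correct=%d pixels=%d\n", wrong, correct, DIM * DIM);
    return 0;
}
Writing four bytes per pixel at such offsets lands well outside the allocated bitmap, which is consistent with the "unspecified launch error".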
