How to use FF_CONSTANT for Force feedback for Linux? - linux-kernel

I am not able to utilize the FF_CONSTANT force effect. The code I tried is:
struct ff_effect joy_effect_, joy_effect_2;
if (iwantconstantforce)
{
    joy_effect_.id = -1; // -1: ask the driver to allocate a new effect id
    joy_effect_.type = FF_CONSTANT;
    joy_effect_.direction = 0x0000; // down
    joy_effect_.replay.length = 100; // duration in ms
    joy_effect_.replay.delay = 0;
    joy_effect_.trigger.button = 0;
    joy_effect_.trigger.interval = 100;
    // BUG FIX: ff_constant_effect.level is a signed 16-bit value (__s16).
    // Assigning 65535 wraps around to -1, i.e. essentially no force.
    // Use the maximum positive level instead.
    joy_effect_.u.constant.level = 0x7fff;
    joy_effect_.u.constant.envelope.attack_length = joy_effect_.replay.length / 10;
    joy_effect_.u.constant.envelope.fade_length = joy_effect_.replay.length / 10;
    joy_effect_.u.constant.envelope.attack_level = joy_effect_.u.constant.level / 10;
    joy_effect_.u.constant.envelope.fade_level = joy_effect_.u.constant.level / 10;
    // BUG FIX: unlike the damper/spring branches below, this effect was
    // never uploaded to the device. Upload it here; to actually feel it,
    // it must afterwards be started by writing an EV_FF input event with
    // the id the driver stored back into joy_effect_.id.
    int ret = ioctl(ff_fd_, EVIOCSFF, &joy_effect_); // upload the effect
}
I am able to produce FF_SPRING and FF_DAMPER effects with the following code.
// Damper effect: per the Linux FF API this is a condition effect whose
// force opposes motion; only axis 0's parameters are filled in here.
if (youwantdampereffect)
{
joy_effect_.id = -1; // -1 asks the driver to allocate a fresh effect id
joy_effect_.direction = 0; // down
joy_effect_.type = FF_DAMPER;
joy_effect_.replay.length = 20; // duration in ms
joy_effect_.replay.delay = 0;
// NOTE(review): u.condition[1] (second axis) is left uninitialized —
// consider zeroing it before uploading; verify against the driver used.
joy_effect_.u.condition[0].right_saturation = 65535;
joy_effect_.u.condition[0].left_saturation = 65535;
joy_effect_.u.condition[0].right_coeff = 65535 / 2;
joy_effect_.u.condition[0].left_coeff = 65535 / 2;
joy_effect_.u.condition[0].deadband = 0;
joy_effect_.u.condition[0].center = 0;
int ret = ioctl(ff_fd_, EVIOCSFF, &joy_effect_); // upload the effect
}
// Spring effect: condition effect centered at `center`, pulling back
// toward it; again only axis 0 is configured.
if (youwantspringeffect)
{
joy_effect_2.id = -1; // -1 asks the driver to allocate a fresh effect id
joy_effect_2.direction = 0; // down
joy_effect_2.type = FF_SPRING;
joy_effect_2.replay.length = 20; // duration in ms
joy_effect_2.replay.delay = 0;
joy_effect_2.u.condition[0].right_saturation = 65535 / 2;
joy_effect_2.u.condition[0].left_saturation = 65535 / 2;
joy_effect_2.u.condition[0].right_coeff = 32767;
joy_effect_2.u.condition[0].left_coeff = 32767;
joy_effect_2.u.condition[0].deadband = 0;
joy_effect_2.u.condition[0].center = 0;
int ret = ioctl(ff_fd_, EVIOCSFF, &joy_effect_2); // upload the effect
}
I cannot find any info about what a constant force effect feels like or when it makes sense to use it.
Can somebody brief its importance and usage?
Thanks :)

Related

Reading EXR file

I'm trying to create a IWICBitmap from an EXR file (error checks removed).
#pragma pack(push,1)
// One output pixel: 32-bit float RGBA, packed to exactly 16 bytes so a
// contiguous std::vector<fl> matches the 128bpp-float WIC pixel layout
// passed to CreateBitmapFromMemory below.
struct fl
{
float r, g, b, a;
};
#pragma pack(pop)
// Reads an EXR file chunk by chunk with the OpenEXRCore decode pipeline,
// converts HALF channels to 32-bit float RGBA, and wraps the result in a
// WIC bitmap. Error checks were removed by the author; `rv` is mostly
// assigned but never inspected.
HRESULT Open(const char* f,IWICBitmap** d)
{
exr_context_initializer_t ctxtinit = EXR_DEFAULT_CONTEXT_INITIALIZER;
exr_context_t myfile = {};
exr_result_t rv = exr_start_read(&myfile, f, &ctxtinit);
int part_index = 0;
// Channel list for part 0 — used below to map channel names (R/G/B/A)
// onto the output struct's fields.
const exr_attr_chlist_t* chl = 0;
exr_get_channels(myfile, part_index, &chl);
int32_t ck = 0;
rv = exr_get_chunk_count(myfile, part_index, &ck);
int32_t sl = 0;
rv = exr_get_scanlines_per_chunk(myfile, part_index, &sl);
int y = 0; // first scanline of the next chunk to read
int wi = 0; // image width (taken from the last chunk seen)
int he = 0; // accumulated image height across chunks
std::vector<fl> data; // put here the floats
exr_decode_pipeline_t dec = {};
for (int32_t cuk = 0; cuk < ck; cuk++)
{
exr_chunk_info_t ch = {};
exr_read_scanline_chunk_info(myfile, part_index, y, &ch);
wi = ch.width;
he += ch.height;
y += sl;
// The decode pipeline is initialized once (first chunk), then only
// updated for every subsequent chunk, as the OpenEXRCore API expects.
bool first = 0;
if (dec.decompress_fn == 0)
{
rv = exr_decoding_initialize(myfile, part_index, &ch, &dec);
rv = exr_decoding_choose_default_routines(myfile, part_index, &dec);
first = 1;
}
if (!first)
rv = exr_decoding_update(myfile, part_index,&ch,&dec);
rv = exr_decoding_run(myfile, part_index, &dec);
int NumPixels = (wi * ch.height);
auto BytesPerPixel = ch.unpacked_size / NumPixels;
if (true)
{
// RGB(A)
if (chl->entries[0].pixel_type == EXR_PIXEL_HALF)
{
// NOTE(review): this loop assumes the unpacked buffer is interleaved
// per pixel in channel-list order. EXR scanline data is normally
// planar (all of one channel per scanline, then the next) — TODO
// confirm against the OpenEXRCore docs; a planar layout here would
// produce exactly the kind of scrambled output described below.
if (BytesPerPixel == chl->num_channels * 2)
{
auto ds = data.size();
data.resize(ds + NumPixels);
auto p = data.data() + ds;
char* x = (char*)dec.unpacked_buffer;
for (int j = 0; j < NumPixels; j++)
{
uint16_t* u = (uint16_t*)x;
p->a = 1.0f; // default alpha when no "A" channel is present
for (int jH = 0; jH < chl->num_channels; jH++)
{
half ha(Imath_3_2::half::FromBits,*u);
ha.setBits(*u); // redundant: the FromBits constructor already set these bits
if (strcmp(chl->entries[jH].name.str, "R") == 0) p->r = ha.operator float();
if (strcmp(chl->entries[jH].name.str, "G") == 0) p->g = ha.operator float();
if (strcmp(chl->entries[jH].name.str, "B") == 0) p->b = ha.operator float();
if (strcmp(chl->entries[jH].name.str, "A") == 0) p->a = ha.operator float();
u++;
}
x += BytesPerPixel;
p++;
}
}
else
break;
}
if (chl->entries[0].pixel_type == EXR_PIXEL_FLOAT)
{
// code removed for simplicity, I guess the same issue happens here unless it's a problem of the half-float
}
}
}
rv = exr_decoding_destroy(myfile, &dec);
exr_finish(&myfile);
CComPtr<IWICImagingFactory2> wbfact = 0;
CoCreateInstance(CLSID_WICImagingFactory2, 0, CLSCTX_INPROC_SERVER,
__uuidof(IWICImagingFactory2), (void**)&wbfact);
// NOTE(review): the format is PRGBA (premultiplied alpha) but the data
// built above is straight alpha — confirm GUID_WICPixelFormat128bppRGBAFloat
// isn't the intended format. Stride is wi*16 (16 bytes per fl pixel).
return wbfact->CreateBitmapFromMemory(wi, he, GUID_WICPixelFormat128bppPRGBAFloat, wi * 16,(UINT)data.size()*16, (BYTE*)data.data(), d);
}
What am I doing wrong? The pixel number I'm reading is correct (in this image 800x800).
My result:
Photoshop:
Is there a problem with the half-float? I'm just using the OpenEXR's IMath implementation.

Windows.h GDI - BITMAPINFO alloca indicates failure

I want to create an image from the desktop and set it to gray using BITMAPINFO, but it keeps showing me the warning, "Warning C6255 _alloca indicates failure by raising a stack overflow exception. Consider using _malloca instead".
I would appreciate any advice.
// Creates an 8-bpp DIB section of cx x cy pixels with a 256-entry
// greyscale palette (index i -> RGB(i, i, i)).
//
// FIX (warning C6255): the palette-sized BITMAPINFO was allocated with
// alloca(), which signals failure by raising a stack-overflow exception.
// The required size is a compile-time constant (~1 KB), so a plain local
// struct is sufficient and removes the allocation call entirely.
HBITMAP CreateGreyscaleBitmap(int cx, int cy)
{
    struct GreyBitmapInfo
    {
        BITMAPINFOHEADER bmiHeader;
        RGBQUAD bmiColors[256]; // full palette for 8 bpp
    };
    GreyBitmapInfo bmi = {}; // zero-init: no stale header fields
    bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
    bmi.bmiHeader.biWidth = cx;
    bmi.bmiHeader.biHeight = cy;           // positive: bottom-up bitmap
    bmi.bmiHeader.biPlanes = 1;
    bmi.bmiHeader.biBitCount = 8;          // palette-indexed
    bmi.bmiHeader.biCompression = BI_RGB;
    bmi.bmiHeader.biSizeImage = 0;         // may be 0 for BI_RGB
    bmi.bmiHeader.biXPelsPerMeter = 14173; // ~360 DPI
    bmi.bmiHeader.biYPelsPerMeter = 14173;
    bmi.bmiHeader.biClrUsed = 0;           // 0 -> use all 2^biBitCount entries
    bmi.bmiHeader.biClrImportant = 0;
    // Identity grey ramp.
    for (int i = 0; i < 256; i++)
    {
        bmi.bmiColors[i].rgbRed = (BYTE)i;
        bmi.bmiColors[i].rgbGreen = (BYTE)i;
        bmi.bmiColors[i].rgbBlue = (BYTE)i;
        bmi.bmiColors[i].rgbReserved = 0;
    }
    PVOID pv = nullptr; // receives a pointer to the DIB's pixel bits
    return CreateDIBSection(NULL, reinterpret_cast<BITMAPINFO*>(&bmi),
                            DIB_RGB_COLORS, &pv, NULL, 0);
}

Fully Associative Cache implementation

I don't understand why my code for the fully associative cache doesn't match the trace files that I'm given.
The parameters are each cache line is 32 bytes and the total cache size is 16KB.
My implementations for set associative caches of 2, 4, 8, and 16 ways all work perfectly (using a least-recently-used replacement policy). But fully associative, which could also be described as 32-way set associative, is VERY close to the trace file but not quite. Frankly, I don't know how to debug this one since there is a vast number of steps (at least the way I did it).
Here's the relevant parts of my code (excuse the inefficiency)
//Fully Associative
int **fullyAssoc;
fullyAssoc = new int*[64]; //where fullyAssoc[0][index] is way 0, fullyAssoc[2][index] is way 1 etc..
int **LRU32;
LRU32 = new int*[32];
for (int i = 0; i < 64; ++i){ //Initialize all entries in fullyAssoc to 0
fullyAssoc[i] = new int[16 * CACHE_LINE / 32];
}
for (int i = 0; i < 16; i++){ //Initialize LRU array
LRU32[0][i] = 0;
LRU32[1][i] = 1;
LRU32[2][i] = 2;
LRU32[3][i] = 3;
LRU32[4][i] = 4;
LRU32[5][i] = 5;
LRU32[6][i] = 6;
LRU32[7][i] = 7;
LRU32[8][i] = 8;
LRU32[9][i] = 9;
LRU32[10][i] = 10;
LRU32[11][i] = 11;
LRU32[12][i] = 12;
LRU32[13][i] = 13;
LRU32[14][i] = 14;
LRU32[15][i] = 15;
LRU32[16][i] = 16;
LRU32[17][i] = 17;
LRU32[18][i] = 18;
LRU32[19][i] = 19;
LRU32[20][i] = 20;
LRU32[21][i] = 21;
LRU32[22][i] = 22;
LRU32[23][i] = 23;
LRU32[24][i] = 24;
LRU32[25][i] = 25;
LRU32[26][i] = 26;
LRU32[27][i] = 27;
LRU32[28][i] = 28;
LRU32[29][i] = 29;
LRU32[30][i] = 30;
LRU32[31][i] = 31;
}
int fullyAssocLRU = 0;
int memCount = 0;
while(getline(fileIn, line)){
stringstream s(line);
s >> instruction >> hex >> address;
int indexFull;
int tagFull;
unsigned long long address, addressFull;
address = address >> 5; //Byte offset
addressFull = address;
indexFull = addressFull % 16;
tagFull = addressFull >> 4;
if (assocCache(fullyAssoc, indexFull, 32, tagFull, LRU32) == 1){
fullyAssocLRU++;
}
}
// Marks a way as most recently used for one set.
//
// lru[pos][index] lists the ways of set `index` from least recently used
// (pos 0) to most recently used (pos ways-1). `way` is passed as a row
// offset into the data array (2 * way number), matching assocCache.
//
// FIX: the original buffered the survivors in `int temp2[ways]`, a
// variable-length array that is not standard C++. This version shifts the
// entries in place instead, and also does nothing (rather than writing a
// bogus 0 into the MRU slot) if the way is not present in the list.
void LRU_update(int **lru, int index, int way, int ways){
    const int target = way / 2; // row offset -> way number
    // Locate the way in the recency list.
    int pos = 0;
    while (pos < ways && lru[pos][index] != target){
        ++pos;
    }
    if (pos == ways){
        return; // way not tracked for this set; nothing to reorder
    }
    // Shift everything behind it one slot toward LRU, then place the way
    // in the MRU slot.
    for (int j = pos; j < ways - 1; ++j){
        lru[j][index] = lru[j + 1][index];
    }
    lru[ways - 1][index] = target;
}
// Looks up `tag` in set `index` of an associative cache and returns true
// on a hit. Rows 2*w of `block` hold each way's valid bit and rows 2*w+1
// its tag. On a miss the tag is installed in the first empty way, or the
// least recently used way is evicted; `lru` recency state is refreshed
// either way via LRU_update.
bool assocCache(int **block, int index, int ways, int tag, int **lru){
    for (int way = 0; way < ways; ++way){
        const int row = 2 * way;
        if (block[row][index] == 0){
            // First empty way: fill it for this tag (compulsory miss).
            block[row][index] = 1;
            block[row + 1][index] = tag;
            LRU_update(lru, index, row, ways);
            return false;
        }
        if (block[row + 1][index] == tag){
            // Hit: just refresh recency.
            LRU_update(lru, index, row, ways);
            return true;
        }
    }
    // Set full with no match: evict the LRU way (front of the list).
    const int victim = 2 * lru[0][index];
    block[victim][index] = 1;
    block[victim + 1][index] = tag;
    LRU_update(lru, index, victim, ways);
    return false;
}
The trace files is supposed to be:
837589,1122102; 932528,1122102; 972661,1122102; 1005547,1122102; //For direct mapped
993999,1122102; 999852,1122102; 999315,1122102; 1000092,1122102; //For set associative
1000500,1122102; //For fully associative (LRU)
My output is:
837589,1122102; 932528,1122102; 972661,1122102; 1005547,1122102;
939999,1122102; 999852,1122102; 999315,1122102; 1000092,1122102;
1000228,1122102;
As you can see, for the fully associative one, it's only 272 off the correct output. Why would it be off when switching from 16 ways to 32 ways?
Ah, I mistakenly thought a fully associative cache with a 32-byte line size and 16KB total size would be 32 ways, when it is actually 512 ways.

Terminal command to show connected displays/monitors/resolutions?

Is there a way to get the information about connected monitors and displays and their resolutions via the terminal in OS X?
I have some installations that run on multiple monitors and sometimes with a GFLW window that seem to not open if a monitor is not attached - I would like to have a check for whether a monitor is attached properly and maybe dump that to a log file, but I haven't found a programmatic way to do that without getting deep into Obj-C/Cocoa
You can use system_profiler SPDisplaysDataType or defaults read /Library/Preferences/com.apple.windowserver.plist:
$ system_profiler SPDisplaysDataType
Graphics/Displays:
NVIDIA GeForce GT 640M:
Chipset Model: NVIDIA GeForce GT 640M
Type: GPU
Bus: PCIe
PCIe Lane Width: x16
VRAM (Total): 512 MB
Vendor: NVIDIA (0x10de)
Device ID: 0x0fd8
Revision ID: 0x00a2
ROM Revision: 3707
Displays:
iMac:
Display Type: LCD
Resolution: 1920 x 1080
Pixel Depth: 32-Bit Color (ARGB8888)
Main Display: Yes
Mirror: Off
Online: Yes
Built-In: Yes
Connection Type: DisplayPort
$ defaults read /Library/Preferences/com.apple.windowserver.plist
{
DisplayResolutionEnabled = 1;
DisplaySets = (
(
{
Active = 1;
Depth = 4;
DisplayID = 69731456;
DisplayProductID = 40978;
DisplaySerialNumber = 0;
DisplayVendorID = 1552;
Height = 1080;
IODisplayLocation = "IOService:/AppleACPIPlatformExpert/PCI0#0/AppleACPIPCI/P0P2#1/IOPCI2PCIBridge/GFX0#0/NVDA,Display-A#0/NVDA";
IOFlags = 7;
LimitsHeight = 1080;
LimitsOriginX = 0;
LimitsOriginY = 0;
LimitsWidth = 1920;
MirrorID = 0;
Mirrored = 0;
Mode = {
BitsPerPixel = 32;
BitsPerSample = 8;
DepthFormat = 4;
Height = 1080;
IODisplayModeID = "-2147479552";
IOFlags = 7;
Mode = 1;
PixelEncoding = "--------RRRRRRRRGGGGGGGGBBBBBBBB";
RefreshRate = 0;
SamplesPerPixel = 3;
UsableForDesktopGUI = 1;
Width = 1920;
kCGDisplayBytesPerRow = 7680;
kCGDisplayHorizontalResolution = 103;
kCGDisplayModeIsInterlaced = 0;
kCGDisplayModeIsSafeForHardware = 1;
kCGDisplayModeIsStretched = 0;
kCGDisplayModeIsTelevisionOutput = 0;
kCGDisplayModeIsUnavailable = 0;
kCGDisplayModeSuitableForUI = 1;
kCGDisplayPixelsHigh = 1080;
kCGDisplayPixelsWide = 1920;
kCGDisplayResolution = 1;
kCGDisplayVerticalResolution = 103;
};
OriginX = 0;
OriginY = 0;
PixelEncoding = "--------RRRRRRRRGGGGGGGGBBBBBBBB";
Resolution = 1;
Unit = 0;
UnmirroredHeight = 1080;
UnmirroredLimitsHeight = 1080;
UnmirroredLimitsOriginX = 0;
UnmirroredLimitsOriginY = 0;
UnmirroredLimitsWidth = 1920;
UnmirroredMode = {
BitsPerPixel = 32;
BitsPerSample = 8;
DepthFormat = 4;
Height = 1080;
IODisplayModeID = "-2147479552";
IOFlags = 7;
Mode = 1;
PixelEncoding = "--------RRRRRRRRGGGGGGGGBBBBBBBB";
RefreshRate = 0;
SamplesPerPixel = 3;
UsableForDesktopGUI = 1;
Width = 1920;
kCGDisplayBytesPerRow = 7680;
kCGDisplayHorizontalResolution = 103;
kCGDisplayModeIsInterlaced = 0;
kCGDisplayModeIsSafeForHardware = 1;
kCGDisplayModeIsStretched = 0;
kCGDisplayModeIsTelevisionOutput = 0;
kCGDisplayModeIsUnavailable = 0;
kCGDisplayModeSuitableForUI = 1;
kCGDisplayPixelsHigh = 1080;
kCGDisplayPixelsWide = 1920;
kCGDisplayResolution = 1;
kCGDisplayVerticalResolution = 103;
};
UnmirroredOriginX = 0;
UnmirroredOriginY = 0;
UnmirroredResolution = 1;
UnmirroredWidth = 1920;
Width = 1920;
}
)
);
ForceOldStyleMemoryManagement = 0;
}
You can also use a command-line tool called cscreen:
# install homebrew if you don't have it already
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
# install cscreen
brew install --cask cscreen
# list displays
cscreen -l
output:
DisplayID Index Depth Width Height Refresh
1 1 32 1440 900 60
2 2 32 1920 1080 60
use -h to display all usage options
Note:
The first time you try to open the cscreen the os will not allow it because it isn't signed.
You have to go to Security & Privacy in System Preferences and allow it.
Homebrew package search

modifying the lengthY of some cubes is slow

I want to write a 3d version of a fft. (Like this:https://wiki.mozilla.org/File:Fft.png)
So I created a few bars and, in an outside function, my first approach was to set lengthY to a value. Then I call bar.modified() to force it to be repainted.
If I now use more than 50 bars, it is horribly slow (on my 4-core CPU). I guess there's a better way to do it, right?
Source:
// Shared list of bar meshes, reused across create3d() calls so existing
// cubes can be resized later via setVal().
// BUG FIX: this was declared as `elements` but every use below (and in
// setVal) refers to `a`, which was therefore undefined.
var a = [];
create3d = function (len) {
  // Builds (on first call) and renders `len` bar cubes in a row.
  var r = new X.renderer3D();
  r.init();
  if (a.length === 0) {
    for (var y = 0; y < len; y++) {
      a.push(new X.cube());
    }
  }
  for (var i = 0; i < len; i++) {
    a[i].center = [i * 2, 0, 0]; // bars spaced 2 units apart along X
    a[i].lengthX = 1;
    a[i].lengthY = 20; // initial bar height; updated later by setVal()
    a[i].lengthZ = 1;
    a[i].color = [i % 2, 0, 0]; // alternate black/red
    r.add(a[i]);
  }
  r.render();
};
// Resizes one bar and flags it so the renderer repaints it.
function setVal(index, val) {
  var bar = a[index];
  bar.lengthY = val;
  bar.modified();
}
I created a JSFiddle on how to do that and it is pretty fast for 1000 cubes
http://jsfiddle.net/haehn/6fVRC/

Resources