Terminal command to show connected displays/monitors/resolutions? - macOS

Is there a way to get the information about connected monitors and displays and their resolutions via the terminal in OS X?
I have some installations that run on multiple monitors, sometimes with a GLFW window that seems not to open if a monitor is not attached. I would like to have a check for whether a monitor is attached properly, and maybe dump that to a log file, but I haven't found a programmatic way to do that without getting deep into Obj-C/Cocoa.

You can use system_profiler SPDisplaysDataType or defaults read /Library/Preferences/com.apple.windowserver.plist:
$ system_profiler SPDisplaysDataType
Graphics/Displays:
    NVIDIA GeForce GT 640M:
      Chipset Model: NVIDIA GeForce GT 640M
      Type: GPU
      Bus: PCIe
      PCIe Lane Width: x16
      VRAM (Total): 512 MB
      Vendor: NVIDIA (0x10de)
      Device ID: 0x0fd8
      Revision ID: 0x00a2
      ROM Revision: 3707
      Displays:
        iMac:
          Display Type: LCD
          Resolution: 1920 x 1080
          Pixel Depth: 32-Bit Color (ARGB8888)
          Main Display: Yes
          Mirror: Off
          Online: Yes
          Built-In: Yes
          Connection Type: DisplayPort
$ defaults read /Library/Preferences/com.apple.windowserver.plist
{
    DisplayResolutionEnabled = 1;
    DisplaySets = (
        (
            {
                Active = 1;
                Depth = 4;
                DisplayID = 69731456;
                DisplayProductID = 40978;
                DisplaySerialNumber = 0;
                DisplayVendorID = 1552;
                Height = 1080;
                IODisplayLocation = "IOService:/AppleACPIPlatformExpert/PCI0#0/AppleACPIPCI/P0P2#1/IOPCI2PCIBridge/GFX0#0/NVDA,Display-A#0/NVDA";
                IOFlags = 7;
                LimitsHeight = 1080;
                LimitsOriginX = 0;
                LimitsOriginY = 0;
                LimitsWidth = 1920;
                MirrorID = 0;
                Mirrored = 0;
                Mode = {
                    BitsPerPixel = 32;
                    BitsPerSample = 8;
                    DepthFormat = 4;
                    Height = 1080;
                    IODisplayModeID = "-2147479552";
                    IOFlags = 7;
                    Mode = 1;
                    PixelEncoding = "--------RRRRRRRRGGGGGGGGBBBBBBBB";
                    RefreshRate = 0;
                    SamplesPerPixel = 3;
                    UsableForDesktopGUI = 1;
                    Width = 1920;
                    kCGDisplayBytesPerRow = 7680;
                    kCGDisplayHorizontalResolution = 103;
                    kCGDisplayModeIsInterlaced = 0;
                    kCGDisplayModeIsSafeForHardware = 1;
                    kCGDisplayModeIsStretched = 0;
                    kCGDisplayModeIsTelevisionOutput = 0;
                    kCGDisplayModeIsUnavailable = 0;
                    kCGDisplayModeSuitableForUI = 1;
                    kCGDisplayPixelsHigh = 1080;
                    kCGDisplayPixelsWide = 1920;
                    kCGDisplayResolution = 1;
                    kCGDisplayVerticalResolution = 103;
                };
                OriginX = 0;
                OriginY = 0;
                PixelEncoding = "--------RRRRRRRRGGGGGGGGBBBBBBBB";
                Resolution = 1;
                Unit = 0;
                UnmirroredHeight = 1080;
                UnmirroredLimitsHeight = 1080;
                UnmirroredLimitsOriginX = 0;
                UnmirroredLimitsOriginY = 0;
                UnmirroredLimitsWidth = 1920;
                UnmirroredMode = {
                    BitsPerPixel = 32;
                    BitsPerSample = 8;
                    DepthFormat = 4;
                    Height = 1080;
                    IODisplayModeID = "-2147479552";
                    IOFlags = 7;
                    Mode = 1;
                    PixelEncoding = "--------RRRRRRRRGGGGGGGGBBBBBBBB";
                    RefreshRate = 0;
                    SamplesPerPixel = 3;
                    UsableForDesktopGUI = 1;
                    Width = 1920;
                    kCGDisplayBytesPerRow = 7680;
                    kCGDisplayHorizontalResolution = 103;
                    kCGDisplayModeIsInterlaced = 0;
                    kCGDisplayModeIsSafeForHardware = 1;
                    kCGDisplayModeIsStretched = 0;
                    kCGDisplayModeIsTelevisionOutput = 0;
                    kCGDisplayModeIsUnavailable = 0;
                    kCGDisplayModeSuitableForUI = 1;
                    kCGDisplayPixelsHigh = 1080;
                    kCGDisplayPixelsWide = 1920;
                    kCGDisplayResolution = 1;
                    kCGDisplayVerticalResolution = 103;
                };
                UnmirroredOriginX = 0;
                UnmirroredOriginY = 0;
                UnmirroredResolution = 1;
                UnmirroredWidth = 1920;
                Width = 1920;
            }
        )
    );
    ForceOldStyleMemoryManagement = 0;
}
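To turn this into the check asked about above, here is a minimal sketch (the grep pattern assumes the "Online: Yes" line that system_profiler prints per active display, as in the output above; the log path is arbitrary):
#!/bin/sh
# Count displays reported as active; log the result.
displays=$(system_profiler SPDisplaysDataType | grep -c "Online: Yes")
if [ "$displays" -eq 0 ]; then
    echo "$(date): WARNING: no display attached" >> "$HOME/display-check.log"
else
    echo "$(date): $displays display(s) attached" >> "$HOME/display-check.log"
fi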

You can also use a command-line tool called cscreen:
# install homebrew if you don't have it already
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
# install cscreen
brew install --cask cscreen
# list displays
cscreen -l
output:
DisplayID  Index  Depth  Width  Height  Refresh
        1      1     32   1440     900       60
        2      2     32   1920    1080       60
Use -h to display all usage options.
Note:
The first time you try to run cscreen, the OS will not allow it because the binary isn't signed.
You have to go to Security & Privacy in System Preferences and allow it.

Related

Windows.h GDI - BITMAPINFO alloca indicates failure

I want to create an image from the desktop and set it to gray using BITMAPINFO, but the code analyzer keeps showing me the warning, "Warning C6255: _alloca indicates failure by raising a stack overflow exception. Consider using _malloca instead".
I would appreciate any advice.
HBITMAP CreateGreyscaleBitmap(int cx, int cy)
{
    BITMAPINFO* pbmi = (BITMAPINFO*)alloca(sizeof(BITMAPINFOHEADER) + sizeof(RGBQUAD) * 256);
    pbmi->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
    pbmi->bmiHeader.biWidth = cx;
    pbmi->bmiHeader.biHeight = cy;
    pbmi->bmiHeader.biPlanes = 1;
    pbmi->bmiHeader.biBitCount = 8;
    pbmi->bmiHeader.biCompression = BI_RGB;
    pbmi->bmiHeader.biSizeImage = 0;
    pbmi->bmiHeader.biXPelsPerMeter = 14173;
    pbmi->bmiHeader.biYPelsPerMeter = 14173;
    pbmi->bmiHeader.biClrUsed = 0;
    pbmi->bmiHeader.biClrImportant = 0;
    for (int i = 0; i < 256; i++)
    {
        pbmi->bmiColors[i].rgbRed = i;
        pbmi->bmiColors[i].rgbGreen = i;
        pbmi->bmiColors[i].rgbBlue = i;
        pbmi->bmiColors[i].rgbReserved = 0;
    }
    PVOID pv;
    return CreateDIBSection(NULL, pbmi, DIB_RGB_COLORS, &pv, NULL, 0);
}
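The analyzer complains because alloca has no way to report failure other than raising a stack-overflow exception. The warning's own suggestion is _malloca/_freea from <malloc.h>, but since the size here is a compile-time constant, a plain local buffer works too. A minimal sketch of that variant (the alignas and zero-initialization are my additions):
#include <windows.h>

HBITMAP CreateGreyscaleBitmap(int cx, int cy)
{
    // Fixed-size local storage for the header plus a 256-entry palette;
    // no alloca, so warning C6255 goes away.
    alignas(BITMAPINFO) BYTE storage[sizeof(BITMAPINFOHEADER) + sizeof(RGBQUAD) * 256] = {};
    BITMAPINFO* pbmi = reinterpret_cast<BITMAPINFO*>(storage);
    pbmi->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
    pbmi->bmiHeader.biWidth = cx;
    pbmi->bmiHeader.biHeight = cy;
    pbmi->bmiHeader.biPlanes = 1;
    pbmi->bmiHeader.biBitCount = 8;
    pbmi->bmiHeader.biCompression = BI_RGB;
    for (int i = 0; i < 256; i++)
    {
        // Grayscale palette: every entry has R = G = B.
        pbmi->bmiColors[i].rgbRed = (BYTE)i;
        pbmi->bmiColors[i].rgbGreen = (BYTE)i;
        pbmi->bmiColors[i].rgbBlue = (BYTE)i;
        pbmi->bmiColors[i].rgbReserved = 0;
    }
    PVOID pv;
    return CreateDIBSection(NULL, pbmi, DIB_RGB_COLORS, &pv, NULL, 0);
}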

How to use FF_CONSTANT for Force feedback for Linux?

I am not able to get the FF_CONSTANT force effect to work. My test code is:
struct ff_effect joy_effect_, joy_effect_2;
if (iwantconstantforce)
{
    joy_effect_.id = -1;
    joy_effect_.type = FF_CONSTANT;
    joy_effect_.direction = 0x0000; // down
    joy_effect_.replay.length = 100;
    joy_effect_.replay.delay = 0;
    joy_effect_.trigger.button = 0;
    joy_effect_.trigger.interval = 100;
    joy_effect_.u.constant.level = 65535;
    joy_effect_.u.constant.envelope.attack_length = joy_effect_.replay.length / 10;
    joy_effect_.u.constant.envelope.fade_length = joy_effect_.replay.length / 10;
    joy_effect_.u.constant.envelope.attack_level = joy_effect_.u.constant.level / 10;
    joy_effect_.u.constant.envelope.fade_level = joy_effect_.u.constant.level / 10;
}
I am able to produce FF_SPRING and FF_DAMPER effects with the following code:
if (youwantdampereffect)
{
    joy_effect_.id = -1;
    joy_effect_.direction = 0; // down
    joy_effect_.type = FF_DAMPER;
    joy_effect_.replay.length = 20;
    joy_effect_.replay.delay = 0;
    joy_effect_.u.condition[0].right_saturation = 65535;
    joy_effect_.u.condition[0].left_saturation = 65535;
    joy_effect_.u.condition[0].right_coeff = 65535 / 2;
    joy_effect_.u.condition[0].left_coeff = 65535 / 2;
    joy_effect_.u.condition[0].deadband = 0;
    joy_effect_.u.condition[0].center = 0;
    int ret = ioctl(ff_fd_, EVIOCSFF, &joy_effect_); // upload the effect
}
if (youwantspringeffect)
{
    joy_effect_2.id = -1;
    joy_effect_2.direction = 0; // down
    joy_effect_2.type = FF_SPRING;
    joy_effect_2.replay.length = 20;
    joy_effect_2.replay.delay = 0;
    joy_effect_2.u.condition[0].right_saturation = 65535 / 2;
    joy_effect_2.u.condition[0].left_saturation = 65535 / 2;
    joy_effect_2.u.condition[0].right_coeff = 32767;
    joy_effect_2.u.condition[0].left_coeff = 32767;
    joy_effect_2.u.condition[0].deadband = 0;
    joy_effect_2.u.condition[0].center = 0;
    int ret = ioctl(ff_fd_, EVIOCSFF, &joy_effect_2); // upload the effect
}
I cannot find any info about what a constant force effect feels like or when it makes sense to use it.
Can somebody briefly explain its importance and usage?
Thanks :)
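A constant force is just what it sounds like: a fixed-strength push in one direction for the duration of the effect (think of simulating a steering pull or gravity), as opposed to the condition effects (FF_SPRING, FF_DAMPER), which react to the device's position or velocity. Two things stand out in the FF_CONSTANT snippet above: unlike the damper/spring snippets, it never uploads the effect with ioctl(ff_fd_, EVIOCSFF, ...), and u.constant.level is a signed 16-bit field, so 65535 wraps to -1 (practically no force); 32767 is the maximum. An uploaded effect also has to be started explicitly by writing an EV_FF event. A minimal sketch, assuming fd is an open event device that advertises FF_CONSTANT:
#include <linux/input.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int play_constant_force(int fd)
{
    struct ff_effect effect;
    memset(&effect, 0, sizeof(effect));
    effect.id = -1;                    /* -1 asks the driver to assign an id */
    effect.type = FF_CONSTANT;
    effect.direction = 0x0000;         /* 0x0000 = down, 0x4000 = left, 0x8000 = up */
    effect.replay.length = 100;        /* milliseconds */
    effect.u.constant.level = 0x7fff;  /* signed 16-bit: 32767 is full strength */

    if (ioctl(fd, EVIOCSFF, &effect) < 0)   /* upload the effect */
        return -1;

    struct input_event play;
    memset(&play, 0, sizeof(play));
    play.type = EV_FF;
    play.code = effect.id;             /* id filled in by the driver on upload */
    play.value = 1;                    /* 1 = start playing, 0 = stop */
    if (write(fd, &play, sizeof(play)) != (ssize_t)sizeof(play))
        return -1;
    return 0;
}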

Fully Associative Cache implementation

I don't understand why my code for the fully associative cache doesn't match the trace files that I'm given.
The parameters are: each cache line is 32 bytes and the total cache size is 16KB.
My implementations for 2-, 4-, 8-, and 16-way set associative caches all work perfectly (using a least-recently-used replacement policy). But my fully associative implementation, which could also just be described as 32-way set associative, is VERY close to the trace file but not quite right. Frankly, I don't know how to debug this one, since there is a vast number of steps (at least the way I did it).
Here are the relevant parts of my code (excuse the inefficiency):
//Fully Associative
int **fullyAssoc;
fullyAssoc = new int*[64]; //where fullyAssoc[0][index] is way 0, fullyAssoc[2][index] is way 1 etc..
int **LRU32;
LRU32 = new int*[32];
for (int i = 0; i < 64; ++i){ //Initialize all entries in fullyAssoc to 0
    fullyAssoc[i] = new int[16 * CACHE_LINE / 32](); // () zero-initializes
}
for (int i = 0; i < 32; ++i){ //Allocate one LRU row per way (missing in the original excerpt)
    LRU32[i] = new int[16];
}
for (int i = 0; i < 16; i++){ //Initialize LRU array: each set starts with way order 0..31
    for (int w = 0; w < 32; w++){
        LRU32[w][i] = w;
    }
}
int fullyAssocLRU = 0;
int memCount = 0;
while(getline(fileIn, line)){
    stringstream s(line);
    unsigned long long address, addressFull;
    s >> instruction >> hex >> address;
    int indexFull;
    int tagFull;
    address = address >> 5; //Byte offset
    addressFull = address;
    indexFull = addressFull % 16;
    tagFull = addressFull >> 4;
    if (assocCache(fullyAssoc, indexFull, 32, tagFull, LRU32) == 1){
        fullyAssocLRU++;
    }
}

void LRU_update(int **lru, int index, int way, int ways){
    int temp = 0;
    int temp2[ways];
    int temp_index = 0;
    int i = 0;
    while(i < ways){
        if (lru[i][index] == way/2){
            temp = lru[i][index];
            i++;
            continue;
        }
        else{
            temp2[temp_index] = lru[i][index];
            temp_index++;
        }
        i++;
    }
    for (int j = 0; j < ways - 1; j++){
        lru[j][index] = temp2[j];
    }
    lru[ways - 1][index] = temp;
}

bool assocCache(int **block, int index, int ways, int tag, int **lru){
    bool retVal = false;
    for(int i = 0; i < 2*ways; i = i + 2){
        if (block[i][index] == 0){ //empty way: fill it (a miss)
            block[i][index] = 1;
            block[i+1][index] = tag;
            LRU_update(lru, index, i, ways);
            return retVal;
        }
        else{
            if (block[i+1][index] == tag){ //tag match: a hit
                retVal = true;
                LRU_update(lru, index, i, ways);
                return retVal;
            }
            else{
                continue;
            }
        }
    }
    int head = 2 * lru[0][index]; //all ways full: evict the LRU way
    block[head][index] = 1;
    block[head+1][index] = tag;
    LRU_update(lru, index, head, ways);
    return retVal;
}
The trace file output is supposed to be:
837589,1122102; 932528,1122102; 972661,1122102; 1005547,1122102; //For direct mapped
993999,1122102; 999852,1122102; 999315,1122102; 1000092,1122102; //For set associative
1000500,1122102; //For fully associative (LRU)
My output is:
837589,1122102; 932528,1122102; 972661,1122102; 1005547,1122102;
939999,1122102; 999852,1122102; 999315,1122102; 1000092,1122102;
1000228,1122102;
As you can see, the fully associative result is only 272 off from the correct output. Why would it be off when switching from 16 ways to 32 ways?
Ah, I mistakenly thought a fully associative cache with a 32-byte line size and a 16KB cache size is 32 ways, when it's actually 512 ways (16KB / 32B = 512 lines, all in a single set).
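For comparison, here is a minimal standalone sketch (not a drop-in replacement for the code above) of a fully associative LRU cache with these parameters: 16KB / 32B = 512 lines in a single set, tag = address >> 5.
#include <cstdint>
#include <list>
#include <unordered_map>

class FullyAssocLRU {
    static const int kWays = 16 * 1024 / 32;   // 512 lines, one set
    std::list<std::uint64_t> order_;           // front = most recent, back = LRU
    std::unordered_map<std::uint64_t, std::list<std::uint64_t>::iterator> lines_;
public:
    // Returns true on a hit, false on a miss (filling or evicting as needed).
    bool access(std::uint64_t address) {
        std::uint64_t tag = address >> 5;      // strip the 5-bit offset of a 32-byte line
        auto it = lines_.find(tag);
        if (it != lines_.end()) {              // hit: move the line to the front
            order_.splice(order_.begin(), order_, it->second);
            return true;
        }
        if ((int)lines_.size() == kWays) {     // cache full: evict the LRU line
            lines_.erase(order_.back());
            order_.pop_back();
        }
        order_.push_front(tag);                // insert the new line as most recent
        lines_[tag] = order_.begin();
        return false;
    }
};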

How to convert the Cartesian coordinate to image coordinate in FreeType

Recently I worked on OCR for Chinese characters. I want to use FreeType (2.3.5) to collect character samples; here's my code:
FT_Library fontLibrary;
FT_Face fontFace;
int fontSize = 64;
// Initialize
FT_Init_FreeType(&fontLibrary);
FT_New_Face(fontLibrary, "C:\\Windows\\Fonts\\simhei.ttf", 0, &fontFace);
// Setup
FT_Select_Charmap(fontFace, FT_ENCODING_UNICODE);
FT_Set_Pixel_Sizes(fontFace, fontSize, 0);
FT_Load_Char(fontFace, 'H', FT_LOAD_RENDER);
// Retrieve data
FT_GlyphSlot & glyphSlot = fontFace->glyph;
FT_Bitmap charBitmap = glyphSlot->bitmap;
int charWidth = charBitmap.width;
int charHeight = charBitmap.rows;
unsigned char* charBuffer = charBitmap.buffer;
// Construct image
Mat fontImage(fontSize, fontSize, CV_8UC1);
fontImage = Scalar::all(0);
for (int y = 0; y < charHeight; y++)
{
    int row = fontSize - glyphSlot->bitmap_top + y;
    for (int x = 0; x < charWidth; x++)
    {
        int col = glyphSlot->bitmap_left + x;
        fontImage.at<uchar>(row, col) = charBuffer[y*charWidth + x];
    }
}
imshow("Font Image", fontImage);
waitKey(0);
// Uninitialize
FT_Done_Face(fontFace);
FT_Done_FreeType(fontLibrary);
The problem is: the character is not center aligned in the image, and the image coordinates of the character look strange. In this example (fontSize = 64), the metrics of the 'H' glyph are:
bitmap_left = 3
bitmap_top = 44
bitmap.width = 26
bitmap.rows = 43
Converted to image coordinates:
ROI.left = bitmap_left = 3;
ROI.right = bitmap_left + bitmap.width = 29;
ROI.top = fontSize - bitmap_top = 20;
ROI.bottom = fontSize - bitmap_top + bitmap.rows = 63;
So the margins in the four directions are:
ROI.leftMargin = 3;
ROI.rightMargin = 64 - 29 = 35;
ROI.topMargin = 20;
ROI.bottomMargin = 64 - 63 = 1;
IT IS NOT CENTER ALIGNED!
I solved the problem myself; the top-left coordinate of the glyph image is stored in glyphSlot. Here's the blog post:
http://kang.blog.com/2013/09/21/how-to-convert-the-cartier-coordinate-to-image-coordinate-in-freetype/
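For the original goal of collecting centered character samples, one option is to ignore the typographic placement (bitmap_left/bitmap_top) and center the rendered bitmap explicitly. A sketch, assuming the variables from the code above and an 8-bit FT_PIXEL_MODE_GRAY bitmap (hence pitch bytes per row):
// Center the glyph bitmap inside the fontSize x fontSize image.
int offsetX = (fontSize - (int)charBitmap.width) / 2;
int offsetY = (fontSize - (int)charBitmap.rows) / 2;
for (int y = 0; y < (int)charBitmap.rows; y++)
{
    for (int x = 0; x < (int)charBitmap.width; x++)
    {
        // pitch is the byte stride of a bitmap row, which may differ from width
        fontImage.at<uchar>(offsetY + y, offsetX + x) = charBuffer[y * charBitmap.pitch + x];
    }
}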

Converting image to grayscale in windows phone 8

How can I convert a normal image to a grayscale image in Windows Phone 8? Is there any provision for grayscale conversion available in WriteableBitmapEx?
Try this extension method:
public static WriteableBitmap ToGrayScale(this WriteableBitmap bitmapImage)
{
    for (var y = 0; y < bitmapImage.PixelHeight; y++)
    {
        for (var x = 0; x < bitmapImage.PixelWidth; x++)
        {
            var pixelLocation = bitmapImage.PixelWidth * y + x;
            var pixel = bitmapImage.Pixels[pixelLocation];
            var pixelbytes = BitConverter.GetBytes(pixel);
            var bwPixel = (byte)(.299 * pixelbytes[2] + .587 * pixelbytes[1] + .114 * pixelbytes[0]);
            pixelbytes[0] = bwPixel;
            pixelbytes[1] = bwPixel;
            pixelbytes[2] = bwPixel;
            bitmapImage.Pixels[pixelLocation] = BitConverter.ToInt32(pixelbytes, 0);
        }
    }
    return bitmapImage;
}
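Usage is then a one-liner; a sketch, where img is assumed to be an Image control and source an existing BitmapSource:
var wb = new WriteableBitmap(source); // copy the source into a writable bitmap
img.Source = wb.ToGrayScale();        // converts in place and returns the same bitmap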
I don't think that there is a built-in method, but you can convert it yourself. There are a bunch of resources online on how to accomplish that. Start by reading this. One of the simpler methods could be:
for (int i = 0; i < oldBitmap.Pixels.Length; i++)
{
    var c = oldBitmap.Pixels[i];
    var a = (byte)(c >> 24);
    var r = (byte)(c >> 16);
    var g = (byte)(c >> 8);
    var b = (byte)(c);
    byte gray = (byte)((r * 0.3) + (g * 0.59) + (b * 0.11));
    oldBitmap.Pixels[i] = (a << 24) | (gray << 16) | (gray << 8) | gray;
}
It's simple, quick, and you convert it in-place.
