I've been trying to implement a systray icon in plain C with Xlib, following the freedesktop system tray specification [0]. I can't seem to get my Window to embed into my desktop manager's systray [1], while other apps manage to do it. I'm not sure how to debug this further, but I've provided minimal sample code below.
I haven't been able to find any sample code using plain Xlib and C; all the suggestions I've seen involve some framework like Gtk/Qt/Mono/whatever. I want to understand what is supposed to happen here as per the spec, and what I'm doing wrong.
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <string.h>
#define MIN(A, B) ((A) < (B) ? (A) : (B))
/* --------- XEMBED and systray stuff */
#define SYSTEM_TRAY_REQUEST_DOCK 0
#define SYSTEM_TRAY_BEGIN_MESSAGE 1
#define SYSTEM_TRAY_CANCEL_MESSAGE 2
static int trapped_error_code = 0;
static int (*old_error_handler) (Display *, XErrorEvent *);
static int
error_handler(Display *display, XErrorEvent *error) {
trapped_error_code = error->error_code;
return 0;
}
void
trap_errors(void) {
trapped_error_code = 0;
old_error_handler = XSetErrorHandler(error_handler);
}
int
untrap_errors(void) {
XSetErrorHandler(old_error_handler);
return trapped_error_code;
}
void
send_systray_message(Display* dpy, Window w, long message, long data1, long data2, long data3) {
XEvent ev;
memset(&ev, 0, sizeof(ev));
ev.xclient.type = ClientMessage;
ev.xclient.window = w;
ev.xclient.message_type = XInternAtom (dpy, "_NET_SYSTEM_TRAY_OPCODE", False );
ev.xclient.format = 32;
ev.xclient.data.l[0] = CurrentTime;
ev.xclient.data.l[1] = message;
ev.xclient.data.l[2] = data1;
ev.xclient.data.l[3] = data2;
ev.xclient.data.l[4] = data3;
trap_errors();
XSendEvent(dpy, w, False, NoEventMask, &ev);
XSync(dpy, False);
if (untrap_errors()) {
/* Handle errors */
}
}
/* ------------ Regular X stuff */
int
main(int argc, char **argv) {
int width, height;
XWindowAttributes wa;
XEvent ev;
Display *dpy;
int screen;
Window root, win;
/* init */
if (!(dpy=XOpenDisplay(NULL)))
return 1;
screen = DefaultScreen(dpy);
root = RootWindow(dpy, screen);
if(!XGetWindowAttributes(dpy, root, &wa))
return 1;
width = height = MIN(wa.width, wa.height);
/* create window */
win = XCreateSimpleWindow(dpy, root, 0, 0, width, height, 0, 0, 0xFFFF9900);
send_systray_message(dpy, win, SYSTEM_TRAY_REQUEST_DOCK, win, 0, 0);
XMapWindow(dpy, win);
XSync(dpy, False);
/* run */
while(1) {
while(XPending(dpy)) {
XNextEvent(dpy, &ev); /* just waiting until we error because window closed */
}
}
}
Any help would be greatly appreciated. I think this problem is language-agnostic, and more to do with me misunderstanding the protocols, so answers in any language are acceptable, as long as they help me iron out this XEvent stuff.
[0] https://specifications.freedesktop.org/systemtray-spec/systemtray-spec-0.2.html
[1] I'm using dwm with the systray patch http://dwm.suckless.org/patches/systray
You are sending the message to the wrong window. The documentation isn't much help here, but it makes no sense to send the dock request to your own window; you need to send it to the tray window.
Here's a fixed send_systray_message:
void
send_systray_message(Display* dpy, long message, long data1, long data2, long data3) {
    XEvent ev;
    Atom selection_atom = XInternAtom(dpy, "_NET_SYSTEM_TRAY_S0", False);
    Window tray = XGetSelectionOwner(dpy, selection_atom);

    if (tray != None)
        XSelectInput(dpy, tray, StructureNotifyMask);

    memset(&ev, 0, sizeof(ev));
    ev.xclient.type = ClientMessage;
    ev.xclient.window = tray;
    ev.xclient.message_type = XInternAtom(dpy, "_NET_SYSTEM_TRAY_OPCODE", False);
    ev.xclient.format = 32;
    ev.xclient.data.l[0] = CurrentTime;
    ev.xclient.data.l[1] = message;
    ev.xclient.data.l[2] = data1; // <--- your window is only here
    ev.xclient.data.l[3] = data2;
    ev.xclient.data.l[4] = data3;

    trap_errors();
    XSendEvent(dpy, tray, False, NoEventMask, &ev);
    XSync(dpy, False);
    usleep(10000);
    if (untrap_errors()) {
        /* Handle errors */
    }
}
and a call to it
send_systray_message(dpy, SYSTEM_TRAY_REQUEST_DOCK, win, 0, 0); // pass win only once
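One detail to watch out for: per the spec, the manager selection is named _NET_SYSTEM_TRAY_Sn, where n is the screen number, so the hard-coded "_NET_SYSTEM_TRAY_S0" above only finds the tray on screen 0. A small sketch of building the name for an arbitrary screen (the helper name get_tray_window is just illustrative):
#include <stdio.h>

/* Returns the tray window for the given screen, or None if no tray is running. */
Window get_tray_window(Display *dpy, int screen)
{
    char name[32];
    snprintf(name, sizeof(name), "_NET_SYSTEM_TRAY_S%d", screen);
    return XGetSelectionOwner(dpy, XInternAtom(dpy, name, False));
}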
Credits: http://distro.ibiblio.org/vectorlinux/Uelsk8s/GAMBAS/gambas-svn/gambas2/gb.gtk/src/gtrayicon.cpp
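The spec in [0] also says that if no system tray is currently running, the icon should wait for a MANAGER ClientMessage broadcast on the root window (announcing that a tray has acquired the selection) and only then send the dock request. Roughly, as an untested sketch built on the fixed function above (it assumes dpy, win and SYSTEM_TRAY_REQUEST_DOCK from the question):
/* Wait until some client owns the tray selection, then ask to be docked. */
Atom manager_atom   = XInternAtom(dpy, "MANAGER", False);
Atom selection_atom = XInternAtom(dpy, "_NET_SYSTEM_TRAY_S0", False);
if (XGetSelectionOwner(dpy, selection_atom) == None) {
    XEvent ev;
    XSelectInput(dpy, DefaultRootWindow(dpy), StructureNotifyMask);
    for (;;) {
        XNextEvent(dpy, &ev);
        if (ev.type == ClientMessage &&
            ev.xclient.message_type == manager_atom &&
            (Atom) ev.xclient.data.l[1] == selection_atom)
            break; /* a tray has appeared and owns the selection */
    }
}
send_systray_message(dpy, SYSTEM_TRAY_REQUEST_DOCK, win, 0, 0);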
I'm trying to take a screenshot of a particular window (HWND) on Windows using C++. The following code works on Notepad but not on another specific process. Instead, the code returns a completely different screenshot for the other process:
#include <Windows.h>
HBITMAP dump_client_window(const HWND window_handle)
{
RECT window_handle_rectangle;
GetClientRect(window_handle, &window_handle_rectangle);
const HDC hdc_screen = GetDC(nullptr);
const HDC hdc = CreateCompatibleDC(hdc_screen);
const auto cx = window_handle_rectangle.right - window_handle_rectangle.left;
const auto cy = window_handle_rectangle.bottom - window_handle_rectangle.top;
const HBITMAP bitmap = CreateCompatibleBitmap(hdc_screen, cx, cy);
const auto old_bitmap = SelectObject(hdc, bitmap);
PrintWindow(window_handle, hdc, PW_CLIENTONLY);
// Cleanup
SelectObject(hdc, old_bitmap);
DeleteDC(hdc);
ReleaseDC(nullptr, hdc_screen);
return bitmap;
}
What could be the reason for it? If I use DirectX11 for taking the screenshot of the window, it works correctly for both processes:
#include <dxgi.h>
#include <inspectable.h>
#include <dxgi1_2.h>
#include <d3d11.h>
#include <winrt/Windows.System.h>
#include <winrt/Windows.Graphics.Capture.h>
#include <Windows.Graphics.Capture.Interop.h>
#include <windows.graphics.directx.direct3d11.interop.h>
#include <roerrorapi.h>
#include <ShlObj_core.h>
#include <dwmapi.h>
#include <algorithm>
#include <filesystem>
#include <memory>
#include "ImageFormatConversion.hpp"
#pragma comment(lib, "Dwmapi.lib")
#pragma comment(lib, "windowsapp.lib")
void capture_window(HWND window_handle, const std::wstring& output_file_path)
{
// Init COM
init_apartment(winrt::apartment_type::multi_threaded);
// Create Direct 3D Device
winrt::com_ptr<ID3D11Device> d3d_device;
winrt::check_hresult(D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, D3D11_CREATE_DEVICE_BGRA_SUPPORT,
nullptr, 0, D3D11_SDK_VERSION, d3d_device.put(), nullptr, nullptr));
winrt::Windows::Graphics::DirectX::Direct3D11::IDirect3DDevice device;
const auto dxgiDevice = d3d_device.as<IDXGIDevice>();
{
winrt::com_ptr<IInspectable> inspectable;
winrt::check_hresult(CreateDirect3D11DeviceFromDXGIDevice(dxgiDevice.get(), inspectable.put()));
device = inspectable.as<winrt::Windows::Graphics::DirectX::Direct3D11::IDirect3DDevice>();
}
auto idxgi_device2 = dxgiDevice.as<IDXGIDevice2>();
winrt::com_ptr<IDXGIAdapter> adapter;
winrt::check_hresult(idxgi_device2->GetParent(winrt::guid_of<IDXGIAdapter>(), adapter.put_void()));
winrt::com_ptr<IDXGIFactory2> factory;
winrt::check_hresult(adapter->GetParent(winrt::guid_of<IDXGIFactory2>(), factory.put_void()));
ID3D11DeviceContext* d3d_context = nullptr;
d3d_device->GetImmediateContext(&d3d_context);
RECT rect{};
DwmGetWindowAttribute(window_handle, DWMWA_EXTENDED_FRAME_BOUNDS, &rect, sizeof(RECT));
const auto size = winrt::Windows::Graphics::SizeInt32{ rect.right - rect.left, rect.bottom - rect.top };
winrt::Windows::Graphics::Capture::Direct3D11CaptureFramePool m_frame_pool =
winrt::Windows::Graphics::Capture::Direct3D11CaptureFramePool::Create(
device,
winrt::Windows::Graphics::DirectX::DirectXPixelFormat::B8G8R8A8UIntNormalized,
2,
size);
const auto activation_factory = winrt::get_activation_factory<
winrt::Windows::Graphics::Capture::GraphicsCaptureItem>();
auto interop_factory = activation_factory.as<IGraphicsCaptureItemInterop>();
winrt::Windows::Graphics::Capture::GraphicsCaptureItem capture_item = { nullptr };
interop_factory->CreateForWindow(window_handle, winrt::guid_of<ABI::Windows::Graphics::Capture::IGraphicsCaptureItem>(),
winrt::put_abi(capture_item));
auto is_frame_arrived = false;
winrt::com_ptr<ID3D11Texture2D> texture;
const auto session = m_frame_pool.CreateCaptureSession(capture_item);
m_frame_pool.FrameArrived([&](auto& frame_pool, auto&)
{
if (is_frame_arrived)
{
return;
}
auto frame = frame_pool.TryGetNextFrame();
struct __declspec(uuid("A9B3D012-3DF2-4EE3-B8D1-8695F457D3C1"))
IDirect3DDxgiInterfaceAccess : ::IUnknown
{
virtual HRESULT __stdcall GetInterface(GUID const& id, void** object) = 0;
};
auto access = frame.Surface().as<IDirect3DDxgiInterfaceAccess>();
access->GetInterface(winrt::guid_of<ID3D11Texture2D>(), texture.put_void());
is_frame_arrived = true;
return;
});
session.StartCapture();
// Message pump
MSG message;
while (!is_frame_arrived)
{
if (PeekMessage(&message, nullptr, 0, 0, PM_REMOVE) > 0)
{
DispatchMessage(&message);
}
}
session.Close();
D3D11_TEXTURE2D_DESC captured_texture_desc;
texture->GetDesc(&captured_texture_desc);
captured_texture_desc.Usage = D3D11_USAGE_STAGING;
captured_texture_desc.BindFlags = 0;
captured_texture_desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
captured_texture_desc.MiscFlags = 0;
winrt::com_ptr<ID3D11Texture2D> user_texture = nullptr;
winrt::check_hresult(d3d_device->CreateTexture2D(&captured_texture_desc, nullptr, user_texture.put()));
d3d_context->CopyResource(user_texture.get(), texture.get());
D3D11_MAPPED_SUBRESOURCE resource;
winrt::check_hresult(d3d_context->Map(user_texture.get(), NULL, D3D11_MAP_READ, 0, &resource));
BITMAPINFO l_bmp_info;
// BMP 32 bpp
ZeroMemory(&l_bmp_info, sizeof(BITMAPINFO));
l_bmp_info.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
l_bmp_info.bmiHeader.biBitCount = 32;
l_bmp_info.bmiHeader.biCompression = BI_RGB;
l_bmp_info.bmiHeader.biWidth = captured_texture_desc.Width;
l_bmp_info.bmiHeader.biHeight = captured_texture_desc.Height;
l_bmp_info.bmiHeader.biPlanes = 1;
l_bmp_info.bmiHeader.biSizeImage = captured_texture_desc.Width * captured_texture_desc.Height * 4;
std::unique_ptr<BYTE[]> p_buf(new BYTE[l_bmp_info.bmiHeader.biSizeImage]);
UINT l_bmp_row_pitch = captured_texture_desc.Width * 4;
auto sptr = static_cast<BYTE*>(resource.pData);
auto dptr = p_buf.get() + l_bmp_info.bmiHeader.biSizeImage - l_bmp_row_pitch;
UINT l_row_pitch = std::min<UINT>(l_bmp_row_pitch, resource.RowPitch);
for (size_t h = 0; h < captured_texture_desc.Height; ++h)
{
memcpy_s(dptr, l_bmp_row_pitch, sptr, l_row_pitch);
sptr += resource.RowPitch;
dptr -= l_bmp_row_pitch;
}
// Save bitmap buffer into the file
WCHAR l_my_doc_path[MAX_PATH];
winrt::check_hresult(SHGetFolderPathW(nullptr, CSIDL_PERSONAL, nullptr, SHGFP_TYPE_CURRENT, l_my_doc_path));
FILE* lfile = nullptr;
if (auto lerr = _wfopen_s(&lfile, output_file_path.c_str(), L"wb"); lerr != 0)
{
return;
}
if (lfile != nullptr)
{
BITMAPFILEHEADER bmp_file_header;
bmp_file_header.bfReserved1 = 0;
bmp_file_header.bfReserved2 = 0;
bmp_file_header.bfSize = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) + l_bmp_info.bmiHeader.biSizeImage;
bmp_file_header.bfType = 'MB';
bmp_file_header.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER);
fwrite(&bmp_file_header, sizeof(BITMAPFILEHEADER), 1, lfile);
fwrite(&l_bmp_info.bmiHeader, sizeof(BITMAPINFOHEADER), 1, lfile);
fwrite(p_buf.get(), l_bmp_info.bmiHeader.biSizeImage, 1, lfile);
fclose(lfile);
convert_image_encoding(output_file_path, L"png");
}
}
Why is the DirectX11 code so complex/long and slow (about 800 ms - 1 s per call, including cold-start initialization)? Also, the latter version causes blinking borders around the captured window, which I might want to get rid of. On top of that, I seem to have to take the inefficient route of writing the BMP to disk, loading it back to convert it to PNG, and then storing it again to produce the final file I actually want.
Any suggestions or help with any of these things are welcome, especially about why the first screenshot capture code can yield unexpected images depending on the window being captured. Other than that, I like the first version for its speed, brevity and simplicity.
I am making a program that draws lines. How can I erase a line? Is there a function for it?
I have already tried using SDL_RenderClear() but it did not work.
(Please note that I am a beginner and not a native English speaker; I am also only 10 years old, so please explain in a simple way.)
Here is my line-drawing code:
#include <iostream>
#include <SDL.h>
using namespace std;
int main( int argc, char * argv[] )
{
SDL_Init(SDL_INIT_EVERYTHING);
SDL_Window *janela = NULL;
SDL_Renderer *renderer = NULL;
bool roda = true;
SDL_Event evento;
int x;
int y;
int x2;
int y2;
janela = SDL_CreateWindow( "janela" , SDL_WINDOWPOS_CENTERED , SDL_WINDOWPOS_CENTERED ,500 , 500 , SDL_WINDOW_RESIZABLE);
renderer = SDL_CreateRenderer(janela , -1 , SDL_RENDERER_ACCELERATED);
while (roda) {
SDL_Event evento;
while (SDL_PollEvent(&evento)) {
switch(evento.type){
case SDL_QUIT:
roda = false;
break;
case SDL_MOUSEBUTTONDOWN:{
x = evento.motion.x;
y = evento.motion.y;
break;}
case SDL_MOUSEBUTTONUP:
while(roda){
x2 = evento.motion.x;
y2 = evento.motion.y;
SDL_SetRenderDrawColor(renderer , 255 , 0 , 0 , 255);
SDL_SetRenderDrawColor(renderer , 125 , 234 , 253 , 255);
SDL_RenderDrawLine(renderer , x , y , x2 , y2);
SDL_RenderPresent(renderer);
break;}
case SDL_KEYDOWN:{
switch(evento.key.keysym.sym){
case SDLK_b:{
SDL_RenderClear(renderer);
}
}
}
}
}
}
SDL_DestroyWindow(janela);
janela = NULL;
SDL_DestroyRenderer(renderer);
renderer = NULL;
SDL_Quit();
return 1;
}
What I expected was for the lines to be erased, but that did not happen.
A typical main loop is basically this:
while(!quit) {
    while(poll_event()) {
        // change state to react to the event
    }
    clear_previous_frame();
    for each line {
        draw_line();
    }
    present();
}
So you should always clear and redraw your lines, adding new line state when events say so (but not drawing it right there - event processing shouldn't do that, or things become very complicated). It is problematic to keep the previous frame's contents and just add more lines; the short explanation is that the SDL_RenderPresent documentation says "The backbuffer should be considered invalidated after each present" (and the long explanation is basically a list of reasons why it says so).
To keep track of the lines to draw you need to save them somewhere. std::vector could be a simple option, if your requirements don't rule it out.
To sum it up, here is your question's code, modified and with a few comments:
#include <vector>
#include <SDL.h>

struct line {
    int x0, y0, x1, y1;
};

int main( int argc, char * argv[] )
{
    SDL_Init(SDL_INIT_EVERYTHING);
    SDL_Window *janela = NULL;
    SDL_Renderer *renderer = NULL;
    bool roda = true;
    int x;
    int y;
    janela = SDL_CreateWindow( "janela", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, 500, 500, SDL_WINDOW_RESIZABLE);
    renderer = SDL_CreateRenderer(janela, -1, SDL_RENDERER_ACCELERATED);

    std::vector<line> lines;

    while (roda) {
        SDL_Event evento;
        while (SDL_PollEvent(&evento)) {
            switch(evento.type) {
            case SDL_QUIT:
                roda = false;
                break;
            case SDL_MOUSEBUTTONDOWN:
                // save starting coordinates (button events carry their position in evento.button)
                x = evento.button.x;
                y = evento.button.y;
                break;
            case SDL_MOUSEBUTTONUP: {
                // add new line to draw
                line l = { x, y, evento.button.x, evento.button.y };
                lines.push_back(l);
            } break;
            case SDL_KEYDOWN:
                if(evento.key.keysym.sym == SDLK_b) {
                    // drop lines
                    lines.resize(0);
                }
                break;
            }
        }

        // clear previous contents - in most cases, screen content is
        // invalidated after RenderPresent and you need to draw again
        SDL_SetRenderDrawColor(renderer, 255, 0, 0, 255); // your 'clear' colour
        SDL_RenderClear(renderer);

        // draw all accumulated lines
        SDL_SetRenderDrawColor(renderer, 125, 234, 253, 255); // your lines colour
        for(const line &l : lines) {
            SDL_RenderDrawLine(renderer, l.x0, l.y0, l.x1, l.y1);
        }

        // all drawn - present
        SDL_RenderPresent(renderer);
    }

    SDL_DestroyWindow(janela);
    SDL_DestroyRenderer(renderer);
    SDL_Quit();
    return 0; // 0 is 'success' return code, non-0 is failure
}
I have been trying to make a program that takes the currently active window and displays it in its own window.
I have successfully achieved that. But the problem is, it uses a lot of RAM, and it keeps using more on every frame update (20 fps).
Here is the source code:
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <gdk/gdkx.h>
#include <gtk/gtk.h>
int funcfinished = 1;
GtkWidget *window;
GdkPixbuf *fupdate_pixbuf;
GtkStyle *fupdate_style;
GdkPixmap *fupdate_background;
gint fupdate_xorig;
gint fupdate_yorig;
gint fupdate_width;
gint fupdate_height;
GdkPixbuf *fupdate_screenshot;
GdkWindow *fupdate_window;
gboolean frameupdate()
{
if(funcfinished == 1)
{
/*********[FuncFinish]*********/
funcfinished = 0;
fupdate_pixbuf = NULL;
fupdate_style = NULL;
fupdate_background = NULL;
fupdate_screenshot = NULL;
fupdate_window = NULL;
fupdate_xorig = 0;
fupdate_yorig = 0;
fupdate_width = 0;
fupdate_height = 0;
/*********[Func]*********/
fupdate_window = gdk_screen_get_active_window(gdk_screen_get_default());
gdk_drawable_get_size(fupdate_window, &fupdate_width, &fupdate_height);
fupdate_pixbuf = gdk_pixbuf_get_from_drawable(NULL, fupdate_window, NULL, 0, 0, 0, 0, fupdate_width, fupdate_height);
gdk_pixbuf_render_pixmap_and_mask(fupdate_pixbuf, &fupdate_background, NULL, 0);
fupdate_style = gtk_style_new();
fupdate_style->bg_pixmap[0] = fupdate_background;
gtk_widget_set_style(GTK_WIDGET(window), GTK_STYLE(fupdate_style));
/*********[FuncFinish]*********/
fupdate_pixbuf = NULL;
fupdate_style = NULL;
fupdate_background = NULL;
fupdate_screenshot = NULL;
fupdate_window = NULL;
fupdate_xorig = 0;
fupdate_yorig = 0;
fupdate_width = 0;
fupdate_height = 0;
funcfinished = 1;
}
else
{
printf("Skipped 1 frame update");
}
return TRUE;
}
int main(int argc, char *argv[])
{
gtk_init(&argc, &argv);
window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
gtk_window_set_title(GTK_WINDOW(window), "Hay Day Autobot");
gtk_window_set_default_size(GTK_WINDOW(window), 400, 300);
g_signal_connect(window, "destroy", G_CALLBACK (gtk_main_quit), NULL);
g_timeout_add(50, frameupdate, 0);
gtk_widget_show(window);
gtk_main();
return 0;
}
I also made a video of it in action, showing off the problem:
https://www.youtube.com/watch?v=GNCwNetLLBM
You are not releasing the memory that you create during the frame update function. For each function that you use there, you should look it up in the documentation and see what it says under "return value".
For example, gdk_screen_get_active_window() lists its return value as "transfer full". That means that "full" ownership of the return value is "transferred" to you when you call that function; ownership means that you are responsible for freeing the memory. Usually the documentation will also say how to do that. In this case you can read
The returned window should be unrefed using g_object_unref() when no longer needed.
On the other hand, gdk_screen_get_default() is "transfer none", so you don't need to do anything there.
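To make that concrete, here is a sketch of what the question's frameupdate() could look like once the acquired objects are released (GTK 2 / GDK 2 as in the question; the funcfinished guard and error checking are dropped for brevity, and exactly when the style lets go of bg_pixmap is the part to double-check in the GTK documentation):
gboolean frameupdate(gpointer data)
{
    (void) data;
    gint w, h;

    /* "transfer full": we own this window reference */
    GdkWindow *active = gdk_screen_get_active_window(gdk_screen_get_default());
    gdk_drawable_get_size(active, &w, &h);

    GdkPixbuf *pixbuf = gdk_pixbuf_get_from_drawable(NULL, active, NULL, 0, 0, 0, 0, w, h);
    GdkPixmap *background = NULL;
    gdk_pixbuf_render_pixmap_and_mask(pixbuf, &background, NULL, 0);

    GtkStyle *style = gtk_style_new();
    style->bg_pixmap[0] = background; /* handed to the style; verify it is released when the style is unrealized */
    gtk_widget_set_style(GTK_WIDGET(window), style);

    /* release everything we own; the widget keeps its own reference to the style */
    g_object_unref(style);
    g_object_unref(pixbuf);
    g_object_unref(active);

    return TRUE;
}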
I can find questions/answers about creating a transparent OpenGL window for iPhone/Windows, but none for X11.
Also, could anyone with a lot of OpenGL experience explain the general concepts involved, for any windowing system?
Yes, it is possible. I modified an example program written by fungus to create an RGBA OpenGL window. If a compositor is enabled, the result looks like the video I posted here: http://www.youtube.com/watch?v=iHZfH1Qhonk
/*------------------------------------------------------------------------
The simplest possible Linux OpenGL program? Maybe...
Modification for creating a RGBA window (transparency with compositors)
by Wolfgang 'datenwolf' Draxinger
(c) 2002 by FTB. See me in comp.graphics.api.opengl
(c) 2011 Wolfgang Draxinger. See me in comp.graphics.api.opengl and on StackOverflow
License agreement: This source code is provided "as is". You
can use this source code however you want for your own personal
use. If you give this source code to anybody else then you must
leave this message in it.
--
<\___/>
/ O O \
\_____/ FTB.
--
datenwolf
------------------------------------------------------------------------*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <GL/gl.h>
#include <GL/glu.h>
#include <GL/glut.h>
#include <GL/glx.h>
#include <X11/Xatom.h>
#include <X11/extensions/Xrender.h>
typedef struct
{
Visual *visual;
VisualID visualid;
int screen;
unsigned int depth;
int klass;
unsigned long red_mask;
unsigned long green_mask;
unsigned long blue_mask;
int colormap_size;
int bits_per_rgb;
} XVisualInfo_CPP;
/*------------------------------------------------------------------------
Something went horribly wrong
------------------------------------------------------------------------*/
static void fatalError(const char *why)
{
fprintf(stderr, "%s", why);
exit(0x666);
}
/*------------------------------------------------------------------------
Global vars
------------------------------------------------------------------------*/
static int Xscreen;
static Atom del_atom;
static Colormap cmap;
static Display *Xdisplay;
static XVisualInfo_CPP *visual;
static XRenderPictFormat *pictFormat;
static GLXFBConfig *fbconfigs, fbconfig;
static int numfbconfigs;
static GLXContext RenderContext;
static Window Xroot, WindowHandle, GLXWindowHandle;
static int width, height; /* Size of the window */
int const tex_width=512;
int const tex_height=512;
static GLuint texture;
static int VisData[] = {
GLX_RENDER_TYPE, GLX_RGBA_BIT,
GLX_DRAWABLE_TYPE, GLX_WINDOW_BIT,
GLX_DOUBLEBUFFER, True,
GLX_RED_SIZE, 1,
GLX_GREEN_SIZE, 1,
GLX_BLUE_SIZE, 1,
GLX_ALPHA_SIZE, 1,
GLX_DEPTH_SIZE, 1,
None
};
/*------------------------------------------------------------------------
Create a window
------------------------------------------------------------------------*/
static Bool WaitForMapNotify(Display *d, XEvent *e, char *arg)
{
return (e->type == MapNotify) && (e->xmap.window == *(Window*)arg);
}
static void createTheWindow()
{
XEvent event;
int x,y, attr_mask;
XSizeHints hints;
XWMHints *StartupState;
XTextProperty textprop;
XSetWindowAttributes attr;
static char *title = "FTB's little OpenGL example";
/* Connect to the X server */
Xdisplay = XOpenDisplay(NULL);
if (!Xdisplay) {
fatalError("Couldn't connect to X server\n");
}
Xscreen = DefaultScreen(Xdisplay);
Xroot = RootWindow(Xdisplay, Xscreen);
fbconfigs = glXChooseFBConfig(Xdisplay, Xscreen, VisData, &numfbconfigs);
for(int i = 0; i<numfbconfigs; i++) {
visual = (XVisualInfo_CPP*) glXGetVisualFromFBConfig(Xdisplay, fbconfigs[i]);
if(!visual)
continue;
pictFormat = XRenderFindVisualFormat(Xdisplay, visual->visual);
if(!pictFormat)
continue;
if(pictFormat->direct.alphaMask > 0) {
fbconfig = fbconfigs[i];
break;
}
}
/* Create a colormap - only needed on some X clients, eg. IRIX */
cmap = XCreateColormap(Xdisplay, Xroot, visual->visual, AllocNone);
/* Prepare the attributes for our window */
attr.colormap = cmap;
attr.border_pixel = 0;
attr.event_mask =
StructureNotifyMask |
EnterWindowMask |
LeaveWindowMask |
ExposureMask |
ButtonPressMask |
ButtonReleaseMask |
OwnerGrabButtonMask |
KeyPressMask |
KeyReleaseMask;
attr.background_pixmap = None;
attr_mask =
CWBackPixmap|
CWColormap|
CWBorderPixel|
CWEventMask; /* What's in the attr data */
/* Create the window */
width = DisplayWidth(Xdisplay, DefaultScreen(Xdisplay))/2;
height = DisplayHeight(Xdisplay, DefaultScreen(Xdisplay))/2;
x=width/2, y=height/2;
/* Create the window */
WindowHandle = XCreateWindow( Xdisplay, /* Screen */
Xroot, /* Parent */
x, y, width, height,/* Position */
1,/* Border */
visual->depth,/* Color depth*/
InputOutput,/* klass */
visual->visual,/* Visual */
attr_mask, &attr);/* Attributes*/
if( !WindowHandle ) {
fatalError("Couldn't create the window\n");
}
/* Configure it... (ok, ok, this next bit isn't "minimal") */
textprop.value = (unsigned char*)title;
textprop.encoding = XA_STRING;
textprop.format = 8;
textprop.nitems = strlen(title);
hints.x = x;
hints.y = y;
hints.width = width;
hints.height = height;
hints.flags = USPosition|USSize;
StartupState = XAllocWMHints();
StartupState->initial_state = NormalState;
StartupState->flags = StateHint;
XSetWMProperties(Xdisplay, WindowHandle,&textprop, &textprop,/* Window title/icon title*/
NULL, 0,/* Argv[], argc for program*/
&hints, /* Start position/size*/
StartupState,/* Iconised/not flag */
NULL);
XFree(StartupState);
/* Open it, wait for it to appear */
XMapWindow(Xdisplay, WindowHandle);
XIfEvent(Xdisplay, &event, WaitForMapNotify, (char*)&WindowHandle);
/* Set the kill atom so we get a message when the user tries to close the window */
if ((del_atom = XInternAtom(Xdisplay, "WM_DELETE_WINDOW", 0)) != None) {
XSetWMProtocols(Xdisplay, WindowHandle, &del_atom, 1);
}
}
/*------------------------------------------------------------------------
Create the OpenGL rendering context
------------------------------------------------------------------------*/
static void createTheRenderContext()
{
/* See if we can do OpenGL on this visual */
int dummy;
if (!glXQueryExtension(Xdisplay, &dummy, &dummy)) {
fatalError("OpenGL not supported by X server\n");
}
/* Create the OpenGL rendering context */
RenderContext = glXCreateNewContext(Xdisplay, fbconfig, GLX_RGBA_TYPE, 0, True);
if (!RenderContext) {
fatalError("Failed to create a GL context\n");
}
GLXWindowHandle = glXCreateWindow(Xdisplay, fbconfig, WindowHandle, NULL);
/* Make it current */
if (!glXMakeContextCurrent(Xdisplay, GLXWindowHandle, GLXWindowHandle, RenderContext)) {
fatalError("glXMakeCurrent failed for window\n");
}
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, tex_width, tex_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
}
/*------------------------------------------------------------------------
Window messages
------------------------------------------------------------------------*/
static int updateTheMessageQueue()
{
XEvent event;
XConfigureEvent *xc;
while (XPending(Xdisplay))
{
XNextEvent(Xdisplay, &event);
switch (event.type)
{
case ClientMessage:
if (event.xclient.data.l[0] == del_atom)
{
return 0;
}
break;
case ConfigureNotify:
xc = &(event.xconfigure);
width = xc->width;
height = xc->height;
break;
}
}
return 1;
}
/*------------------------------------------------------------------------
Redraw the window
------------------------------------------------------------------------*/
float const light_dir[]={1,1,1,0};
float const light_color[]={1,0.95,0.9,1};
static void redrawTheWindow()
{
int size;
static float a=0;
static float b=0;
static float c=0;
glViewport(0,0,width,height);
/* Clear the screen */
// glClearColor(0.750,0.750,1.0,0.5);
glClearColor(0.0,0.0,0.0,0.);
glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(45, (float)width/(float)height, 1, 10);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glEnable(GL_DEPTH_TEST);
glDisable(GL_CULL_FACE);
glLightfv(GL_LIGHT0, GL_POSITION, light_dir);
glLightfv(GL_LIGHT0, GL_DIFFUSE, light_color);
glTranslatef(0,0,-5);
glRotatef(a, 1, 0, 0);
glRotatef(b, 0, 1, 0);
glRotatef(c, 0, 0, 1);
glEnable(GL_LIGHT0);
glEnable(GL_LIGHTING);
glutSolidTeapot(1);
a=fmod(a+0.1, 360.);
b=fmod(b+0.5, 360.);
c=fmod(c+0.25, 360.);
/* Swapbuffers */
glXSwapBuffers(Xdisplay, GLXWindowHandle);
}
/*------------------------------------------------------------------------
Program entry point
------------------------------------------------------------------------*/
int main(int argc, char *argv[])
{
/* instead of a triangle I wanted a teapot. GLUT has it.
GLUT is NOT used for window creation, but just the teapot
primitive. Nevertheless it must be initialized */
glutInit(&argc, argv);
createTheWindow();
createTheRenderContext();
while (updateTheMessageQueue()) {
redrawTheWindow();
}
return 0;
}
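One note on the "general concepts" part of the question: with an RGBA visual like the one selected above, whatever alpha the GL pipeline leaves in the framebuffer is what the compositor uses to blend the window with the desktop, which is why glClearColor(0.0, 0.0, 0.0, 0.0) makes the empty areas see-through. If you also want the geometry itself to be drawn translucently, alpha blending has to be enabled while rendering; a minimal, hedged addition to redrawTheWindow() (standard fixed-function GL, not part of the original example):
/* Inside redrawTheWindow(), before drawing anything translucent: */
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);

/* With GL_LIGHTING enabled (as in this example) the fragment alpha comes
   from the diffuse material, so this makes the teapot 50% opaque: */
float const diffuse_translucent[] = { 1.0f, 0.95f, 0.9f, 0.5f };
glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, diffuse_translucent);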
Hey there StackOverflow people!
I'm making an IOCP server and I have ironed out most issues so far, but one still remains and I do not know where to start looking. When I run the client/server on my machine everything is fine and dandy. It matches the speed of the Windows SDK sample, maybe a little bit faster, and definitely uses fewer CPU cycles. However, when I run the client from a separate computer, transfer speed caps at 37 KB/s and has a round-trip latency of 200 ms (as opposed to 0). Now if I connect the client to the SDK sample server, I don't have that problem, so there is something wrong with my code. As far as I know, the sockets are initialized the exact same way with the same options. I have also run my server in a profiler to check for bottlenecks, but I couldn't find any. Also, the computers I have tried it on were connected to the same gigabit switch (with gigabit adapters). I know this is kind of vague, but that's because I couldn't pinpoint the problem so far, and I would be eternally grateful if any of you guys could point me in the right direction.
Cheers,
-Roxy
EDIT2:
After following Mike's advice, I did some research on the code and found out that when a remote client connects to the server, most of the time the code is waiting on GetQueuedCompletionStatus. This suggests that IO requests are simply taking a long time to complete, but I still don't understand why. This occurs only when the client is on a remote computer. I'm thinking this has something to do with how I set up the sockets or how I'm posting the requests, but I don't see any difference from the sample code.
Any ideas?
EDIT (Added sample code):
Alright, here it is! It ain't pretty though!
If you have the Windows SDK installed, you can connect to it using the iocpclient sample (Program Files\Microsoft SDKs\Windows\v7.1\Samples\netds\winsock\iocp\client) after changing its default port at line 73 to 5000.
A weird thing I've just noticed when trying it myself is that the sample iocpclient doesn't seem to hit the same 37 KB/s cap... However, it looks like the sample code has a limit set to around 800 KB/s. I'll post a client if that can be of any help.
#pragma comment(lib, "Ws2_32.lib")
#include <WinSock2.h>
#include <stdio.h>
unsigned int connection = 0;
unsigned int upload = 0;
unsigned int download = 0;
#define IO_CONTEXT_COUNT 5
class NetClientHost
{
friend class gNetProtocolHost;
public:
enum Operation
{
kOperationUnknown,
kOperationRead,
kOperationWrite,
};
struct ClientData
{
SOCKET socket;
};
struct IOContext
{
WSAOVERLAPPED overlapped;
WSABUF wsaReceiveBuf;
WSABUF wsaSendBuf;
char *buf;
char *TESTbuf;
unsigned long bytesReceived;
unsigned long bytesSent;
unsigned long flags;
unsigned int bytesToSendTotal;
unsigned int remainingBytesToSend;
unsigned int chunk;
Operation operation;
};
NetClientHost()
{
memset((void *) &m_clientData, 0, sizeof(m_clientData));
}
NetClientHost::IOContext *NetClientHost::AcquireContext()
{
while (true)
{
for (int i = 0; i < IO_CONTEXT_COUNT; ++i)
{
if (!(m_ioContexts + i)->inUse)
{
InterlockedIncrement(&(m_ioContexts + i)->inUse);
//ResetEvent(*(m_hContextEvents + i));
if ((m_ioContexts + i)->ioContext.TESTbuf == 0)
Sleep(1);
return &(m_ioContexts + i)->ioContext;
}
}
//++g_blockOnPool;
//WaitForMultipleObjects(IO_CONTEXT_COUNT, m_hContextEvents, FALSE, INFINITE);
}
}
const ClientData *NetClientHost::GetClientData() const
{
return &m_clientData;
};
void NetClientHost::Init(unsigned int bufferSize)
{
_InitializeIOContexts(bufferSize ? bufferSize : 1024);
}
void NetClientHost::ReleaseContext(IOContext *ioContext)
{
int i = sizeof(_IOContextData), j = sizeof(IOContext);
_IOContextData *contextData = (_IOContextData *) (((char *) ioContext) - (i - j));
InterlockedDecrement(&contextData->inUse);
//SetEvent(*(m_hContextEvents + contextData->index));
}
struct _IOContextData
{
unsigned int index;
volatile long inUse;
IOContext ioContext;
};
ClientData m_clientData;
_IOContextData *m_ioContexts;
HANDLE *m_hContextEvents;
void _InitializeIOContexts(unsigned int bufferSize)
{
m_ioContexts = new _IOContextData[IO_CONTEXT_COUNT];
m_hContextEvents = new HANDLE[IO_CONTEXT_COUNT];
memset((void *) m_ioContexts, 0, sizeof(_IOContextData) * IO_CONTEXT_COUNT);
for (int i = 0; i < IO_CONTEXT_COUNT; ++i)
{
(m_ioContexts + i)->index = i;
(m_ioContexts + i)->ioContext.buf = new char[bufferSize];
(m_ioContexts + i)->ioContext.wsaReceiveBuf.len = bufferSize;
(m_ioContexts + i)->ioContext.wsaReceiveBuf.buf = (m_ioContexts + i)->ioContext.buf;
(m_ioContexts + i)->ioContext.TESTbuf = new char[10000];
(m_ioContexts + i)->ioContext.wsaSendBuf.buf = (m_ioContexts + i)->ioContext.TESTbuf;
*(m_hContextEvents + i) = CreateEvent(0, TRUE, FALSE, 0);
}
}
void _SetSocket(SOCKET socket)
{
m_clientData.socket = socket;
}
};
bool WriteChunk(const NetClientHost *clientHost, NetClientHost::IOContext *ioContext)
{
int status;
status = WSASend(clientHost->GetClientData()->socket, &ioContext->wsaSendBuf, 1, &ioContext->bytesSent, ioContext->flags, &ioContext->overlapped, 0);
if (status == SOCKET_ERROR && WSAGetLastError() != WSA_IO_PENDING)
{
// ...
return false;
}
return true;
}
bool Write(NetClientHost *clientHost, void *buffer, unsigned int size, unsigned int chunk)
{
//__ASSERT(m_clientHost);
//__ASSERT(m_clientHost->GetClientData()->remainingBytesToSend == 0);
NetClientHost::IOContext *ioContext = clientHost->AcquireContext();
if (!chunk)
chunk = size;
ioContext->wsaSendBuf.buf = ioContext->TESTbuf;
ioContext->operation = NetClientHost::kOperationWrite;
ioContext->flags = 0;
ioContext->wsaSendBuf.buf = new char[size];
memcpy((void *) ioContext->wsaSendBuf.buf, buffer, chunk);
ioContext->wsaSendBuf.len = chunk;
ioContext->chunk = chunk;
ioContext->bytesToSendTotal = size;
ioContext->remainingBytesToSend = size;
return WriteChunk(clientHost, ioContext);
}
void Read(NetClientHost *clientHost)
{
NetClientHost::IOContext *ioContext = clientHost->AcquireContext();
int status;
memset((void *) ioContext, 0, sizeof(NetClientHost::IOContext));
ioContext->buf = new char[1024];
ioContext->wsaReceiveBuf.len = 1024;
ioContext->wsaReceiveBuf.buf = ioContext->buf;
ioContext->flags = 0;
ioContext->operation = NetClientHost::kOperationRead;
status = WSARecv(clientHost->GetClientData()->socket, &ioContext->wsaReceiveBuf, 1, &ioContext->bytesReceived, &ioContext->flags, &ioContext->overlapped, 0);
int i = WSAGetLastError();
if (status == SOCKET_ERROR && WSAGetLastError() != WSA_IO_PENDING)
{
// ...
}
}
bool AddSocket(HANDLE hIOCP, SOCKET socket)
{
++connection;
int bufSize = 0;
LINGER lingerStruct;
lingerStruct.l_onoff = 1;
lingerStruct.l_linger = 0;
setsockopt(socket, SOL_SOCKET, SO_SNDBUF, (char *) &bufSize, sizeof(int));
setsockopt(socket, SOL_SOCKET, SO_RCVBUF, (char *) &bufSize, sizeof(int));
setsockopt(socket, SOL_SOCKET, SO_LINGER, (char *) &lingerStruct, sizeof(lingerStruct) );
NetClientHost *clientHost = new NetClientHost;
clientHost->_InitializeIOContexts(1024);
clientHost->Init(0);
clientHost->_SetSocket(socket);
// Add this socket to the IO Completion Port
CreateIoCompletionPort((HANDLE) socket, hIOCP, (DWORD_PTR) clientHost, 0);
Read(clientHost);
return true;
}
int read = 0, write = 0;
DWORD WINAPI WorkerThread(LPVOID param)
{
LPOVERLAPPED overlapped;
NetClientHost *clientHost;
HANDLE hIOCP = (HANDLE) param;
DWORD ioSize;
BOOL status;
while (true)
{
status = GetQueuedCompletionStatus(hIOCP, &ioSize, (PULONG_PTR) &clientHost, (LPOVERLAPPED *) &overlapped, INFINITE);
if (!(status || ioSize))
{
--connection;
//_CloseConnection(clientHost);
continue;
}
NetClientHost::IOContext *ioContext = (NetClientHost::IOContext *) overlapped;
switch (ioContext->operation)
{
case NetClientHost::kOperationRead:
download += ioSize;
Write(clientHost, ioContext->wsaReceiveBuf.buf, ioSize, 0);
write++;
clientHost->ReleaseContext(ioContext);
break;
case NetClientHost::kOperationWrite:
upload += ioSize;
if (ioContext->remainingBytesToSend)
{
ioContext->remainingBytesToSend -= ioSize;
ioContext->wsaSendBuf.len = ioContext->chunk <= ioContext->remainingBytesToSend ? ioContext->chunk : ioContext->remainingBytesToSend; // equivalent to min(clientData->chunk, clientData->remainingBytesToSend);
ioContext->wsaSendBuf.buf += ioContext->wsaSendBuf.len;
}
if (ioContext->remainingBytesToSend)
{
WriteChunk(clientHost, ioContext);
}
else
{
clientHost->ReleaseContext(ioContext);
Read(clientHost);
read++;
}
break;
}
}
return 0;
}
DWORD WINAPI ListenThread(LPVOID param)
{
SOCKET sdListen = (SOCKET) param;
HANDLE hIOCP = CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, 0);
CreateThread(0, 0, WorkerThread, hIOCP, 0, 0);
CreateThread(0, 0, WorkerThread, hIOCP, 0, 0);
CreateThread(0, 0, WorkerThread, hIOCP, 0, 0);
CreateThread(0, 0, WorkerThread, hIOCP, 0, 0);
while (true)
{
SOCKET as = WSAAccept(sdListen, 0, 0, 0, 0);
if (as != INVALID_SOCKET)
AddSocket(hIOCP, as);
}
}
int main()
{
SOCKET sdListen;
SOCKADDR_IN si_addrlocal;
int nRet;
int nZero = 0;
LINGER lingerStruct;
WSADATA wsaData;
WSAStartup(0x202, &wsaData);
sdListen = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_IP, NULL, 0, WSA_FLAG_OVERLAPPED);
si_addrlocal.sin_family = AF_INET;
si_addrlocal.sin_port = htons(5000);
si_addrlocal.sin_addr.s_addr = htonl(INADDR_ANY);
nRet = bind(sdListen, (struct sockaddr *)&si_addrlocal, sizeof(si_addrlocal));
nRet = listen(sdListen, 5);
nZero = 0;
nRet = setsockopt(sdListen, SOL_SOCKET, SO_SNDBUF, (char *) &nZero, sizeof(nZero));
nZero = 0;
nRet = setsockopt(sdListen, SOL_SOCKET, SO_RCVBUF, (char *)&nZero, sizeof(nZero));
lingerStruct.l_onoff = 1;
lingerStruct.l_linger = 0;
nRet = setsockopt(sdListen, SOL_SOCKET, SO_LINGER, (char *)&lingerStruct, sizeof(lingerStruct) );
CreateThread(0, 0, ListenThread, (LPVOID) sdListen, 0, 0);
HANDLE console = GetStdHandle(STD_OUTPUT_HANDLE);
while (true)
{
COORD c = {0};
SetConsoleCursorPosition(console, c);
printf("Connections: %i \nUpload: %iKB/s \nDownload: %iKB/s ", connection, upload * 2 / 1024, download * 2 / 1024);
upload = 0;
download = 0;
Sleep(500);
}
return 0;
}
This kind of asynchronous system should be able to run at full datalink speed. Problems I've found in systems like this include:
- timeout settings causing needless retransmissions
- in the receiving process, received message A might trigger a database update, such that received message B has to wait, causing an unnecessary delay in the response to message B back to the sender, when the DB update could actually be done in idle time.
There's a tool called Wireshark that can give you some visibility into the message traffic.
I used to do it the hard way, with time-stamped message logs.
BTW: I would first use this method on the individual processes, to clean out any bottlenecks, before doing the asynchronous analysis. If you haven't done this, you can bet they're in there.
Just any old profiler isn't reliable. There are good ones, including Zoom.
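For the time-stamped message logs, here is a minimal sketch of what that can look like in the question's worker thread; QueryPerformanceCounter provides the timestamps, and the helper names and log format are just illustrative:
#include <windows.h>
#include <stdio.h>

static LARGE_INTEGER g_qpcFrequency;

// Call once at startup.
void InitTimestamps() { QueryPerformanceFrequency(&g_qpcFrequency); }

// Monotonic microsecond timestamp, good enough to correlate a posted
// WSASend/WSARecv with its completion and with the client's own log.
static unsigned long long NowMicroseconds()
{
    LARGE_INTEGER now;
    QueryPerformanceCounter(&now);
    return (unsigned long long) (now.QuadPart * 1000000LL / g_qpcFrequency.QuadPart);
}

// Example use inside the worker loop from the question:
//
//   status = GetQueuedCompletionStatus(hIOCP, &ioSize, ..., INFINITE);
//   printf("%llu us op=%d bytes=%lu\n",
//          NowMicroseconds(), (int) ioContext->operation, (unsigned long) ioSize);
//
// Logging a timestamp when each WSASend/WSARecv is posted and another when its
// completion is dequeued shows whether the time is spent in the network stack
// or in your own handling of the completions.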