I have derived my ClientSocket class from CAsyncSocket:
class ClientSocket : public CAsyncSocket
{
// This socket sends data back to "backSocket", which points to this object
// only for testing, but it could send data to other sockets the same way.
ClientSocket * backSocket;
// Store some data in backupData until the connection is established.
StringBuilder * backupData;
public:
virtual void OnClose(int);
virtual void OnReceive(int);
ClientSocket(void);
bool ConnectToBACK();
virtual ~ClientSocket(void);
};
ClientSocket::ClientSocket(void)
{
// Do NOT connect to "back" here -- recursive calls otherwise.
backSocket = NULL;
backupData = NULL;
}
bool ClientSocket::ConnectToBACK()
{
if(this->backSocket != NULL)
return true;
// just for debugging :)
this->backSocket = this;
return true;
}
ClientSocket::~ClientSocket(void)
{
this->Close();
// Guard against the debug setup where backSocket == this, otherwise the
// destructor would end up deleting itself.
if(this->backSocket && this->backSocket != this)
{
this->backSocket->Close();
delete this->backSocket;
this->backSocket = NULL;
}
}
void ClientSocket::OnClose(int nErrorCode)
{
if(this->backSocket != NULL)
{
this->backSocket->Close();
}
CAsyncSocket::OnClose(nErrorCode);
}
void ClientSocket::OnReceive(int nErrorCode)
{
if(nErrorCode == 0)
{
const int bufLen = 2049; // sizeof(buffer) would only give the pointer size
char *buffer = new char[bufLen];
int received = this->Receive(buffer, bufLen-1, 0);
if(received == SOCKET_ERROR)
{
delete [] buffer;
return ;
}
if(this->ConnectToBACK())
{
if(backupData)
{
int backupLen;
char *backup = backupData->ToString(&backupLen);
this->backSocket->Send(backup, backupLen);
delete backupData;
delete [] backup;
backupData = NULL;
}
this->backSocket->Send(buffer, received);
delete [] buffer;
}
else
{
if(backupData == NULL)
{
backupData = new StringBuilder();
}
backupData->Insert(buffer, received);
}
}
CAsyncSocket::OnReceive(nErrorCode);
}
I have not associated any GUI with this, as I thought it would be good to avoid the overhead; I do not require one. I have also called AfxSocketInit() in main() and started a ListeningSocket from a separate thread.
netstat -a shows a proper binding on the ListeningSocket's port, with status LISTENING.
// ListeningSocket inherits public CAsyncSocket
void ListeningSocket::OnAccept(int nErrorCode)
{
#ifdef DEBUG
std::cout << "\nOnAccepting Proxy Server :)";
#endif
if(nErrorCode == 0)
{
ClientSocket *FromClient = new ClientSocket();
FromClient->value = 100;
if(this->Accept(*FromClient, NULL, NULL))
{
// Connection just has ClientSocket * client
Connection * connection = new Connection(FromClient);
// a list<Connection *> is stored in ListeningSocket
this->clients.push_front(connection);
}
else
{
std::cerr << "\nFailed to accept connection from Client";
}
}
CAsyncSocket::OnAccept(nErrorCode);
}
When I put breakpoints in ListeningSocket::OnAccept, execution never reaches them.
EDIT:
static DWORD WINAPI StartListening(LPVOID param)
{
ListeningSocket *app = (ListeningSocket *)param;
if(false == app->Create(7897, SOCK_STREAM, 31, "127.0.0.1"))
{
std::cerr << "\nCould not create\bind to port";
delete app;
return -1;
}
if(false == app->Listen())
{
std::cerr << "\nCould not listen";
app->Close();
delete app;
return -1;
}
return 0;
}
int ListeningSocket::Start()
{
if(NULL == CreateThread(NULL,0, StartListening, (LPVOID)this,0, NULL))
{
return -1;
}
return 0;
}
I have NOT built this as an MFC wizard-generated solution; it is a plain project with a main().
My ListeningSocket class is a singleton:
class ListeningSocket : public CAsyncSocket
{
private:
static ListeningSocket * instance;
std::list<Connection *> clients;
ListeningSocket(void);
public:
// overrides
virtual void OnAccept(int);
virtual void OnClose(int);
static ListeningSocket * GetListeningSocket();
virtual ~ListeningSocket(void);
virtual void Close();
int Start(void);
};
The CAsyncSocket class internally uses Windows messages to fire its events. You need to create the CAsyncSocket-derived object in a thread that runs a message loop; only then will the event handlers be called. Pseudo-code:
// This function runs in the context of worker thread
void MyClass::ThreadFunction()
{
mySocket.Create(...); // creating CAsyncSocket-derived class
// Run message loop.
BOOL bRes = FALSE;
MSG msg;
while((bRes = GetMessage( &msg, NULL, 0, 0 )) != 0)
{
if (bRes == -1)
{
break;
}
else
{
TranslateMessage(&msg);
DispatchMessage(&msg);
}
}
}
To stop this thread, use PostQuitMessage function.
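Since PostQuitMessage can only post WM_QUIT to the calling thread's own queue, stopping the worker from another thread is usually done by posting WM_QUIT directly with PostThreadMessage. A minimal sketch (the thread id is assumed to come from CreateThread, and the worker must already have a message queue, i.e. it has called GetMessage or PeekMessage at least once):
#include <windows.h>

// Ends the worker's message loop: GetMessage returns 0 once it retrieves
// WM_QUIT, so the while loop shown above exits.
void StopWorker(DWORD workerThreadId)
{
    PostThreadMessage(workerThreadId, WM_QUIT, 0, 0);
}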
Edit.
I didn't post all the multi-threading details, assuming that you are familiar with them. Generally, CreateThread requires a global function (or a static class member function) as its thread routine. To call a regular class method, pass "this" as the CreateThread parameter; it arrives as a void* in the global thread function. Cast it back to the class pointer and call the regular class method.
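A rough sketch of that pattern, reusing the MyClass/ThreadFunction names from the pseudo-code above (ThreadProc and the thread handle member are made-up names for the example):
#include <windows.h>

class MyClass
{
public:
    void StartWorker()
    {
        // Pass "this" as the thread parameter; CreateThread only accepts a
        // free function or a static member function as the thread routine.
        m_hThread = CreateThread(NULL, 0, &MyClass::ThreadProc, this, 0, NULL);
    }

private:
    static DWORD WINAPI ThreadProc(LPVOID param)
    {
        // Cast the void* back to the class pointer and call the real method.
        static_cast<MyClass*>(param)->ThreadFunction();
        return 0;
    }

    void ThreadFunction()
    {
        // Create the CAsyncSocket-derived object here and run the message
        // loop shown in the pseudo-code above.
    }

    HANDLE m_hThread = NULL;
};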
I've also run into this same issue, with someone's re-implementation of CAsyncSocket called CAsyncSocketEx, built to be a drop-in replacement for CAsyncSocket.
Just this week I thought I'd see if I could use this code again, and I ran into this very same problem. Since the async window was not created inside a thread that has a message loop, the events from WSAAsyncSelect() were not firing...
See this: http://support.microsoft.com/kb/90975
Related
I'm developing a driver for a device with Qt. I have done this many times and I have code that I reuse each time. But this time, when I open the QSerialPort, it seems to work but does not: I can write, and the device receives the commands, but I cannot receive anything in my software: the QSerialPort::readyRead signal is never emitted.
When I open the serial port with PuTTY (just open it, without sending anything) and close it right after, my Qt software works perfectly when I reconnect: I can now receive correctly...
Do you have any idea what PuTTY does differently from (or in addition to) my software when opening the port?
(I use the same parameters, and I'm on Windows with Qt 5.15.2.)
My code for opening:
_serial->setPortName(com);
_serial->setBaudRate(QSerialPort::Baud115200);
_serial->setDataBits(QSerialPort::Data8);
_serial->setParity(QSerialPort::NoParity);
_serial->setStopBits(QSerialPort::OneStop);
_serial->setFlowControl(QSerialPort::NoFlowControl);
if(!_serial->open(QIODevice::ReadWrite))
{
emit error(tr("Unable to open port"));
return;
}
_serial->clear();
My code for writing (a simple string like "Hello"):
_serial->write("Hello");
My code for connecting the signal:
connect(_serial, &QSerialPort::readyRead, this, &device::processCommand);
My code for reading the serial port (processCommand()):
QString bufferData;
if (_serial->isOpen())
{
_datas.append(_serial->readAll());
bufferData = _datas.constData();
}
EDIT: The Qt example 'Terminal' does not work on Windows with my device, but it works on Ubuntu...
EDIT 2: SOLUTION: I have finally found the solution: just add _serial->setDataTerminalReady(true); after opening the QSerialPort.
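For reference, the working open sequence now looks like this (same settings as above, plus the DTR line):
_serial->setPortName(com);
_serial->setBaudRate(QSerialPort::Baud115200);
_serial->setDataBits(QSerialPort::Data8);
_serial->setParity(QSerialPort::NoParity);
_serial->setStopBits(QSerialPort::OneStop);
_serial->setFlowControl(QSerialPort::NoFlowControl);
if(!_serial->open(QIODevice::ReadWrite))
{
    emit error(tr("Unable to open port"));
    return;
}
_serial->setDataTerminalReady(true); // the fix: assert DTR so the device starts sending
_serial->clear();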
Thanks.
I ran into the same problem, where the readyRead signal was never emitted for a virtual USB COM port. In the end I came to the conclusion that the QSerialPort class should not be used here.
I solved it using QThread and the Win32 API.
#pragma once
#include <windows.h>
#include <QMutex>
#include <QThread>
#include <QWaitCondition>
#define SERIAL_RX_BUF_SIZE 2047
class SerialThread : public QThread
{
Q_OBJECT
public:
explicit SerialThread(QObject *parent = nullptr);
~SerialThread();
bool startThread(const QString& portName);
void stopThread();
void request(const QString& command);
signals:
void response(char* text);
void timeout();
private:
void run() override;
bool writeCommand(const QString& command);
QString m_portName;
QString m_command;
QMutex m_mutex;
QWaitCondition m_wait;
volatile bool m_quit = false;
HANDLE m_hComm;
char m_buf[SERIAL_RX_BUF_SIZE + 1];
};
#include "serial_thread.h"
#include <QDebug>
SerialThread::SerialThread(QObject *parent) :
QThread(parent)
{
memset(m_buf, 0, sizeof(m_buf));
}
SerialThread::~SerialThread()
{
}
bool SerialThread::startThread(const QString &portName)
{
const QMutexLocker locker(&m_mutex);
m_hComm = CreateFileA(portName.toStdString().c_str(), // PORT NAME
GENERIC_READ | GENERIC_WRITE, // READ/WRITE
0, // NO SHARING
NULL, // NO SECURITY
OPEN_EXISTING, // OPEN EXISTING PORT ONLY
0, // NON OVERLAPPED I/O
NULL); // NULL FOR COMM DEVICES
if (m_hComm == INVALID_HANDLE_VALUE)
{
return false;
}
m_portName = portName;
if (!SetCommMask(m_hComm, EV_RXCHAR | EV_ERR))
{
qCritical() << "SetCommMask failed";
CloseHandle(m_hComm);
return false;
}
COMMTIMEOUTS comm_timeouts;
if (!GetCommTimeouts(m_hComm, &comm_timeouts))
{
qCritical() << "GetCommTimeouts failed";
CloseHandle(m_hComm);
return false;
}
comm_timeouts.ReadIntervalTimeout = 1;
comm_timeouts.ReadTotalTimeoutMultiplier = 0;
comm_timeouts.ReadTotalTimeoutConstant = 500;
comm_timeouts.WriteTotalTimeoutMultiplier = 0;
comm_timeouts.WriteTotalTimeoutConstant = 0;
if (!SetCommTimeouts(m_hComm, &comm_timeouts))
{
qCritical() << "SetCommTimeouts failed";
CloseHandle(m_hComm);
return false;
}
start();
return true;
}
void SerialThread::stopThread()
{
m_mutex.lock();
m_quit = true;
m_mutex.unlock();
m_wait.wakeAll();
wait();
}
void SerialThread::request(const QString& command)
{
m_mutex.lock();
m_command = command;
m_mutex.unlock();
m_wait.wakeAll();
}
void SerialThread::run()
{
DWORD dwEvtMask, nRead;
while (!m_quit)
{
m_mutex.lock();
m_wait.wait(&m_mutex);
m_mutex.unlock();
{
const QMutexLocker locker(&m_mutex);
if (m_command.isEmpty())
{
continue;
}
if (!writeCommand(m_command))
{
continue;
}
if (WaitCommEvent(m_hComm, &dwEvtMask, NULL))
{
if (dwEvtMask & EV_ERR)
{
qCritical() << "Wait failed with error: " << GetLastError();
break;
}
if (dwEvtMask & EV_RXCHAR)
{
if (!ReadFile(m_hComm, &m_buf, SERIAL_RX_BUF_SIZE, &nRead, NULL))
{
qCritical() << "ReadFile error: " << GetLastError();
}
else
{
m_buf[nRead] = 0;
qDebug() << "Read: " << nRead;
emit response(m_buf);
}
}
}
else
{
DWORD dwRet = GetLastError();
if (ERROR_IO_PENDING == dwRet)
{
qDebug() << "RX timeout";
emit timeout();
}
else
{
qCritical() << "WaitCommEvent failed: " << dwRet;
}
}
m_command.clear();
}
}
CloseHandle(m_hComm);
m_quit = false;
}
bool SerialThread::writeCommand(const QString& command)
{
std::string s = command.toStdString();
DWORD n;
if (!WriteFile(m_hComm, s.data(), s.length(), &n, NULL))
{
qCritical() << "WriteFile error";
return false;
}
return true;
}
I hope you all had nice holidays.
This question is related to my earlier question: std::condition_variable - Wait for several threads to notify observer
I'm trying to implement a thread pool based on my own "mutable thread" implementation below:
class MutableThread
{
private:
std::thread m_Thread;
std::function<void()> m_Function;
bool m_bRun;
std::mutex m_LockMutex;
std::mutex m_WaitMutex;
std::condition_variable m_CV;
IAsyncTemplateObserver<MutableThread>* m_Observer = nullptr;
private:
void Execute()
{
while (m_bRun)
{
{
std::unique_lock<std::mutex> wait(m_WaitMutex);
m_CV.wait(wait);
}
std::lock_guard<std::mutex> lock(m_LockMutex);
if (m_bRun && m_Function)
{
m_Function();
m_Function = std::function<void()>();
if (m_Observer != nullptr)
{
m_Observer->Signal(this);
}
}
}
}
public:
HDEBUGNAME(TEXT("MutableThread"));
MutableThread(const MutableThread& thread) = delete;
MutableThread(IAsyncTemplateObserver<MutableThread>* _Observer)
{
m_Observer = _Observer;
m_bRun = true;
m_Thread = std::thread(&MutableThread::Execute, this);
}
MutableThread()
{
m_Observer = nullptr;
m_bRun = true;
m_Thread = std::thread(&MutableThread::Execute, this);
}
~MutableThread()
{
m_bRun = false;
m_CV.notify_one();
try
{
if (m_Thread.joinable())
m_Thread.join();
}
catch (std::system_error& ex)
{
HWARNINGD(TEXT("%s"), ex.what());
}
}
inline bool Start(const std::function<void()>& f)
{
std::lock_guard<std::mutex> lock(m_LockMutex);
if (m_Function != nullptr)
return false;
m_Function = f;
m_CV.notify_one();
return true;
}
};
The IAsyncTemplateObserver simply derives from my IAsyncObserver class posted in the earlier question and adds a virtual function:
template <typename T>
class IAsyncTemplateObserver : public IAsyncObserver
{
public:
virtual void Signal(T* _Obj) = 0;
};
What I want to do is signal the ThreadPool that the function has finished executing, so that a new task can be assigned to the mutable thread:
class MutableThread;
struct Task
{
std::function<void()> m_Function;
uint32_t m_uPriority;
Task(const std::function<void()>& _Function, uint32_t _uPriority)
{
m_Function = _Function;
m_uPriority = _uPriority;
}
};
inline bool operator<(const Task& lhs, const Task& rhs)
{
return lhs.m_uPriority < rhs.m_uPriority;
}
class ThreadPool : public IAsyncTemplateObserver<MutableThread>
{
private:
std::list<MutableThread* > m_FreeThreads;
std::list<MutableThread* > m_UsedThreads;
std::set<Task> m_Tasks;
std::mutex m_LockMutex;
public:
ThreadPool()
{
//Grow(std::thread::hardware_concurrency() - 1);
}
ThreadPool(size_t n)
{
Grow(n);
}
~ThreadPool()
{
//std::lock_guard<std::mutex> lock(m_Mutex);
for (MutableThread* pUsed : m_UsedThreads)
{
HSAFE_DELETE(pUsed);
}
for (MutableThread* pFree : m_FreeThreads)
{
HSAFE_DELETE(pFree);
}
}
inline void Grow(size_t n)
{
std::lock_guard<std::mutex> lock(m_LockMutex);
for (size_t i = 0; i < n; i++)
{
m_FreeThreads.push_back(new MutableThread(this));
}
}
inline void AddTask(const Task& _Task)
{
{
std::lock_guard<std::mutex> lock(m_LockMutex);
m_Tasks.insert(_Task);
}
AssignThreads();
}
virtual void Signal(MutableThread* _pThread)
{
{
std::lock_guard<std::mutex> lock(m_LockMutex);
m_UsedThreads.remove(_pThread);
m_FreeThreads.push_back(_pThread);
}
AssignThreads();
NotifyOne();
}
inline void WaitForAllThreads()
{
bool bWait = true;
do
{
{
//check if we have to wait
std::lock_guard<std::mutex> lock(m_LockMutex);
bWait = !m_UsedThreads.empty() || !m_Tasks.empty();
}
if (bWait)
{
std::unique_lock<std::mutex> wait(m_ObserverMutex);
m_ObserverCV.wait(wait);
}
} while (bWait);
}
private:
inline void AssignThreads()
{
std::lock_guard<std::mutex> lock(m_LockMutex);
if (m_FreeThreads.empty() || m_Tasks.empty())
return;
//Get free thread
MutableThread* pThread = m_FreeThreads.back();
m_FreeThreads.pop_back();
//park thread in used list
m_UsedThreads.push_back(pThread);
//get task with highest priority
std::set<Task>::iterator it = m_Tasks.end();
--it; //last entry has highest priority
//start the task
pThread->Start(it->m_Function);
//remove the task from the list
m_Tasks.erase(it);
}
};
The AddTask function is called several times by the same thread, but when a mutable thread signals the thread pool (via m_Observer->Signal(this)), the application freezes at the lock_guard in the AssignThreads() function. The strange thing is that, unlike a normal deadlock, all call-stack views in Visual Studio are empty as soon as I try to step over the line with the lock_guard.
Can anyone explain this behaviour? Is there a major design flaw, or just a simple mix-up?
Thanks for your help!
Greetings,
Fabian
Edit: I've added a minimal visual studio solution that reproduces the problem: ThreadPoolTest.zip
Thanks to a friend, I was able to fix the problem by moving the call m_Observer->Signal(this) outside of the lock_guard scope in MutableThread::Execute(). The deadlock happened because the worker thread still held its own m_LockMutex when it called Signal(), and AssignThreads() could immediately hand that same thread a new task, so Start() tried to lock the very mutex the thread was already holding. Secondly, I removed the lock_guard in the AssignThreads() function and moved the AssignThreads() call into the scope of the lock_guard in Signal()/AddTask(). Not really related, but still a flaw: all condition_variable wait() calls are now inside a while(m_bNotified == false) loop.
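For clarity, a rough sketch of the corrected structure (condensed, so treat it as illustrative rather than a drop-in; m_bNotified is the new flag mentioned above, assumed to be set by Start() before it notifies):
// MutableThread::Execute after the fix: Signal() is called only after the
// lock_guard protecting m_Function has gone out of scope, so AssignThreads()
// can safely call Start() on this very thread again without self-deadlocking.
void MutableThread::Execute()
{
    while (m_bRun)
    {
        {
            std::unique_lock<std::mutex> wait(m_WaitMutex);
            while (m_bNotified == false) // also guards against spurious wakeups
                m_CV.wait(wait);
            m_bNotified = false;
        }
        bool bRan = false;
        {
            std::lock_guard<std::mutex> lock(m_LockMutex);
            if (m_bRun && m_Function)
            {
                m_Function();
                m_Function = std::function<void()>();
                bRan = true;
            }
        } // m_LockMutex released here
        if (bRan && m_Observer != nullptr)
        {
            m_Observer->Signal(this);
        }
    }
}

// ThreadPool::Signal after the fix: AssignThreads() no longer locks by itself;
// it is called while Signal()/AddTask() already hold m_LockMutex.
void ThreadPool::Signal(MutableThread* _pThread)
{
    {
        std::lock_guard<std::mutex> lock(m_LockMutex);
        m_UsedThreads.remove(_pThread);
        m_FreeThreads.push_back(_pThread);
        AssignThreads();
    }
    NotifyOne();
}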
Question
What can I do to get a locking mechanism that provides minimal and stable latency while guaranteeing that a thread cannot reacquire a resource before another thread has acquired and released it?
The desirability of answers to this question is ranked as follows:
Some combination of built-in C++11 features that works in MinGW on Windows 7 (note that the <thread> and <mutex> libraries do not work with that toolchain)
Some combination of Windows API features
A modification to the FairLock listed below, my own attempt at implementing such a mechanism
Some features provided by a free, open-source library that does not require a .configure/make/make install process, (getting that to work in MSYS is more of an adventure than I care for)
Background
I am writing an application which is effectively a multi-stage producer/consumer. One thread generates input consumed by another thread, which produces output consumed by yet another thread. The application uses pairs of buffers so that, after an initial delay, all threads can work nearly simultaneously.
Since I am writing a Windows 7 application, I had been using CriticalSections to guard the buffers. The problem with using CriticalSections (or, so far as I can tell, any other Windows or C++11 built-in synchronization object) is that there is no provision guaranteeing that a thread that has just released a lock cannot reacquire it until another thread has done so first. Because of this, many of my test drivers for the middle thread (the Encoder) never gave the Encoder a chance to acquire the test input buffers and completed without having tested them. The end result was a ridiculous process of trying to determine an artificial wait time that stochastically worked for my machine.
Since the structure of my application requires that each stage wait for the other stages to have acquired, finished using, and released the necessary buffers before it uses those buffers again, I need, for lack of a better term, a fair locking mechanism. I took a crack at writing one (the source code is provided below). In testing, this FairLock allows my test driver to run my Encoder at the same speeds I was able to achieve using the CriticalSection in maybe 60% of the runs. The other 40% of the runs take anywhere between 10 and 100 ms longer, which is not acceptable for my application.
FairLock
// FairLock.hpp
#ifndef FAIRLOCK_HPP
#define FAIRLOCK_HPP
#include <atomic>
using namespace std;
class FairLock {
private:
atomic_bool owned {false};
atomic<DWORD> lastOwner {0};
public:
FairLock(bool owned);
bool inline hasLock() const;
bool tryLock();
void seizeLock();
void tryRelease();
void waitForLock();
};
#endif
// FairLock.cpp
#include <windows.h>
#include "FairLock.hpp"
#define ID GetCurrentThreadId()
FairLock::FairLock(bool owned) {
if (owned) {
this->owned = true;
this->lastOwner = ID;
} else {
this->owned = false;
this->lastOwner = 0;
}
}
bool inline FairLock::hasLock() const {
return owned && lastOwner == ID;
}
bool FairLock::tryLock() {
bool success = false;
DWORD id = ID;
if (owned) {
success = lastOwner == id;
} else if (
lastOwner != id &&
owned.compare_exchange_strong(success, true)
) {
lastOwner = id;
success = true;
} else {
success = false;
}
return success;
}
void FairLock::seizeLock() {
bool success = false;
DWORD id = ID;
if (!(owned && lastOwner == id)) {
while (!owned.compare_exchange_strong(success, true)) {
success = false;
}
lastOwner = id;
}
}
void FairLock::tryRelease() {
if (hasLock()) {
owned = false;
}
}
void FairLock::waitForLock() {
bool success = false;
DWORD id = ID;
if (!(owned && lastOwner == id)) {
while (lastOwner == id); // spin
while (!owned.compare_exchange_strong(success, true)) {
success = false;
}
lastOwner = id;
}
}
EDIT
DO NOT USE THIS FairLock CLASS; IT DOES NOT GUARANTEE MUTUAL EXCLUSION!
I reviewed the above code to compare it against the text of The C++ Programming Language, 4th Edition (which I had not read carefully) and against CouchDeveloper's recommended synchronous queue. I realized that there are several sequences in which the thread that just released the FairLock can be tricked into thinking it still owns it. All it takes is interleaving the instructions as follows:
New owner: set owned to true
Old owner: is owned true? yes
Old owner: am I the last owner? yes
New owner: set me as the last owner
At this point, the old and new owners both enter their critical sections.
I am considering whether this problem has a solution and whether it is worth attempting to solve this at all. In the meantime, don't use this unless you see a fix.
I would implement this in C++11 using a condition_variable-per-thread setup so that I could choose exactly which thread to wake up when (Live demo at Coliru):
class FairMutex {
private:
class waitnode {
std::condition_variable cv_;
waitnode* next_ = nullptr;
FairMutex& fmtx_;
public:
waitnode(FairMutex& fmtx) : fmtx_(fmtx) {
*fmtx.tail_ = this;
fmtx.tail_ = &next_;
}
~waitnode() {
for (waitnode** p = &fmtx_.waiters_; *p; p = &(*p)->next_) {
if (*p == this) {
*p = next_;
if (!next_) {
fmtx_.tail_ = &fmtx_.waiters_;
}
break;
}
}
}
void wait(std::unique_lock<std::mutex>& lk) {
while (fmtx_.held_ || fmtx_.waiters_ != this) {
cv_.wait(lk);
}
}
void notify() {
cv_.notify_one();
}
};
waitnode* waiters_ = nullptr;
waitnode** tail_ = &waiters_;
std::mutex mtx_;
bool held_ = false;
public:
void lock() {
auto lk = std::unique_lock<std::mutex>{mtx_};
if (held_ || waiters_) {
waitnode{*this}.wait(lk);
}
held_ = true;
}
bool try_lock() {
if (mtx_.try_lock()) {
std::lock_guard<std::mutex> lk(mtx_, std::adopt_lock);
if (!held_ && !waiters_) {
held_ = true;
return true;
}
}
return false;
}
void unlock() {
std::lock_guard<std::mutex> lk(mtx_);
held_ = false;
if (waiters_ != nullptr) {
waiters_->notify();
}
}
};
FairMutex models the Lockable concept so it can be used like any other standard library mutex type. Put simply, it achieves fairness by inserting waiters into a list in arrival order, and passing the mutex to the first waiter in the list when unlocking.
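Since it satisfies Lockable, the standard lock helpers work with it unchanged; a minimal usage sketch:
#include <iostream>
#include <mutex>
#include <thread>

FairMutex fmtx;
int shared_value = 0;

void worker()
{
    for (int i = 0; i < 1000; ++i)
    {
        std::lock_guard<FairMutex> lock(fmtx); // waiters are woken in arrival order
        ++shared_value;
    }
}

int main()
{
    std::thread a(worker), b(worker);
    a.join();
    b.join();
    std::cout << shared_value << '\n'; // 2000
}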
If it's useful:
This demonstrates *) an implementation of a "synchronous queue" using semaphores as synchronization primitives.
Note: the actual implementation uses semaphores implemented with GCD (Grand Central Dispatch):
using gcd::mutex;
using gcd::semaphore;
// A blocking queue in which each put must wait for a get, and vice
// versa. A synchronous queue does not have any internal capacity,
// not even a capacity of one.
template <typename T>
class simple_synchronous_queue {
public:
typedef T value_type;
enum result_type {
OK = 0,
TIMEOUT_NOT_DELIVERED = -1,
TIMEOUT_NOT_PICKED = -2,
TIMEOUT_NOTHING_OFFERED = -3
};
simple_synchronous_queue()
: sync_(0), send_(1), recv_(0)
{
}
void put(const T& v) {
send_.wait();
new (address()) T(v);
recv_.signal();
sync_.wait();
}
result_type put(const T& v, double timeout) {
if (send_.wait(timeout)) {
new (address()) T(v);
recv_.signal();
if (sync_.wait(timeout)) {
return OK;
}
else {
return TIMEOUT_NOT_PICKED;
}
}
else {
return TIMEOUT_NOT_DELIVERED;
}
}
T get() {
recv_.wait();
T result = *address();
address()->~T();
sync_.signal();
send_.signal();
return result;
}
std::pair<result_type, T> get(double timeout) {
if (recv_.wait(timeout)) {
std::pair<result_type, T> result =
std::pair<result_type, T>(OK, *address());
address()->~T();
sync_.signal();
send_.signal();
return result;
}
else {
return std::pair<result_type, T>(TIMEOUT_NOTHING_OFFERED, T());
}
}
private:
using storage_t = typename std::aligned_storage<sizeof(T), std::alignment_of<T>::value>::type;
T* address() {
return static_cast<T*>(static_cast<void*>(&storage_));
}
storage_t storage_;
semaphore sync_;
semaphore send_;
semaphore recv_;
};
*) demonstrates: be careful about potential issues; it could be improved, etc. ... ;)
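If it helps further, a small usage sketch (assuming the gcd::semaphore/gcd::mutex wrappers named above are available; std::thread is only used here to drive the example): each put() blocks until the matching get() has picked the value up, and vice versa.
#include <iostream>
#include <thread>

void example()
{
    simple_synchronous_queue<int> q;

    std::thread consumer([&q] {
        for (int i = 0; i < 3; ++i)
        {
            int v = q.get();   // blocks until a put() offers a value
            std::cout << "got " << v << std::endl;
        }
    });

    for (int i = 0; i < 3; ++i)
    {
        q.put(i);              // blocks until the consumer has taken the value
    }
    consumer.join();
}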
I accepted CouchDeveloper's answer since it pointed me down the right path. I wrote a Windows-specific C++11 implementation of a synchronous queue, and added this answer so that others could consider/use it if they so choose.
// SynchronousQueue.hpp
#ifndef SYNCHRONOUSQUEUE_HPP
#define SYNCHRONOUSQUEUE_HPP
#include <atomic>
#include <exception>
#include <windows.h>
using namespace std;
class CouldNotEnterException: public exception {};
class NoPairedCallException: public exception {};
template <typename T>
class SynchronousQueue {
private:
atomic_bool valueReady {false};
CRITICAL_SECTION getCriticalSection;
CRITICAL_SECTION putCriticalSection;
DWORD wait {0};
HANDLE getSemaphore;
HANDLE putSemaphore;
const T* address {nullptr};
public:
SynchronousQueue(DWORD waitMS): wait {waitMS}, address {nullptr} {
InitializeCriticalSection(&getCriticalSection);
InitializeCriticalSection(&putCriticalSection);
getSemaphore = CreateSemaphore(nullptr, 0, 1, nullptr);
putSemaphore = CreateSemaphore(nullptr, 0, 1, nullptr);
}
~SynchronousQueue() {
EnterCriticalSection(&getCriticalSection);
EnterCriticalSection(&putCriticalSection);
CloseHandle(getSemaphore);
CloseHandle(putSemaphore);
DeleteCriticalSection(&putCriticalSection);
DeleteCriticalSection(&getCriticalSection);
}
void put(const T& value) {
if (!TryEnterCriticalSection(&putCriticalSection)) {
throw CouldNotEnterException();
}
ReleaseSemaphore(putSemaphore, (LONG) 1, nullptr);
if (WaitForSingleObject(getSemaphore, wait) != WAIT_OBJECT_0) {
if (WaitForSingleObject(putSemaphore, 0) == WAIT_OBJECT_0) {
LeaveCriticalSection(&putCriticalSection);
throw NoPairedCallException();
} else {
WaitForSingleObject(getSemaphore, 0);
}
}
address = &value;
valueReady = true;
while (valueReady);
LeaveCriticalSection(&putCriticalSection);
}
T get() {
if (!TryEnterCriticalSection(&getCriticalSection)) {
throw CouldNotEnterException();
}
ReleaseSemaphore(getSemaphore, (LONG) 1, nullptr);
if (WaitForSingleObject(putSemaphore, wait) != WAIT_OBJECT_0) {
if (WaitForSingleObject(getSemaphore, 0) == WAIT_OBJECT_0) {
LeaveCriticalSection(&getCriticalSection);
throw NoPairedCallException();
} else {
WaitForSingleObject(putSemaphore, 0);
}
}
while (!valueReady);
T toReturn = *address;
valueReady = false;
LeaveCriticalSection(&getCriticalSection);
return toReturn;
}
};
#endif
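For completeness, an illustrative usage sketch (plain Win32 threads, since <thread> is not available to me): put() throws NoPairedCallException when no get() pairs with it within the timeout passed to the constructor, and get() behaves symmetrically.
#include <windows.h>
#include <iostream>

static DWORD WINAPI ConsumerProc(LPVOID param)
{
    SynchronousQueue<int>* queue = static_cast<SynchronousQueue<int>*>(param);
    try
    {
        int v = queue->get();          // blocks until put() offers a value
        std::cout << "got " << v << std::endl;
    }
    catch (NoPairedCallException&)
    {
        std::cout << "no put() arrived in time" << std::endl;
    }
    return 0;
}

void Example()
{
    SynchronousQueue<int> queue(100);  // wait up to 100 ms for the paired call
    HANDLE hConsumer = CreateThread(NULL, 0, ConsumerProc, &queue, 0, NULL);
    try
    {
        queue.put(42);                 // blocks until get() has copied the value
    }
    catch (NoPairedCallException&)
    {
        std::cout << "no get() arrived in time" << std::endl;
    }
    WaitForSingleObject(hConsumer, INFINITE);
    CloseHandle(hConsumer);
}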
I've tried substituting MFC's CSingleLock implementation with my own, but I now have a deadlock on Windows XP that I don't have on Windows 7, and that I did not have on either OS with MFC's CSingleLock. Besides looking into every Lock and Unlock in my app, what is missing from my implementation?
class CCriticalSection
{
CRITICAL_SECTION m_cs;
public:
CCriticalSection()
{
InitializeCriticalSection(&m_cs);
}
~CCriticalSection()
{
DeleteCriticalSection(&m_cs);
}
void Lock()
{
EnterCriticalSection(&m_cs);
}
BOOL TryLock()
{
return TryEnterCriticalSection(&m_cs);
}
void Unlock()
{
if(m_cs.LockCount > -1)
LeaveCriticalSection(&m_cs);
}
};
and
#include "CCriticalSection.h"
class CSingleLock {
CCriticalSection *m_cs;
public:
CSingleLock(CCriticalSection* cs = NULL, bool bLock = false)
{
m_cs = cs;
if(m_cs != NULL)
{
if(bLock)
m_cs->Lock();
}
}
void Unlock()
{
if(m_cs != NULL)
m_cs->Unlock();
}
void Lock()
{
if(m_cs != NULL)
m_cs->Lock();
}
~CSingleLock()
{
if(m_cs != NULL)
m_cs->Unlock();
}
};
To make the locking class really helpful, you need to improve it like this:
class CSingleLock
{
CCriticalSection *m_cs;
bool m_bLock;
public:
CSingleLock(CCriticalSection* cs = NULL, bool bLock = false)
{
m_cs = cs;
m_bLock = false; // stays false if no critical section was supplied
if(m_cs != NULL)
{
if(bLock)
m_cs->Lock();
m_bLock = bLock;
}
}
void Unlock()
{
if(!m_cs || !m_bLock)
return;
m_cs->Unlock();
m_bLock = false;
}
void Lock()
{
if(!m_cs || m_bLock)
return;
m_cs->Lock();
m_bLock = true;
}
~CSingleLock()
{
Unlock();
}
};
It should be intelligent enough to keep you out of trouble with incorrect critical-section use.
MSDN says "If a thread calls LeaveCriticalSection when it does not have ownership of the specified critical section object, an error occurs that may cause another thread using EnterCriticalSection to wait indefinitely.".
The destructor for CSingleLock calls CCriticalSection::Unlock, which calls LeaveCriticalSection, but there's no check that the critical section is held by the current thread.
You need to keep track of ownership of the critical section.
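One way to do that, sketched below (not MFC's actual implementation): record the owning thread id and a recursion count, and make Unlock a no-op when the calling thread does not own the section.
#include <windows.h>

class COwnedCriticalSection
{
    CRITICAL_SECTION m_cs;
    DWORD m_ownerThreadId;
    int m_recursionCount;
public:
    COwnedCriticalSection() : m_ownerThreadId(0), m_recursionCount(0)
    {
        InitializeCriticalSection(&m_cs);
    }
    ~COwnedCriticalSection()
    {
        DeleteCriticalSection(&m_cs);
    }
    void Lock()
    {
        EnterCriticalSection(&m_cs);
        // We own the section now; critical sections are recursive, so count.
        m_ownerThreadId = GetCurrentThreadId();
        ++m_recursionCount;
    }
    void Unlock()
    {
        // Leaving a section we do not own would break it (see the MSDN quote
        // above), so refuse instead of calling LeaveCriticalSection blindly.
        if (m_ownerThreadId != GetCurrentThreadId() || m_recursionCount == 0)
            return;
        if (--m_recursionCount == 0)
            m_ownerThreadId = 0;
        LeaveCriticalSection(&m_cs);
    }
};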
How would you set object data that is shared between threads and needs to be updated once after the complete cycle of (say) two threads running in a busy loop?
CRITICAL_SECTION critical_section_;
int value; //needs to be updated once after the cycle of any number of threads running in busy loop
void ThreadsFunction(int i)
{
while (true)
{
EnterCriticalSection(&critical_section_);
/* Lines of Code */
LeaveCriticalSection(&critical_section_);
}
}
Edit: The value can be an object of any class.
Two suggestions:
Make the object itself thread safe.
Pass the object into the thread as instance data
I'll use C++ as a reference in my example. You can easily transpose this to pure C if you want.
// MyObject is the core data you want to share between threads
struct MyObject
{
int value;
int othervalue;
// all all the other members you want here
};
class MyThreadSafeObject
{
private:
CRITICAL_SECTION _cs;
MyObject _myobject;
bool _fLocked;
public:
MyThreadSafeObject()
{
_fLocked = false;
InitializeCriticalSection(&_cs);
}
~MyThreadSafeObject()
{
DeleteCriticalSection(&_cs);
}
// add "getter and setter" methods for each member in MyObject
void SetValue(int x)
{
EnterCriticalSection(&_cs);
_myobject.value = x;
LeaveCriticalSection(&_cs);
}
int GetValue()
{
int x;
EnterCriticalSection(&_cs);
x = _myobject.value;
LeaveCriticalSection(&_cs);
return x;
}
// add "getter and setter" methods for each member in MyObject
void SetOtherValue(int x)
{
EnterCriticalSection(&_cs);
_myobject.othervalue = x;
LeaveCriticalSection(&_cs);
}
int GetOtherValue()
{
int x;
EnterCriticalSection(&_cs);
x = _myobject.othervalue;
LeaveCriticalSection(&_cs);
return x;
}
// and if you need to access the whole object directly without using a critsec lock on each variable access, add lock/unlock methods
bool Lock(MyObject** ppObject)
{
EnterCriticalSection(&_cs);
*ppObject = &_myobject;
_fLocked = true;
return true;
}
bool UnLock()
{
if (_fLocked == false)
return false;
_fLocked = false;
LeaveCriticalSection(&_cs);
return true;
}
};
Then, create your object and thread as follows:
MyThreadSafeObject* pObjectThreadSafe = new MyThreadSafeObject();
MyObject* pObject = NULL;
// now initialize your object
pObjectThreadSafe->Lock(&pObject);
pObject->value = 0; // initialize value and all the other members of pObject to what you want them to be.
pObject->othervalue = 0;
pObjectThreadSafe->UnLock();
pObject = NULL;
// Create your threads, passing the pointer to MyThreadSafeObject as your instance data
DWORD dwThreadID = 0;
HANDLE hThread = CreateThread(NULL, 0, ThreadFunction, pObjectThreadSafe, 0, &dwThreadID);
And your thread will operate as follows
DWORD __stdcall ThreadFunction(void* pData)
{
MyThreadSafeObject* pObjectThreadSafe = (MyThreadSafeObject*)pData;
MyObject* pObject = NULL;
while (true)
{
/* lines of code */
pObjectThreadSafe->SetValue(x);
/* lines of code */
}
}
If you want to implement thread-safe updates of an integer, you are better off using the InterlockedIncrement and InterlockedDecrement or InterlockedExchangeAdd functions. See http://msdn.microsoft.com/en-us/library/ms684122(VS.85).aspx.
If you do need to use EnterCriticalSection and LeaveCriticalSection, you will find an example at http://msdn.microsoft.com/en-us/library/ms686908(v=VS.85).aspx, but I recommend calling EnterCriticalSection inside a __try block and LeaveCriticalSection in the __finally part of that block.
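A short sketch of both suggestions (illustrative only; the names are mine):
#include <windows.h>

volatile LONG g_counter = 0;
CRITICAL_SECTION g_critical_section; // assumed to be initialized elsewhere with InitializeCriticalSection

void IncrementCounter()
{
    // Atomic update of a plain integer: no critical section needed.
    InterlockedIncrement(&g_counter);
}

void UpdateSharedObject()
{
    __try
    {
        EnterCriticalSection(&g_critical_section);
        /* Lines of Code touching the shared object */
    }
    __finally
    {
        // Runs even if the guarded code returns early or raises an exception.
        LeaveCriticalSection(&g_critical_section);
    }
}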