Consider the following code snippet:
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <future>
#include <mutex>
#include <thread>

std::mutex asyncMut_;
std::atomic<bool> isAsyncOperAllowed{ false };
std::condition_variable cv;

void asyncFunc()
{
    while (isAsyncOperAllowed)
    {
        std::unique_lock<std::mutex> ul(asyncMut_);
        cv.wait(ul, []()
        {
            return isAsyncOperAllowed == false;
        });
    }
}

int main()
{
    isAsyncOperAllowed = true;
    auto fut = std::async(std::launch::async, asyncFunc);
    std::this_thread::sleep_for(std::chrono::seconds(3));
    std::lock_guard<std::mutex> lg(asyncMut_);
    isAsyncOperAllowed = false;
    cv.notify_one();
    fut.get();
}
I am expecting that once I change the value of the isAsyncOperAllowed variable and notify the condition variable, the wait inside asyncFunc should return, asyncFunc should exit, and main should end.
What I am observing instead is that the condition variable keeps waiting indefinitely. What am I doing wrong?
P.S. I am on Win10 - VS2015
Deadlock: main() never unlocks lg, so even though the condition variable in asyncFunc() gets notified, the waiting thread never gets a chance to run because it cannot reacquire the lock (fut.get() blocks while lg is still held).
Try:
int main()
{
    isAsyncOperAllowed = true;
    auto fut = std::async(std::launch::async, asyncFunc);
    std::this_thread::sleep_for(std::chrono::seconds(3));
    {
        // Scope the lock so it is released before notifying.
        std::lock_guard<std::mutex> lg(asyncMut_);
        isAsyncOperAllowed = false;
    }
    cv.notify_one();
    fut.get();
}
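
As an aside, the predicate overload of cv.wait already loops internally and re-checks the predicate after every wakeup, so the outer while (isAsyncOperAllowed) in asyncFunc is redundant. A minimal sketch of the simplified waiter, using the same globals as above:

void asyncFunc()
{
    std::unique_lock<std::mutex> ul(asyncMut_);
    // Blocks until notified *and* the predicate holds; spurious
    // wakeups are handled by the wait itself.
    cv.wait(ul, []() { return isAsyncOperAllowed == false; });
}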
Related
Could you please check the following code, which does not exit even after the condition becomes false?
I'm trying to print numbers 1 to 10 from the first thread, 11 to 20 from the second thread, and so on across 10 threads; whenever count reaches 100, my program should terminate safely by ending all threads. But that is not happening: after printing, it gets stuck, and I don't understand why.
Is there any data race? Please guide.
#include <iostream>
#include <vector>
#include <thread>
#include <mutex>
#include <condition_variable>

std::mutex mu;
int count = 1;
bool isDone = true;
std::condition_variable cv;

void Print10(int tid)
{
    std::unique_lock<std::mutex> lock(mu);
    while (isDone) {
        cv.wait(lock, [tid]() { return ((count / 10) == tid); });
        for (int i = 0; i < 10; i++)
            std::cout << "tid=" << tid << " count=" << count++ << "\n";
        isDone = count < 100; //!(count == (((tid+1)*10)+1));
        std::cout << "tid=" << tid << " isDone=" << isDone << "\n";
        cv.notify_all();
    }
}

int main()
{
    std::vector<std::thread> vec;
    for (int i = 0; i < 10; i++)
    {
        vec.push_back(std::thread(Print10, i));
    }
    for (auto& th : vec)
    {
        if (th.joinable())
            th.join();
    }
}
I believe the following code should work for you
#include <iostream>
#include <vector>
#include <thread>
#include <mutex>
#include <condition_variable>
using namespace std;

mutex mu;
int count = 1;
bool isDone = true;
condition_variable cv;

void Print10(int tid)
{
    unique_lock<std::mutex> lock(mu);
    // Wait until condition --> wait till count/10 == tid
    while (count / 10 != tid)
        cv.wait(lock);
    // Core logic
    for (int i = 0; i < 10; i++)
        cout << "tid=" << tid << " count=" << count++ << "\n";
    // Wake all waiters so the thread whose turn it is can proceed,
    // ensuring serialization. notify_one could wake the wrong thread,
    // which would re-wait and leave everyone blocked forever.
    cv.notify_all();
}

int main()
{
    std::vector<std::thread> vec;
    for (int i = 0; i < 10; i++)
    {
        vec.push_back(std::thread(Print10, i));
    }
    for (auto& th : vec)
    {
        if (th.joinable())
            th.join();
    }
    return 0;
}
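
Equivalently, the two-argument predicate overload of wait folds the while loop into the call. A minimal sketch under the same globals (not the only way to write it):

void Print10(int tid)
{
    std::unique_lock<std::mutex> lock(mu);
    // Each thread sleeps until it is its turn (count/10 == tid).
    cv.wait(lock, [tid]() { return count / 10 == tid; });
    for (int i = 0; i < 10; i++)
        std::cout << "tid=" << tid << " count=" << count++ << "\n";
    // Wake every waiter; only the thread whose predicate now holds proceeds.
    cv.notify_all();
}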
I want to know when dispatch has finished with some specific work:
service.dispatch(&some_work);
I want to know this because I need to restart some_work once it has finished.
struct work
{
    std::shared_ptr<asio::io_service> io_service;
    bool ready;
    std::mutex m;

    template <class F>
    void do_some_work(F&& f)
    {
        if (io_service && ready) {
            m.lock();
            ready = false;
            m.unlock();
            io_service->dispatch([&f, this]() {
                f();
                m.lock();
                ready = true;
                m.unlock();
            });
        }
    }

    work(std::shared_ptr<asio::io_service> io_service)
        : io_service(io_service)
        , ready(true)
    {
    }
};

int main()
{
    auto service = std::make_shared<asio::io_service>();
    auto w = std::make_shared<asio::io_service::work>(*service);
    std::thread t1([&] { service->run(); });

    work some_work{ service };

    for (;;) {
        some_work.do_some_work([] {
            std::cout << "Start long draw on thread: " << std::this_thread::get_id()
                      << std::endl;
            std::this_thread::sleep_for(std::chrono::seconds(5));
            std::cout << "End long draw on thread: " << std::this_thread::get_id()
                      << std::endl;
        });
    }

    w.reset();
    t1.join();
}
There are some problems with the code; for example, if some_work goes out of scope, the running task would still write to ready.
I am wondering if something like this already exists in Asio?
For lifetime issues, the common idiom is indeed to use shared pointers, examples:
Ensure no new wait is accepted by boost::deadline_timer unless previous wait is expired
Boost::Asio Async write failed
Other than that, the completion handler is already that event. So you would do:
void my_async_loop() {
    auto This = shared_from_this();
    socket_.async_read_some(buffer(m_buffer, ...),
        [=, This](error_code ec, size_t transferred) {
            if (!ec) {
                // do something
                my_async_loop();
            }
        }
    );
}
This re-schedules another async operation once the previous one has completed.
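
If the work is not tied to a socket, the same completion-as-event idea works with plain posted handlers. A minimal sketch (illustrative names, not from the question's code), assuming Boost.Asio's io_service::post:

#include <boost/asio.hpp>
#include <iostream>
#include <memory>
#include <thread>

// A task that reschedules itself through the io_service once the previous
// run has completed; shared_from_this() keeps the object alive for the
// lifetime of each posted handler.
struct repeating_work : std::enable_shared_from_this<repeating_work>
{
    boost::asio::io_service& io_;
    explicit repeating_work(boost::asio::io_service& io) : io_(io) {}

    void start()
    {
        auto self = shared_from_this();
        io_.post([self]() {
            std::cout << "long draw on thread " << std::this_thread::get_id() << "\n";
            self->start(); // restart only after this run has finished
        });
    }
};

// Usage: auto w = std::make_shared<repeating_work>(service); w->start();

Because the next run is only scheduled from inside the completed handler, the task can never overlap itself on a single-threaded io_service.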
On the subject of threadsafety, see Why do I need strand per connection when using boost::asio?
I have to implement a valve open function (for a specified duration).
I am using boost::asio::deadline_timer.
My class member function to open the valve is:
bool Valves::valveOpen(ValveType type)
{
    switch (type)
    {
    case eVentValve:
        tblMap_.digitalInput[eVentValveK1].setBit();
        if (tblMap_.digitalOutput[eOutK1VentValve].getBit())
        {
            isVentOpen_ = true;
        }
        return isVentOpen_;
    case eVacuumPumpValve:
        ....
        ....
}
The class member function to close the valve is:
bool Valves::valveClose(ValveType type)
{
    switch (type)
    {
    case eVentValve:
        tblMap_.digitalInput[eVentValveK1].clearBit();
        if (!tblMap_.digitalOutput[eOutK1VentValve].getBit())
        {
            isVentOpen_ = false;
        }
        return !isVentOpen_;
    case eVacuumPumpValve:
        ....
        ....
}
I am trying to achieve the timed action as below:
bool Valves::valveTimedOpen(ValveType type, int sec)
{
    boost::asio::io_service io;
    switch (type)
    {
    case eVentValve:
    {
        std::bind(&Valves::valveOpen, this, type); //Here
        boost::asio::deadline_timer t(io, boost::posix_time::seconds(sec));
        t.async_wait(std::bind(&Valves::valveClose, this, type));
        boost::thread th(boost::bind(&boost::asio::io_service::run, &io));
        return true;
    }
    case eVacuumPumpValve:
        .....
        .....
}
The code reaches the line marked Here, i.e.
std::bind(&Valves::valveOpen, this, type);, but it never enters the bool Valves::valveOpen(ValveType type) function.
Can someone let me know the issue with this code?
The variables io and t go out of scope as soon as valveTimedOpen exits. You need to rethink the way you interact with the Boost.Asio components: e.g. the io_service could be a member of your class, and the timer could be dynamically allocated and deleted in its completion handler. Note also that std::bind at the line marked Here only creates a callable object; nothing ever invokes it, which is why valveOpen is never entered.
Also, keep in mind that if you plan on re-using an io_service object, you also need to reset it before calling run again.
auto fn = std::bind(&Test::Open, shared_from_this(), std::placeholders::_1);
fn(type);
calls Open() correctly: the bind expression has to be invoked explicitly.
I also had to make the io_service and boost::asio::deadline_timer class members, as suggested by @Ralf.
Working Code:
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/thread.hpp>
#include <boost/asio.hpp>
#include <cstring>
#include <functional>
#include <iostream>
#include <memory>

class Test : public std::enable_shared_from_this<Test>
{
public:
    Test() : io(), timer(io) {}
    void Open(int num);
    void Close(int num);
    void TimedOpen(int num, int dur);
    void Run();
private:
    boost::asio::io_service io;
    boost::asio::deadline_timer timer;
};

void Test::Open(int num)
{
    std::cout << "Open for Number : " << num << std::endl;
}

void Test::Close(int num)
{
    std::cout << "Close for Number : " << num << std::endl;
}

void Test::TimedOpen(int num, int dur)
{
    io.reset();
    auto fn = std::bind(&Test::Open, shared_from_this(), std::placeholders::_1);
    fn(num);
    timer.expires_from_now(boost::posix_time::seconds(dur));
    timer.async_wait(std::bind(&Test::Close, shared_from_this(), num));
    Run();
    std::cout << "Function Exiting" << std::endl;
}

void Test::Run()
{
    boost::thread th(boost::bind(&boost::asio::io_service::run, &io));
}

int main()
{
    auto t = std::make_shared<Test>();
    t->TimedOpen(5, 5);
    char line[128];
    while (std::cin.getline(line, 128))
    {
        if (strcmp(line, "\n")) break;
    }
    return 0;
}
Hope you all had nice holidays.
This question is related to my earlier question: std::condition_variable - Wait for several threads to notify observer
I'm trying to implement a thread pool based on my own mutable thread implementation below:
class MutableThread
{
private:
    std::thread m_Thread;
    std::function<void()> m_Function;
    bool m_bRun;
    std::mutex m_LockMutex;
    std::mutex m_WaitMutex;
    std::condition_variable m_CV;
    IAsyncTemplateObserver<MutableThread>* m_Observer = nullptr;

private:
    void Execute()
    {
        while (m_bRun)
        {
            {
                std::unique_lock<std::mutex> wait(m_WaitMutex);
                m_CV.wait(wait);
            }
            std::lock_guard<std::mutex> lock(m_LockMutex);
            if (m_bRun && m_Function)
            {
                m_Function();
                m_Function = std::function<void()>();
                if (m_Observer != nullptr)
                {
                    m_Observer->Signal(this);
                }
            }
        }
    }

public:
    HDEBUGNAME(TEXT("MutableThread"));

    MutableThread(const MutableThread& thread) = delete;

    MutableThread(IAsyncTemplateObserver<MutableThread>* _Observer)
    {
        m_Observer = _Observer;
        m_bRun = true;
        m_Thread = std::thread(&MutableThread::Execute, this);
    }

    MutableThread()
    {
        m_Observer = nullptr;
        m_bRun = true;
        m_Thread = std::thread(&MutableThread::Execute, this);
    }

    ~MutableThread()
    {
        m_bRun = false;
        m_CV.notify_one();
        try
        {
            if (m_Thread.joinable())
                m_Thread.join();
        }
        catch (std::system_error& ex)
        {
            HWARNINGD(TEXT("%s"), ex.what());
        }
    }

    inline bool Start(const std::function<void()>& f)
    {
        std::lock_guard<std::mutex> lock(m_LockMutex);
        if (m_Function != nullptr)
            return false;
        m_Function = f;
        m_CV.notify_one();
        return true;
    }
};
The IAsyncTemplateObserver simply derives from my IAsyncObserver class posted in the earlier question and adds a virtual function:
template <typename T>
class IAsyncTemplateObserver : public IAsyncObserver
{
public:
    virtual void Signal(T* _Obj) = 0;
};
What I want to do is signal the ThreadPool that the function has finished execution, so that a new task can be assigned to the mutable thread:
class MutableThread;

struct Task
{
    std::function<void()> m_Function;
    uint32_t m_uPriority;

    Task(const std::function<void()>& _Function, uint32_t _uPriority)
    {
        m_Function = _Function;
        m_uPriority = _uPriority;
    }
};

inline bool operator<(const Task& lhs, const Task& rhs)
{
    return lhs.m_uPriority < rhs.m_uPriority;
}
class ThreadPool : public IAsyncTemplateObserver<MutableThread>
{
private:
    std::list<MutableThread*> m_FreeThreads;
    std::list<MutableThread*> m_UsedThreads;
    std::set<Task> m_Tasks;
    std::mutex m_LockMutex;

public:
    ThreadPool()
    {
        //Grow(std::thread::hardware_concurrency() - 1);
    }

    ThreadPool(size_t n)
    {
        Grow(n);
    }

    ~ThreadPool()
    {
        //std::lock_guard<std::mutex> lock(m_Mutex);
        for (MutableThread* pUsed : m_UsedThreads)
        {
            HSAFE_DELETE(pUsed);
        }
        for (MutableThread* pFree : m_FreeThreads)
        {
            HSAFE_DELETE(pFree);
        }
    }

    inline void Grow(size_t n)
    {
        std::lock_guard<std::mutex> lock(m_LockMutex);
        for (size_t i = 0; i < n; i++)
        {
            m_FreeThreads.push_back(new MutableThread(this));
        }
    }

    inline void AddTask(const Task& _Task)
    {
        {
            std::lock_guard<std::mutex> lock(m_LockMutex);
            m_Tasks.insert(_Task);
        }
        AssignThreads();
    }

    virtual void Signal(MutableThread* _pThread)
    {
        {
            std::lock_guard<std::mutex> lock(m_LockMutex);
            m_UsedThreads.remove(_pThread);
            m_FreeThreads.push_back(_pThread);
        }
        AssignThreads();
        NotifyOne();
    }

    inline void WaitForAllThreads()
    {
        bool bWait = true;
        do
        {
            {
                //check if we have to wait
                std::lock_guard<std::mutex> lock(m_LockMutex);
                bWait = !m_UsedThreads.empty() || !m_Tasks.empty();
            }
            if (bWait)
            {
                std::unique_lock<std::mutex> wait(m_ObserverMutex);
                m_ObserverCV.wait(wait);
            }
        } while (bWait);
    }

private:
    inline void AssignThreads()
    {
        std::lock_guard<std::mutex> lock(m_LockMutex);
        if (m_FreeThreads.empty() || m_Tasks.empty())
            return;
        //Get free thread
        MutableThread* pThread = m_FreeThreads.back();
        m_FreeThreads.pop_back();
        //park thread in used list
        m_UsedThreads.push_back(pThread);
        //get task with highest priority
        std::set<Task>::iterator it = m_Tasks.end();
        --it; //last entry has highest priority
        //start the task
        pThread->Start(it->m_Function);
        //remove the task from the list
        m_Tasks.erase(it);
    }
};
The AddTask function is called several times by the same thread, but when a mutable thread signals the thread pool (via m_Observer->Signal(this)), the application freezes at the lock_guard in the AssignThreads() function. The strange thing is that, unlike a normal deadlock, all callstack views in Visual Studio go empty as soon as I try to step over the line with the lock_guard.
Can anyone explain this behaviour? Is there a major design flaw, or just a simple mix-up?
Thanks for your help!
Greetings,
Fabian
Edit: I've added a minimal visual studio solution that reproduces the problem: ThreadPoolTest.zip
Thanks to a friend, I was able to fix the problem by moving the call m_Observer->Signal(this) outside of the lock_guard scope in the MutableThread::Execute() function. Secondly, I removed the lock_guard in the AssignThreads() function and moved its call into the scope of the lock_guard in the Signal()/AddTask() functions. Not really related, but still a flaw: all condition_variable wait() calls are now inside a while (m_bNotified == false) loop.
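
For reference, a minimal sketch of the repaired Execute() as described above (member names follow the question's code; m_bNotified is the new bool flag mentioned above, assumed to be set by Start() under m_WaitMutex before notifying):

void MutableThread::Execute()
{
    while (m_bRun)
    {
        {
            std::unique_lock<std::mutex> wait(m_WaitMutex);
            while (m_bNotified == false && m_bRun) // guards against spurious wakeups
                m_CV.wait(wait);
            m_bNotified = false;
        }
        bool bSignal = false;
        {
            std::lock_guard<std::mutex> lock(m_LockMutex);
            if (m_bRun && m_Function)
            {
                m_Function();
                m_Function = std::function<void()>();
                bSignal = true;
            }
        }
        // Signal() is now called outside the lock_guard scope, so the pool
        // can call back into Start() without deadlocking on m_LockMutex.
        if (bSignal && m_Observer != nullptr)
            m_Observer->Signal(this);
    }
}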
Question
What can I do to get a locking mechanism that provides minimal and stable latency while guaranteeing that a thread cannot reacquire a resource before another thread has acquired and released it?
The desirability of answers to this question is ranked as follows:
Some combination of built-in C++11 features that work in MinGW on Windows 7 (note that the <thread> and <mutex> libraries do not work on a Windows platform)
Some combination of Windows API features
A modification to the FairLock listed below, my own attempt at implementing such a mechanism
Some features provided by a free, open-source library that does not require a .configure/make/make install process, (getting that to work in MSYS is more of an adventure than I care for)
Background
I am writing an application which is effectively a multi-stage producer/consumer. One thread generates input consumed by another thread, which produces output consumed by yet another thread. The application uses pairs of buffers so that, after an initial delay, all threads can work nearly simultaneously.
Since I am writing a Windows 7 application, I had been using CriticalSections to guard the buffers. The problem with using CriticalSections (or, so far as I can tell, any other Windows or built-in C++11 synchronization object) is that they make no provision to prevent a thread that just released a lock from reacquiring it before another thread has done so first. Because of this, many of my test drivers for the middle thread (the Encoder) never gave the Encoder a chance to acquire the test input buffers and completed without having tested them. The end result was a ridiculous process of trying to determine an artificial wait time that stochastically worked for my machine.
Since the structure of my application requires that each stage waits for the other stage to have acquired, finished using, and released the necessary buffers for getting to use the buffer again, I need, for lack of a better term, a fair locking mechanism. I took a crack at writing one (the source code is provided below). In testing, this FairLock allows my test driver to run my Encoder at the same speeds that I was able to achieve using the CriticalSection maybe 60% of the runs. The other 40% of the runs take anywhere between 10 to 100 ms longer, which is not acceptable for my application.
FairLock
// FairLock.hpp
#ifndef FAIRLOCK_HPP
#define FAIRLOCK_HPP
#include <atomic>
using namespace std;

class FairLock {
private:
    atomic_bool owned {false};
    atomic<DWORD> lastOwner {0};
public:
    FairLock(bool owned);
    bool inline hasLock() const;
    bool tryLock();
    void seizeLock();
    void tryRelease();
    void waitForLock();
};
#endif
// FairLock.cpp
#include <windows.h>
#include "FairLock.hpp"

#define ID GetCurrentThreadId()

FairLock::FairLock(bool owned) {
    if (owned) {
        this->owned = true;
        this->lastOwner = ID;
    } else {
        this->owned = false;
        this->lastOwner = 0;
    }
}

bool inline FairLock::hasLock() const {
    return owned && lastOwner == ID;
}

bool FairLock::tryLock() {
    bool success = false;
    DWORD id = ID;
    if (owned) {
        success = lastOwner == id;
    } else if (
        lastOwner != id &&
        owned.compare_exchange_strong(success, true)
    ) {
        lastOwner = id;
        success = true;
    } else {
        success = false;
    }
    return success;
}

void FairLock::seizeLock() {
    bool success = false;
    DWORD id = ID;
    if (!(owned && lastOwner == id)) {
        while (!owned.compare_exchange_strong(success, true)) {
            success = false;
        }
        lastOwner = id;
    }
}

void FairLock::tryRelease() {
    if (hasLock()) {
        owned = false;
    }
}

void FairLock::waitForLock() {
    bool success = false;
    DWORD id = ID;
    if (!(owned && lastOwner == id)) {
        while (lastOwner == id); // spin until another thread takes ownership
        while (!owned.compare_exchange_strong(success, true)) {
            success = false;
        }
        lastOwner = id;
    }
}
EDIT
DO NOT USE THIS FairLock CLASS; IT DOES NOT GUARANTEE MUTUAL EXCLUSION!
I reviewed the above code, comparing it against the text of The C++ Programming Language, 4th Edition (which I had not read carefully) and against CouchDeveloper's recommended synchronous queue. I realized that there are several sequences in which the thread that just released the FairLock can be tricked into thinking it still owns it. All it takes is interleaving the instructions as follows:
New owner: set owned to true
Old owner: is owned true? yes
Old owner: am I the last owner? yes
New owner: set me as the last owner
At this point, the old and new owners both enter their critical sections.
I am considering whether this problem has a solution and whether it is worth attempting to solve this at all. In the meantime, don't use this unless you see a fix.
I would implement this in C++11 using a condition_variable-per-thread setup so that I could choose exactly which thread to wake up when (Live demo at Coliru):
class FairMutex {
private:
    class waitnode {
        std::condition_variable cv_;
        waitnode* next_ = nullptr;
        FairMutex& fmtx_;
    public:
        waitnode(FairMutex& fmtx) : fmtx_(fmtx) {
            *fmtx.tail_ = this;
            fmtx.tail_ = &next_;
        }
        ~waitnode() {
            for (waitnode** p = &fmtx_.waiters_; *p; p = &(*p)->next_) {
                if (*p == this) {
                    *p = next_;
                    if (!next_) {
                        fmtx_.tail_ = &fmtx_.waiters_;
                    }
                    break;
                }
            }
        }
        void wait(std::unique_lock<std::mutex>& lk) {
            while (fmtx_.held_ || fmtx_.waiters_ != this) {
                cv_.wait(lk);
            }
        }
        void notify() {
            cv_.notify_one();
        }
    };

    waitnode* waiters_ = nullptr;
    waitnode** tail_ = &waiters_;
    std::mutex mtx_;
    bool held_ = false;

public:
    void lock() {
        auto lk = std::unique_lock<std::mutex>{mtx_};
        if (held_ || waiters_) {
            waitnode{*this}.wait(lk);
        }
        held_ = true;
    }
    bool try_lock() {
        if (mtx_.try_lock()) {
            std::lock_guard<std::mutex> lk(mtx_, std::adopt_lock);
            if (!held_ && !waiters_) {
                held_ = true;
                return true;
            }
        }
        return false;
    }
    void unlock() {
        std::lock_guard<std::mutex> lk(mtx_);
        held_ = false;
        if (waiters_ != nullptr) {
            waiters_->notify();
        }
    }
};
FairMutex models the Lockable concept so it can be used like any other standard library mutex type. Put simply, it achieves fairness by inserting waiters into a list in arrival order, and passing the mutex to the first waiter in the list when unlocking.
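
A quick usage sketch (the worker function is hypothetical): since FairMutex satisfies the Lockable concept, the standard RAII wrappers work unchanged, and contending threads are served strictly in arrival order.

#include <thread>

FairMutex fmtx;
int counter = 0;

void worker()
{
    for (int i = 0; i < 3; ++i) {
        std::lock_guard<FairMutex> lk(fmtx);
        ++counter; // no thread can reacquire before a waiting thread gets its turn
    }
}

// std::thread a(worker), b(worker); a.join(); b.join();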
If it's useful:
This demonstrates*) an implementation of a "synchronous queue" using semaphores as synchronization primitives.
Note: the actual implementation uses semaphores implemented with GCD (Grand Central Dispatch):
using gcd::mutex;
using gcd::semaphore;

// A blocking queue in which each put must wait for a get, and vice
// versa. A synchronous queue does not have any internal capacity,
// not even a capacity of one.
template <typename T>
class simple_synchronous_queue {
public:
    typedef T value_type;

    enum result_type {
        OK = 0,
        TIMEOUT_NOT_DELIVERED = -1,
        TIMEOUT_NOT_PICKED = -2,
        TIMEOUT_NOTHING_OFFERED = -3
    };

    simple_synchronous_queue()
        : sync_(0), send_(1), recv_(0)
    {
    }

    void put(const T& v) {
        send_.wait();
        new (address()) T(v);
        recv_.signal();
        sync_.wait();
    }

    result_type put(const T& v, double timeout) {
        if (send_.wait(timeout)) {
            new (address()) T(v);
            recv_.signal();
            if (sync_.wait(timeout)) {
                return OK;
            }
            else {
                return TIMEOUT_NOT_PICKED;
            }
        }
        else {
            return TIMEOUT_NOT_DELIVERED;
        }
    }

    T get() {
        recv_.wait();
        T result = *address();
        address()->~T();
        sync_.signal();
        send_.signal();
        return result;
    }

    std::pair<result_type, T> get(double timeout) {
        if (recv_.wait(timeout)) {
            std::pair<result_type, T> result =
                std::pair<result_type, T>(OK, *address());
            address()->~T();
            sync_.signal();
            send_.signal();
            return result;
        }
        else {
            return std::pair<result_type, T>(TIMEOUT_NOTHING_OFFERED, T());
        }
    }

private:
    using storage_t = typename std::aligned_storage<sizeof(T), std::alignment_of<T>::value>::type;

    T* address() {
        return static_cast<T*>(static_cast<void*>(&storage_));
    }

    storage_t storage_;
    semaphore sync_;
    semaphore send_;
    semaphore recv_;
};
*) "demonstrates": be careful about potential issues; it could be improved, etc. ... ;)
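
A hypothetical usage sketch (assuming the gcd-based semaphores above behave like counting semaphores): each put() blocks until a matching get() collects the value, and vice versa, so producer and consumer rendezvous on every element.

#include <thread>

simple_synchronous_queue<int> q;
std::thread producer([&q]() { q.put(42); });        // blocks until the value is taken
std::thread consumer([&q]() { int v = q.get(); });  // receives 42
producer.join();
consumer.join();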
I accepted CouchDeveloper's answer since it pointed me down the right path. I wrote a Windows-specific C++11 implementation of a synchronous queue, and added this answer so that others could consider/use it if they so choose.
// SynchronousQueue.hpp
#ifndef SYNCHRONOUSQUEUE_HPP
#define SYNCHRONOUSQUEUE_HPP
#include <atomic>
#include <exception>
#include <windows.h>
using namespace std;

class CouldNotEnterException: public exception {};
class NoPairedCallException: public exception {};

template <typename T>
class SynchronousQueue {
private:
    atomic_bool valueReady {false};
    CRITICAL_SECTION getCriticalSection;
    CRITICAL_SECTION putCriticalSection;
    DWORD wait {0};
    HANDLE getSemaphore;
    HANDLE putSemaphore;
    const T* address {nullptr};

public:
    SynchronousQueue(DWORD waitMS): wait {waitMS}, address {nullptr} {
        InitializeCriticalSection(&getCriticalSection);
        InitializeCriticalSection(&putCriticalSection);
        getSemaphore = CreateSemaphore(nullptr, 0, 1, nullptr);
        putSemaphore = CreateSemaphore(nullptr, 0, 1, nullptr);
    }

    ~SynchronousQueue() {
        EnterCriticalSection(&getCriticalSection);
        EnterCriticalSection(&putCriticalSection);
        CloseHandle(getSemaphore);
        CloseHandle(putSemaphore);
        DeleteCriticalSection(&putCriticalSection);
        DeleteCriticalSection(&getCriticalSection);
    }

    void put(const T& value) {
        if (!TryEnterCriticalSection(&putCriticalSection)) {
            throw CouldNotEnterException();
        }
        ReleaseSemaphore(putSemaphore, (LONG) 1, nullptr);
        if (WaitForSingleObject(getSemaphore, wait) != WAIT_OBJECT_0) {
            if (WaitForSingleObject(putSemaphore, 0) == WAIT_OBJECT_0) {
                LeaveCriticalSection(&putCriticalSection);
                throw NoPairedCallException();
            } else {
                WaitForSingleObject(getSemaphore, 0);
            }
        }
        address = &value;
        valueReady = true;
        while (valueReady); // spin until the consumer has taken the value
        LeaveCriticalSection(&putCriticalSection);
    }

    T get() {
        if (!TryEnterCriticalSection(&getCriticalSection)) {
            throw CouldNotEnterException();
        }
        ReleaseSemaphore(getSemaphore, (LONG) 1, nullptr);
        if (WaitForSingleObject(putSemaphore, wait) != WAIT_OBJECT_0) {
            if (WaitForSingleObject(getSemaphore, 0) == WAIT_OBJECT_0) {
                LeaveCriticalSection(&getCriticalSection);
                throw NoPairedCallException();
            } else {
                WaitForSingleObject(putSemaphore, 0);
            }
        }
        while (!valueReady); // spin until the producer has published the value
        T toReturn = *address;
        valueReady = false;
        LeaveCriticalSection(&getCriticalSection);
        return toReturn;
    }
};
#endif
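
A hypothetical usage sketch of the queue above: one producer and one consumer pair up on a single value; an unpaired call throws NoPairedCallException after the configured wait.

#include <thread>

SynchronousQueue<int> queue(5000); // pairing timeout in milliseconds
std::thread producer([&queue]() { queue.put(42); });
std::thread consumer([&queue]() { int v = queue.get(); /* v == 42 */ });
producer.join();
consumer.join();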