How to display two images from a binocular camera synchronously with OpenCV - c++11

I have a problem where one of the images from a binocular camera is delayed relative to the other. I use the ROS package usb_cam to get the images with their time stamps and then use the OpenCV function imshow() to display them.
Environment: Ubuntu 16.04 in VMware Workstation 12 (on Windows 10), ROS Kinetic, OpenCV 3.3.0. The binocular camera supports the MJPEG format at 640*480 and 30 fps.
Well, I am a beginner in visual SLAM and am now trying to show the images of a binocular camera in real time. So far I have used the usb_cam package to get the image data and shown it in rviz and rqt, but I run into a time-synchronization problem with OpenCV's imshow(). Specifically, I want to use usb_cam to get the data and do some image processing with OpenCV, and as a first step I am trying to display the images with OpenCV functions.
In the code below I have commented out some lines. The code follows an open-source project on GitHub, a C++ SLAM project that fuses INS, GPS and a binocular camera. I mainly followed the data capture in its rosNodeTest.cpp. It uses multi-threading, which I am not familiar with.
Please visit https://github.com/HKUST-Aerial-Robotics/VINS-Fusion
Here is my code. It is a ROS package and I am sure it builds and runs, so I don't paste the CMakeLists.txt and package.xml.
#include <ros/ros.h>
#include <sensor_msgs/Image.h>
#include <cv_bridge/cv_bridge.h>
#include <opencv2/opencv.hpp>
#include <queue>
#include <thread>
#include <mutex>
#include <iostream>
std::queue<sensor_msgs::ImageConstPtr> img0_buf;
std::queue<sensor_msgs::ImageConstPtr> img1_buf;
std::mutex m_buf;
//Test for rqt, 0 for cv, else for rviz
int flag = 0;
ros::Publisher pubImg0;
ros::Publisher pubImg1;
void img0_callback(const sensor_msgs::ImageConstPtr &img0)
{
    m_buf.lock();
    //For rqt
    if(flag != 0)
    {
        sensor_msgs::Image img;
        img.header = img0->header;
        img.height = img0->height;
        img.width = img0->width;
        img.is_bigendian = img0->is_bigendian;
        img.step = img0->step;
        img.data = img0->data;
        img.encoding = img0->encoding;
        pubImg0.publish(img);
        // std::cout<<"0."<<img.header<<std::endl;
    }
    else
    {
        img0_buf.push(img0);
    }
    m_buf.unlock();
}
void img1_callback(const sensor_msgs::ImageConstPtr &img1)
{
    m_buf.lock();
    if(flag != 0)
    {
        sensor_msgs::Image img;
        img.header = img1->header;
        img.height = img1->height;
        img.width = img1->width;
        img.is_bigendian = img1->is_bigendian;
        img.step = img1->step;
        img.data = img1->data;
        img.encoding = img1->encoding;
        pubImg1.publish(img);
        // std::cout<<"1."<<img.header<<std::endl;
    }
    else
    {
        img1_buf.push(img1);
    }
    m_buf.unlock();
}
//Use the cv_bridge of ros to change the image data format from msgs to cv
cv::Mat msg2cv(const sensor_msgs::ImageConstPtr &img_msg)
{
    cv_bridge::CvImageConstPtr ptr;
    sensor_msgs::Image img_tmp;
    img_tmp.header = img_msg->header;
    img_tmp.height = img_msg->height;
    img_tmp.width = img_msg->width;
    img_tmp.is_bigendian = img_msg->is_bigendian;
    img_tmp.step = img_msg->step;
    img_tmp.data = img_msg->data;
    img_tmp.encoding = img_msg->encoding;
    ptr = cv_bridge::toCvCopy(img_tmp, sensor_msgs::image_encodings::BGR8);
    cv::Mat img = ptr->image.clone();
    return img;
}
//With reference of VINS rosNodeTest.cpp
void display()
{
    while(1)
    {
        cv::Mat image0, image1;
        // double t1,t2;
        m_buf.lock();
        if(!img0_buf.empty() && !img1_buf.empty())
        {
            ROS_INFO("Two cameras work");
            image0 = msg2cv(img0_buf.front());
            // ROS_INFO("img0 %.9lf", img0_buf.front()->header.stamp.toSec());
            // t1 = img0_buf.front()->header.stamp.toSec();
            img0_buf.pop();
            imshow("camera1", image0);
            image1 = msg2cv(img1_buf.front());
            // ROS_INFO("img1 %.9lf", img1_buf.front()->header.stamp.toSec());
            // t2 = img1_buf.front()->header.stamp.toSec();
            img1_buf.pop();
            cv::imshow("camera2", image1);
            cv::waitKey(1);
        }
        m_buf.unlock();
        // //display with cv
        // if(!image0.empty())
        // {
        //     imshow("camera1", image0);
        //     // cv::waitKey(1);
        // }
        // // else
        // // { std::cout<<"image0 is empty!"<<std::endl;}
        // if(!image1.empty())
        // {
        //     imshow("camera2", image1);
        //     // cv::waitKey(1);
        // }
        // else
        // {std::cout<<"image1 is empty!"<<std::endl;}
        // cv::waitKey();
        //std::chrono::milliseconds dura(2);
        //std::this_thread::sleep_for(dura);
    }
}
int main(int argc, char** argv)
{
    //Initialize a ros node
    ros::init(argc, argv, "demo");
    ros::NodeHandle n;
    ros::console::set_logger_level(ROSCONSOLE_DEFAULT_NAME, ros::console::levels::Info);
    //Subscribe to the binocular camera raw data
    ros::Subscriber sub_img0 = n.subscribe("/camera1/usb_cam1/image_raw", 2000, img0_callback);
    ros::Subscriber sub_img1 = n.subscribe("/camera2/usb_cam2/image_raw", 2000, img1_callback);
    ROS_INFO("Wait for camera data.");
    if(flag != 0) //for rviz
    {
        pubImg0 = n.advertise<sensor_msgs::Image>("/Img0", 100);
        pubImg1 = n.advertise<sensor_msgs::Image>("/Img1", 100);
        std::cout<<"for rviz"<<std::endl;
    }
    else //for cv
    {
        //Synchronization and display
        std::cout<<"for cv"<<std::endl;
        std::thread sync_thread{display};
        sync_thread.detach();
        //display();
    }
    ros::spin();
    return 1;
}
The result is that one camera is still slower than the other. Can someone say something about this problem? I am not sure whether it comes from the OpenCV display or from the multi-thread code. Thank you!
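One approach worth sketching here (an untested sketch, not a verified fix): let ROS pair the two streams by time stamp using message_filters with an approximate-time policy, and keep all imshow()/waitKey() calls on a single thread outside any locked section. The topic names below are taken from the code above; everything else is an assumption about the setup:
#include <ros/ros.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/image_encodings.h>
#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <cv_bridge/cv_bridge.h>
#include <opencv2/opencv.hpp>
#include <boost/bind.hpp>
typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, sensor_msgs::Image> SyncPolicy;
//Called once per matched pair, so both frames are displayed together
void stereo_callback(const sensor_msgs::ImageConstPtr &img0, const sensor_msgs::ImageConstPtr &img1)
{
    cv::Mat image0 = cv_bridge::toCvCopy(img0, sensor_msgs::image_encodings::BGR8)->image;
    cv::Mat image1 = cv_bridge::toCvCopy(img1, sensor_msgs::image_encodings::BGR8)->image;
    cv::imshow("camera1", image0);
    cv::imshow("camera2", image1);
    cv::waitKey(1);
}
int main(int argc, char** argv)
{
    ros::init(argc, argv, "stereo_view");
    ros::NodeHandle n;
    message_filters::Subscriber<sensor_msgs::Image> sub_img0(n, "/camera1/usb_cam1/image_raw", 100);
    message_filters::Subscriber<sensor_msgs::Image> sub_img1(n, "/camera2/usb_cam2/image_raw", 100);
    //Pair messages whose stamps are close; exact-time sync rarely fires with two independent USB cameras
    message_filters::Synchronizer<SyncPolicy> sync(SyncPolicy(10), sub_img0, sub_img1);
    sync.registerCallback(boost::bind(&stereo_callback, _1, _2));
    ros::spin();
    return 0;
}
This removes the hand-rolled queues and the mutex entirely, which also avoids calling imshow() while holding a lock.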

Related

How to get pixel data out of an IDXGISurface created with GPU access only?

In broad strokes, what I'm trying to accomplish is capture (part of) the screen and transform the capture into a digital image format. The following steps outline what I believe to be the solution:
1. Set up a Direct3D11CaptureFramePool and subscribe to its FrameArrived event
2. Gain access to the pixel data in the FrameArrived event delegate
3. Pass image data into the Windows Imaging Component to do the encoding
My issue is with step 2: While I can get the captured frame, gaining CPU read access to the surface fails. This is my FrameArrived event delegate implementation (full repro below):
void on_frame_arrived(Direct3D11CaptureFramePool const& frame_pool, winrt::Windows::Foundation::IInspectable const&)
{
    if (auto const frame = frame_pool.TryGetNextFrame())
    {
        if (auto const surface = frame.Surface())
        {
            if (auto const interop = surface.as<::Windows::Graphics::DirectX::Direct3D11::IDirect3DDxgiInterfaceAccess>())
            {
                com_ptr<IDXGISurface> dxgi_surface { nullptr };
                check_hresult(interop->GetInterface(IID_PPV_ARGS(&dxgi_surface)));
                DXGI_MAPPED_RECT info = {};
                // Fails with `E_INVALIDARG`
                check_hresult(dxgi_surface->Map(&info, DXGI_MAP_READ));
            }
        }
    }
}
The Map() call is failing with E_INVALIDARG, and the debug layer offers additional, helpful error diagnostics:
DXGI ERROR: IDXGISurface::Map: This object was not created with CPUAccess flags that allow CPU access. [ MISCELLANEOUS ERROR #42: ]
So, now that I know what's wrong, how do I solve this? Specifically, how do I pull the pixel data out of a surface created with GPU access only?
Following is a full repro. It was originally created using the "Windows Console Application (C++/WinRT)" project template. The only change applied is "Precompiled Header: Use (/Yu)" → "Precompiled Header: Not Using Precompiled Headers", to keep this a single file.
It creates a command line application that expects a window handle as its only argument, in decimal, hex, or octal.
#include <winrt/Windows.Foundation.h>
#include <winrt/Windows.Graphics.Capture.h>
#include <winrt/Windows.Graphics.DirectX.Direct3D11.h>
#include <winrt/Windows.Graphics.DirectX.h>
#include <Windows.Graphics.Capture.Interop.h>
#include <windows.graphics.capture.h>
#include <windows.graphics.directx.direct3d11.interop.h>
#include <Windows.h>
#include <d3d11.h>
#include <dxgi.h>
#include <cstdint>
#include <stdio.h>
#include <string>
using namespace winrt;
using namespace winrt::Windows::Graphics::Capture;
using namespace winrt::Windows::Graphics::DirectX;
using namespace winrt::Windows::Graphics::DirectX::Direct3D11;
void on_frame_arrived(Direct3D11CaptureFramePool const& frame_pool, winrt::Windows::Foundation::IInspectable const&)
{
    wprintf(L"Frame arrived.\n");
    if (auto const frame = frame_pool.TryGetNextFrame())
    {
        if (auto const surface = frame.Surface())
        {
            if (auto const interop = surface.as<::Windows::Graphics::DirectX::Direct3D11::IDirect3DDxgiInterfaceAccess>())
            {
                com_ptr<IDXGISurface> dxgi_surface { nullptr };
                check_hresult(interop->GetInterface(IID_PPV_ARGS(&dxgi_surface)));
                DXGI_MAPPED_RECT info = {};
                // This is failing with `E_INVALIDARG`
                check_hresult(dxgi_surface->Map(&info, DXGI_MAP_READ));
            }
        }
    }
}
int wmain(int argc, wchar_t const* argv[])
{
    init_apartment(apartment_type::single_threaded);
    // Validate input
    if (argc != 2)
    {
        wprintf(L"Usage: %s <HWND>\n", argv[0]);
        return 1;
    }
    auto const target = reinterpret_cast<HWND>(static_cast<intptr_t>(std::stoi(argv[1], nullptr, 0)));
    // Get `GraphicsCaptureItem` for `HWND`
    auto interop = get_activation_factory<GraphicsCaptureItem, IGraphicsCaptureItemInterop>();
    ::ABI::Windows::Graphics::Capture::IGraphicsCaptureItem* capture_item_abi { nullptr };
    check_hresult(interop->CreateForWindow(target, IID_PPV_ARGS(&capture_item_abi)));
    // Move raw pointer into smart pointer
    GraphicsCaptureItem const capture_item { capture_item_abi, take_ownership_from_abi };
    // Create D3D device and request the `IDXGIDevice` interface...
    com_ptr<ID3D11Device> device = { nullptr };
    check_hresult(::D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr,
                                      D3D11_CREATE_DEVICE_BGRA_SUPPORT | D3D11_CREATE_DEVICE_DEBUG, nullptr, 0,
                                      D3D11_SDK_VERSION, device.put(), nullptr, nullptr));
    auto dxgi_device = device.as<IDXGIDevice>();
    // ... so that we can get an `IDirect3DDevice` (the capture frame pool
    // speaks WinRT only)
    com_ptr<IInspectable> d3d_device_interop { nullptr };
    check_hresult(::CreateDirect3D11DeviceFromDXGIDevice(dxgi_device.get(), d3d_device_interop.put()));
    auto d3d_device = d3d_device_interop.as<IDirect3DDevice>();
    // Create a capture frame pool and capture session
    auto const pool = Direct3D11CaptureFramePool::Create(d3d_device, DirectXPixelFormat::B8G8R8A8UIntNormalized, 1,
                                                         capture_item.Size());
    auto const session = pool.CreateCaptureSession(capture_item);
    [[maybe_unused]] auto const event_guard = pool.FrameArrived(auto_revoke, &on_frame_arrived);
    // Start capturing
    session.StartCapture();
    // Have the system spin up a message loop for us
    ::MessageBoxW(nullptr, L"Stop capturing", L"Capturing...", MB_OK);
}
You must create a 2D texture that can be accessed by the CPU and copy the source frame into this 2D texture, which you can then Map. For example:
void on_frame_arrived(Direct3D11CaptureFramePool const& frame_pool, winrt::Windows::Foundation::IInspectable const&)
{
    wprintf(L"Frame arrived.\n");
    if (auto const frame = frame_pool.TryGetNextFrame())
    {
        if (auto const surface = frame.Surface())
        {
            if (auto const interop = surface.as<::Windows::Graphics::DirectX::Direct3D11::IDirect3DDxgiInterfaceAccess>())
            {
                // renamed from `surface` to avoid shadowing the WinRT surface above
                com_ptr<IDXGISurface> dxgi_surface;
                check_hresult(interop->GetInterface(IID_PPV_ARGS(&dxgi_surface)));
                // get surface dimensions
                DXGI_SURFACE_DESC desc;
                check_hresult(dxgi_surface->GetDesc(&desc));
                // create a CPU-readable texture
                // note: for max perf, the texture creation
                // should be done once per surface size
                // or allocate a big enough texture (like adapter-sized) and copy portions
                D3D11_TEXTURE2D_DESC texDesc{};
                texDesc.Width = desc.Width;
                texDesc.Height = desc.Height;
                texDesc.ArraySize = 1;
                texDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
                texDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
                texDesc.MipLevels = 1;
                texDesc.SampleDesc.Count = 1;
                texDesc.Usage = D3D11_USAGE_STAGING;
                com_ptr<ID3D11Device> device;
                check_hresult(dxgi_surface->GetDevice(IID_PPV_ARGS(&device))); // or get the one from D3D11CreateDevice
                com_ptr<ID3D11Texture2D> tex;
                check_hresult(device->CreateTexture2D(&texDesc, nullptr, tex.put()));
                com_ptr<ID3D11Resource> input;
                check_hresult(interop->GetInterface(IID_PPV_ARGS(&input)));
                com_ptr<ID3D11DeviceContext> dc;
                device->GetImmediateContext(dc.put()); // or get the one from D3D11CreateDevice
                // copy frame into CPU-readable resource
                // this and the Map call can be done at each frame
                dc->CopyResource(tex.get(), input.get());
                D3D11_MAPPED_SUBRESOURCE map;
                check_hresult(dc->Map(tex.get(), 0, D3D11_MAP_READ, 0, &map));
                // TODO do something with map
                dc->Unmap(tex.get(), 0);
            }
        }
    }
}
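One detail worth adding for the TODO: the mapped rows are usually padded, so reads should advance by map.RowPitch rather than assuming tightly packed pixels. A hedged sketch of copying the BGRA data out (pixels is a hypothetical destination buffer; needs <vector> and <cstring>):
    std::vector<uint8_t> pixels(desc.Width * desc.Height * 4); // 4 bytes per B8G8R8A8 pixel
    auto const* src = static_cast<uint8_t const*>(map.pData);
    for (UINT row = 0; row < desc.Height; ++row)
    {
        // the source stride (RowPitch) may exceed Width * 4, so copy row by row
        memcpy(pixels.data() + row * desc.Width * 4, src + row * map.RowPitch, desc.Width * 4);
    }
A tightly packed buffer like this is then straightforward to hand to an encoder.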

Convert an image uploaded from the desktop to black and white in Qt

My program consists of two functions: first, a user clicks a button (btn_image) to upload an image from the desktop, and it is displayed on the label (lbl_image). Second, I push another button (cnv_image) to convert that uploaded image to black and white.
I have managed to implement the first function: the image chosen by the user is displayed successfully. However, I am confused about how to convert that image to b&w. I wrote a function that is triggered by clicking the cnv_image button, but the problem is how to refer to the uploaded image. So when I click the cnv_image button, the uploaded image simply disappears.
I tried to use image.load(ui->lbl_image) to refer to the label which contains the image, but it produces an error.
How can I implement my second function?
void MainWindow::on_btn_image_clicked()
{
    QString fileName = QFileDialog::getOpenFileName(this, tr("Choose"), "", tr("Images (*.png *.jpg *.jpeg)"));
    if (QString::compare(fileName, QString()) != 0) {
        QImage image;
        bool valid = image.load(fileName);
        if (valid) {
            ui->lbl_image->setPixmap(QPixmap::fromImage(image));
        }
    }
}
void MainWindow::on_cnv_image_clicked()
{
    QImage image;
    image.load(ui->lbl_image);
    QSize sizeImage = image.size();
    int width = sizeImage.width(), height = sizeImage.height();
    QRgb color;
    for (int f1 = 0; f1 < width; f1++) {
        for (int f2 = 0; f2 < height; f2++) {
            int gray = qGray(color);
            image.setPixel(f1, f2, qRgb(gray, gray, gray));
        }
    }
    ui->lbl_image->setPixmap(QPixmap::fromImage(image));
}
I updated your code by adding QImage image; as a private member of the MainWindow class. So in mainwindow.h:
#ifndef MAINWINDOW_H
#define MAINWINDOW_H
#include <QMainWindow>
QT_BEGIN_NAMESPACE
namespace Ui
{
    class MainWindow;
}
QT_END_NAMESPACE
class MainWindow: public QMainWindow
{
    Q_OBJECT
public:
    MainWindow(QWidget *parent = nullptr);
    ~MainWindow();
private slots:
    void on_btn_image_clicked();
    void on_cnv_image_clicked();
private:
    Ui::MainWindow *ui;
    QImage image;
};
#endif // MAINWINDOW_H
and in the on_cnv_image_clicked function:
void MainWindow::on_cnv_image_clicked()
{
    QSize sizeImage = image.size();
    int width = sizeImage.width(), height = sizeImage.height();
    QRgb color;
    for (int f1 = 0; f1 < width; f1++)
    {
        for (int f2 = 0; f2 < height; f2++)
        {
            color = image.pixel(f1, f2);
            int gray = (qRed(color) + qGreen(color) + qBlue(color)) / 3;
            image.setPixel(f1, f2, qRgb(gray, gray, gray));
        }
    }
    ui->lbl_image->setPixmap(QPixmap::fromImage(image));
    ui->lbl_image->setScaledContents(true);
}
Welcome to Stackoverflow!
First of all, it's a good idea to keep a copy of the QImage in your class when you load it. It helps to avoid extra conversions from QPixmap to QImage in later steps. I'll skip that here because it's out of the scope of your question.
You can use QImage::convertTo to convert the format of a QImage in place. This means it does not create a new QImage, although per the documentation it may detach the QImage. You can read more about Implicit Sharing if you are interested.
So, the implementation should be something like:
void MainWindow::on_cnv_image_clicked()
{
    QImage image = ui->lbl_image->pixmap().toImage();
    image.convertTo(QImage::Format_Grayscale8);
    ui->lbl_image->setPixmap(QPixmap::fromImage(image));
}
Take a look at the list of QImage::Formats to evaluate other grayscale/mono options.
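For instance, if a true 1-bit black-and-white image is wanted rather than grayscale, an untested variant along the same lines could be:
void MainWindow::on_cnv_image_clicked()
{
    QImage image = ui->lbl_image->pixmap().toImage();
    // Qt::DiffuseDither tends to look better than the default threshold for 1-bit output
    image = image.convertToFormat(QImage::Format_Mono, Qt::DiffuseDither);
    ui->lbl_image->setPixmap(QPixmap::fromImage(image));
}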

ArUco - unhandled exception

I have set up the ArUco library and now want to write a small program to test whether it is working correctly. The code is below:
#include<opencv2/opencv.hpp>
#include<iostream>
#include<aruco.h>
#include<cvdrawingutils.h>
using namespace cv;
using namespace std;
using namespace aruco;
int main()
{
    Mat image;
    //Read image and display it
    image = imread("E:/../Capture.PNG", 1);
    if (image.empty()) //check whether the image is loaded or not
    {
        cout << "Error : Image cannot be loaded..!!" << endl;
        //system("pause"); //wait for a key press
        return -1;
    }
    namedWindow("Image", CV_WINDOW_AUTOSIZE);
    imshow("Image", image);
    waitKey(1000);
    //Marker detection
    MarkerDetector MDetector;
    vector<Marker> Markers;
    //I am not sure if we need to read the pattern of the marker. So I read it.
    Markers = imread("E:/.../pattern.PNG", 1);
    MDetector.detect(image, Markers);
    //draw info and marker boundaries
    for (int i = 0; i < Markers.size(); i++)
    {
        Markers[i].draw(image, Scalar(0, 0, 255), 2);
    }
    imshow("ouput", image);
    waitKey(0);
}
This code builds with zero errors, but when I run it, it gives me an error:
(Screenshots of the error dialog and of the state when I hit Break are not included here.)
I use Windows 8.1, Microsoft Visual Studio 2013, OpenCV 3.0 and ArUco 1.3.0.
Any help would be appreciated. Thank you very much.
This question is solved. I was using the wrong marker pattern; we are supposed to use the marker patterns provided by ArUco. You can generate them here: http://terpconnect.umd.edu/~jwelsh12/enes100/markergen.html
There were mistakes in the code too. The correct code is below:
#include<opencv2/opencv.hpp>
#include<iostream>
#include<aruco.h>
#include<cvdrawingutils.h>
using namespace cv;
using namespace std;
using namespace aruco;
int main()
{
    Mat image;
    //Read image and display it
    image = imread("E:/Studies/Master Thesis/Markers/arucoTest.PNG", 1);
    if (image.empty()) //check whether the image is loaded or not
    {
        cout << "Error : Image cannot be loaded..!!" << endl;
        //system("pause"); //wait for a key press
        return -1;
    }
    namedWindow("Image", CV_WINDOW_AUTOSIZE);
    imshow("Image", image);
    waitKey(1000);
    //Marker detection
    MarkerDetector MDetector;
    vector<Marker> Markers;
    MDetector.detect(image, Markers);
    //draw information and marker boundaries
    for (unsigned int i = 0; i < Markers.size(); i++) {
        cout << Markers[i] << endl;
        Markers[i].draw(image, Scalar(0, 0, 255), 2);
    }
    imshow("ouput", image);
    waitKey(0);
}
I hope this test code helps other newcomers to ArUco (like me).

SDL 2.0 Issue - Trying to load image, but only get a white box

Okay, so I am trying to load a window and display a couple of images on it using a function. The window loads, and my error messages for a failed image load don't appear; however, the window just stays white. Any ideas why that might be? My code is below.
#include "stdafx.h"
#include "SDL.h"
#include <iostream>
#include <string>
using namespace std;
const int Window_Width = 640;
const int Window_Height = 480;
SDL_Window *window = NULL;
SDL_Renderer *render = NULL;
SDL_Texture* loadImage(string imagename) //function that loads the image, useful for handling multiple image imports
{
    SDL_Surface* loadedImage = NULL;
    SDL_Texture* texture = NULL;
    loadedImage = SDL_LoadBMP(imagename.c_str()); //loads the image with the passed file name
    if (loadedImage == NULL) //checks for any errors loading the image
    {
        cout << "The image failed to load.." << endl;
    }
    texture = SDL_CreateTextureFromSurface(render, loadedImage);
    SDL_FreeSurface(loadedImage);
    return texture;
}
int main(int argc, char** argv)
{
    if (SDL_Init(SDL_INIT_EVERYTHING) == -1)
    {
        cout << SDL_GetError() << endl;
        return 1;
    }
    window = SDL_CreateWindow("Frogger", SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, Window_Width, Window_Height, SDL_WINDOW_SHOWN);
    //creates a window in the centre of the screen, it uses const int's to define the size of the window
    if (window == NULL)
    {
        cout << SDL_GetError() << endl;
        return 1;
    }
    render = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
    //this creates the renderer for the window
    if (render == NULL)
    {
        cout << SDL_GetError() << endl;
        return 1;
    }
    //loading the images using the function
    SDL_Texture* background = NULL;
    SDL_Texture* frog = NULL;
    background = loadImage("background.bmp");
    frog = loadImage("frogger.bmp");
    SDL_Delay(2000);
    SDL_RenderClear(render);
    SDL_RenderPresent(render);
    SDL_UpdateWindowSurface(window);
    //freeing the memory back up
    SDL_DestroyRenderer(render);
    SDL_DestroyWindow(window);
    SDL_DestroyTexture(background);
    SDL_DestroyTexture(frog);
    SDL_Quit();
    return 0;
}
You aren't rendering anything; you just load some textures and end the program.
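To make that concrete, a minimal sketch of what could go between loading the textures and the cleanup: clear, copy the textures to the renderer, then present. The frog's position and size below are made up. Note also that SDL_UpdateWindowSurface() belongs to the surface API and generally shouldn't be mixed with the renderer API.
    SDL_Rect frogPos = { 300, 400, 40, 40 }; // hypothetical sprite position and size
    SDL_RenderClear(render);
    SDL_RenderCopy(render, background, NULL, NULL); // NULL dest rect stretches over the whole window
    SDL_RenderCopy(render, frog, NULL, &frogPos);
    SDL_RenderPresent(render); // actually puts the frame on screen
    SDL_Delay(2000); // keep the window up long enough to see it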

Windows + Qt and how to capture webcam feed without OpenCV

I've been fighting with this problem for a long time.
I can't get OpenCV to work, and I have followed a lot of tutorials about it and how to use it in Qt, so I got tired of it and I want to avoid using OpenCV for this.
Now, my requirement or question... I need to show a webcam feed (real-time video, without audio) in a Qt GUI application with only one button: "Take Snapshot", which, obviously, takes a picture from the current feed and stores it.
That's all.
Is there any way to get this done without using OpenCV?
System specification:
Qt 4.8
Windows XP 32 bits
USB 2.0.1.3M UVC WebCam (the one I'm using now, it should support other models too)
I hope somebody can help me with this because I'm going crazy.
Thanks in advance!
OK, I finally did it, so I will post my solution here so we have something clear about this.
I used a library called 'ESCAPI': http://sol.gfxile.net/escapi/index.html
This provides an extremely easy way to capture frames from the device. With this raw data, I just create a QImage, which is later shown in a QLabel.
I created a simple object to handle this.
#include <QDebug>
#include "camera.h"
Camera::Camera(int width, int height, QObject *parent) :
    QObject(parent),
    width_(width),
    height_(height)
{
    capture_.mWidth = width;
    capture_.mHeight = height;
    capture_.mTargetBuf = new int[width * height];
    int devices = setupESCAPI();
    if (devices == 0)
    {
        qDebug() << "[Camera] ESCAPI initialization failure or no devices found";
    }
}
Camera::~Camera()
{
    deinitCapture(0);
}
int Camera::initialize()
{
    if (initCapture(0, &capture_) == 0)
    {
        qDebug() << "[Camera] Capture failed - device may already be in use";
        return -2;
    }
    return 0;
}
void Camera::deinitialize()
{
    deinitCapture(0);
}
int Camera::capture()
{
    doCapture(0);
    while (isCaptureDone(0) == 0);
    image_ = QImage(width_, height_, QImage::Format_ARGB32);
    for (int y(0); y < height_; ++y)
    {
        for (int x(0); x < width_; ++x)
        {
            int index(y * width_ + x);
            image_.setPixel(x, y, capture_.mTargetBuf[index]);
        }
    }
    return 1;
}
And the header file:
#ifndef CAMERA_H
#define CAMERA_H
#include <QObject>
#include <QImage>
#include "escapi.h"
class Camera : public QObject
{
    Q_OBJECT
public:
    explicit Camera(int width, int height, QObject *parent = 0);
    ~Camera();
    int initialize();
    void deinitialize();
    int capture();
    const QImage& getImage() const { return image_; }
    const int* getImageRaw() const { return capture_.mTargetBuf; }
private:
    int width_;
    int height_;
    struct SimpleCapParams capture_;
    QImage image_;
};
#endif // CAMERA_H
It's very simple, and just for example purposes. Usage should be something like:
Camera cam(320, 240);
cam.initialize();
cam.capture();
QImage img(cam.getImage());
ui->label->setPixmap(QPixmap::fromImage(img));
Of course, you can use a QTimer to update the frame in the QLabel and you will have video there...
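For instance, a rough sketch of that QTimer hookup (cam_, timer_ and updateFrame() are hypothetical members and a hypothetical slot of a MainWindow; the old-style connect keeps it Qt 4.8 compatible):
    // In the MainWindow constructor:
    cam_ = new Camera(320, 240, this);
    cam_->initialize();
    timer_ = new QTimer(this);
    connect(timer_, SIGNAL(timeout()), this, SLOT(updateFrame()));
    timer_->start(33); // roughly 30 fps
    // Hypothetical slot that grabs one frame and shows it:
    void MainWindow::updateFrame()
    {
        cam_->capture(); // blocks until the frame is done
        ui->label->setPixmap(QPixmap::fromImage(cam_->getImage()));
    }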
Hope it helps! And thanks, Nicholas, for your help!
