I would like to be able to display a DICOM image in a Qt project with the same rendering a DICOM viewer program would give.
I was able to display it, but with very bad contrast. I heard you need to operate on the pixels, but I'm not sure. Do you have a working example?
EDIT: I'm adding my code in case it helps. I commented out a lot of things because I noticed the result was exactly the same either way.
#include "mainwindow.h"
#include "ui_mainwindow.h"
#include <iostream>
#undef UNICODE
#undef _UNICODE
#include <dcmtk/config/osconfig.h>
#include <dcmtk/dcmdata/dctk.h>
#include <dcmtk/dcmimgle/dcmimage.h>
#include <QPixmap>
#include <QLabel>
#include <QImageReader>
#include <QGraphicsScene>
using namespace std;
MainWindow::MainWindow(QWidget *parent) :
QMainWindow(parent),
ui(new Ui::MainWindow)
{
ui->setupUi(this);
//int sizeX = 600;
// int sizeY = 600;
//initialize random seed
//srand (time(NULL));
//QImage image = QImage( sizeX, sizeY, QImage::Format_RGB32 );
/*for( int l=0; l<sizeX; l++ )
{
for( int c=0; c<sizeY; c++ )
{
///Random color for each pixel
//image.setPixel( l, c, qRgb(rand() % 256, rand() % 256, rand() % 256) );
///Fixed color for each pixel
image.setPixel( l, c, qRgb(100, 150, 200) );
}
}*/
const char *file = "/home/x4rkz/project/Laura/QTImage/IMG00000";
DicomImage *image = new DicomImage(file);
if (image != NULL)
{
if (image->getStatus() == EIS_Normal)
{
Uint8 *pixelData = (Uint8 *)(image->getOutputData(8)); // 8 bits per sample
// pixelData points to an internal memory buffer owned by DicomImage
if (pixelData != NULL)
{
// do something useful with the pixel data
QImage img(pixelData,image->getWidth(), image->getHeight(), QImage::Format_Indexed8 );
/*QColor color;
QImage *img;
void *pDicomDibits;
uchar *px;
// uchar pixel[4];
const int width = (int)(image->getWidth());
const int height = (int)(image->getHeight());
if (image->isMonochrome()){
img = new QImage(width, height, QImage::Format_Indexed8);
img->setColorCount(256);
// define gray palette here
for (int i=0; i<256; i++) {
color.setRgb(i, i, i);
img->setColor(i, color.rgb());
}
image->createWindowsDIB(pDicomDibits, 0, 0, 8, 0, 1);
unsigned char * pd;
pd=(unsigned char *)pDicomDibits;
for (int y=0; y < (long) height; y++)
{
px = img->scanLine(y);
for (int x=0; x < (long) width; x++)
{
px[x] = (unsigned char) (*pd);
pd++;
}
}*/
QGraphicsScene * graphic = new QGraphicsScene( this );
graphic->addPixmap( QPixmap::fromImage( img ) );
ui->graphicsView->setScene(graphic);
/* }else
cout << "Non monochrome image" << endl;*/
}
} else
cerr << "Error: cannot load DICOM image (" << DicomImage::getString(image->getStatus()) << ")" << endl;
}
}
MainWindow::~MainWindow()
{
delete ui;
}
#include "mainwindow.h"
#include <QApplication>
#include <iostream>
#undef UNICODE
#undef _UNICODE
#include <dcmtk/config/osconfig.h>
#include <dcmtk/dcmdata/dctk.h>
#include <dcmtk/dcmimgle/dcmimage.h>
#include <QPixmap>
#include <QLabel>
#include <QImageReader>
using namespace std;
int main(int argc, char *argv[])
{
QApplication a(argc, argv);
MainWindow w;
w.show();
return a.exec();
}
As you can see, the result has no contrast.
If the rendered image has such a low contrast, you should try to set an appropriate VOI (Value of Interest) window, e.g. using image->setMinMaxWindow(). See the API documentation for details.
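A minimal sketch of how that could fit into the loading code from the question (assuming a monochrome image; the explicit grayscale palette is needed because QImage::Format_Indexed8 has no default color table, and the bytes-per-line argument is passed because DCMTK packs scanlines tightly):
DicomImage *image = new DicomImage(file);
if (image != NULL && image->getStatus() == EIS_Normal && image->isMonochrome())
{
    // Compute a VOI window from the minimum/maximum pixel values,
    // so the full 8-bit gray range is used in the output.
    image->setMinMaxWindow();
    Uint8 *pixelData = (Uint8 *)(image->getOutputData(8)); // 8 bits per sample
    if (pixelData != NULL)
    {
        const int width = (int)image->getWidth();
        const int height = (int)image->getHeight();
        QImage img(pixelData, width, height, width /* bytes per line */, QImage::Format_Indexed8);
        QVector<QRgb> palette;
        for (int i = 0; i < 256; ++i)
            palette.append(qRgb(i, i, i)); // linear grayscale palette
        img.setColorTable(palette);
        // ... hand img to the QGraphicsScene as before ...
    }
}
delete image;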
I've been trying to parallelize my class and its constructor using CUDA.
Below you can find both the serial version and the parallelized version of my class. It compiles correctly, and I wonder if there are any improvements to be made in my parallelized code.
Serial Code Ray.h:
#pragma once
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#include <math.h>
#include <vector>
#include <algorithm>
#include <complex>
#include "arithmatic_operations.h"
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
class Ray
{
public:
std::vector<std::vector<double>>Point = { { 0,0,0 } ,{ 0,0,0 } ,{ 0,0,0 } };
std::vector<std::vector<double>>Direction = { { 0,0,0 } ,{ 0,0,0 } ,{ 0,0,0 } };
double no_bounces = -1;
double length = -1;
std::vector<double>E_thei = { 0,0,0 };
std::vector<double>E_phii = { 0,0,0 };
std::complex<double> Er_the_the = 0;
std::complex<double> Er_phi_the = 0;
std::complex<double> Er_the_phi = 0;
std::complex<double> Er_phi_phi = 0;
double Ai = 0;
Ray(std::vector<double>OO, std::vector<std::vector<double>>DD, double delta)
{
//Point.push_back({ 0,0,0 });
this->Point[no_bounces + 1][0] = OO[0];
this->Point[no_bounces + 1][1] = OO[1];
this->Point[no_bounces + 1][2] = OO[2];
std::vector<double>first_row_DD = { 0,0,0 };
first_row_DD[0] = DD[0][0];
first_row_DD[1] = DD[0][1];
first_row_DD[2] = DD[0][2];
//Direction.push_back({ 0,0,0 });
this->Direction[no_bounces + 1][0] = DD[0][0] / norm(first_row_DD);
this->Direction[no_bounces + 1][1] = DD[0][1] / norm(first_row_DD);
this->Direction[no_bounces + 1][2] = DD[0][2] / norm(first_row_DD);
this->E_thei[0] = DD[1][0];
this->E_thei[1] = DD[1][1];
this->E_thei[2] = DD[1][2];
this->E_phii[0] = DD[2][0];
this->E_phii[1] = DD[2][1];
this->E_phii[2] = DD[2][2];
this->Ai = pow(delta, 2);
}
};
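For context, a minimal usage sketch of the serial constructor (the values are illustrative assumptions): the rows of DD are unpacked as the ray direction, E_thei, and E_phii, respectively.
// Hypothetical usage: origin OO, a 3x3 matrix DD whose rows are
// (direction, E_theta, E_phi), and the ray-tube width delta.
std::vector<double> OO = { 0.0, 0.0, 0.0 };
std::vector<std::vector<double>> DD = { { 1.0, 0.0, 0.0 },
                                        { 0.0, 1.0, 0.0 },
                                        { 0.0, 0.0, 1.0 } };
Ray ray(OO, DD, 0.1); // Direction gets normalized; Ai becomes delta^2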
Below is the code I parallelized:
Ray.cuh:
#include <iostream>
#include <array>
#include <fstream>
#include <string>
#include <sstream>
#include <cstddef>
#include <utility>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/transform.h>
#include <thrust/functional.h>
#include <thrust/fill.h>
#include <thrust/copy.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/zip_function.h>
#include <thrust/execution_policy.h>
__device__ double square(double& x) { return x * x; }
struct myPow
{
__device__
double operator()(double& x) const { return square(x); }
};
struct CalculateNormValues
{
__device__
double operator()(const thrust::tuple<double, double, double>& t) const
{
double x = thrust::get<0>(t);
double y = thrust::get<1>(t);
double z = thrust::get<2>(t);
return (double)sqrt(x * x + y * y + z * z);
}
};
struct Normalize
{
__device__
thrust::tuple<double, double, double> operator()(const thrust::tuple<double, double, double, double>& t) const
{
double x = thrust::get<0>(t);
double y = thrust::get<1>(t);
double z = thrust::get<2>(t);
double norm = thrust::get<3>(t);
return thrust::make_tuple(x / norm, y / norm, z / norm);
}
};
class Ray {
static constexpr int n_dims = 3;
static constexpr int cn_dims = 2;
using Container = thrust::device_vector<double>;
using Vectors = std::array<Container, n_dims>;
using Matrices = std::array<Container, n_dims* n_dims>;
using Complexes = std::array<Container, cn_dims>;
public:
std::ptrdiff_t n_rays{};
Vectors E_thei;
Vectors E_phii;
Matrices Point;
Matrices Direction;
Complexes Er_the_the;
Complexes Er_phi_the;
Complexes Er_the_phi;
Complexes Er_phi_phi;
Container Ai;
Container no_bounces;
Container length;
Container normValues;
Ray(thrust::device_vector<double>& OO_0,
thrust::device_vector<double>& OO_1,
thrust::device_vector<double>& OO_2,
thrust::device_vector<double>& DD_00,
thrust::device_vector<double>& DD_01,
thrust::device_vector<double>& DD_02,
thrust::device_vector<double>& DD_10,
thrust::device_vector<double>& DD_11,
thrust::device_vector<double>& DD_12,
thrust::device_vector<double>& DD_20,
thrust::device_vector<double>& DD_21,
thrust::device_vector<double>& DD_22,
thrust::device_vector<double>& delta) :
n_rays{ static_cast<std::ptrdiff_t>(OO_0.size()) },
Direction{ std::move(DD_00), //Normalize direction components later
std::move(DD_01),
std::move(DD_02) },
Point{ std::move(OO_0),
std::move(OO_1),
std::move(OO_2) },
E_thei{ std::move(DD_10),
std::move(DD_11),
std::move(DD_12) },
E_phii{ std::move(DD_20),
std::move(DD_21),
std::move(DD_22) },
Ai{ std::move(delta) } // square the Ai values later
{
thrust::transform(Ai.begin(), Ai.end(), Ai.begin(), myPow()); // squares the Ai values (Ai = delta^2)
//Calculate normalized values
normValues.resize(n_rays); // one norm per ray (resize(3) would overflow for n_rays > 3)
thrust::transform(
thrust::make_zip_iterator(thrust::make_tuple(Direction[0].begin(), Direction[1].begin(), Direction[2].begin())),
thrust::make_zip_iterator(thrust::make_tuple(Direction[0].end(), Direction[1].end(), Direction[2].end())),
normValues.begin(),
CalculateNormValues{});
//Normalize Direction
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(Direction[0].begin(), Direction[1].begin(), Direction[2].begin(), normValues.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(Direction[0].end(), Direction[1].end(), Direction[2].end(), normValues.end())),
thrust::make_zip_iterator(
thrust::make_tuple(Direction[0].begin(), Direction[1].begin(), Direction[2].begin())),
Normalize{});
}
};
The program compiles but I'd like to ask some questions.
When I use thrust::transform, I know that the thrust library does the memory allocation and copying on the device for me. I wonder: after the operation is done, does it copy the result back to the host? For example, after
thrust::transform(Ai.begin(), Ai.end(), Ai.begin(), myPow());
if I write a line like Ai[0] = 5, is that line executed on the CPU or the GPU?
My second question: can I write a device function in my parallelized class using __global__ and CUDA threads? If the answer is yes, then for example after moving the device_vectors OO_1 and OO_2 into the member Point, if I want to do math on these device vectors in a __global__ function, since device_vectors are host-only objects, do I need to copy them into C arrays, allocate memory on the device, and do the math there?
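For what it's worth: thrust::transform on device_vector iterators runs entirely on the GPU, and nothing is copied back implicitly; an expression like Ai[0] = 5 executes on the CPU but triggers a single-element transfer to device memory behind the scenes. A device_vector's storage can also be handed to a __global__ kernel directly via thrust::raw_pointer_cast, with no intermediate host copy. A minimal sketch, where the kernel and launch configuration are illustrative assumptions:
#include <cstddef>
#include <thrust/device_vector.h>

// Illustrative kernel (not from the original code): scales each element in place.
__global__ void scale(double *data, std::size_t n, double factor)
{
    std::size_t i = blockIdx.x * (std::size_t)blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= factor;
}

void scaleOnDevice(thrust::device_vector<double> &v)
{
    // device_vector is a host-side handle, but its storage already lives in
    // device memory; raw_pointer_cast exposes it to a kernel directly.
    double *raw = thrust::raw_pointer_cast(v.data());
    const std::size_t n = v.size();
    const int block = 256;
    const int grid = (int)((n + block - 1) / block);
    scale<<<grid, block>>>(raw, n, 2.0);
    cudaDeviceSynchronize(); // device_vector reads after this see the results
}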
There is an android_camera.c file in the ffmpeg project. I want to call the NDK camera through ffmpeg, take out the YUV data, and write it to a file. Currently, the code blocks in the function wait_for_image_format.
android_camera.c
#include <stdio.h>
#include <math.h>
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#include <libavformat/avformat.h>
#include <libavdevice/avdevice.h>
#include <libavutil/dict.h>
int main(int argc, char **argv) {
int ret;
AVFormatContext *fmtCtx = NULL;
AVPacket pkt1, *pcaket = &pkt1;
/* 1. Register codecs and devices */
avcodec_register_all();
avdevice_register_all();
/* 2. Connect to the video source */
AVInputFormat *inputFmt = av_find_input_format("android_camera");
if (NULL != inputFmt) {
printf("input device name:%s\n",inputFmt->name);
} else {
printf("Null point!\n");
}
#if 1
AVDictionary *avdict = NULL;
AVDictionaryEntry *t = av_dict_get(avdict, "video_size", NULL, AV_DICT_IGNORE_SUFFIX);
printf("ok1\n");
av_dict_set(&avdict, "video_size", "hd720", 0);
av_dict_set_int(&avdict, "camera_index",1, 0);
av_dict_set_int(&avdict, "input_queue_size",2, 0);
printf("ok2\n");
/* 3. Open the video capture device */
//ret = avformat_open_input(&fmtCtx, "video=/dev/video1", inputFmt, avdict);
//ret = avformat_open_input(&fmtCtx, "android_camera", inputFmt, avdict);
fmtCtx = avformat_alloc_context();
if (NULL == fmtCtx) {
printf("avformat_alloc_context failed!\n");
}
printf("ok3\n");
//if (avformat_find_stream_info(fmtCtx, NULL) < 0) {
// printf("Could not find stream information\n");
//}
//printf("ok4\n");
ret = avformat_open_input(&fmtCtx, NULL, inputFmt, &avdict);
printf("ok4\n");
av_dict_free(&avdict);
#else
fmtCtx = avformat_alloc_context();
if(!fmtCtx)
{
printf("avformat_alloc_contest error\n");
exit(1);
}
#endif
printf("ok5\n");
//ret = av_demuxer_open(fmtCtx);
if(ret<0)
{
printf("av_demuxer_open error\n");
}
printf("ok6\n");
/* 4. Read one frame of data; the format depends on the camera type. My camera outputs YUV422. */
fmtCtx->flags &= (~0x4);
av_read_frame(fmtCtx, pcaket);
printf("ok7\n");
//printf("packet size:%d\n",(pcaket->size));
/* 5. Write the frame data to a file */
FILE *fp = NULL;
fp = fopen("out.yuv", "a+");
if (NULL != fp) {
// write the data to the file
fwrite(pcaket->data, 1, pcaket->size, fp);
}
// close the file
fclose(fp);
/* 6. Release the frame data */
av_free_packet(pcaket);
/* 7. Close the video input source */
avformat_close_input(&fmtCtx);
return 0;
}
I have encountered a problem of high memory usage when using ParseFromZeroCopyStream to load a file in which a large buffer has been written. The code snippet below uses over 60 GB of RAM; the run failed as the system froze after reaching its RAM limit.
FYI, I am using protobuf as a DLL.
scene.proto
syntax = "proto3";
package Recipe;
option cc_enable_arenas = true;
message Scene
{
repeated int32 image_data = 1 [packed=true];
}
source.cpp
#include <iostream>
#include <fstream>
#include <ostream>
#include <istream>
#include <string>
#include <cstdint>
#include "Scene.pb.h"
#include <google/protobuf/io/zero_copy_stream_impl.h>
#include <google/protobuf/io/gzip_stream.h>
#include <google/protobuf/arena.h>
int const _MIN = 0;
int const _MAX = 255;
unsigned int const _SIZE = 1280000000;
//unsigned int const _SIZE = 2000;
unsigned int const _COMPRESSION_LEVEL = 6;
void randWithinUnsignedCharSize(uint8_t * buffer, unsigned int size)
{
for (size_t i = 0; i < size; ++i)
{
buffer[i] = i; // truncated to uint8_t, so values wrap every 256
}
}
using namespace google::protobuf::io;
int main()
{
GOOGLE_PROTOBUF_VERIFY_VERSION;
{
google::protobuf::Arena arena;
Recipe::Scene * scene = google::protobuf::Arena::CreateMessage<Recipe::Scene>(&arena);
uint8_t * imageData = new uint8_t[_SIZE];
randWithinUnsignedCharSize(imageData, _SIZE);
scene->mutable_image_data()->Resize(_SIZE, 0);
for (size_t i = 0; i < _SIZE; i++)
{
scene->set_image_data(i, imageData[i]);
}
std::cout << "done saving data to repeated field.\n";
{
std::fstream output("data.txt", std::ios::out | std::ios::trunc | std::ios::binary);
OstreamOutputStream outputFileStream(&output);
GzipOutputStream::Options options;
options.format = GzipOutputStream::GZIP;
options.compression_level = _COMPRESSION_LEVEL;
GzipOutputStream gzipOutputStream(&outputFileStream, options);
if (!scene->SerializeToZeroCopyStream(&gzipOutputStream)) {
std::cerr << "Failed to write scene." << std::endl;
return -1;
}
}
delete[] imageData;
}
std::cout << "Finish serializing into data.txt\n";
{
google::protobuf::Arena arena1;
Recipe::Scene * scene1 = google::protobuf::Arena::CreateMessage<Recipe::Scene>(&arena1);
{
std::fstream input("data.txt", std::ios::in | std::ios::binary);
IstreamInputStream inputFileStream(&input);
GzipInputStream gzipInputStream(&inputFileStream);
if (!scene1->ParseFromZeroCopyStream(&gzipInputStream)) {
std::cerr << "Failed to parse scene." << std::endl;
return -1;
}
}
std::cout << "scene1->imagedata_size() " << scene1->image_data_size() << std::endl;
}
google::protobuf::ShutdownProtobufLibrary();
return 0;
}
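As an aside, a minimal sketch of an alternative schema, which is my assumption rather than part of the original post: since every sample here fits in one byte, a bytes field stores the data one byte per sample and avoids the 4-bytes-per-element RepeatedField that repeated int32 expands into while parsing.
#include <cstdint>
#include <cstddef>
#include "Scene.pb.h" // regenerated after the hypothetical schema change below

// Hypothetical: scene.proto now declares `bytes image_data = 1;`
void storeImage(Recipe::Scene *scene, const uint8_t *imageData, size_t size)
{
    // In proto3 C++, mutable_image_data() on a bytes field returns a
    // std::string*; assign() copies the raw bytes 1:1.
    scene->mutable_image_data()->assign(
        reinterpret_cast<const char *>(imageData), size);
}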
I want to use transient scrollbars (transient scroll bars appear when the content is scrolled and disappear when they are no longer needed) in a Qt application. For this purpose I have subclassed QProxyStyle and reimplemented the function styleHint. The code is below.
File ScrollBar.h:
#include <QStyle>
#include <QCommonStyle>
#include <QProxyStyle>
class ScrollBarStyle : public QProxyStyle
{
public:
int styleHint(StyleHint sh, const QStyleOption *opt, const QWidget *widget, QStyleHintReturn *hret) const;
};
File ScrollBar.cpp:
#include "ScrollBar.h"
int ScrollBarStyle::styleHint(StyleHint sh, const QStyleOption *opt, const QWidget *widget,QStyleHintReturn *hret) const
{
int ret = 0;
switch (sh) {
case SH_ScrollBar_Transient:
ret = true;
break;
default:
return QProxyStyle::styleHint(sh, opt, widget, hret);
}
return ret;
}
File MainWindow.h:
#include <QMainWindow>
#include <QTextEdit>
class MainWindow : public QMainWindow
{
Q_OBJECT
public:
MainWindow(QWidget *parent = 0);
~MainWindow();
};
File MainWindow.cpp:
#include <QTextEdit>
#include "MainWindow.h"
MainWindow::MainWindow(QWidget *parent)
: QMainWindow(parent)
{
QTextEdit *l = new (std::nothrow) QTextEdit(this);
if (l == 0)
return;
setCentralWidget(l);
}
MainWindow::~MainWindow()
{
}
File main.cpp:
#include "ScrollBar.h"
#include <QApplication>
int main(int argc, char *argv[])
{
QApplication a(argc, argv);
MainWindow w;
ScrollBarStyle *style = new (std::nothrow) ScrollBarStyle;
if(style == 0)
return -1;
style->setBaseStyle(a.style());
w.show();
return a.exec();
}
But I have a problem: the transient scrollbar appears only once (when the text doesn't fit in the text area); then it disappears and never becomes visible again.
So how can I fix this problem?
Thanks!
You have forgotten to set the style on the application:
a.setStyle(style);
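A minimal sketch of the corrected main.cpp; the only change relative to the question is the setStyle call, applied before the window is created so every widget picks up the proxy style:
#include "ScrollBar.h"
#include "MainWindow.h"
#include <QApplication>

int main(int argc, char *argv[])
{
    QApplication a(argc, argv);
    ScrollBarStyle *style = new (std::nothrow) ScrollBarStyle;
    if (style == 0)
        return -1;
    style->setBaseStyle(a.style());
    a.setStyle(style); // without this, the proxy style is never used
    MainWindow w;
    w.show();
    return a.exec();
}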
I changed my code from avcodec_decode_audio3 to avcodec_decode_audio4 and added the frame handling. But now I cannot decode AAC frames anymore.
Why does avcodec_decode_audio4 return -22 (invalid argument)?
Following the answer below, does this have something to do with parameters in AVCodecContext that need to be set?
I had to use avcodec_decode_audio4 because I updated my ffmpeg and then got the following error:
[NULL @ 0xb14f020] Custom get_buffer() for use with avcodec_decode_audio3() detected.
Overriding with avcodec_default_get_buffer
[NULL @ 0xb14f020] Please port your application to avcodec_decode_audio4()
According to Buffer error in avcodec_decode_audio4() this is a regression; is there any other solution than going back to ffmpeg < 0.8?
The decoder using avcodec_decode_audio4:
AVCodec *codec;
AVCodecContext *avCtx;
AVFrame * decoded_frame = NULL;
uint8_t *outbuf = static_cast<uint8_t *>(malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE));
AVPacket avPacket;
int main() {
av_register_all();
codec = avcodec_find_decoder(CODEC_ID_AAC);
//set parameters
avCtx = avcodec_alloc_context3(codec);
avCtx->channels = 1;
avCtx->sample_rate = 44100;
avCtx->bit_rate=16;
if (avcodec_open2(avCtx, codec, NULL) < 0) printf("Could not open codec\n");
av_init_packet(&avPacket);
//Main decoder loop
while(1)
my_frame_decoder();
return 0;
}
void my_frame_decoder() {
//get data
...
avPacket.size = numBytes;
avPacket.data = inputBytes;
int len;
while (avPacket.size > 0) {
int got_frame = 0;
if (!decoded_frame) {
if (!(decoded_frame = avcodec_alloc_frame())) {
printf("out of memory");
return;
}
} else {
avcodec_get_frame_defaults(decoded_frame);
}
//-------------------->> returns always -22
len = avcodec_decode_audio4(avCtx, decoded_frame, &got_frame, &avPacket);
//do something with the decoded frame
...
avPacket.size -= len;
avPacket.data += len;
}
return;
}
After hours of searching, I found out that the dec_ctx passed to avcodec_decode_audio4 must be opened with a dec_codec initialised by av_find_best_stream():
1. av_find_best_stream(in_fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec_codec, 0);
2. dec_ctx = m_in_aud_strm->codec;
3. av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
4. avcodec_open2(dec_ctx, dec_codec, NULL);
...
5. avcodec_decode_audio4(dec_ctx, pFrame, &got_frame, &pkt);
I think the problem is the parameters set in your codec context. Please refer to https://www.ffmpeg.org/doxygen/trunk/structAVCodecContext.html for setting the parameters, which have changed from avcodec_decode_audio3 to avcodec_decode_audio4.
Not a solution, but a workaround is to go back to older builds. After testing various builds that work with avcodec_decode_audio3, I thought it might be useful for others to know that ffmpeg-0.10.14.tar.bz2 from https://ffmpeg.org/releases/ works.
#include <windows.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <errno.h>
#include <fcntl.h>
#include <ctype.h>
#include <math.h>
#include <wctype.h>
#include <wchar.h>
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <locale.h>
#include <signal.h>
#include <limits.h>
#include <float.h>
#include <iso646.h>
#undef NDEBUG
#include <assert.h>
// Use avcodec_send_packet() and avcodec_receive_frame().
//sample code
while (av_read_frame (FormatContext, packet_ptr) >= 0)
{
/* some code */
if (packet_ptr->stream_index == audiostream)
{
assert(NULL == decoded_frame);
decoded_frame = av_frame_alloc();
ret = avcodec_send_packet(pCodecCtx, packet_ptr);
if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
{
av_packet_unref (packet_ptr);
if (decoded_frame)
{
av_frame_unref(decoded_frame);
decoded_frame = NULL;
}
continue;
}
else
{
if (0 <= ret)
packet_ptr->size = 0;
ret = avcodec_receive_frame(pCodecCtx, decoded_frame);
if (ret >= 0)
got_frame = 1;
else
{
got_frame = 0;
if (decoded_frame)
{
av_frame_unref(decoded_frame);
decoded_frame = NULL;
}
av_packet_unref (packet_ptr);
continue;
}
}
if(AV_SAMPLE_FMT_FLTP == pCodecCtx->sample_fmt)//AAC sample format for Libav released 10-October-2020 (ffmpeg 4.3.1)
{
//now get the PCM data ready to play or save
int nb_samples = decoded_frame->nb_samples;
int channels = pCodecCtx->channels;
if(channels > 2) //for this small sample only 2 channels...
{
channels = 2;//it will convert multichannel media files to 2 channels, remember this...more code need to be modified
}
int outputBufferLen = nb_samples * channels * 2;
int size_out = outputBufferLen; // the size of the PCM data in bytes
char *buf = (char *)malloc(size_out); // cast needed when compiling as C++
short *outputBuffer=(short *)buf;
int in_samples = decoded_frame->nb_samples;
int i=0;
float * inputChannel0 = (float *)decoded_frame->extended_data[0];
// Mono
if (pCodecCtx->channels==1)
{
for (i=0; i<in_samples; i++)
{
float sample = *inputChannel0++;
if (sample<-1.0f) sample=-1.0f;
else if (sample>1.0f) sample=1.0f;
outputBuffer[i] = (int16_t) (sample * 32767.0f);//largest positive int16_t
}
}
// Stereo
else
{
float * inputChannel1 = (float *)decoded_frame->extended_data[1];
for (i=0; i < in_samples; i++)
{
float sample = *inputChannel0++;
if (sample<-1.0f) sample=-1.0f;
else if (sample>1.0f) sample=1.0f;
float sample2 = *inputChannel1++;
if (sample2<-1.0f) sample2=-1.0f;
else if (sample2>1.0f) sample2=1.0f;
outputBuffer[i*2] = (int16_t) ((sample) * 32767.0f);
outputBuffer[i*2+1] = (int16_t) ((sample2) * 32767.0f);
}
}
//use buf and size_out here then free the buf
free(buf);
}
}
av_packet_unref (packet_ptr);
if (decoded_frame)
{
av_frame_unref(decoded_frame);
decoded_frame = NULL;
}
}
Hope it helps...