BlackBerry ListField with Live Images

I want to display a ListField with a live (downloaded) image in every row of the ListField.

Check out this approach; you can use the following code for fetching the live images:
public static String getImageFromUrl(String url) {
    String imageData = null;
    try {
        imageData = getDataFromUrl(url);
        // Image img = Image.createImage(imageData.getBytes(), 0, imageData.length());
    } catch (Exception e1) {
        e1.printStackTrace();
    }
    return imageData;
}
public static String getDataFromUrl(String url) throws IOException {
    // Note: accumulating binary data (such as an image) in a String can
    // corrupt it; see the byte[] variant below.
    StringBuffer b = new StringBuffer();
    InputStream is = null;
    HttpConnection c = null;
    long len = 0;
    int ch = 0;
    ConnectionFactory connFact = new ConnectionFactory();
    ConnectionDescriptor connDesc = connFact.getConnection(url);
    if (connDesc == null) {
        throw new IOException("Couldn't open a connection to " + url);
    }
    c = (HttpConnection) connDesc.getConnection();
    // Alternatively: c = (HttpConnection) Connector.open(url);
    is = c.openInputStream();
    len = c.getLength();
    if (len != -1) {
        // Read exactly Content-Length bytes (stop early if the stream ends).
        for (int i = 0; i < len; i++) {
            if ((ch = is.read()) == -1) {
                break;
            }
            b.append((char) ch);
        }
    } else {
        // Read until the connection is closed.
        while ((ch = is.read()) != -1) {
            b.append((char) ch);
        }
    }
    is.close();
    c.close();
    return b.toString();
}
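One caveat: image payloads are binary, and round-tripping them through a String (as above) can corrupt the bytes. A safer variant, as a minimal sketch assuming the BlackBerry networking classes used above plus net.rim.device.api.system.EncodedImage, reads the response into a byte[] and decodes it into a Bitmap that each row can draw:

public static Bitmap getBitmapFromUrl(String url) {
    try {
        ConnectionFactory connFact = new ConnectionFactory();
        ConnectionDescriptor connDesc = connFact.getConnection(url);
        if (connDesc == null) {
            return null;
        }
        HttpConnection c = (HttpConnection) connDesc.getConnection();
        InputStream is = c.openInputStream();
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        int ch;
        while ((ch = is.read()) != -1) {
            buf.write(ch); // keep raw bytes; no char conversion
        }
        is.close();
        c.close();
        byte[] data = buf.toByteArray();
        // Decode the PNG/JPEG/GIF bytes into a displayable bitmap.
        EncodedImage img = EncodedImage.createEncodedImage(data, 0, data.length);
        return img.getBitmap();
    } catch (Exception e) {
        e.printStackTrace();
        return null;
    }
}

In your ListField's drawListRow() callback you can then paint the returned Bitmap with graphics.drawBitmap(), caching it per row so each image is fetched only once.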
Hope this will help you.

Related

Adding Object[] method to main()

I am trying to call this method from my main() function, but loadMap already has the BufferedReader, so I am trying to use that rather than creating my own new BufferedReader. How can I do this?
public static void main(String args[]) {
    // Validate args before using them.
    if (args.length < 1) {
        System.err.println("Usage:\n" + "java CrawlGui mapname");
        System.exit(1);
    }
    filename = args[0];
    System.out.println(MapIO.loadMap(filename)[0]);
    System.out.println(MapIO.loadMap(filename)[1]);
    List<String> names = new LinkedList<String>();
    try (BufferedReader reader = new BufferedReader(
            new FileReader(new File(filename)))) {
        String line;
        while ((line = reader.readLine()) != null)
            names.add(line);
        System.out.println(names);
    } catch (IOException e) {
        e.printStackTrace();
    }
    MapIO.loadMap(filename);
    launch(args);
}
/** Read information from a file created with saveMap.
 * @param filename Filename to read from
 * @return null if unsuccessful. If successful, an array of two Objects:
 *         [0] being the Player object (if found) and
 *         [1] being the start room.
 * Note: do not add the player to the room they appear in; the caller
 * is responsible for placing the player in the start room.
 */
public static Object[] loadMap(String filename) {
Player player = null;
try (BufferedReader bf = new BufferedReader(
        new FileReader(filename))) {
String line = bf.readLine();
int idcap = Integer.parseInt(line);
Room[] rooms = new Room[idcap];
for (int i = 0; i < idcap; ++i) {
line = bf.readLine();
if (line == null) {
return null;
}
rooms[i] = new Room(line);
}
for (int i = 0; i < idcap; ++i) { // for each room set up exits
line = bf.readLine();
int exitcount=Integer.parseInt(line);
for (int j=0; j < exitcount; ++j) {
line = bf.readLine();
if (line == null) {
return null;
}
int pos = line.indexOf(' ');
if (pos < 0) {
return null;
}
int target = Integer.parseInt(line.substring(0,pos));
String exname = line.substring(pos+1);
try {
rooms[i].addExit(exname, rooms[target]);
} catch (ExitExistsException e) {
return null;
} catch (NullRoomException e) {
return null;
}
}
}
for (int i = 0;i<idcap;++i) {
line = bf.readLine();
int itemcount = Integer.parseInt(line);
for (int j = 0; j < itemcount; ++j) {
line = bf.readLine();
if (line == null) {
return null;
}
Thing t = decodeThing(line, rooms[0]);
if (t == null) {
return null;
}
if (t instanceof Player) { // we don't add
player = (Player)t; // players to rooms
} else {
rooms[i].enter(t);
}
}
}
Object[] res = new Object[2];
res[0] = player;
res[1] = rooms[0];
return res;
} catch (IOException ex) {
return null;
} catch (IndexOutOfBoundsException ex) {
return null;
} catch (NumberFormatException nfe) {
return null;
}
}
You shouldn't do anything in main() other than call launch(). Move all the other startup code to your start() method. You can get the content of the args array using getParameters().getRaw():
@Override
public void start(Stage primaryStage) {
    // Validate the parameter list before touching it.
    List<String> params = getParameters().getRaw();
    if (params.size() < 1) {
        System.err.println("Usage:\n" + "java CrawlGui mapname");
        System.exit(1);
    }
    filename = params.get(0);
    System.out.println(MapIO.loadMap(filename)[0]);
    System.out.println(MapIO.loadMap(filename)[1]);
    List<String> names = new LinkedList<String>();
    try (BufferedReader reader = new BufferedReader(
            new FileReader(new File(filename)))) {
        String line;
        while ((line = reader.readLine()) != null)
            names.add(line);
        System.out.println(names);
    } catch (IOException e) {
        e.printStackTrace();
    }
    Object[] whateverThisThingIs = MapIO.loadMap(filename);
    // Now you have access to everything you need, at the point where you need it.
    // existing start() code goes here...
}

public static void main(String args[]) {
    launch(args);
}
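As a side note, the Parameters object distinguishes raw, named, and unnamed arguments (a small sketch; the --map option and the file names are hypothetical examples):

// Hypothetical launch: java CrawlGui --map=mymap.txt extra.txt
Parameters p = getParameters();            // javafx.application.Application.Parameters
Map<String, String> named = p.getNamed();  // {"map" -> "mymap.txt"}
List<String> unnamed = p.getUnnamed();     // ["extra.txt"]
List<String> raw = p.getRaw();             // ["--map=mymap.txt", "extra.txt"]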

FFMPEG AAC encoding causes audio to be lower in pitch

I built a sample application that encodes audio from PortAudio to AAC in an MP4 container (no video stream).
The resulting audio is lower in pitch.
#include "stdafx.h"
#include "TestRecording.h"
#include "libffmpeg.h"
TestRecording::TestRecording()
{
}
TestRecording::~TestRecording()
{
}
struct RecordingContext
{
RecordingContext()
{
formatContext = NULL;
audioStream = NULL;
audioFrame = NULL;
audioFrameframeNumber = 0;
}
libffmpeg::AVFormatContext* formatContext;
libffmpeg::AVStream* audioStream;
libffmpeg::AVFrame* audioFrame;
int audioFrameframeNumber;
};
static int AudioRecordCallback(const void *inputBuffer, void *outputBuffer,
unsigned long framesPerBuffer,
const PaStreamCallbackTimeInfo* timeInfo,
PaStreamCallbackFlags statusFlags,
void *userData)
{
RecordingContext* recordingContext = (RecordingContext*)userData;
libffmpeg::avcodec_fill_audio_frame(recordingContext->audioFrame,
recordingContext->audioFrame->channels,
recordingContext->audioStream->codec->sample_fmt,
static_cast<const unsigned char*>(inputBuffer),
(framesPerBuffer * sizeof(float) * recordingContext->audioFrame->channels),
0);
libffmpeg::AVPacket pkt;
libffmpeg::av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
int gotpacket;
int result = avcodec_encode_audio2(recordingContext->audioStream->codec, &pkt, recordingContext->audioFrame, &gotpacket);
if (result < 0)
{
LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't encode the audio frame to AAC");
return paContinue;
}
if (gotpacket)
{
pkt.stream_index = recordingContext->audioStream->index;
recordingContext->audioFrameframeNumber++;
// this codec requires no bitstream filter, just send it to the muxer!
result = libffmpeg::av_write_frame(recordingContext->formatContext, &pkt);
if (result < 0)
{
LOG(ERROR) << "Couldn't write the encoded audio frame";
libffmpeg::av_free_packet(&pkt);
return paContinue;
}
libffmpeg::av_free_packet(&pkt);
}
return paContinue;
}
static bool InitializeRecordingContext(RecordingContext* recordingContext)
{
int result = libffmpeg::avformat_alloc_output_context2(&recordingContext->formatContext, NULL, NULL, "C:\\Users\\Paul\\Desktop\\test.mp4");
if (result < 0)
{
LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't create output format context");
return false;
}
libffmpeg::AVCodec *audioCodec;
audioCodec = libffmpeg::avcodec_find_encoder(libffmpeg::AV_CODEC_ID_AAC);
if (audioCodec == NULL)
{
LOG(ERROR) << "Couldn't find the encoder for AAC";
}
recordingContext->audioStream = libffmpeg::avformat_new_stream(recordingContext->formatContext, audioCodec);
if (!recordingContext->audioStream)
{
LOG(ERROR) << "Couldn't create the audio stream";
return false;
}
recordingContext->audioStream->codec->bit_rate = 64000;
recordingContext->audioStream->codec->sample_fmt = libffmpeg::AV_SAMPLE_FMT_FLTP;
recordingContext->audioStream->codec->sample_rate = 48000;
recordingContext->audioStream->codec->channel_layout = AV_CH_LAYOUT_STEREO;
recordingContext->audioStream->codec->channels = libffmpeg::av_get_channel_layout_nb_channels(recordingContext->audioStream->codec->channel_layout);
recordingContext->audioStream->codecpar->bit_rate = recordingContext->audioStream->codec->bit_rate;
recordingContext->audioStream->codecpar->format = recordingContext->audioStream->codec->sample_fmt;
recordingContext->audioStream->codecpar->sample_rate = recordingContext->audioStream->codec->sample_rate;
recordingContext->audioStream->codecpar->channel_layout = recordingContext->audioStream->codec->channel_layout;
recordingContext->audioStream->codecpar->channels = recordingContext->audioStream->codec->channels;
result = libffmpeg::avcodec_open2(recordingContext->audioStream->codec, audioCodec, NULL);
if (result < 0)
{
LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't open the audio codec");
return false;
}
// create a new frame to store the audio samples
recordingContext->audioFrame = libffmpeg::av_frame_alloc();
if (!recordingContext->audioFrame)
{
LOG(ERROR) << "Couldn't allocate the output audio frame";
return false;
}
recordingContext->audioFrame->nb_samples = recordingContext->audioStream->codec->frame_size;
recordingContext->audioFrame->channel_layout = recordingContext->audioStream->codec->channel_layout;
recordingContext->audioFrame->channels = recordingContext->audioStream->codec->channels;
recordingContext->audioFrame->format = recordingContext->audioStream->codec->sample_fmt;
recordingContext->audioFrame->sample_rate = recordingContext->audioStream->codec->sample_rate;
result = libffmpeg::av_frame_get_buffer(recordingContext->audioFrame, 0);
if (result < 0)
{
LOG(ERROR) << "Couldn't initialize the output audio frame buffer";
return false;
}
// some formats want stream headers to be separate
if (!strcmp(recordingContext->formatContext->oformat->name, "mp4") || !strcmp(recordingContext->formatContext->oformat->name, "mov") || !strcmp(recordingContext->formatContext->oformat->name, "3gp"))
{
recordingContext->audioStream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
// open the output file
if (!(recordingContext->formatContext->oformat->flags & AVFMT_NOFILE))
{
result = libffmpeg::avio_open(&recordingContext->formatContext->pb, recordingContext->formatContext->filename, AVIO_FLAG_WRITE);
if (result < 0)
{
LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't open the output file");
return false;
}
}
// write the stream headers
result = libffmpeg::avformat_write_header(recordingContext->formatContext, NULL);
if (result < 0)
{
LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't write the headers to the file");
return false;
}
return true;
}
static bool FinalizeRecordingContext(RecordingContext* recordingContext)
{
int result = 0;
// write the trailing information
if (recordingContext->formatContext->pb)
{
result = libffmpeg::av_write_trailer(recordingContext->formatContext);
if (result < 0)
{
LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't write the trailer information");
return false;
}
}
// close all the codecs
for (int i = 0; i < (int)recordingContext->formatContext->nb_streams; i++)
{
result = libffmpeg::avcodec_close(recordingContext->formatContext->streams[i]->codec);
if (result < 0)
{
LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't close the codec");
return false;
}
}
// close the output file
if (recordingContext->formatContext->pb)
{
if (!(recordingContext->formatContext->oformat->flags & AVFMT_NOFILE))
{
result = libffmpeg::avio_close(recordingContext->formatContext->pb);
if (result < 0)
{
LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't close the output file");
return false;
}
}
}
// free the format context and all of its data
libffmpeg::avformat_free_context(recordingContext->formatContext);
recordingContext->formatContext = NULL;
recordingContext->audioStream = NULL;
if (recordingContext->audioFrame)
{
libffmpeg::av_frame_free(&recordingContext->audioFrame);
recordingContext->audioFrame = NULL;
}
return true;
}
int TestRecording::Test()
{
PaError result = paNoError;
result = Pa_Initialize();
if (result != paNoError) LOGINT_WITH_MESSAGE(ERROR, result, "Error initializing audio device framework");
RecordingContext recordingContext;
if (!InitializeRecordingContext(&recordingContext))
{
LOG(ERROR) << "Couldn't start recording file";
return 0;
}
auto defaultDevice = Pa_GetDefaultInputDevice();
auto deviceInfo = Pa_GetDeviceInfo(defaultDevice);
PaStreamParameters inputParameters;
inputParameters.device = defaultDevice;
inputParameters.channelCount = 2;
inputParameters.sampleFormat = paFloat32;
inputParameters.suggestedLatency = deviceInfo->defaultLowInputLatency;
inputParameters.hostApiSpecificStreamInfo = NULL;
PaStream* stream = NULL;
result = Pa_OpenStream(
&stream,
&inputParameters,
NULL,
48000,
1024,
paClipOff,
AudioRecordCallback,
&recordingContext);
if (result != paNoError)LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't open the audio stream");
result = Pa_StartStream(stream);
if (result != paNoError)LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't start the audio stream");
Sleep(1000 * 5);
result = Pa_StopStream(stream);
if (result != paNoError)LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't stop the audio stream");
if (!FinalizeRecordingContext(&recordingContext)) LOG(ERROR) << "Couldn't stop recording file";
result = Pa_CloseStream(stream);
if (result != paNoError)LOGINT_WITH_MESSAGE(ERROR, result, "Couldn't stop the audio stream");
return 0;
}
Here is the stdout, in case it helps.
https://gist.github.com/pauldotknopf/9f24a604ce1f8a081aa68da1bf169e98
Why is the audio lower in pitch? I assume I am overlooking a parameter that needs to be configured between PortAudio and FFMPEG. Is there something super obvious that I am missing?

Optimizations for Solitaire SPOJ

Problem Code: SOLIT
Problem Link: http://www.spoj.com/problems/SOLIT/
I tried solving the SPOJ problem Solitaire. However, I ended up with a TLE (Time Limit Exceeded). My current solution is taking around 2 seconds to execute. I have no idea how to optimize my solution further in order to reduce the time. So, I would be grateful for any help in this regard.
Link to my solution: https://ideone.com/eySI91
import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.FileDescriptor;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.OutputStreamWriter;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.Queue;
import java.util.StringTokenizer;
class Solitaire {
enum Direction {
TOP, RIGHT, DOWN, LEFT;
};
static class Piece {
int row, col;
public Piece(int row, int col) {
this.row = row;
this.col = col;
}
@Override
public boolean equals(Object o)
{
if (!(o instanceof Piece))
return false;
Piece p = (Piece)o;
return (row==p.row && col==p.col);
}
@Override
public int hashCode()
{
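// Note: with row and col in 1..8 this maps all 64 board cells into just
// 11 buckets, so HashSet probes collide often; an injective mix such as
// row*8 + col would spread the pieces better.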
return (row*10 + col)%11;
}
}
static class State {
HashSet<Piece> pieces;
public State() {
pieces = new HashSet<>(11);
}
public State(State s) {
pieces = new HashSet<>(11);
for (Piece p: s.pieces)
pieces.add(new Piece(p.row, p.col));
}
@Override
public boolean equals(Object o) {
if (!(o instanceof State))
return false;
State s = (State) o;
if (pieces.size()!=s.pieces.size())
return false;
for (Piece p: pieces)
{
if (!s.pieces.contains(p))
return false;
}
return true;
}
@Override
public int hashCode() {
final int MOD = 1000000007;
long code = 0;
for (Piece p: pieces) {
code = (code + p.hashCode())%MOD;
}
return (int) code;
}
@Override
public String toString()
{
String res = "";
for (Piece p: pieces)
res = res + " (" + p.row + ", " + p.col + ")";
return res;
}
public int getCloseness(State s)
{
int medianRow=0, medianCol=0, sMedianRow=0, sMedianCol=0;
for (Piece p: pieces)
{
medianRow+=p.row;
medianCol+=p.col;
}
medianRow/=4;
medianCol/=4;
for (Piece p: s.pieces)
{
sMedianRow+=p.row;
sMedianCol+=p.col;
}
sMedianRow/=4;
sMedianCol/=4;
int closeness = ((sMedianCol-medianCol)*(sMedianCol-medianCol)) + ((sMedianRow-medianRow)*(sMedianRow-medianRow));
return closeness;
}
}
static State makeMove(State curr, Piece piece, Direction dir, HashSet<State> visited) {
if (dir == Direction.TOP) {
if (piece.row==1)
return null;
if (curr.pieces.contains(new Piece(piece.row-1, piece.col)))
{
if (piece.row==2 || curr.pieces.contains(new Piece(piece.row-2, piece.col)))
return null;
else
{
State newState = new State(curr);
newState.pieces.remove(new Piece(piece.row, piece.col));
newState.pieces.add(new Piece(piece.row-2, piece.col));
if (visited.contains(newState))
return null;
else
return newState;
}
}
else
{
State newState = new State(curr);
newState.pieces.remove(new Piece(piece.row, piece.col));
newState.pieces.add(new Piece(piece.row-1, piece.col));
if (visited.contains(newState))
return null;
else
return newState;
}
}
else if (dir == Direction.RIGHT) {
if (piece.col==8)
return null;
if (curr.pieces.contains(new Piece(piece.row, piece.col+1)))
{
if (piece.col==7 || curr.pieces.contains(new Piece(piece.row, piece.col+2)))
return null;
else
{
State newState = new State(curr);
newState.pieces.remove(new Piece(piece.row, piece.col));
newState.pieces.add(new Piece(piece.row, piece.col+2));
if (visited.contains(newState))
return null;
else
return newState;
}
}
else
{
State newState = new State(curr);
newState.pieces.remove(new Piece(piece.row, piece.col));
newState.pieces.add(new Piece(piece.row, piece.col+1));
if (visited.contains(newState))
return null;
else
return newState;
}
}
else if (dir == Direction.DOWN) {
if (piece.row==8)
return null;
if (curr.pieces.contains(new Piece(piece.row+1, piece.col)))
{
if (piece.row==7 || curr.pieces.contains(new Piece(piece.row+2, piece.col)))
return null;
else
{
State newState = new State(curr);
newState.pieces.remove(new Piece(piece.row, piece.col));
newState.pieces.add(new Piece(piece.row+2, piece.col));
if (visited.contains(newState))
return null;
else
return newState;
}
}
else
{
State newState = new State(curr);
newState.pieces.remove(new Piece(piece.row, piece.col));
newState.pieces.add(new Piece(piece.row+1, piece.col));
if (visited.contains(newState))
return null;
else
return newState;
}
}
else // dir == Direction.LEFT
{
if (piece.col==1)
return null;
if (curr.pieces.contains(new Piece(piece.row, piece.col-1)))
{
if(piece.col==2 || curr.pieces.contains(new Piece(piece.row, piece.col-2)))
return null;
else
{
State newState = new State(curr);
newState.pieces.remove(new Piece(piece.row, piece.col));
newState.pieces.add(new Piece(piece.row, piece.col-2));
if (visited.contains(newState))
return null;
else
return newState;
}
}
else
{
State newState = new State(curr);
newState.pieces.remove(new Piece(piece.row, piece.col));
newState.pieces.add(new Piece(piece.row, piece.col-1));
if (visited.contains(newState))
return null;
else
return newState;
}
}
}
static boolean isReachableInEightMoves(State src, State target) {
Queue<State> q = new LinkedList<>();
HashSet<State> visited = new HashSet<>();
int closeness = src.getCloseness(target);
q.add(src);
int moves = 0;
while (!q.isEmpty() && moves <= 8) {
int levelNodes = q.size();
for (int i = 0; i < levelNodes; i++) {
State curr = q.remove();
if (curr.equals(target))
return true;
if (moves==8)
continue;
visited.add(curr);
for (Piece p: curr.pieces)
{
State newState = makeMove(curr, p, Direction.TOP, visited);
if (newState!=null)
{
int newCloseness = newState.getCloseness(target);
if (closeness>=newCloseness)
{
closeness=newCloseness;
visited.add(newState);
q.add(newState);
}
}
newState = makeMove(curr, p, Direction.RIGHT, visited);
if (newState!=null)
{
int newCloseness = newState.getCloseness(target);
if (closeness>=newCloseness)
{
closeness=newCloseness;
visited.add(newState);
q.add(newState);
}
}
newState = makeMove(curr, p, Direction.DOWN, visited);
if (newState!=null)
{
int newCloseness = newState.getCloseness(target);
if (closeness>=newCloseness)
{
closeness=newCloseness;
visited.add(newState);
q.add(newState);
}
}
newState = makeMove(curr, p, Direction.LEFT, visited);
if (newState!=null)
{
int newCloseness = newState.getCloseness(target);
if (closeness>=newCloseness)
{
closeness=newCloseness;
visited.add(newState);
q.add(newState);
}
}
}
}
moves++;
}
return false;
}
public static void main(String[] args) throws IOException {
BufferedWriter out = new BufferedWriter(new OutputStreamWriter(
new FileOutputStream(FileDescriptor.out), "ASCII"));
CustomScanner sc = new CustomScanner();
int t = sc.nextInt();
long start = System.currentTimeMillis();
while (t-- > 0) {
State src = new State(), target = new State();
for (int i = 0; i < 4; i++) {
src.pieces.add(new Piece(sc.nextInt(), sc.nextInt()));
}
for (int i = 0; i < 4; i++) {
target.pieces.add(new Piece(sc.nextInt(), sc.nextInt()));
}
if (isReachableInEightMoves(src, target))
out.write("YES");
else
out.write("NO");
out.newLine();
}
long end = System.currentTimeMillis();
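// Note: this timing line is handy locally, but remember to remove it
// before submitting; SPOJ compares output exactly, so the extra line
// will be judged as a wrong answer.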
out.write("Time to execute = " + Double.toString((end-start)/1000d));
out.flush();
}
static class CustomScanner {
BufferedReader br;
StringTokenizer st;
public CustomScanner() {
br = new BufferedReader(new InputStreamReader(System.in));
}
private String next() {
while (st == null || !st.hasMoreElements()) {
try {
st = new StringTokenizer(br.readLine());
} catch (IOException e) {
e.printStackTrace();
}
}
return st.nextToken();
}
public int nextInt() {
return Integer.parseInt(next());
}
public long nextLong() {
return Long.parseLong(next());
}
public double nextDouble() {
return Double.parseDouble(next());
}
public String nextLine() {
String str = "";
try {
str = br.readLine();
} catch (IOException e) {
e.printStackTrace();
}
return str;
}
}
}
Some notes regarding the implementation:
I am just doing a simple BFS traversal where each node is a state of the board.
I have defined a function called getCloseness() which measures the closeness of two states; a small worked example follows these notes. It is basically the square of the distance between the centroids of the two states. The centroid of a state is the sum of the row values of its pieces divided by 4, and likewise for columns.
After generating each new state, I check whether its closeness is less than or equal to the current closeness.
If it is not closer, I simply discard the newly discovered state.
If it is closer, I update the closeness value and insert the new state into the queue for future processing.
This process terminates when either the queue becomes empty or a state is discovered that is the same as the target state.
The above approach takes approximately 1-3 seconds for cases where a minimum of 7 moves is required. I would be grateful if you could tell me how I can optimize this solution further.
The expected time according to the problem is 0.896s.
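For concreteness, here is the closeness measure from the notes above, restated as a tiny standalone sketch (the piece positions are made-up examples; the integer division by 4 matches getCloseness()):

// Centroid-distance "closeness" between two 4-piece states.
int[][] a = {{1, 1}, {1, 2}, {2, 1}, {2, 2}}; // row/col sums 6, 6 -> centroid (1, 1) after integer division
int[][] b = {{7, 7}, {7, 8}, {8, 7}, {8, 8}}; // row/col sums 30, 30 -> centroid (7, 7)
int aRow = 0, aCol = 0, bRow = 0, bCol = 0;
for (int[] p : a) { aRow += p[0]; aCol += p[1]; }
for (int[] p : b) { bRow += p[0]; bCol += p[1]; }
aRow /= 4; aCol /= 4;
bRow /= 4; bCol /= 4;
int closeness = (bRow - aRow) * (bRow - aRow) + (bCol - aCol) * (bCol - aCol);
System.out.println(closeness); // 72: squared distance between (1,1) and (7,7)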

An .mp4 video created with the ffmpeg library (not the command line) does not play

I use the ffmpeg library to encode frames into an .mp4 video. The program runs smoothly without errors, but the output .mp4 video does not play. The file's properties do not even show that it is a video file; there is no information about a video stream.
The related code is:
const char* ouVideoFileName = "output.mp4";
AVCodecID ouCodec_id = CODEC_ID_H264;
But if I change it to:
const char* ouVideoFileName = "output.avi";
AVCodecID ouCodec_id = CODEC_ID_H264;
The .avi video plays correctly.
What's wrong with the .mp4 video?
You will need to share more of your code to find the exact issue. Normally, once you specify the container format in the output file name, you need to use av_guess_format to get the output format. After that you can use avcodec_find_encoder with the codec IDs the format suggests (outfmt->video_codec / outfmt->audio_codec).
You will need to do something like this:
AVFormatContext *m_outformat = NULL;
AVOutputFormat *outfmt = NULL;
std::string outfile = "clip_out.mp4";
outfmt = av_guess_format(NULL,outfile.c_str(),NULL);
if(outfmt == NULL)
{
ret = -1;
return ret;
}
else
{
m_outformat = avformat_alloc_context();
if(m_outformat)
{
m_outformat->oformat = outfmt;
_snprintf(m_outformat->filename, sizeof(m_outformat->filename), "%s", outfile.c_str());
}
else
{
ret = -1;
return ret;
}
}
AVCodec *out_vid_codec,*out_aud_codec;
out_vid_codec = out_aud_codec = NULL;
if(outfmt->video_codec != AV_CODEC_ID_NONE && m_in_vid_strm != NULL)
{
out_vid_codec = avcodec_find_encoder(outfmt->video_codec);
if(NULL == out_vid_codec)
{
PRINT_MSG("Could Not Find Vid Encoder")
ret = -1;
return ret;
}
else
{
PRINT_MSG("Found Out Vid Encoder ")
m_out_vid_strm = avformat_new_stream(m_outformat, out_vid_codec);
if(NULL == m_out_vid_strm)
{
PRINT_MSG("Failed to Allocate Output Vid Strm ")
ret = -1;
return ret;
}
else
{
PRINT_MSG("Allocated Video Stream ")
if(avcodec_copy_context(m_out_vid_strm->codec, m_informat->streams[m_in_vid_strm_idx]->codec) != 0)
{
PRINT_MSG("Failed to Copy Context ")
ret = -1;
return ret;
}
else
{
m_out_vid_strm->sample_aspect_ratio.den = m_out_vid_strm->codec->sample_aspect_ratio.den;
m_out_vid_strm->sample_aspect_ratio.num = m_in_vid_strm->codec->sample_aspect_ratio.num;
PRINT_MSG("Copied Context ")
m_out_vid_strm->codec->codec_id = m_in_vid_strm->codec->codec_id;
m_out_vid_strm->codec->time_base.num = 1;
m_out_vid_strm->codec->time_base.den = m_fps*(m_in_vid_strm->codec->ticks_per_frame);
m_out_vid_strm->time_base.num = 1;
m_out_vid_strm->time_base.den = 1000;
m_out_vid_strm->r_frame_rate.num = m_fps;
m_out_vid_strm->r_frame_rate.den = 1;
m_out_vid_strm->avg_frame_rate.den = 1;
m_out_vid_strm->avg_frame_rate.num = m_fps;
m_out_vid_strm->duration = (m_out_end_time - m_out_start_time)*1000;
}
}
}
}
if(outfmt->audio_codec != AV_CODEC_ID_NONE && m_in_aud_strm != NULL)
{
out_aud_codec = avcodec_find_encoder(outfmt->audio_codec);
if(NULL == out_aud_codec)
{
PRINT_MSG("Could Not Find Out Aud Encoder ")
ret = -1;
return ret;
}
else
{
PRINT_MSG("Found Out Aud Encoder ")
m_out_aud_strm = avformat_new_stream(m_outformat, out_aud_codec);
if(NULL == m_out_aud_strm)
{
PRINT_MSG("Failed to Allocate Out Vid Strm ")
ret = -1;
return ret;
}
else
{
if(avcodec_copy_context(m_out_aud_strm->codec, m_informat->streams[m_in_aud_strm_idx]->codec) != 0)
{
PRINT_MSG("Failed to Copy Context ")
ret = -1;
return ret;
}
else
{
PRINT_MSG("Copied Context ")
m_out_aud_strm->codec->codec_id = m_in_aud_strm->codec->codec_id;
m_out_aud_strm->codec->codec_tag = 0;
m_out_aud_strm->pts = m_in_aud_strm->pts;
m_out_aud_strm->duration = m_in_aud_strm->duration;
m_out_aud_strm->time_base.num = m_in_aud_strm->time_base.num;
m_out_aud_strm->time_base.den = m_in_aud_strm->time_base.den;
}
}
}
}
if (!(outfmt->flags & AVFMT_NOFILE))
{
if (avio_open2(&m_outformat->pb, outfile.c_str(), AVIO_FLAG_WRITE,NULL, NULL) < 0)
{
PRINT_VAL("Could Not Open File ", outfile)
ret = -1;
return ret;
}
}
/* Write the stream header, if any. */
if (avformat_write_header(m_outformat, NULL) < 0)
{
PRINT_VAL("Error Occurred While Writing Header ", outfile)
ret = -1;
return ret;
}
else
{
PRINT_MSG("Written Output header ")
m_init_done = true;
}
Now you can start encoding the frames. Also make sure that, after the encoding loop, you call av_write_trailer(): for MP4 the index (the moov atom) is only written at that point, and without it players will not recognize the stream.
I previously followed the example of decoding_encoding.c in the FFmpeg documentation.
Later on, I followed the example of muxing.c, and now it works!

Audio encoding using avcodec_fill_audio_frame() and memory leaks

As part of encoding decoded audio packets, I'm using avcodec_fill_audio_frame(). I'm passing an allocated AVFrame pointer along with a buffer containing the decoded samples and other parameters (number of channels, sample format, buffer size). Though the encoding is working fine, I'm not able to completely eliminate the memory leaks. I've taken care of most things, but still I'm not able to detect the leakage.
Below is the function which I'm using for encoding. Please suggest something.
AudioSample contains the decoded data and is completely managed in a different class (freed in the class destructor). I'm freeing the AVFrame in the FfmpegEncoder destructor, and the AVPacket is freed every time using av_free_packet() with av_destruct_packet enabled. What more do I need to free?
void FfmpegEncoder::WriteAudioSample(AudioSample *audS)
{
int num_audio_frame = 0;
AVCodecContext *c = NULL;
// AVFrame *frame;
AVPacket pkt;
av_init_packet(&pkt);
pkt.destruct = av_destruct_packet;
pkt.data = NULL;
pkt.size = 0;
int ret = 0, got_packet = 0;
c = m_out_aud_strm->codec;
static int64_t aud_pts_in = -1;
if((audS != NULL) && (audS->GetSampleLength() > 0) )
{
int byte_per_sample = av_get_bytes_per_sample(c->sample_fmt);
PRINT_VAL("Byte Per Sample ", byte_per_sample)
m_frame->nb_samples = (audS->GetSampleLength())/(c->channels*av_get_bytes_per_sample(c->sample_fmt));
if(m_frame->nb_samples == c->frame_size)
{
#if 1
if(m_need_resample && (c->channels >= 2))
{
uint8_t * t_buff1 = new uint8_t[audS->GetSampleLength()];
if(t_buff1 != NULL)
{
for(int64_t i = 0; i< m_frame->nb_samples; i++)
{
memcpy(t_buff1 + i*byte_per_sample, (uint8_t*)((uint8_t*)audS->GetAudioSampleData() + i*byte_per_sample*c->channels), byte_per_sample);
memcpy(t_buff1 + (audS->GetSampleLength())/2 + i*byte_per_sample, (uint8_t*)((uint8_t*)audS->GetAudioSampleData() + i*byte_per_sample*c->channels+ byte_per_sample), byte_per_sample);
}
audS->FillAudioSample(t_buff1, audS->GetSampleLength());
delete[] t_buff1;
}
}
#endif
ret = avcodec_fill_audio_frame(m_frame, c->channels, c->sample_fmt, (uint8_t*)audS->GetAudioSampleData(),m_frame->nb_samples*byte_per_sample*c->channels, 0);
//ret = avcodec_fill_audio_frame(&frame, c->channels, c->sample_fmt, t_buff,frame.nb_samples*byte_per_sample*c->channels, 0);
if(ret != 0)
{
PRINT_MSG("Avcodec Fill Audio Failed ")
}
else
{
got_packet = 0;
ret = avcodec_encode_audio2(c, &pkt, m_frame, &got_packet);
if(ret < 0 || got_packet == 0)
{
PRINT_MSG("failed to encode audio ")
}
else
{
PRINT_MSG("Audio Packet Encoded ");
aud_pts_in++;
pkt.pts = aud_pts_in;
pkt.dts = pkt.pts;
pkt.stream_index = m_out_aud_strm->index;
ret = av_interleaved_write_frame(oc, &pkt);
if(ret != 0)
{
PRINT_MSG("Error Write Audio PKT ")
}
else
{
PRINT_MSG("Audio PKT Writen ")
}
}
}
}
avcodec_flush_buffers(c);
// avcodec_free_frame(&frame);
}
av_free_packet(&pkt);
}
Thanks,
Pradeep
//================== SEND AUDIO OUTPUT =======================
void AVOutputStream::sendAudioOutput (AVFrame* inputFrame)
{
AVCodecContext *codecCtx = pOutputAudioStream->codec;
// set source data variables
sourceNumberOfChannels = inputFrame->channels;
sourceChannelLayout = inputFrame->channel_layout;
sourceSampleRate = inputFrame->sample_rate;
_sourceSampleFormat = (AVSampleFormat)inputFrame->format;
sourceNumberOfSamples = inputFrame->nb_samples;
// set destination data variables
destinationNumberOfChannels = codecCtx->channels;
destinationChannelLayout = codecCtx->channel_layout;
destinationSampleRate = codecCtx->sample_rate;
destinationSampleFormat = codecCtx->sample_fmt;//AV_SAMPLE_FMT_FLTP;//EncodecCtx->sample_fmt;
destinationLineSize = 0;
destinationData = NULL;
int returnVal = 0;
if (startDecode == false)
{
startDecode = true;
resamplerCtx = swr_alloc_set_opts(NULL,
destinationChannelLayout,
destinationSampleFormat,
destinationSampleRate,
sourceChannelLayout,
_sourceSampleFormat,
sourceSampleRate,
0,
NULL);
if (resamplerCtx == NULL)
{
std::cout << "Unable to create the resampler context for the audio frame";
isConnected = false;
}
// initialize the resampling context
returnVal = swr_init(resamplerCtx);
if (returnVal < 0)
{
std::cout << "Unable to init the resampler context, error:";
isConnected = false;
}
} //if (startDecode == false)
if (sourceSampleRate != 0)
destinationNumberOfSamples = destinationSampleRate/sourceSampleRate * sourceNumberOfSamples;
// allocate the destination samples buffer
returnVal = av_samples_alloc_array_and_samples(&destinationData,
&destinationLineSize,
destinationNumberOfChannels,
destinationNumberOfSamples,
destinationSampleFormat,
0);
if (returnVal < 0)
{
std::cout << "Unable to allocate destination samples, error";
isConnected = false;
}
// convert to destination format
returnVal = swr_convert(resamplerCtx,
destinationData,
destinationNumberOfSamples,
(const uint8_t **)inputFrame->data, //sourceData,
sourceNumberOfSamples);
if (returnVal < 0)
{
std::cout << "Resampling failed, error \n";
isConnected = false;
}
int bufferSize = av_samples_get_buffer_size(&destinationLineSize,
destinationNumberOfChannels,
destinationNumberOfSamples,
destinationSampleFormat,
0);
//whithout fifo
pOutputAudioFrame = av_frame_alloc();
pOutputAudioFrame->nb_samples = codecCtx->frame_size;//frameNumberOfSamples;
pOutputAudioFrame->format = codecCtx->sample_fmt;
pOutputAudioFrame->channel_layout = codecCtx->channel_layout;
pOutputAudioFrame->channels = codecCtx->channels;
pOutputAudioFrame->sample_rate = codecCtx->sample_rate;
returnVal = avcodec_fill_audio_frame(pOutputAudioFrame,
pOutputAudioFrame->channels,
(AVSampleFormat)pOutputAudioFrame->format,
(const uint8_t *)destinationData[0],
bufferSize,0);
pOutputAudioFrame->pts = inputFrame->pts;
if (returnVal < 0)
{
std::cout << "Unable to fill the audio frame wsampleIndexith captured audio data,error";
isConnected = false;
}
// encode the audio frame, fill a packet for streaming
av_init_packet(&outAudioPacket);
outAudioPacket.data = NULL;
outAudioPacket.size = 0;
outAudioPacket.dts = outAudioPacket.pts = 0;
int gotPacket;
// encoding
returnVal = avcodec_encode_audio2(codecCtx, &outAudioPacket, pOutputAudioFrame, &gotPacket);
// free buffers
av_freep(&destinationData[0]);
av_freep(&destinationData);
av_frame_free(&pOutputAudioFrame);
if (gotPacket)
{
outAudioPacket.stream_index = pOutputAudioStream->index;
outAudioPacket.flags |= AV_PKT_FLAG_KEY;
returnVal = av_interleaved_write_frame(pOutputFormatCtx, &outAudioPacket);
//returnVal = av_write_frame(pOutputFormatCtx, &outAudioPacket);
if (returnVal != 0)
{
std::cout << "Cannot write audio packet \n";
isConnected = false;
}
av_free_packet(&outAudioPacket);
} // if (gotPacket)
}
You can see that after resampling I free the used buffers:
// free buffers
av_freep(&destinationData[0]);
av_freep(&destinationData);
