#include <stdlib.h>
#include <string.h>
+#include "libavutil/channel_layout.h"
#include "libavutil/frame.h"
#include "libavutil/mem.h"
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame,
FILE *outfile)
{
- int len, got_frame;
+ int16_t *interleave_buf;
+ int ret, data_size, i;
- while (pkt->size > 0) {
- len = avcodec_decode_audio4(dec_ctx, frame, &got_frame, pkt);
- if (len < 0) {
- fprintf(stderr, "Error while decoding\n");
+ /* send the packet with the compressed data to the decoder */
+ ret = avcodec_send_packet(dec_ctx, pkt);
+ if (ret < 0) {
+ fprintf(stderr, "Error submitting the packet to the decoder\n");
+ exit(1);
+ }
+
+ /* read all the output frames (in general there may be any number of them) */
+ while (ret >= 0) {
+ ret = avcodec_receive_frame(dec_ctx, frame);
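+ /* AVERROR(EAGAIN) means the decoder needs more input and AVERROR_EOF
+ * means it has been fully flushed; neither is a decoding error */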
+ if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+ return;
+ else if (ret < 0) {
+ fprintf(stderr, "Error during decoding\n");
+ exit(1);
+ }
+
+ /* the stream parameters may change at any time, check that they are
+ * what we expect */
+ if (av_get_channel_layout_nb_channels(frame->channel_layout) != 2 ||
+ frame->format != AV_SAMPLE_FMT_S16P) {
+ fprintf(stderr, "Unsupported frame parameters\n");
exit(1);
}
- if (got_frame) {
- /* if a frame has been decoded, output it */
- int data_size = av_samples_get_buffer_size(NULL, dec_ctx->channels,
- frame->nb_samples,
- dec_ctx->sample_fmt, 1);
- fwrite(frame->data[0], 1, data_size, outfile);
+
+ /* The decoded data is signed 16-bit planar -- each channel in its own
+ * buffer. We interleave the two channels manually here, but using
+ * libavresample is recommended instead (see the sketch after this function). */
+ data_size = sizeof(*interleave_buf) * 2 * frame->nb_samples;
+ interleave_buf = av_malloc(data_size);
+ if (!interleave_buf)
+ exit(1);
+
+ for (i = 0; i < frame->nb_samples; i++) {
+ interleave_buf[2 * i] = ((int16_t*)frame->data[0])[i];
+ interleave_buf[2 * i + 1] = ((int16_t*)frame->data[1])[i];
}
- pkt->size -= len;
- pkt->data += len;
+ fwrite(interleave_buf, 1, data_size, outfile);
+ av_freep(&interleave_buf);
}
}
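The comment in decode() recommends libavresample for the interleaving but the patch does not show that route. The sketch below is a minimal illustration of it, outside the patch and under stated assumptions: it uses libavresample's AVOptions-based setup (avresample_alloc_context(), av_opt_set_int(), avresample_open(), avresample_convert()), assumes the stereo 16-bit planar input checked above, and the helper names open_interleaver() and interleave_frame() are invented for this example; error handling is kept minimal.

#include "libavresample/avresample.h"
#include "libavutil/channel_layout.h"
#include "libavutil/frame.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

/* Set up a resampler that only changes the packing: stereo S16 planar in,
 * stereo S16 interleaved out, same sample rate on both sides. */
static AVAudioResampleContext *open_interleaver(int sample_rate)
{
    AVAudioResampleContext *avr = avresample_alloc_context();
    if (!avr)
        return NULL;
    av_opt_set_int(avr, "in_channel_layout",  AV_CH_LAYOUT_STEREO, 0);
    av_opt_set_int(avr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
    av_opt_set_int(avr, "in_sample_rate",     sample_rate,         0);
    av_opt_set_int(avr, "out_sample_rate",    sample_rate,         0);
    av_opt_set_int(avr, "in_sample_fmt",      AV_SAMPLE_FMT_S16P,  0);
    av_opt_set_int(avr, "out_sample_fmt",     AV_SAMPLE_FMT_S16,   0);
    if (avresample_open(avr) < 0) {
        avresample_free(&avr);
        return NULL;
    }
    return avr;
}

/* Interleave one decoded frame into out (at least 2 * 2 * nb_samples bytes);
 * returns the number of samples written per channel, or a negative error. */
static int interleave_frame(AVAudioResampleContext *avr, AVFrame *frame,
                            uint8_t *out, int out_size)
{
    return avresample_convert(avr, &out, out_size, frame->nb_samples,
                              frame->extended_data, frame->linesize[0],
                              frame->nb_samples);
}

With such a context created once after the decoder is opened, the manual loop in decode() could be replaced by a single interleave_frame() call whose output is written to outfile.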
uint8_t inbuf[AUDIO_INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
uint8_t *data;
size_t data_size;
- AVPacket avpkt;
+ AVPacket *pkt;
AVFrame *decoded_frame = NULL;
if (argc <= 2) {
/* register all the codecs */
avcodec_register_all();
- av_init_packet(&avpkt);
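+ /* the packet is now heap-allocated; it is released with av_packet_free() below */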
+ pkt = av_packet_alloc();
/* find the MPEG audio decoder */
codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
}
}
- ret = av_parser_parse2(parser, c, &avpkt.data, &avpkt.size,
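+ /* use the parser to split the raw input into packets for the decoder */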
+ ret = av_parser_parse2(parser, c, &pkt->data, &pkt->size,
data, data_size,
AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
if (ret < 0) {
data += ret;
data_size -= ret;
- if (avpkt.size)
- decode(c, &avpkt, decoded_frame, outfile);
+ if (pkt->size)
+ decode(c, pkt, decoded_frame, outfile);
if (data_size < AUDIO_REFILL_THRESH) {
memmove(inbuf, data, data_size);
}
}
+ /* flush the decoder */
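+ /* a packet with data set to NULL and size set to 0 signals end of
+ * stream and puts the decoder into draining mode */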
+ pkt->data = NULL;
+ pkt->size = 0;
+ decode(c, pkt, decoded_frame, outfile);
+
fclose(outfile);
fclose(f);
avcodec_free_context(&c);
av_parser_close(parser);
av_frame_free(&decoded_frame);
+ av_packet_free(&pkt);
return 0;
}