--- /dev/null
+/*
+ * Copyright (c) 2001 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/**
+ * @file
+ * libavcodec API use example.
+ *
+ * Note that libavcodec only handles codecs (mpeg, mpeg4, etc...), not
+ * file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...).
+ * See the libavformat library for format handling.
+ */
+
+#include <math.h>
+
- #include <libavutil/imgutils.h>
+#include <libavutil/opt.h>
+#include <libavcodec/avcodec.h>
+#include <libavutil/audioconvert.h>
++#include <libavutil/imgutils.h>
+#include <libavutil/mathematics.h>
+#include <libavutil/samplefmt.h>
+
+#define INBUF_SIZE 4096
+#define AUDIO_INBUF_SIZE 20480
+#define AUDIO_REFILL_THRESH 4096
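+/* refill the audio input buffer once fewer than this many bytes remain */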
+
+/* check that a given sample format is supported by the encoder */
+static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
+{
+ const enum AVSampleFormat *p = codec->sample_fmts;
+
+ while (*p != AV_SAMPLE_FMT_NONE) {
+ if (*p == sample_fmt)
+ return 1;
+ p++;
+ }
+ return 0;
+}
+
+/* just pick the highest supported samplerate */
+static int select_sample_rate(AVCodec *codec)
+{
+ const int *p;
+ int best_samplerate = 0;
+
+ if (!codec->supported_samplerates)
+ return 44100;
+
+ p = codec->supported_samplerates;
+ while (*p) {
+ best_samplerate = FFMAX(*p, best_samplerate);
+ p++;
+ }
+ return best_samplerate;
+}
+
+/* select layout with the highest channel count */
+static int select_channel_layout(AVCodec *codec)
+{
+ const uint64_t *p;
+ uint64_t best_ch_layout = 0;
+ int best_nb_channels = 0;
+
+ if (!codec->channel_layouts)
+ return AV_CH_LAYOUT_STEREO;
+
+ p = codec->channel_layouts;
+ while (*p) {
+ int nb_channels = av_get_channel_layout_nb_channels(*p);
+
+ if (nb_channels > best_nb_channels) {
+ best_ch_layout = *p;
+ best_nb_channels = nb_channels;
+ }
+ p++;
+ }
+ return best_ch_layout;
+}
+
+/*
+ * Audio encoding example
+ */
+static void audio_encode_example(const char *filename)
+{
+ AVCodec *codec;
+ AVCodecContext *c= NULL;
+ AVFrame *frame;
+ AVPacket pkt;
+ int i, j, k, ret, got_output;
+ int buffer_size;
+ FILE *f;
+ uint16_t *samples;
+ float t, tincr;
+
+ printf("Encode audio file %s\n", filename);
+
+ /* find the MP2 encoder */
+ codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
+ if (!codec) {
+ fprintf(stderr, "codec not found\n");
+ exit(1);
+ }
+
+ c = avcodec_alloc_context3(codec);
+
+ /* put sample parameters */
+ c->bit_rate = 64000;
+
+ /* check that the encoder supports s16 pcm input */
+ c->sample_fmt = AV_SAMPLE_FMT_S16;
+ if (!check_sample_fmt(codec, c->sample_fmt)) {
+ fprintf(stderr, "encoder does not support %s\n",
+ av_get_sample_fmt_name(c->sample_fmt));
+ exit(1);
+ }
+
+ /* select other audio parameters supported by the encoder */
+ c->sample_rate = select_sample_rate(codec);
+ c->channel_layout = select_channel_layout(codec);
+ c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
+
+ /* open it */
+ if (avcodec_open2(c, codec, NULL) < 0) {
+ fprintf(stderr, "could not open codec\n");
+ exit(1);
+ }
+
+ f = fopen(filename, "wb");
+ if (!f) {
+ fprintf(stderr, "could not open %s\n", filename);
+ exit(1);
+ }
+
+ /* frame containing input raw audio */
+ frame = avcodec_alloc_frame();
+ if (!frame) {
+ fprintf(stderr, "could not allocate audio frame\n");
+ exit(1);
+ }
+
+ frame->nb_samples = c->frame_size;
+ frame->format = c->sample_fmt;
+ frame->channel_layout = c->channel_layout;
+
+ /* the codec gives us the frame size, in samples,
+ * we calculate the size of the samples buffer in bytes */
+ buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
+ c->sample_fmt, 0);
+ samples = av_malloc(buffer_size);
+ if (!samples) {
+ fprintf(stderr, "could not allocate %d bytes for samples buffer\n",
+ buffer_size);
+ exit(1);
+ }
+ /* setup the data pointers in the AVFrame */
+ ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
+ (const uint8_t*)samples, buffer_size, 0);
+ if (ret < 0) {
+ fprintf(stderr, "could not setup audio frame\n");
+ exit(1);
+ }
+
+ /* encode a single tone sound */
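+ /* tincr is the per-sample phase increment of a 440 Hz sine wave */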
+ t = 0;
+ tincr = 2 * M_PI * 440.0 / c->sample_rate;
+ for(i=0;i<200;i++) {
+ av_init_packet(&pkt);
+ pkt.data = NULL; // packet data will be allocated by the encoder
+ pkt.size = 0;
+
+ for (j = 0; j < c->frame_size; j++) {
+ samples[2*j] = (int)(sin(t) * 10000);
+
+ for (k = 1; k < c->channels; k++)
+ samples[2*j + k] = samples[2*j];
+ t += tincr;
+ }
+ /* encode the samples */
+ ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
+ if (ret < 0) {
+ fprintf(stderr, "error encoding audio frame\n");
+ exit(1);
+ }
+ if (got_output) {
+ fwrite(pkt.data, 1, pkt.size, f);
+ av_free_packet(&pkt);
+ }
+ }
+ fclose(f);
+
+ av_freep(&samples);
+ av_freep(&frame);
+ avcodec_close(c);
+ av_free(c);
+}
+
+/*
+ * Audio decoding.
+ */
+static void audio_decode_example(const char *outfilename, const char *filename)
+{
+ AVCodec *codec;
+ AVCodecContext *c= NULL;
+ int len;
+ FILE *f, *outfile;
+ uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
+ AVPacket avpkt;
+ AVFrame *decoded_frame = NULL;
+
+ av_init_packet(&avpkt);
+
+ printf("Decode audio file %s\n", filename);
+
+ /* find the mpeg audio decoder */
+ codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
+ if (!codec) {
+ fprintf(stderr, "codec not found\n");
+ exit(1);
+ }
+
+ c = avcodec_alloc_context3(codec);
+
+ /* open it */
+ if (avcodec_open2(c, codec, NULL) < 0) {
+ fprintf(stderr, "could not open codec\n");
+ exit(1);
+ }
+
+ f = fopen(filename, "rb");
+ if (!f) {
+ fprintf(stderr, "could not open %s\n", filename);
+ exit(1);
+ }
+ outfile = fopen(outfilename, "wb");
+ if (!outfile) {
+ av_free(c);
+ exit(1);
+ }
+
+ /* decode until eof */
+ avpkt.data = inbuf;
+ avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);
+
+ while (avpkt.size > 0) {
+ int got_frame = 0;
+
+ if (!decoded_frame) {
+ if (!(decoded_frame = avcodec_alloc_frame())) {
+ fprintf(stderr, "out of memory\n");
+ exit(1);
+ }
+ } else
+ avcodec_get_frame_defaults(decoded_frame);
+
+ len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
+ if (len < 0) {
+ fprintf(stderr, "Error while decoding\n");
+ exit(1);
+ }
+ if (got_frame) {
+ /* if a frame has been decoded, output it */
+ int data_size = av_samples_get_buffer_size(NULL, c->channels,
+ decoded_frame->nb_samples,
+ c->sample_fmt, 1);
+ fwrite(decoded_frame->data[0], 1, data_size, outfile);
+ }
+ avpkt.size -= len;
+ avpkt.data += len;
+ avpkt.dts =
+ avpkt.pts = AV_NOPTS_VALUE;
+ if (avpkt.size < AUDIO_REFILL_THRESH) {
+ /* Refill the input buffer, to avoid trying to decode
+ * incomplete frames. Instead of this, one could also use
+ * a parser, or use a proper container format through
+ * libavformat. */
+ memmove(inbuf, avpkt.data, avpkt.size);
+ avpkt.data = inbuf;
+ len = fread(avpkt.data + avpkt.size, 1,
+ AUDIO_INBUF_SIZE - avpkt.size, f);
+ if (len > 0)
+ avpkt.size += len;
+ }
+ }
+
+ fclose(outfile);
+ fclose(f);
+
+ avcodec_close(c);
+ av_free(c);
+ av_free(decoded_frame);
+}
+
+/*
+ * Video encoding example
+ */
+static void video_encode_example(const char *filename, int codec_id)
+{
+ AVCodec *codec;
+ AVCodecContext *c= NULL;
- int i, out_size, x, y, outbuf_size;
- uint8_t *outbuf;
- int had_output=0;
++ int i, ret, x, y, got_output;
+ FILE *f;
+ AVFrame *picture;
++ AVPacket pkt;
++ uint8_t endcode[] = { 0, 0, 1, 0xb7 };
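++ /* 00 00 01 b7 is the MPEG-1/2 sequence end code */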
+
+ printf("Encode video file %s\n", filename);
+
+ /* find the mpeg1 video encoder */
+ codec = avcodec_find_encoder(codec_id);
+ if (!codec) {
+ fprintf(stderr, "codec not found\n");
+ exit(1);
+ }
+
+ c = avcodec_alloc_context3(codec);
+ picture= avcodec_alloc_frame();
+
+ /* put sample parameters */
+ c->bit_rate = 400000;
+ /* resolution must be a multiple of two */
+ c->width = 352;
+ c->height = 288;
+ /* frames per second */
+ c->time_base= (AVRational){1,25};
+ c->gop_size = 10; /* emit one intra frame every ten frames */
+ c->max_b_frames=1;
+ c->pix_fmt = PIX_FMT_YUV420P;
+
+ if(codec_id == AV_CODEC_ID_H264)
+ av_opt_set(c->priv_data, "preset", "slow", 0);
+
+ /* open it */
+ if (avcodec_open2(c, codec, NULL) < 0) {
+ fprintf(stderr, "could not open codec\n");
+ exit(1);
+ }
+
+ f = fopen(filename, "wb");
+ if (!f) {
+ fprintf(stderr, "could not open %s\n", filename);
+ exit(1);
+ }
+
- /* alloc image and output buffer */
- outbuf_size = 100000 + 12*c->width*c->height;
- outbuf = malloc(outbuf_size);
-
+ /* the image can be allocated by any means and av_image_alloc() is
+ * just the most convenient way if av_malloc() is to be used */
- av_image_alloc(picture->data, picture->linesize,
- c->width, c->height, c->pix_fmt, 1);
++ ret = av_image_alloc(picture->data, picture->linesize, c->width, c->height,
++ c->pix_fmt, 32);
++ if (ret < 0) {
++ fprintf(stderr, "could not alloc raw picture buffer\n");
++ exit(1);
++ }
++
++ picture->format = c->pix_fmt;
++ picture->width = c->width;
++ picture->height = c->height;
+
+ /* encode 1 second of video */
+ for(i=0;i<25;i++) {
++ av_init_packet(&pkt);
++ pkt.data = NULL; // packet data will be allocated by the encoder
++ pkt.size = 0;
++
+ fflush(stdout);
+ /* prepare a dummy image */
+ /* Y */
+ for(y=0;y<c->height;y++) {
+ for(x=0;x<c->width;x++) {
+ picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
+ }
+ }
+
+ /* Cb and Cr */
+ for(y=0;y<c->height/2;y++) {
+ for(x=0;x<c->width/2;x++) {
+ picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
+ picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
+ }
+ }
+
++ picture->pts = i;
++
+ /* encode the image */
- out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
- had_output |= out_size;
- printf("encoding frame %3d (size=%5d)\n", i, out_size);
- fwrite(outbuf, 1, out_size, f);
++ ret = avcodec_encode_video2(c, &pkt, picture, &got_output);
++ if (ret < 0) {
++ fprintf(stderr, "error encoding frame\n");
++ exit(1);
++ }
++
++ if (got_output) {
++ printf("encoding frame %3d (size=%5d)\n", i, pkt.size);
++ fwrite(pkt.data, 1, pkt.size, f);
++ av_free_packet(&pkt);
++ }
+ }
+
+ /* get the delayed frames */
- for(; out_size || !had_output; i++) {
++ for (got_output = 1; got_output; i++) {
+ fflush(stdout);
+
- out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
- had_output |= out_size;
- printf("write frame %3d (size=%5d)\n", i, out_size);
- fwrite(outbuf, 1, out_size, f);
++ ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
++ if (ret < 0) {
++ fprintf(stderr, "error encoding frame\n");
++ exit(1);
++ }
++
++ if (got_output) {
++ printf("write frame %3d (size=%5d)\n", i, pkt.size);
++ fwrite(pkt.data, 1, pkt.size, f);
++ av_free_packet(&pkt);
++ }
+ }
+
+ /* add sequence end code to have a real mpeg file */
- outbuf[0] = 0x00;
- outbuf[1] = 0x00;
- outbuf[2] = 0x01;
- outbuf[3] = 0xb7;
- fwrite(outbuf, 1, 4, f);
++ fwrite(endcode, 1, sizeof(endcode), f);
+ fclose(f);
- free(outbuf);
- av_free(picture->data[0]);
+
+ avcodec_close(c);
+ av_free(c);
++ av_freep(&picture->data[0]);
+ av_free(picture);
+ printf("\n");
+}
+
+/*
+ * Video decoding example
+ */
+
+static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
+ char *filename)
+{
+ FILE *f;
+ int i;
+
+ f=fopen(filename,"w");
+ fprintf(f,"P5\n%d %d\n%d\n",xsize,ysize,255);
+ for(i=0;i<ysize;i++)
+ fwrite(buf + i * wrap,1,xsize,f);
+ fclose(f);
+}
+
+static void video_decode_example(const char *outfilename, const char *filename)
+{
+ AVCodec *codec;
+ AVCodecContext *c= NULL;
+ int frame, got_picture, len;
+ FILE *f;
+ AVFrame *picture;
+ uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
+ char buf[1024];
+ AVPacket avpkt;
+
+ av_init_packet(&avpkt);
+
+ /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
+ memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
+
+ printf("Decode video file %s\n", filename);
+
+ /* find the mpeg1 video decoder */
+ codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
+ if (!codec) {
+ fprintf(stderr, "codec not found\n");
+ exit(1);
+ }
+
+ c = avcodec_alloc_context3(codec);
+ picture= avcodec_alloc_frame();
+
+ if(codec->capabilities&CODEC_CAP_TRUNCATED)
+ c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
+
+ /* For some codecs, such as msmpeg4 and mpeg4, width and height
+ MUST be initialized here because this information is not
+ available in the bitstream. */
+
+ /* open it */
+ if (avcodec_open2(c, codec, NULL) < 0) {
+ fprintf(stderr, "could not open codec\n");
+ exit(1);
+ }
+
+ /* the codec gives us the frame size, in samples */
+
+ f = fopen(filename, "rb");
+ if (!f) {
+ fprintf(stderr, "could not open %s\n", filename);
+ exit(1);
+ }
+
+ frame = 0;
+ for(;;) {
+ avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
+ if (avpkt.size == 0)
+ break;
+
+ /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
+ and this is the only method to use them because you cannot
+ know the compressed data size before analysing it.
+
+ BUT some other codecs (msmpeg4, mpeg4) are inherently frame
+ based, so you must call them with all the data for one
+ frame exactly. You must also initialize 'width' and
+ 'height' before opening such a decoder. */
+
+ /* NOTE2: some codecs allow the raw parameters (frame size,
+ sample rate) to be changed at any frame. We handle this, so
+ you should also take care of it */
+
+ /* here, we use a stream based decoder (mpeg1video), so we
+ feed the decoder and see if it can decode a frame */
+ avpkt.data = inbuf;
+ while (avpkt.size > 0) {
+ len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
+ if (len < 0) {
+ fprintf(stderr, "Error while decoding frame %d\n", frame);
+ exit(1);
+ }
+ if (got_picture) {
+ printf("saving frame %3d\n", frame);
+ fflush(stdout);
+
+ /* the picture is allocated by the decoder. no need to
+ free it */
+ snprintf(buf, sizeof(buf), outfilename, frame);
+ pgm_save(picture->data[0], picture->linesize[0],
+ c->width, c->height, buf);
+ frame++;
+ }
+ avpkt.size -= len;
+ avpkt.data += len;
+ }
+ }
+
+ /* some codecs, such as MPEG, transmit the I and P frame with a
+ latency of one frame. You must do the following to have a
+ chance to get the last frame of the video */
+ avpkt.data = NULL;
+ avpkt.size = 0;
+ len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
+ if (got_picture) {
+ printf("saving last frame %3d\n", frame);
+ fflush(stdout);
+
+ /* the picture is allocated by the decoder. no need to
+ free it */
+ snprintf(buf, sizeof(buf), outfilename, frame);
+ pgm_save(picture->data[0], picture->linesize[0],
+ c->width, c->height, buf);
+ frame++;
+ }
+
+ fclose(f);
+
+ avcodec_close(c);
+ av_free(c);
+ av_free(picture);
+ printf("\n");
+}
+
+int main(int argc, char **argv)
+{
+ const char *filename;
+
+ /* register all the codecs */
+ avcodec_register_all();
+
+ if (argc <= 1) {
+ audio_encode_example("/tmp/test.mp2");
+ audio_decode_example("/tmp/test.sw", "/tmp/test.mp2");
+
+ video_encode_example("/tmp/test.h264", AV_CODEC_ID_H264);
+ video_encode_example("/tmp/test.mpg", AV_CODEC_ID_MPEG1VIDEO);
+ filename = "/tmp/test.mpg";
+ } else {
+ filename = argv[1];
+ }
+
+ // audio_decode_example("/tmp/test.sw", filename);
+ video_decode_example("/tmp/test%d.pgm", filename);
+
+ return 0;
+}
--- /dev/null
+/*
+ * Apple ProRes encoder
+ *
+ * Copyright (c) 2012 Konstantin Shishkov
+ *
+ * This encoder appears to be based on Anatoliy Wasserman's,
+ * considering the similarities in the bugs.
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/opt.h"
+#include "avcodec.h"
+#include "put_bits.h"
+#include "bytestream.h"
+#include "internal.h"
+#include "proresdsp.h"
+#include "proresdata.h"
+
+#define CFACTOR_Y422 2
+#define CFACTOR_Y444 3
+
+#define MAX_MBS_PER_SLICE 8
+
+#define MAX_PLANES 3 // should be increased to 4 when there's PIX_FMT_YUV444AP10
+
+enum {
+ PRORES_PROFILE_PROXY = 0,
+ PRORES_PROFILE_LT,
+ PRORES_PROFILE_STANDARD,
+ PRORES_PROFILE_HQ,
+};
+
+enum {
+ QUANT_MAT_PROXY = 0,
+ QUANT_MAT_LT,
+ QUANT_MAT_STANDARD,
+ QUANT_MAT_HQ,
+ QUANT_MAT_DEFAULT,
+};
+
+static const uint8_t prores_quant_matrices[][64] = {
+ { // proxy
+ 4, 7, 9, 11, 13, 14, 15, 63,
+ 7, 7, 11, 12, 14, 15, 63, 63,
+ 9, 11, 13, 14, 15, 63, 63, 63,
+ 11, 11, 13, 14, 63, 63, 63, 63,
+ 11, 13, 14, 63, 63, 63, 63, 63,
+ 13, 14, 63, 63, 63, 63, 63, 63,
+ 13, 63, 63, 63, 63, 63, 63, 63,
+ 63, 63, 63, 63, 63, 63, 63, 63,
+ },
+ { // LT
+ 4, 5, 6, 7, 9, 11, 13, 15,
+ 5, 5, 7, 8, 11, 13, 15, 17,
+ 6, 7, 9, 11, 13, 15, 15, 17,
+ 7, 7, 9, 11, 13, 15, 17, 19,
+ 7, 9, 11, 13, 14, 16, 19, 23,
+ 9, 11, 13, 14, 16, 19, 23, 29,
+ 9, 11, 13, 15, 17, 21, 28, 35,
+ 11, 13, 16, 17, 21, 28, 35, 41,
+ },
+ { // standard
+ 4, 4, 5, 5, 6, 7, 7, 9,
+ 4, 4, 5, 6, 7, 7, 9, 9,
+ 5, 5, 6, 7, 7, 9, 9, 10,
+ 5, 5, 6, 7, 7, 9, 9, 10,
+ 5, 6, 7, 7, 8, 9, 10, 12,
+ 6, 7, 7, 8, 9, 10, 12, 15,
+ 6, 7, 7, 9, 10, 11, 14, 17,
+ 7, 7, 9, 10, 11, 14, 17, 21,
+ },
+ { // high quality
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 5,
+ 4, 4, 4, 4, 4, 4, 5, 5,
+ 4, 4, 4, 4, 4, 5, 5, 6,
+ 4, 4, 4, 4, 5, 5, 6, 7,
+ 4, 4, 4, 4, 5, 6, 7, 7,
+ },
+ { // codec default
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 4, 4,
+ },
+};
+
+#define NUM_MB_LIMITS 4
+static const int prores_mb_limits[NUM_MB_LIMITS] = {
+ 1620, // up to 720x576
+ 2700, // up to 960x720
+ 6075, // up to 1440x1080
+ 9216, // up to 2048x1152
+};
+
+static const struct prores_profile {
+ const char *full_name;
+ uint32_t tag;
+ int min_quant;
+ int max_quant;
+ int br_tab[NUM_MB_LIMITS];
+ int quant;
+} prores_profile_info[4] = {
+ {
+ .full_name = "proxy",
+ .tag = MKTAG('a', 'p', 'c', 'o'),
+ .min_quant = 4,
+ .max_quant = 8,
+ .br_tab = { 300, 242, 220, 194 },
+ .quant = QUANT_MAT_PROXY,
+ },
+ {
+ .full_name = "LT",
+ .tag = MKTAG('a', 'p', 'c', 's'),
+ .min_quant = 1,
+ .max_quant = 9,
+ .br_tab = { 720, 560, 490, 440 },
+ .quant = QUANT_MAT_LT,
+ },
+ {
+ .full_name = "standard",
+ .tag = MKTAG('a', 'p', 'c', 'n'),
+ .min_quant = 1,
+ .max_quant = 6,
+ .br_tab = { 1050, 808, 710, 632 },
+ .quant = QUANT_MAT_STANDARD,
+ },
+ {
+ .full_name = "high quality",
+ .tag = MKTAG('a', 'p', 'c', 'h'),
+ .min_quant = 1,
+ .max_quant = 6,
+ .br_tab = { 1566, 1216, 1070, 950 },
+ .quant = QUANT_MAT_HQ,
+ }
+// for 4444 profile bitrate numbers are { 2350, 1828, 1600, 1425 }
+};
+
+#define TRELLIS_WIDTH 16
+#define SCORE_LIMIT INT_MAX / 2
+
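+/* one node per candidate quantiser for the current slice; the trellis search
+ * picks a quantiser per slice so that the running bit count stays within
+ * budget while the accumulated quantisation error is minimised */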
+struct TrellisNode {
+ int prev_node;
+ int quant;
+ int bits;
+ int score;
+};
+
+#define MAX_STORED_Q 16
+
+typedef struct ProresThreadData {
+ DECLARE_ALIGNED(16, DCTELEM, blocks)[MAX_PLANES][64 * 4 * MAX_MBS_PER_SLICE];
+ DECLARE_ALIGNED(16, uint16_t, emu_buf)[16 * 16];
+ int16_t custom_q[64];
+ struct TrellisNode *nodes;
+} ProresThreadData;
+
+typedef struct ProresContext {
+ AVClass *class;
+ DECLARE_ALIGNED(16, DCTELEM, blocks)[MAX_PLANES][64 * 4 * MAX_MBS_PER_SLICE];
+ DECLARE_ALIGNED(16, uint16_t, emu_buf)[16*16];
+ int16_t quants[MAX_STORED_Q][64];
+ int16_t custom_q[64];
+ const uint8_t *quant_mat;
+
+ ProresDSPContext dsp;
+ ScanTable scantable;
+
+ int mb_width, mb_height;
+ int mbs_per_slice;
+ int num_chroma_blocks, chroma_factor;
+ int slices_width;
+ int slices_per_picture;
+ int pictures_per_frame; // 1 for progressive, 2 for interlaced
+ int cur_picture_idx;
+ int num_planes;
+ int bits_per_mb;
+ int force_quant;
+
+ char *vendor;
+ int quant_sel;
+
+ int frame_size_upper_bound;
+
+ int profile;
+ const struct prores_profile *profile_info;
+
+ int *slice_q;
+
+ ProresThreadData *tdata;
+} ProresContext;
+
+static void get_slice_data(ProresContext *ctx, const uint16_t *src,
+ int linesize, int x, int y, int w, int h,
+ DCTELEM *blocks, uint16_t *emu_buf,
+ int mbs_per_slice, int blocks_per_mb, int is_chroma)
+{
+ const uint16_t *esrc;
+ const int mb_width = 4 * blocks_per_mb;
+ int elinesize;
+ int i, j, k;
+
+ for (i = 0; i < mbs_per_slice; i++, src += mb_width) {
+ if (x >= w) {
+ memset(blocks, 0, 64 * (mbs_per_slice - i) * blocks_per_mb
+ * sizeof(*blocks));
+ return;
+ }
+ if (x + mb_width <= w && y + 16 <= h) {
+ esrc = src;
+ elinesize = linesize;
+ } else {
+ int bw, bh, pix;
+
+ esrc = emu_buf;
+ elinesize = 16 * sizeof(*emu_buf);
+
+ bw = FFMIN(w - x, mb_width);
+ bh = FFMIN(h - y, 16);
+
+ for (j = 0; j < bh; j++) {
+ memcpy(emu_buf + j * 16,
+ (const uint8_t*)src + j * linesize,
+ bw * sizeof(*src));
+ pix = emu_buf[j * 16 + bw - 1];
+ for (k = bw; k < mb_width; k++)
+ emu_buf[j * 16 + k] = pix;
+ }
+ for (; j < 16; j++)
+ memcpy(emu_buf + j * 16,
+ emu_buf + (bh - 1) * 16,
+ mb_width * sizeof(*emu_buf));
+ }
+ if (!is_chroma) {
+ ctx->dsp.fdct(esrc, elinesize, blocks);
+ blocks += 64;
+ if (blocks_per_mb > 2) {
+ ctx->dsp.fdct(esrc + 8, elinesize, blocks);
+ blocks += 64;
+ }
+ ctx->dsp.fdct(esrc + elinesize * 4, elinesize, blocks);
+ blocks += 64;
+ if (blocks_per_mb > 2) {
+ ctx->dsp.fdct(esrc + elinesize * 4 + 8, elinesize, blocks);
+ blocks += 64;
+ }
+ } else {
+ ctx->dsp.fdct(esrc, elinesize, blocks);
+ blocks += 64;
+ ctx->dsp.fdct(esrc + elinesize * 4, elinesize, blocks);
+ blocks += 64;
+ if (blocks_per_mb > 2) {
+ ctx->dsp.fdct(esrc + 8, elinesize, blocks);
+ blocks += 64;
+ ctx->dsp.fdct(esrc + elinesize * 4 + 8, elinesize, blocks);
+ blocks += 64;
+ }
+ }
+
+ x += mb_width;
+ }
+}
+
+/**
+ * Write an unsigned rice/exp golomb codeword.
+ */
+static inline void encode_vlc_codeword(PutBitContext *pb, unsigned codebook, int val)
+{
+ unsigned int rice_order, exp_order, switch_bits, switch_val;
+ int exponent;
+
+ /* number of prefix bits to switch between Rice and expGolomb */
+ switch_bits = (codebook & 3) + 1;
+ rice_order = codebook >> 5; /* rice code order */
+ exp_order = (codebook >> 2) & 7; /* exp golomb code order */
+
+ switch_val = switch_bits << rice_order;
+
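+ /* values below switch_val are coded with the Rice code, larger ones with exp golomb */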
+ if (val >= switch_val) {
+ val -= switch_val - (1 << exp_order);
+ exponent = av_log2(val);
+
+ put_bits(pb, exponent - exp_order + switch_bits, 0);
+ put_bits(pb, exponent + 1, val);
+ } else {
+ exponent = val >> rice_order;
+
+ if (exponent)
+ put_bits(pb, exponent, 0);
+ put_bits(pb, 1, 1);
+ if (rice_order)
+ put_sbits(pb, rice_order, val);
+ }
+}
+
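+/* MAKE_CODE() interleaves signed values onto unsigned codes:
+ * 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ... */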
+#define GET_SIGN(x) ((x) >> 31)
+#define MAKE_CODE(x) (((x) << 1) ^ GET_SIGN(x))
+
+static void encode_dcs(PutBitContext *pb, DCTELEM *blocks,
+ int blocks_per_slice, int scale)
+{
+ int i;
+ int codebook = 3, code, dc, prev_dc, delta, sign, new_sign;
+
+ prev_dc = (blocks[0] - 0x4000) / scale;
+ encode_vlc_codeword(pb, FIRST_DC_CB, MAKE_CODE(prev_dc));
+ sign = 0;
+ codebook = 3;
+ blocks += 64;
+
+ for (i = 1; i < blocks_per_slice; i++, blocks += 64) {
+ dc = (blocks[0] - 0x4000) / scale;
+ delta = dc - prev_dc;
+ new_sign = GET_SIGN(delta);
+ delta = (delta ^ sign) - sign;
+ code = MAKE_CODE(delta);
+ encode_vlc_codeword(pb, ff_prores_dc_codebook[codebook], code);
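+ /* the codebook for the next delta is selected from the magnitude of this one */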
+ codebook = (code + (code & 1)) >> 1;
+ codebook = FFMIN(codebook, 3);
+ sign = new_sign;
+ prev_dc = dc;
+ }
+}
+
+static void encode_acs(PutBitContext *pb, DCTELEM *blocks,
+ int blocks_per_slice,
+ int plane_size_factor,
+ const uint8_t *scan, const int16_t *qmat)
+{
+ int idx, i;
+ int run, level, run_cb, lev_cb;
+ int max_coeffs, abs_level;
+
+ max_coeffs = blocks_per_slice << 6;
+ run_cb = ff_prores_run_to_cb_index[4];
+ lev_cb = ff_prores_lev_to_cb_index[2];
+ run = 0;
+
+ for (i = 1; i < 64; i++) {
+ for (idx = scan[i]; idx < max_coeffs; idx += 64) {
+ level = blocks[idx] / qmat[scan[i]];
+ if (level) {
+ abs_level = FFABS(level);
+ encode_vlc_codeword(pb, ff_prores_ac_codebook[run_cb], run);
+ encode_vlc_codeword(pb, ff_prores_ac_codebook[lev_cb],
+ abs_level - 1);
+ put_sbits(pb, 1, GET_SIGN(level));
+
+ run_cb = ff_prores_run_to_cb_index[FFMIN(run, 15)];
+ lev_cb = ff_prores_lev_to_cb_index[FFMIN(abs_level, 9)];
+ run = 0;
+ } else {
+ run++;
+ }
+ }
+ }
+}
+
+static int encode_slice_plane(ProresContext *ctx, PutBitContext *pb,
+ const uint16_t *src, int linesize,
+ int mbs_per_slice, DCTELEM *blocks,
+ int blocks_per_mb, int plane_size_factor,
+ const int16_t *qmat)
+{
+ int blocks_per_slice, saved_pos;
+
+ saved_pos = put_bits_count(pb);
+ blocks_per_slice = mbs_per_slice * blocks_per_mb;
+
+ encode_dcs(pb, blocks, blocks_per_slice, qmat[0]);
+ encode_acs(pb, blocks, blocks_per_slice, plane_size_factor,
+ ctx->scantable.permutated, qmat);
+ flush_put_bits(pb);
+
+ return (put_bits_count(pb) - saved_pos) >> 3;
+}
+
+static int encode_slice(AVCodecContext *avctx, const AVFrame *pic,
+ PutBitContext *pb,
+ int sizes[4], int x, int y, int quant,
+ int mbs_per_slice)
+{
+ ProresContext *ctx = avctx->priv_data;
+ int i, xp, yp;
+ int total_size = 0;
+ const uint16_t *src;
+ int slice_width_factor = av_log2(mbs_per_slice);
- int num_cblocks, pwidth, linesize, line_offset;
++ int num_cblocks, pwidth, linesize, line_add;
+ int plane_factor, is_chroma;
+ uint16_t *qmat;
+
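++ /* with interlaced input every frame carries two pictures; line_add
++ * selects which field the current picture starts on */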
++ if (ctx->pictures_per_frame == 1)
++ line_add = 0;
++ else
++ line_add = ctx->cur_picture_idx ^ !pic->top_field_first;
++
+ if (ctx->force_quant) {
+ qmat = ctx->quants[0];
+ } else if (quant < MAX_STORED_Q) {
+ qmat = ctx->quants[quant];
+ } else {
+ qmat = ctx->custom_q;
+ for (i = 0; i < 64; i++)
+ qmat[i] = ctx->quant_mat[i] * quant;
+ }
+
+ for (i = 0; i < ctx->num_planes; i++) {
+ is_chroma = (i == 1 || i == 2);
+ plane_factor = slice_width_factor + 2;
+ if (is_chroma)
+ plane_factor += ctx->chroma_factor - 3;
+ if (!is_chroma || ctx->chroma_factor == CFACTOR_Y444) {
+ xp = x << 4;
+ yp = y << 4;
+ num_cblocks = 4;
+ pwidth = avctx->width;
+ } else {
+ xp = x << 3;
+ yp = y << 4;
+ num_cblocks = 2;
+ pwidth = avctx->width >> 1;
+ }
+
- line_offset = ((ctx->cur_picture_idx ^ !pic->top_field_first) &
- (ctx->pictures_per_frame - 1)) * pic->linesize[i];
+ linesize = pic->linesize[i] * ctx->pictures_per_frame;
- src = (const uint16_t*)(pic->data[i] + yp * linesize + line_offset) + xp;
++ src = (const uint16_t*)(pic->data[i] + yp * linesize +
++ line_add * pic->linesize[i]) + xp;
+
+ get_slice_data(ctx, src, linesize, xp, yp,
+ pwidth, avctx->height / ctx->pictures_per_frame,
- ctx->blocks[0], ctx->emu_buf, mbs_per_slice,
- num_cblocks, is_chroma);
++ ctx->blocks[0], ctx->emu_buf,
++ mbs_per_slice, num_cblocks, is_chroma);
+ sizes[i] = encode_slice_plane(ctx, pb, src, linesize,
+ mbs_per_slice, ctx->blocks[0],
+ num_cblocks, plane_factor,
+ qmat);
+ total_size += sizes[i];
+ }
+ return total_size;
+}
+
+static inline int estimate_vlc(unsigned codebook, int val)
+{
+ unsigned int rice_order, exp_order, switch_bits, switch_val;
+ int exponent;
+
+ /* number of prefix bits to switch between Rice and expGolomb */
+ switch_bits = (codebook & 3) + 1;
+ rice_order = codebook >> 5; /* rice code order */
+ exp_order = (codebook >> 2) & 7; /* exp golomb code order */
+
+ switch_val = switch_bits << rice_order;
+
+ if (val >= switch_val) {
+ val -= switch_val - (1 << exp_order);
+ exponent = av_log2(val);
+
+ return exponent * 2 - exp_order + switch_bits + 1;
+ } else {
+ return (val >> rice_order) + rice_order + 1;
+ }
+}
+
+static int estimate_dcs(int *error, DCTELEM *blocks, int blocks_per_slice,
+ int scale)
+{
+ int i;
+ int codebook = 3, code, dc, prev_dc, delta, sign, new_sign;
+ int bits;
+
+ prev_dc = (blocks[0] - 0x4000) / scale;
+ bits = estimate_vlc(FIRST_DC_CB, MAKE_CODE(prev_dc));
+ sign = 0;
+ codebook = 3;
+ blocks += 64;
+ *error += FFABS(blocks[0] - 0x4000) % scale;
+
+ for (i = 1; i < blocks_per_slice; i++, blocks += 64) {
+ dc = (blocks[0] - 0x4000) / scale;
+ *error += FFABS(blocks[0] - 0x4000) % scale;
+ delta = dc - prev_dc;
+ new_sign = GET_SIGN(delta);
+ delta = (delta ^ sign) - sign;
+ code = MAKE_CODE(delta);
+ bits += estimate_vlc(ff_prores_dc_codebook[codebook], code);
+ codebook = (code + (code & 1)) >> 1;
+ codebook = FFMIN(codebook, 3);
+ sign = new_sign;
+ prev_dc = dc;
+ }
+
+ return bits;
+}
+
+static int estimate_acs(int *error, DCTELEM *blocks, int blocks_per_slice,
+ int plane_size_factor,
+ const uint8_t *scan, const int16_t *qmat)
+{
+ int idx, i;
+ int run, level, run_cb, lev_cb;
+ int max_coeffs, abs_level;
+ int bits = 0;
+
+ max_coeffs = blocks_per_slice << 6;
+ run_cb = ff_prores_run_to_cb_index[4];
+ lev_cb = ff_prores_lev_to_cb_index[2];
+ run = 0;
+
+ for (i = 1; i < 64; i++) {
+ for (idx = scan[i]; idx < max_coeffs; idx += 64) {
+ level = blocks[idx] / qmat[scan[i]];
+ *error += FFABS(blocks[idx]) % qmat[scan[i]];
+ if (level) {
+ abs_level = FFABS(level);
+ bits += estimate_vlc(ff_prores_ac_codebook[run_cb], run);
+ bits += estimate_vlc(ff_prores_ac_codebook[lev_cb],
+ abs_level - 1) + 1;
+
+ run_cb = ff_prores_run_to_cb_index[FFMIN(run, 15)];
+ lev_cb = ff_prores_lev_to_cb_index[FFMIN(abs_level, 9)];
+ run = 0;
+ } else {
+ run++;
+ }
+ }
+ }
+
+ return bits;
+}
+
+static int estimate_slice_plane(ProresContext *ctx, int *error, int plane,
+ const uint16_t *src, int linesize,
+ int mbs_per_slice,
+ int blocks_per_mb, int plane_size_factor,
+ const int16_t *qmat, ProresThreadData *td)
+{
+ int blocks_per_slice;
+ int bits;
+
+ blocks_per_slice = mbs_per_slice * blocks_per_mb;
+
+ bits = estimate_dcs(error, td->blocks[plane], blocks_per_slice, qmat[0]);
+ bits += estimate_acs(error, td->blocks[plane], blocks_per_slice,
+ plane_size_factor, ctx->scantable.permutated, qmat);
+
+ return FFALIGN(bits, 8);
+}
+
+static int find_slice_quant(AVCodecContext *avctx, const AVFrame *pic,
+ int trellis_node, int x, int y, int mbs_per_slice,
+ ProresThreadData *td)
+{
+ ProresContext *ctx = avctx->priv_data;
+ int i, q, pq, xp, yp;
+ const uint16_t *src;
+ int slice_width_factor = av_log2(mbs_per_slice);
+ int num_cblocks[MAX_PLANES], pwidth;
+ int plane_factor[MAX_PLANES], is_chroma[MAX_PLANES];
+ const int min_quant = ctx->profile_info->min_quant;
+ const int max_quant = ctx->profile_info->max_quant;
+ int error, bits, bits_limit;
+ int mbs, prev, cur, new_score;
+ int slice_bits[TRELLIS_WIDTH], slice_score[TRELLIS_WIDTH];
+ int overquant;
+ uint16_t *qmat;
- int linesize[4], line_offset;
++ int linesize[4], line_add;
+
++ if (ctx->pictures_per_frame == 1)
++ line_add = 0;
++ else
++ line_add = ctx->cur_picture_idx ^ !pic->top_field_first;
+ mbs = x + mbs_per_slice;
+
+ for (i = 0; i < ctx->num_planes; i++) {
+ is_chroma[i] = (i == 1 || i == 2);
+ plane_factor[i] = slice_width_factor + 2;
+ if (is_chroma[i])
+ plane_factor[i] += ctx->chroma_factor - 3;
+ if (!is_chroma[i] || ctx->chroma_factor == CFACTOR_Y444) {
+ xp = x << 4;
+ yp = y << 4;
+ num_cblocks[i] = 4;
+ pwidth = avctx->width;
+ } else {
+ xp = x << 3;
+ yp = y << 4;
+ num_cblocks[i] = 2;
+ pwidth = avctx->width >> 1;
+ }
+
- line_offset = ((ctx->cur_picture_idx ^ !pic->top_field_first) &
- (ctx->pictures_per_frame - 1)) * pic->linesize[i];
+ linesize[i] = pic->linesize[i] * ctx->pictures_per_frame;
- src = (const uint16_t*)(pic->data[i] + yp * linesize[i] + line_offset) + xp;
++ src = (const uint16_t*)(pic->data[i] + yp * linesize[i] +
++ line_add * pic->linesize[i]) + xp;
+
+ get_slice_data(ctx, src, linesize[i], xp, yp,
+ pwidth, avctx->height / ctx->pictures_per_frame,
- td->blocks[i], td->emu_buf, mbs_per_slice,
- num_cblocks[i], is_chroma[i]);
++ td->blocks[i], td->emu_buf,
++ mbs_per_slice, num_cblocks[i], is_chroma[i]);
+ }
+
+ for (q = min_quant; q < max_quant + 2; q++) {
+ td->nodes[trellis_node + q].prev_node = -1;
+ td->nodes[trellis_node + q].quant = q;
+ }
+
+ // todo: maybe perform coarser quantising to fit into frame size when needed
+ for (q = min_quant; q <= max_quant; q++) {
+ bits = 0;
+ error = 0;
+ for (i = 0; i < ctx->num_planes; i++) {
+ bits += estimate_slice_plane(ctx, &error, i,
+ src, linesize[i],
+ mbs_per_slice,
+ num_cblocks[i], plane_factor[i],
+ ctx->quants[q], td);
+ }
+ if (bits > 65000 * 8) {
+ error = SCORE_LIMIT;
+ break;
+ }
+ slice_bits[q] = bits;
+ slice_score[q] = error;
+ }
+ if (slice_bits[max_quant] <= ctx->bits_per_mb * mbs_per_slice) {
+ slice_bits[max_quant + 1] = slice_bits[max_quant];
+ slice_score[max_quant + 1] = slice_score[max_quant] + 1;
+ overquant = max_quant;
+ } else {
+ for (q = max_quant + 1; q < 128; q++) {
+ bits = 0;
+ error = 0;
+ if (q < MAX_STORED_Q) {
+ qmat = ctx->quants[q];
+ } else {
+ qmat = td->custom_q;
+ for (i = 0; i < 64; i++)
+ qmat[i] = ctx->quant_mat[i] * q;
+ }
+ for (i = 0; i < ctx->num_planes; i++) {
+ bits += estimate_slice_plane(ctx, &error, i,
+ src, linesize[i],
+ mbs_per_slice,
+ num_cblocks[i], plane_factor[i],
+ qmat, td);
+ }
+ if (bits <= ctx->bits_per_mb * mbs_per_slice)
+ break;
+ }
+
+ slice_bits[max_quant + 1] = bits;
+ slice_score[max_quant + 1] = error;
+ overquant = q;
+ }
+ td->nodes[trellis_node + max_quant + 1].quant = overquant;
+
+ bits_limit = mbs * ctx->bits_per_mb;
+ for (pq = min_quant; pq < max_quant + 2; pq++) {
+ prev = trellis_node - TRELLIS_WIDTH + pq;
+
+ for (q = min_quant; q < max_quant + 2; q++) {
+ cur = trellis_node + q;
+
+ bits = td->nodes[prev].bits + slice_bits[q];
+ error = slice_score[q];
+ if (bits > bits_limit)
+ error = SCORE_LIMIT;
+
+ if (td->nodes[prev].score < SCORE_LIMIT && error < SCORE_LIMIT)
+ new_score = td->nodes[prev].score + error;
+ else
+ new_score = SCORE_LIMIT;
+ if (td->nodes[cur].prev_node == -1 ||
+ td->nodes[cur].score >= new_score) {
+
+ td->nodes[cur].bits = bits;
+ td->nodes[cur].score = new_score;
+ td->nodes[cur].prev_node = prev;
+ }
+ }
+ }
+
+ error = td->nodes[trellis_node + min_quant].score;
+ pq = trellis_node + min_quant;
+ for (q = min_quant + 1; q < max_quant + 2; q++) {
+ if (td->nodes[trellis_node + q].score <= error) {
+ error = td->nodes[trellis_node + q].score;
+ pq = trellis_node + q;
+ }
+ }
+
+ return pq;
+}
+
+static int find_quant_thread(AVCodecContext *avctx, void *arg,
+ int jobnr, int threadnr)
+{
+ ProresContext *ctx = avctx->priv_data;
+ ProresThreadData *td = ctx->tdata + threadnr;
+ int mbs_per_slice = ctx->mbs_per_slice;
+ int x, y = jobnr, mb, q = 0;
+
+ for (x = mb = 0; x < ctx->mb_width; x += mbs_per_slice, mb++) {
+ while (ctx->mb_width - x < mbs_per_slice)
+ mbs_per_slice >>= 1;
+ q = find_slice_quant(avctx, avctx->coded_frame,
+ (mb + 1) * TRELLIS_WIDTH, x, y,
+ mbs_per_slice, td);
+ }
+
+ for (x = ctx->slices_width - 1; x >= 0; x--) {
+ ctx->slice_q[x + y * ctx->slices_width] = td->nodes[q].quant;
+ q = td->nodes[q].prev_node;
+ }
+
+ return 0;
+}
+
+static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+ const AVFrame *pic, int *got_packet)
+{
+ ProresContext *ctx = avctx->priv_data;
+ uint8_t *orig_buf, *buf, *slice_hdr, *slice_sizes, *tmp;
+ uint8_t *picture_size_pos;
+ PutBitContext pb;
+ int x, y, i, mb, q = 0;
+ int sizes[4] = { 0 };
+ int slice_hdr_size = 2 + 2 * (ctx->num_planes - 1);
+ int frame_size, picture_size, slice_size;
+ int pkt_size, ret;
+ uint8_t frame_flags;
+
+ *avctx->coded_frame = *pic;
+ avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+ avctx->coded_frame->key_frame = 1;
+
+ pkt_size = ctx->frame_size_upper_bound + FF_MIN_BUFFER_SIZE;
+
+ if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size)) < 0)
+ return ret;
+
+ orig_buf = pkt->data;
+
+ // frame atom
+ orig_buf += 4; // frame size
+ bytestream_put_be32 (&orig_buf, FRAME_ID); // frame container ID
+ buf = orig_buf;
+
+ // frame header
+ tmp = buf;
+ buf += 2; // frame header size will be stored here
+ bytestream_put_be16 (&buf, 0); // version 1
+ bytestream_put_buffer(&buf, ctx->vendor, 4);
+ bytestream_put_be16 (&buf, avctx->width);
+ bytestream_put_be16 (&buf, avctx->height);
+
+ frame_flags = ctx->chroma_factor << 6;
- if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
++ if (avctx->flags & CODEC_FLAG_INTERLACED_DCT)
+ frame_flags |= pic->top_field_first ? 0x04 : 0x08;
- }
+ bytestream_put_byte (&buf, frame_flags);
+
+ bytestream_put_byte (&buf, 0); // reserved
+ bytestream_put_byte (&buf, avctx->color_primaries);
+ bytestream_put_byte (&buf, avctx->color_trc);
+ bytestream_put_byte (&buf, avctx->colorspace);
+ bytestream_put_byte (&buf, 0x40); // source format and alpha information
+ bytestream_put_byte (&buf, 0); // reserved
+ if (ctx->quant_sel != QUANT_MAT_DEFAULT) {
+ bytestream_put_byte (&buf, 0x03); // matrix flags - both matrices are present
+ // luma quantisation matrix
+ for (i = 0; i < 64; i++)
+ bytestream_put_byte(&buf, ctx->quant_mat[i]);
+ // chroma quantisation matrix
+ for (i = 0; i < 64; i++)
+ bytestream_put_byte(&buf, ctx->quant_mat[i]);
+ } else {
+ bytestream_put_byte (&buf, 0x00); // matrix flags - default matrices are used
+ }
+ bytestream_put_be16 (&tmp, buf - orig_buf); // write back frame header size
+
-
- for (ctx->cur_picture_idx = 0; ctx->cur_picture_idx < ctx->pictures_per_frame; ++ctx->cur_picture_idx) {
++ for (ctx->cur_picture_idx = 0;
++ ctx->cur_picture_idx < ctx->pictures_per_frame;
++ ctx->cur_picture_idx++) {
+ // picture header
+ picture_size_pos = buf + 1;
+ bytestream_put_byte (&buf, 0x40); // picture header size (in bits)
+ buf += 4; // picture data size will be stored here
+ bytestream_put_be16 (&buf, ctx->slices_per_picture);
+ bytestream_put_byte (&buf, av_log2(ctx->mbs_per_slice) << 4); // slice width and height in MBs
+
+ // seek table - will be filled during slice encoding
+ slice_sizes = buf;
+ buf += ctx->slices_per_picture * 2;
+
+ // slices
+ if (!ctx->force_quant) {
+ ret = avctx->execute2(avctx, find_quant_thread, NULL, NULL,
+ ctx->mb_height);
+ if (ret)
+ return ret;
+ }
+
+ for (y = 0; y < ctx->mb_height; y++) {
+ int mbs_per_slice = ctx->mbs_per_slice;
+ for (x = mb = 0; x < ctx->mb_width; x += mbs_per_slice, mb++) {
+ q = ctx->force_quant ? ctx->force_quant
+ : ctx->slice_q[mb + y * ctx->slices_width];
+
+ while (ctx->mb_width - x < mbs_per_slice)
+ mbs_per_slice >>= 1;
+
+ bytestream_put_byte(&buf, slice_hdr_size << 3);
+ slice_hdr = buf;
+ buf += slice_hdr_size - 1;
+ init_put_bits(&pb, buf, (pkt_size - (buf - orig_buf)) * 8);
+ encode_slice(avctx, pic, &pb, sizes, x, y, q, mbs_per_slice);
+
+ bytestream_put_byte(&slice_hdr, q);
+ slice_size = slice_hdr_size + sizes[ctx->num_planes - 1];
+ for (i = 0; i < ctx->num_planes - 1; i++) {
+ bytestream_put_be16(&slice_hdr, sizes[i]);
+ slice_size += sizes[i];
+ }
+ bytestream_put_be16(&slice_sizes, slice_size);
+ buf += slice_size - slice_hdr_size;
+ }
+ }
+
+ picture_size = buf - (picture_size_pos - 1);
+ bytestream_put_be32(&picture_size_pos, picture_size);
+ }
+
+ orig_buf -= 8;
+ frame_size = buf - orig_buf;
+ bytestream_put_be32(&orig_buf, frame_size);
+
+ pkt->size = frame_size;
+ pkt->flags |= AV_PKT_FLAG_KEY;
+ *got_packet = 1;
+
+ return 0;
+}
+
+static av_cold int encode_close(AVCodecContext *avctx)
+{
+ ProresContext *ctx = avctx->priv_data;
+ int i;
+
+ av_freep(&avctx->coded_frame);
+
+ if (ctx->tdata) {
+ for (i = 0; i < avctx->thread_count; i++)
+ av_free(ctx->tdata[i].nodes);
+ }
+ av_freep(&ctx->tdata);
+ av_freep(&ctx->slice_q);
+
+ return 0;
+}
+
+static av_cold int encode_init(AVCodecContext *avctx)
+{
+ ProresContext *ctx = avctx->priv_data;
+ int mps;
+ int i, j;
+ int min_quant, max_quant;
+ int interlaced = !!(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
+
+ avctx->bits_per_raw_sample = 10;
+ avctx->coded_frame = avcodec_alloc_frame();
+ if (!avctx->coded_frame)
+ return AVERROR(ENOMEM);
+
+ ff_proresdsp_init(&ctx->dsp, avctx);
+ ff_init_scantable(ctx->dsp.dct_permutation, &ctx->scantable,
+ interlaced ? ff_prores_interlaced_scan
+ : ff_prores_progressive_scan);
+
+ mps = ctx->mbs_per_slice;
+ if (mps & (mps - 1)) {
+ av_log(avctx, AV_LOG_ERROR,
+ "there should be an integer power of two MBs per slice\n");
+ return AVERROR(EINVAL);
+ }
+
+ ctx->chroma_factor = avctx->pix_fmt == PIX_FMT_YUV422P10
+ ? CFACTOR_Y422
+ : CFACTOR_Y444;
+ ctx->profile_info = prores_profile_info + ctx->profile;
+ ctx->num_planes = 3;
+
+ ctx->mb_width = FFALIGN(avctx->width, 16) >> 4;
+
+ if (interlaced)
+ ctx->mb_height = FFALIGN(avctx->height, 32) >> 5;
+ else
+ ctx->mb_height = FFALIGN(avctx->height, 16) >> 4;
+
+ ctx->slices_width = ctx->mb_width / mps;
+ ctx->slices_width += av_popcount(ctx->mb_width - ctx->slices_width * mps);
+ ctx->slices_per_picture = ctx->mb_height * ctx->slices_width;
+ ctx->pictures_per_frame = 1 + interlaced;
+
+ if (ctx->quant_sel == -1)
+ ctx->quant_mat = prores_quant_matrices[ctx->profile_info->quant];
+ else
+ ctx->quant_mat = prores_quant_matrices[ctx->quant_sel];
+
+ if (strlen(ctx->vendor) != 4) {
+ av_log(avctx, AV_LOG_ERROR, "vendor ID should be 4 bytes\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ ctx->force_quant = avctx->global_quality / FF_QP2LAMBDA;
+ if (!ctx->force_quant) {
+ if (!ctx->bits_per_mb) {
+ for (i = 0; i < NUM_MB_LIMITS - 1; i++)
- if (prores_mb_limits[i] >= ctx->mb_width * ctx->mb_height * ctx->pictures_per_frame)
++ if (prores_mb_limits[i] >= ctx->mb_width * ctx->mb_height *
++ ctx->pictures_per_frame)
+ break;
+ ctx->bits_per_mb = ctx->profile_info->br_tab[i];
+ } else if (ctx->bits_per_mb < 128) {
+ av_log(avctx, AV_LOG_ERROR, "too few bits per MB, please set at least 128\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ min_quant = ctx->profile_info->min_quant;
+ max_quant = ctx->profile_info->max_quant;
+ for (i = min_quant; i < MAX_STORED_Q; i++) {
+ for (j = 0; j < 64; j++)
+ ctx->quants[i][j] = ctx->quant_mat[j] * i;
+ }
+
+ ctx->slice_q = av_malloc(ctx->slices_per_picture * sizeof(*ctx->slice_q));
+ if (!ctx->slice_q) {
+ encode_close(avctx);
+ return AVERROR(ENOMEM);
+ }
+
+ ctx->tdata = av_mallocz(avctx->thread_count * sizeof(*ctx->tdata));
+ if (!ctx->tdata) {
+ encode_close(avctx);
+ return AVERROR(ENOMEM);
+ }
+
+ for (j = 0; j < avctx->thread_count; j++) {
+ ctx->tdata[j].nodes = av_malloc((ctx->slices_width + 1)
+ * TRELLIS_WIDTH
+ * sizeof(*ctx->tdata->nodes));
+ if (!ctx->tdata[j].nodes) {
+ encode_close(avctx);
+ return AVERROR(ENOMEM);
+ }
+ for (i = min_quant; i < max_quant + 2; i++) {
+ ctx->tdata[j].nodes[i].prev_node = -1;
+ ctx->tdata[j].nodes[i].bits = 0;
+ ctx->tdata[j].nodes[i].score = 0;
+ }
+ }
+ } else {
+ int ls = 0;
+
+ if (ctx->force_quant > 64) {
+ av_log(avctx, AV_LOG_ERROR, "too large quantiser, maximum is 64\n");
+ return AVERROR_INVALIDDATA;
+ }
+
+ for (j = 0; j < 64; j++) {
+ ctx->quants[0][j] = ctx->quant_mat[j] * ctx->force_quant;
+ ls += av_log2((1 << 11) / ctx->quants[0][j]) * 2 + 1;
+ }
+
+ ctx->bits_per_mb = ls * 8;
+ if (ctx->chroma_factor == CFACTOR_Y444)
+ ctx->bits_per_mb += ls * 4;
+ if (ctx->num_planes == 4)
+ ctx->bits_per_mb += ls * 4;
+ }
+
+ ctx->frame_size_upper_bound = ctx->pictures_per_frame *
+ ctx->slices_per_picture *
+ (2 + 2 * ctx->num_planes +
- (mps * ctx->bits_per_mb) / 8) + 200;
++ (mps * ctx->bits_per_mb) / 8)
++ + 200;
+
+ avctx->codec_tag = ctx->profile_info->tag;
+
- av_log(avctx, AV_LOG_DEBUG, "profile %d, %d slices/pic, %d pics/frame, %d bits per MB\n",
- ctx->profile, ctx->slices_per_picture, ctx->pictures_per_frame, ctx->bits_per_mb);
++ av_log(avctx, AV_LOG_DEBUG,
++ "profile %d, %d slices, interlacing: %s, %d bits per MB\n",
++ ctx->profile, ctx->slices_per_picture * ctx->pictures_per_frame,
++ interlaced ? "yes" : "no", ctx->bits_per_mb);
+ av_log(avctx, AV_LOG_DEBUG, "frame size upper bound: %d\n",
+ ctx->frame_size_upper_bound);
+
+ return 0;
+}
+
+#define OFFSET(x) offsetof(ProresContext, x)
+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
+
+static const AVOption options[] = {
+ { "mbs_per_slice", "macroblocks per slice", OFFSET(mbs_per_slice),
+ AV_OPT_TYPE_INT, { 8 }, 1, MAX_MBS_PER_SLICE, VE },
+ { "profile", NULL, OFFSET(profile), AV_OPT_TYPE_INT,
+ { PRORES_PROFILE_STANDARD },
+ PRORES_PROFILE_PROXY, PRORES_PROFILE_HQ, VE, "profile" },
+ { "proxy", NULL, 0, AV_OPT_TYPE_CONST, { PRORES_PROFILE_PROXY },
+ 0, 0, VE, "profile" },
+ { "lt", NULL, 0, AV_OPT_TYPE_CONST, { PRORES_PROFILE_LT },
+ 0, 0, VE, "profile" },
+ { "standard", NULL, 0, AV_OPT_TYPE_CONST, { PRORES_PROFILE_STANDARD },
+ 0, 0, VE, "profile" },
+ { "hq", NULL, 0, AV_OPT_TYPE_CONST, { PRORES_PROFILE_HQ },
+ 0, 0, VE, "profile" },
+ { "vendor", "vendor ID", OFFSET(vendor),
+ AV_OPT_TYPE_STRING, { .str = "Lavc" }, CHAR_MIN, CHAR_MAX, VE },
+ { "bits_per_mb", "desired bits per macroblock", OFFSET(bits_per_mb),
+ AV_OPT_TYPE_INT, { 0 }, 0, 8192, VE },
+ { "quant_mat", "quantiser matrix", OFFSET(quant_sel), AV_OPT_TYPE_INT,
+ { -1 }, -1, QUANT_MAT_DEFAULT, VE, "quant_mat" },
+ { "auto", NULL, 0, AV_OPT_TYPE_CONST, { -1 },
+ 0, 0, VE, "quant_mat" },
+ { "proxy", NULL, 0, AV_OPT_TYPE_CONST, { QUANT_MAT_PROXY },
+ 0, 0, VE, "quant_mat" },
+ { "lt", NULL, 0, AV_OPT_TYPE_CONST, { QUANT_MAT_LT },
+ 0, 0, VE, "quant_mat" },
+ { "standard", NULL, 0, AV_OPT_TYPE_CONST, { QUANT_MAT_STANDARD },
+ 0, 0, VE, "quant_mat" },
+ { "hq", NULL, 0, AV_OPT_TYPE_CONST, { QUANT_MAT_HQ },
+ 0, 0, VE, "quant_mat" },
+ { "default", NULL, 0, AV_OPT_TYPE_CONST, { QUANT_MAT_DEFAULT },
+ 0, 0, VE, "quant_mat" },
+ { NULL }
+};
+
+static const AVClass proresenc_class = {
+ .class_name = "ProRes encoder",
+ .item_name = av_default_item_name,
+ .option = options,
+ .version = LIBAVUTIL_VERSION_INT,
+};
+
+AVCodec ff_prores_kostya_encoder = {
+ .name = "prores_kostya",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_PRORES,
+ .priv_data_size = sizeof(ProresContext),
+ .init = encode_init,
+ .close = encode_close,
+ .encode2 = encode_frame,
+ .capabilities = CODEC_CAP_SLICE_THREADS,
+ .long_name = NULL_IF_CONFIG_SMALL("Apple ProRes (iCodec Pro)"),
+ .pix_fmts = (const enum PixelFormat[]) {
+ PIX_FMT_YUV422P10, PIX_FMT_YUV444P10, PIX_FMT_NONE
+ },
+ .priv_class = &proresenc_class,
+};
#include <windows.h>
#include <process.h>
+#include "libavutil/common.h"
+ #include "libavutil/internal.h"
+ #include "libavutil/mem.h"
typedef struct {
void *handle;
x86/simple_idct_mmx.o \
MMX-OBJS-$(CONFIG_AAC_DECODER) += x86/sbrdsp_init.o
- MMX-OBJS-$(CONFIG_AC3DSP) += x86/ac3dsp_mmx.o
+ MMX-OBJS-$(CONFIG_AC3DSP) += x86/ac3dsp_init.o
MMX-OBJS-$(CONFIG_CAVS_DECODER) += x86/cavsdsp_mmx.o
MMX-OBJS-$(CONFIG_DNXHD_ENCODER) += x86/dnxhd_mmx.o
-MMX-OBJS-$(CONFIG_DWT) += x86/snowdsp_mmx.o
+MMX-OBJS-$(CONFIG_DWT) += x86/snowdsp_mmx.o \
+ x86/dwt.o
MMX-OBJS-$(CONFIG_ENCODERS) += x86/dsputilenc_mmx.o
- MMX-OBJS-$(CONFIG_FFT) += x86/fft.o
+ MMX-OBJS-$(CONFIG_FFT) += x86/fft_init.o
+MMX-OBJS-$(CONFIG_GPL) += x86/idct_mmx.o
- MMX-OBJS-$(CONFIG_H264DSP) += x86/h264dsp_mmx.o
+ MMX-OBJS-$(CONFIG_H264DSP) += x86/h264dsp_init.o
MMX-OBJS-$(CONFIG_H264PRED) += x86/h264_intrapred_init.o
MMX-OBJS-$(CONFIG_LPC) += x86/lpc_mmx.o
MMX-OBJS-$(CONFIG_MPEGAUDIODSP) += x86/mpegaudiodec_mmx.o
--- /dev/null
+ /*
+ * x86-optimized AC-3 DSP utils
+ * Copyright (c) 2011 Justin Ruggles
+ *
- * This file is part of Libav.
++ * This file is part of FFmpeg.
+ *
- * Libav is free software; you can redistribute it and/or
++ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
- * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
++ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+ #include "libavutil/x86/asm.h"
+ #include "dsputil_mmx.h"
+ #include "libavcodec/ac3dsp.h"
+
+ extern void ff_ac3_exponent_min_mmx (uint8_t *exp, int num_reuse_blocks, int nb_coefs);
+ extern void ff_ac3_exponent_min_mmxext(uint8_t *exp, int num_reuse_blocks, int nb_coefs);
+ extern void ff_ac3_exponent_min_sse2 (uint8_t *exp, int num_reuse_blocks, int nb_coefs);
+
+ extern int ff_ac3_max_msb_abs_int16_mmx (const int16_t *src, int len);
+ extern int ff_ac3_max_msb_abs_int16_mmx2 (const int16_t *src, int len);
+ extern int ff_ac3_max_msb_abs_int16_sse2 (const int16_t *src, int len);
+ extern int ff_ac3_max_msb_abs_int16_ssse3(const int16_t *src, int len);
+
+ extern void ff_ac3_lshift_int16_mmx (int16_t *src, unsigned int len, unsigned int shift);
+ extern void ff_ac3_lshift_int16_sse2(int16_t *src, unsigned int len, unsigned int shift);
+
+ extern void ff_ac3_rshift_int32_mmx (int32_t *src, unsigned int len, unsigned int shift);
+ extern void ff_ac3_rshift_int32_sse2(int32_t *src, unsigned int len, unsigned int shift);
+
+ extern void ff_float_to_fixed24_3dnow(int32_t *dst, const float *src, unsigned int len);
+ extern void ff_float_to_fixed24_sse (int32_t *dst, const float *src, unsigned int len);
+ extern void ff_float_to_fixed24_sse2 (int32_t *dst, const float *src, unsigned int len);
+
+ extern int ff_ac3_compute_mantissa_size_sse2(uint16_t mant_cnt[6][16]);
+
+ extern void ff_ac3_extract_exponents_3dnow(uint8_t *exp, int32_t *coef, int nb_coefs);
+ extern void ff_ac3_extract_exponents_sse2 (uint8_t *exp, int32_t *coef, int nb_coefs);
+ extern void ff_ac3_extract_exponents_ssse3(uint8_t *exp, int32_t *coef, int nb_coefs);
+
+ av_cold void ff_ac3dsp_init_x86(AC3DSPContext *c, int bit_exact)
+ {
+ #if HAVE_YASM
+ int mm_flags = av_get_cpu_flags();
+
+ if (mm_flags & AV_CPU_FLAG_MMX) {
+ c->ac3_exponent_min = ff_ac3_exponent_min_mmx;
+ c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmx;
+ c->ac3_lshift_int16 = ff_ac3_lshift_int16_mmx;
+ c->ac3_rshift_int32 = ff_ac3_rshift_int32_mmx;
+ }
+ if (mm_flags & AV_CPU_FLAG_3DNOW && HAVE_AMD3DNOW) {
+ c->extract_exponents = ff_ac3_extract_exponents_3dnow;
+ if (!bit_exact) {
+ c->float_to_fixed24 = ff_float_to_fixed24_3dnow;
+ }
+ }
+ if (mm_flags & AV_CPU_FLAG_MMXEXT && HAVE_MMXEXT) {
+ c->ac3_exponent_min = ff_ac3_exponent_min_mmxext;
+ c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_mmx2;
+ }
+ if (mm_flags & AV_CPU_FLAG_SSE && HAVE_SSE) {
+ c->float_to_fixed24 = ff_float_to_fixed24_sse;
+ }
+ if (mm_flags & AV_CPU_FLAG_SSE2 && HAVE_SSE) {
+ c->ac3_exponent_min = ff_ac3_exponent_min_sse2;
+ c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_sse2;
+ c->float_to_fixed24 = ff_float_to_fixed24_sse2;
+ c->compute_mantissa_size = ff_ac3_compute_mantissa_size_sse2;
+ c->extract_exponents = ff_ac3_extract_exponents_sse2;
+ if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
+ c->ac3_lshift_int16 = ff_ac3_lshift_int16_sse2;
+ c->ac3_rshift_int32 = ff_ac3_rshift_int32_sse2;
+ }
+ }
+ if (mm_flags & AV_CPU_FLAG_SSSE3 && HAVE_SSSE3) {
+ c->ac3_max_msb_abs_int16 = ff_ac3_max_msb_abs_int16_ssse3;
+ if (!(mm_flags & AV_CPU_FLAG_ATOM)) {
+ c->extract_exponents = ff_ac3_extract_exponents_ssse3;
+ }
+ }
+ #endif
+ }
--- /dev/null
+ /*
- * This file is part of Libav.
++ * This file is part of FFmpeg.
+ *
- * Libav is free software; you can redistribute it and/or
++ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
- * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
++ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+ #include "libavutil/cpu.h"
+ #include "libavcodec/dsputil.h"
+ #include "libavcodec/dct.h"
+ #include "fft.h"
+
+ av_cold void ff_fft_init_mmx(FFTContext *s)
+ {
+ #if HAVE_YASM
+ int has_vectors = av_get_cpu_flags();
+ #if ARCH_X86_32
+ if (has_vectors & AV_CPU_FLAG_3DNOW && HAVE_AMD3DNOW) {
+ /* 3DNow! for K6-2/3 */
+ s->imdct_calc = ff_imdct_calc_3dnow;
+ s->imdct_half = ff_imdct_half_3dnow;
+ s->fft_calc = ff_fft_calc_3dnow;
+ }
+ if (has_vectors & AV_CPU_FLAG_3DNOWEXT && HAVE_AMD3DNOWEXT) {
+ /* 3DNowEx for K7 */
+ s->imdct_calc = ff_imdct_calc_3dnowext;
+ s->imdct_half = ff_imdct_half_3dnowext;
+ s->fft_calc = ff_fft_calc_3dnowext;
+ }
+ #endif
+ if (has_vectors & AV_CPU_FLAG_SSE && HAVE_SSE) {
+ /* SSE for P3/P4/K8 */
+ s->imdct_calc = ff_imdct_calc_sse;
+ s->imdct_half = ff_imdct_half_sse;
+ s->fft_permute = ff_fft_permute_sse;
+ s->fft_calc = ff_fft_calc_sse;
+ s->fft_permutation = FF_FFT_PERM_SWAP_LSBS;
+ }
+ if (has_vectors & AV_CPU_FLAG_AVX && HAVE_AVX && s->nbits >= 5) {
+ /* AVX for SB */
+ s->imdct_half = ff_imdct_half_avx;
+ s->fft_calc = ff_fft_calc_avx;
+ s->fft_permutation = FF_FFT_PERM_AVX;
+ }
+ #endif
+ }
+
+ #if CONFIG_DCT
+ av_cold void ff_dct_init_mmx(DCTContext *s)
+ {
+ #if HAVE_YASM
+ int has_vectors = av_get_cpu_flags();
+ if (has_vectors & AV_CPU_FLAG_SSE && HAVE_SSE)
+ s->dct32 = ff_dct32_float_sse;
+ if (has_vectors & AV_CPU_FLAG_SSE2 && HAVE_SSE)
+ s->dct32 = ff_dct32_float_sse2;
+ if (has_vectors & AV_CPU_FLAG_AVX && HAVE_AVX)
+ s->dct32 = ff_dct32_float_avx;
+ #endif
+ }
+ #endif
--- /dev/null
+ /*
+ * Format Conversion Utils
+ * Copyright (c) 2000, 2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
- * This file is part of Libav.
++ * This file is part of FFmpeg.
+ *
- * Libav is free software; you can redistribute it and/or
++ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
- * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
++ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
+ */
+
+ #include "libavutil/cpu.h"
+ #include "libavutil/x86/asm.h"
+ #include "libavcodec/fmtconvert.h"
+ #include "libavcodec/dsputil.h"
+
+ #if HAVE_YASM
+
+ void ff_int32_to_float_fmul_scalar_sse (float *dst, const int *src, float mul, int len);
+ void ff_int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len);
+
+ void ff_float_to_int16_3dnow(int16_t *dst, const float *src, long len);
+ void ff_float_to_int16_sse (int16_t *dst, const float *src, long len);
+ void ff_float_to_int16_sse2 (int16_t *dst, const float *src, long len);
+
+ void ff_float_to_int16_step_3dnow(int16_t *dst, const float *src, long len, long step);
+ void ff_float_to_int16_step_sse (int16_t *dst, const float *src, long len, long step);
+ void ff_float_to_int16_step_sse2 (int16_t *dst, const float *src, long len, long step);
+
+ void ff_float_to_int16_interleave2_3dnow(int16_t *dst, const float **src, long len);
+ void ff_float_to_int16_interleave2_sse (int16_t *dst, const float **src, long len);
+ void ff_float_to_int16_interleave2_sse2 (int16_t *dst, const float **src, long len);
+
+ void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
+ void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
+ void ff_float_to_int16_interleave6_3dnowext(int16_t *dst, const float **src, int len);
+
+ #define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse
+
+ #define FLOAT_TO_INT16_INTERLEAVE(cpu) \
+ /* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2*/\
+ static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
+ int c;\
+ for(c=0; c<channels; c++){\
+ ff_float_to_int16_step_##cpu(dst+c, src[c], len, channels);\
+ }\
+ }\
+ \
+ static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\
+ if(channels==1)\
+ ff_float_to_int16_##cpu(dst, src[0], len);\
+ else if(channels==2){\
+ ff_float_to_int16_interleave2_##cpu(dst, src, len);\
+ }else if(channels==6){\
+ ff_float_to_int16_interleave6_##cpu(dst, src, len);\
+ }else\
+ float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\
+ }
+
+ FLOAT_TO_INT16_INTERLEAVE(3dnow)
+ FLOAT_TO_INT16_INTERLEAVE(sse)
+ FLOAT_TO_INT16_INTERLEAVE(sse2)
+
+ static void float_to_int16_interleave_3dnowext(int16_t *dst, const float **src,
+ long len, int channels)
+ {
+ if(channels==6)
+ ff_float_to_int16_interleave6_3dnowext(dst, src, len);
+ else
+ float_to_int16_interleave_3dnow(dst, src, len, channels);
+ }
+
+ void ff_float_interleave2_mmx(float *dst, const float **src, unsigned int len);
+ void ff_float_interleave2_sse(float *dst, const float **src, unsigned int len);
+
+ void ff_float_interleave6_mmx(float *dst, const float **src, unsigned int len);
+ void ff_float_interleave6_sse(float *dst, const float **src, unsigned int len);
+
+ static void float_interleave_mmx(float *dst, const float **src,
+ unsigned int len, int channels)
+ {
+ if (channels == 2) {
+ ff_float_interleave2_mmx(dst, src, len);
+ } else if (channels == 6)
+ ff_float_interleave6_mmx(dst, src, len);
+ else
+ ff_float_interleave_c(dst, src, len, channels);
+ }
+
+ static void float_interleave_sse(float *dst, const float **src,
+ unsigned int len, int channels)
+ {
+ if (channels == 2) {
+ ff_float_interleave2_sse(dst, src, len);
+ } else if (channels == 6)
+ ff_float_interleave6_sse(dst, src, len);
+ else
+ ff_float_interleave_c(dst, src, len, channels);
+ }
+ #endif
+
+ void ff_fmt_convert_init_x86(FmtConvertContext *c, AVCodecContext *avctx)
+ {
+ #if HAVE_YASM
+ int mm_flags = av_get_cpu_flags();
+
+ if (mm_flags & AV_CPU_FLAG_MMX) {
+ c->float_interleave = float_interleave_mmx;
+
+ if (HAVE_AMD3DNOW && mm_flags & AV_CPU_FLAG_3DNOW) {
+ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
+ c->float_to_int16 = ff_float_to_int16_3dnow;
+ c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
+ }
+ }
+ if (HAVE_AMD3DNOWEXT && mm_flags & AV_CPU_FLAG_3DNOWEXT) {
+ if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
+ c->float_to_int16_interleave = float_to_int16_interleave_3dnowext;
+ }
+ }
+ if (HAVE_SSE && mm_flags & AV_CPU_FLAG_SSE) {
+ c->int32_to_float_fmul_scalar = ff_int32_to_float_fmul_scalar_sse;
+ c->float_to_int16 = ff_float_to_int16_sse;
+ c->float_to_int16_interleave = float_to_int16_interleave_sse;
+ c->float_interleave = float_interleave_sse;
+ }
+ if (HAVE_SSE && mm_flags & AV_CPU_FLAG_SSE2) {
+ c->int32_to_float_fmul_scalar = ff_int32_to_float_fmul_scalar_sse2;
+ c->float_to_int16 = ff_float_to_int16_sse2;
+ c->float_to_int16_interleave = float_to_int16_interleave_sse2;
+ }
+ }
+ #endif
+ }
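For orientation, the float_to_int16 family wired up above expects float samples that are already scaled to the int16_t range and converts them with rounding and saturation; the interleave variants additionally write each channel at a stride of 'channels'. The following plain-C sketch describes that behaviour as a reading aid; it is not FFmpeg's actual reference implementation.

#include <stdint.h>
#include <math.h>

/* round and saturate one float sample (already scaled to +/-32768) */
static int16_t sample_to_s16(float x)
{
    long v = lrintf(x);
    if (v >  32767) v =  32767;
    if (v < -32768) v = -32768;
    return (int16_t)v;
}

/* interleave 'channels' planar float buffers into one int16_t stream,
 * i.e. what float_to_int16_interleave_misc_*() above does one channel
 * at a time via the strided ff_float_to_int16_step_*() calls */
static void float_to_int16_interleave_ref(int16_t *dst, const float **src,
                                          long len, int channels)
{
    int c;
    long i;

    for (c = 0; c < channels; c++)
        for (i = 0; i < len; i++)
            dst[i * channels + c] = sample_to_s16(src[c][i]);
}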
--- /dev/null
+ /*
+ * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
+ *
- * This file is part of Libav.
++ * This file is part of FFmpeg.
+ *
- * Libav is free software; you can redistribute it and/or
++ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
- * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
++ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+ #include "libavutil/cpu.h"
+ #include "libavutil/x86/asm.h"
+ #include "libavcodec/h264dsp.h"
+ #include "dsputil_mmx.h"
+
+ /***********************************/
+ /* IDCT */
+ #define IDCT_ADD_FUNC(NUM, DEPTH, OPT) \
+ void ff_h264_idct ## NUM ## _add_ ## DEPTH ## _ ## OPT(uint8_t *dst, \
+ int16_t *block, \
+ int stride);
+
+ IDCT_ADD_FUNC(, 8, mmx)
+ IDCT_ADD_FUNC(, 10, sse2)
+ IDCT_ADD_FUNC(_dc, 8, mmx2)
+ IDCT_ADD_FUNC(_dc, 10, mmx2)
+ IDCT_ADD_FUNC(8_dc, 8, mmx2)
+ IDCT_ADD_FUNC(8_dc, 10, sse2)
+ IDCT_ADD_FUNC(8, 8, mmx)
+ IDCT_ADD_FUNC(8, 8, sse2)
+ IDCT_ADD_FUNC(8, 10, sse2)
+ #if HAVE_AVX
+ IDCT_ADD_FUNC(, 10, avx)
+ IDCT_ADD_FUNC(8_dc, 10, avx)
+ IDCT_ADD_FUNC(8, 10, avx)
+ #endif
+
+
+ #define IDCT_ADD_REP_FUNC(NUM, REP, DEPTH, OPT) \
+ void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
+ (uint8_t *dst, const int *block_offset, \
+ DCTELEM *block, int stride, const uint8_t nnzc[6 * 8]);
+
+ IDCT_ADD_REP_FUNC(8, 4, 8, mmx)
+ IDCT_ADD_REP_FUNC(8, 4, 8, mmx2)
+ IDCT_ADD_REP_FUNC(8, 4, 8, sse2)
+ IDCT_ADD_REP_FUNC(8, 4, 10, sse2)
+ IDCT_ADD_REP_FUNC(8, 4, 10, avx)
+ IDCT_ADD_REP_FUNC(, 16, 8, mmx)
+ IDCT_ADD_REP_FUNC(, 16, 8, mmx2)
+ IDCT_ADD_REP_FUNC(, 16, 8, sse2)
+ IDCT_ADD_REP_FUNC(, 16, 10, sse2)
+ IDCT_ADD_REP_FUNC(, 16intra, 8, mmx)
+ IDCT_ADD_REP_FUNC(, 16intra, 8, mmx2)
+ IDCT_ADD_REP_FUNC(, 16intra, 8, sse2)
+ IDCT_ADD_REP_FUNC(, 16intra, 10, sse2)
+ #if HAVE_AVX
+ IDCT_ADD_REP_FUNC(, 16, 10, avx)
+ IDCT_ADD_REP_FUNC(, 16intra, 10, avx)
+ #endif
+
+
+ #define IDCT_ADD_REP_FUNC2(NUM, REP, DEPTH, OPT) \
+ void ff_h264_idct ## NUM ## _add ## REP ## _ ## DEPTH ## _ ## OPT \
+ (uint8_t **dst, const int *block_offset, \
+ DCTELEM *block, int stride, const uint8_t nnzc[6 * 8]);
+
+ IDCT_ADD_REP_FUNC2(, 8, 8, mmx)
+ IDCT_ADD_REP_FUNC2(, 8, 8, mmx2)
+ IDCT_ADD_REP_FUNC2(, 8, 8, sse2)
+ IDCT_ADD_REP_FUNC2(, 8, 10, sse2)
+ #if HAVE_AVX
+ IDCT_ADD_REP_FUNC2(, 8, 10, avx)
+ #endif
+
+ void ff_h264_luma_dc_dequant_idct_mmx(DCTELEM *output, DCTELEM *input, int qmul);
+ void ff_h264_luma_dc_dequant_idct_sse2(DCTELEM *output, DCTELEM *input, int qmul);
+
+ /***********************************/
+ /* deblocking */
+
+ void ff_h264_loop_filter_strength_mmx2(int16_t bS[2][4][4], uint8_t nnz[40],
+ int8_t ref[2][40], int16_t mv[2][40][2],
+ int bidir, int edges, int step,
+ int mask_mv0, int mask_mv1, int field);
+
+ #define LF_FUNC(DIR, TYPE, DEPTH, OPT) \
+ void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT(uint8_t *pix, \
+ int stride, \
+ int alpha, \
+ int beta, \
+ int8_t *tc0);
+ #define LF_IFUNC(DIR, TYPE, DEPTH, OPT) \
+ void ff_deblock_ ## DIR ## _ ## TYPE ## _ ## DEPTH ## _ ## OPT(uint8_t *pix, \
+ int stride, \
+ int alpha, \
+ int beta);
+
+ #define LF_FUNCS(type, depth) \
+ LF_FUNC(h, chroma, depth, mmx2) \
+ LF_IFUNC(h, chroma_intra, depth, mmx2) \
+ LF_FUNC(v, chroma, depth, mmx2) \
+ LF_IFUNC(v, chroma_intra, depth, mmx2) \
+ LF_FUNC(h, luma, depth, mmx2) \
+ LF_IFUNC(h, luma_intra, depth, mmx2) \
+ LF_FUNC(h, luma, depth, sse2) \
+ LF_IFUNC(h, luma_intra, depth, sse2) \
+ LF_FUNC(v, luma, depth, sse2) \
+ LF_IFUNC(v, luma_intra, depth, sse2) \
+ LF_FUNC(h, chroma, depth, sse2) \
+ LF_IFUNC(h, chroma_intra, depth, sse2) \
+ LF_FUNC(v, chroma, depth, sse2) \
+ LF_IFUNC(v, chroma_intra, depth, sse2) \
+ LF_FUNC(h, luma, depth, avx) \
+ LF_IFUNC(h, luma_intra, depth, avx) \
+ LF_FUNC(v, luma, depth, avx) \
+ LF_IFUNC(v, luma_intra, depth, avx) \
+ LF_FUNC(h, chroma, depth, avx) \
+ LF_IFUNC(h, chroma_intra, depth, avx) \
+ LF_FUNC(v, chroma, depth, avx) \
+ LF_IFUNC(v, chroma_intra, depth, avx)
+
+ LF_FUNCS(uint8_t, 8)
+ LF_FUNCS(uint16_t, 10)
+
-#if ARCH_X86_32
++#if ARCH_X86_32 && HAVE_YASM
+ LF_FUNC(v8, luma, 8, mmx2)
+ static void ff_deblock_v_luma_8_mmx2(uint8_t *pix, int stride, int alpha,
+ int beta, int8_t *tc0)
+ {
+ if ((tc0[0] & tc0[1]) >= 0)
+ ff_deblock_v8_luma_8_mmx2(pix + 0, stride, alpha, beta, tc0);
+ if ((tc0[2] & tc0[3]) >= 0)
+ ff_deblock_v8_luma_8_mmx2(pix + 8, stride, alpha, beta, tc0 + 2);
+ }
+
+ LF_IFUNC(v8, luma_intra, 8, mmx2)
+ static void ff_deblock_v_luma_intra_8_mmx2(uint8_t *pix, int stride,
+ int alpha, int beta)
+ {
+ ff_deblock_v8_luma_intra_8_mmx2(pix + 0, stride, alpha, beta);
+ ff_deblock_v8_luma_intra_8_mmx2(pix + 8, stride, alpha, beta);
+ }
+ #endif /* ARCH_X86_32 */
+
+ LF_FUNC(v, luma, 10, mmx2)
+ LF_IFUNC(v, luma_intra, 10, mmx2)
+
+ /***********************************/
+ /* weighted prediction */
+
+ #define H264_WEIGHT(W, OPT) \
+ void ff_h264_weight_ ## W ## _ ## OPT(uint8_t *dst, int stride, \
+ int height, int log2_denom, \
+ int weight, int offset);
+
+ #define H264_BIWEIGHT(W, OPT) \
+ void ff_h264_biweight_ ## W ## _ ## OPT(uint8_t *dst, uint8_t *src, \
+ int stride, int height, \
+ int log2_denom, int weightd, \
+ int weights, int offset);
+
+ #define H264_BIWEIGHT_MMX(W) \
+ H264_WEIGHT(W, mmx2) \
+ H264_BIWEIGHT(W, mmx2)
+
+ #define H264_BIWEIGHT_MMX_SSE(W) \
+ H264_BIWEIGHT_MMX(W) \
+ H264_WEIGHT(W, sse2) \
+ H264_BIWEIGHT(W, sse2) \
+ H264_BIWEIGHT(W, ssse3)
+
+ H264_BIWEIGHT_MMX_SSE(16)
+ H264_BIWEIGHT_MMX_SSE(8)
+ H264_BIWEIGHT_MMX(4)
+
+ #define H264_WEIGHT_10(W, DEPTH, OPT) \
+ void ff_h264_weight_ ## W ## _ ## DEPTH ## _ ## OPT(uint8_t *dst, \
+ int stride, \
+ int height, \
+ int log2_denom, \
+ int weight, \
+ int offset);
+
+ #define H264_BIWEIGHT_10(W, DEPTH, OPT) \
+ void ff_h264_biweight_ ## W ## _ ## DEPTH ## _ ## OPT(uint8_t *dst, \
+ uint8_t *src, \
+ int stride, \
+ int height, \
+ int log2_denom, \
+ int weightd, \
+ int weights, \
+ int offset);
+
+ #define H264_BIWEIGHT_10_SSE(W, DEPTH) \
+ H264_WEIGHT_10(W, DEPTH, sse2) \
+ H264_WEIGHT_10(W, DEPTH, sse4) \
+ H264_BIWEIGHT_10(W, DEPTH, sse2) \
+ H264_BIWEIGHT_10(W, DEPTH, sse4)
+
+ H264_BIWEIGHT_10_SSE(16, 10)
+ H264_BIWEIGHT_10_SSE(8, 10)
+ H264_BIWEIGHT_10_SSE(4, 10)
+
+ void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,
+ const int chroma_format_idc)
+ {
+ #if HAVE_YASM
+ int mm_flags = av_get_cpu_flags();
+
+ if (chroma_format_idc == 1 && mm_flags & AV_CPU_FLAG_MMXEXT)
+ c->h264_loop_filter_strength = ff_h264_loop_filter_strength_mmx2;
+
+ if (bit_depth == 8) {
+ if (mm_flags & AV_CPU_FLAG_MMX) {
+ c->h264_idct_dc_add =
+ c->h264_idct_add = ff_h264_idct_add_8_mmx;
+ c->h264_idct8_dc_add =
+ c->h264_idct8_add = ff_h264_idct8_add_8_mmx;
+
+ c->h264_idct_add16 = ff_h264_idct_add16_8_mmx;
+ c->h264_idct8_add4 = ff_h264_idct8_add4_8_mmx;
+ if (chroma_format_idc == 1)
+ c->h264_idct_add8 = ff_h264_idct_add8_8_mmx;
+ c->h264_idct_add16intra = ff_h264_idct_add16intra_8_mmx;
+ if (mm_flags & AV_CPU_FLAG_CMOV)
+ c->h264_luma_dc_dequant_idct = ff_h264_luma_dc_dequant_idct_mmx;
+
+ if (mm_flags & AV_CPU_FLAG_MMXEXT) {
+ c->h264_idct_dc_add = ff_h264_idct_dc_add_8_mmx2;
+ c->h264_idct8_dc_add = ff_h264_idct8_dc_add_8_mmx2;
+ c->h264_idct_add16 = ff_h264_idct_add16_8_mmx2;
+ c->h264_idct8_add4 = ff_h264_idct8_add4_8_mmx2;
+ if (chroma_format_idc == 1)
+ c->h264_idct_add8 = ff_h264_idct_add8_8_mmx2;
+ c->h264_idct_add16intra = ff_h264_idct_add16intra_8_mmx2;
+
+ c->h264_v_loop_filter_chroma = ff_deblock_v_chroma_8_mmx2;
+ c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_8_mmx2;
+ if (chroma_format_idc == 1) {
+ c->h264_h_loop_filter_chroma = ff_deblock_h_chroma_8_mmx2;
+ c->h264_h_loop_filter_chroma_intra = ff_deblock_h_chroma_intra_8_mmx2;
+ }
+ #if ARCH_X86_32
+ c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_mmx2;
+ c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_mmx2;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_mmx2;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_mmx2;
+ #endif /* ARCH_X86_32 */
+ c->weight_h264_pixels_tab[0] = ff_h264_weight_16_mmx2;
+ c->weight_h264_pixels_tab[1] = ff_h264_weight_8_mmx2;
+ c->weight_h264_pixels_tab[2] = ff_h264_weight_4_mmx2;
+
+ c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_mmx2;
+ c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_mmx2;
+ c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_mmx2;
+
+ if (mm_flags & AV_CPU_FLAG_SSE2) {
+ c->h264_idct8_add = ff_h264_idct8_add_8_sse2;
+
+ c->h264_idct_add16 = ff_h264_idct_add16_8_sse2;
+ c->h264_idct8_add4 = ff_h264_idct8_add4_8_sse2;
+ if (chroma_format_idc == 1)
+ c->h264_idct_add8 = ff_h264_idct_add8_8_sse2;
+ c->h264_idct_add16intra = ff_h264_idct_add16intra_8_sse2;
+ c->h264_luma_dc_dequant_idct = ff_h264_luma_dc_dequant_idct_sse2;
+
+ c->weight_h264_pixels_tab[0] = ff_h264_weight_16_sse2;
+ c->weight_h264_pixels_tab[1] = ff_h264_weight_8_sse2;
+
+ c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_sse2;
+ c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_sse2;
+
+ #if HAVE_ALIGNED_STACK
+ c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_sse2;
+ c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_sse2;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_sse2;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_sse2;
+ #endif /* HAVE_ALIGNED_STACK */
+ }
+ if (mm_flags & AV_CPU_FLAG_SSSE3) {
+ c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_ssse3;
+ c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_ssse3;
+ }
- if (mm_flags & AV_CPU_FLAG_AVX) {
++ if (HAVE_AVX && mm_flags & AV_CPU_FLAG_AVX) {
+ #if HAVE_ALIGNED_STACK
+ c->h264_v_loop_filter_luma = ff_deblock_v_luma_8_avx;
+ c->h264_h_loop_filter_luma = ff_deblock_h_luma_8_avx;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_8_avx;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_8_avx;
+ #endif /* HAVE_ALIGNED_STACK */
+ }
+ }
+ }
+ } else if (bit_depth == 10) {
+ if (mm_flags & AV_CPU_FLAG_MMX) {
+ if (mm_flags & AV_CPU_FLAG_MMXEXT) {
+ #if ARCH_X86_32
+ c->h264_v_loop_filter_chroma = ff_deblock_v_chroma_10_mmx2;
+ c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_10_mmx2;
+ c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_mmx2;
+ c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_mmx2;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_mmx2;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_mmx2;
+ #endif /* ARCH_X86_32 */
+ c->h264_idct_dc_add = ff_h264_idct_dc_add_10_mmx2;
+ if (mm_flags & AV_CPU_FLAG_SSE2) {
+ c->h264_idct_add = ff_h264_idct_add_10_sse2;
+ c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_sse2;
+
+ c->h264_idct_add16 = ff_h264_idct_add16_10_sse2;
+ if (chroma_format_idc == 1)
+ c->h264_idct_add8 = ff_h264_idct_add8_10_sse2;
+ c->h264_idct_add16intra = ff_h264_idct_add16intra_10_sse2;
+ #if HAVE_ALIGNED_STACK
+ c->h264_idct8_add = ff_h264_idct8_add_10_sse2;
+ c->h264_idct8_add4 = ff_h264_idct8_add4_10_sse2;
+ #endif /* HAVE_ALIGNED_STACK */
+
+ c->weight_h264_pixels_tab[0] = ff_h264_weight_16_10_sse2;
+ c->weight_h264_pixels_tab[1] = ff_h264_weight_8_10_sse2;
+ c->weight_h264_pixels_tab[2] = ff_h264_weight_4_10_sse2;
+
+ c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_10_sse2;
+ c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_10_sse2;
+ c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_10_sse2;
+
+ c->h264_v_loop_filter_chroma = ff_deblock_v_chroma_10_sse2;
+ c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_10_sse2;
+ #if HAVE_ALIGNED_STACK
+ c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_sse2;
+ c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_sse2;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_sse2;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_sse2;
+ #endif /* HAVE_ALIGNED_STACK */
+ }
+ if (mm_flags & AV_CPU_FLAG_SSE4) {
+ c->weight_h264_pixels_tab[0] = ff_h264_weight_16_10_sse4;
+ c->weight_h264_pixels_tab[1] = ff_h264_weight_8_10_sse4;
+ c->weight_h264_pixels_tab[2] = ff_h264_weight_4_10_sse4;
+
+ c->biweight_h264_pixels_tab[0] = ff_h264_biweight_16_10_sse4;
+ c->biweight_h264_pixels_tab[1] = ff_h264_biweight_8_10_sse4;
+ c->biweight_h264_pixels_tab[2] = ff_h264_biweight_4_10_sse4;
+ }
+ #if HAVE_AVX
+ if (mm_flags & AV_CPU_FLAG_AVX) {
+ c->h264_idct_dc_add =
+ c->h264_idct_add = ff_h264_idct_add_10_avx;
+ c->h264_idct8_dc_add = ff_h264_idct8_dc_add_10_avx;
+
+ c->h264_idct_add16 = ff_h264_idct_add16_10_avx;
+ if (chroma_format_idc == 1)
+ c->h264_idct_add8 = ff_h264_idct_add8_10_avx;
+ c->h264_idct_add16intra = ff_h264_idct_add16intra_10_avx;
+ #if HAVE_ALIGNED_STACK
+ c->h264_idct8_add = ff_h264_idct8_add_10_avx;
+ c->h264_idct8_add4 = ff_h264_idct8_add4_10_avx;
+ #endif /* HAVE_ALIGNED_STACK */
+
+ c->h264_v_loop_filter_chroma = ff_deblock_v_chroma_10_avx;
+ c->h264_v_loop_filter_chroma_intra = ff_deblock_v_chroma_intra_10_avx;
+ #if HAVE_ALIGNED_STACK
+ c->h264_v_loop_filter_luma = ff_deblock_v_luma_10_avx;
+ c->h264_h_loop_filter_luma = ff_deblock_h_luma_10_avx;
+ c->h264_v_loop_filter_luma_intra = ff_deblock_v_luma_intra_10_avx;
+ c->h264_h_loop_filter_luma_intra = ff_deblock_h_luma_intra_10_avx;
+ #endif /* HAVE_ALIGNED_STACK */
+ }
+ #endif /* HAVE_AVX */
+ }
+ }
+ }
+ #endif /* HAVE_YASM */
+ }
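The weight/biweight tables filled in above implement H.264 explicit weighted prediction: each predicted sample is multiplied by a weight, rounded with a 2^(log2_denom-1) term, shifted down by log2_denom, then offset and clipped. The sketch below shows the unidirectional 8-bit case in scalar C as a reading aid for the SIMD versions; width is passed explicitly here, whereas the declarations above bake 16/8/4 into the function name.

#include <stdint.h>

static uint8_t clip_pixel(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* unidirectional explicit weighted prediction, 8-bit samples */
static void h264_weight_ref(uint8_t *dst, int stride, int width, int height,
                            int log2_denom, int weight, int offset)
{
    int x, y;

    for (y = 0; y < height; y++, dst += stride)
        for (x = 0; x < width; x++) {
            int v = dst[x] * weight;
            if (log2_denom > 0)
                v = (v + (1 << (log2_denom - 1))) >> log2_denom;
            dst[x] = clip_pixel(v + offset);
        }
}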
#include "libavutil/avutil.h"
#define LIBAVFORMAT_VERSION_MAJOR 54
-#define LIBAVFORMAT_VERSION_MINOR 13
-#define LIBAVFORMAT_VERSION_MICRO 3
+#define LIBAVFORMAT_VERSION_MINOR 23
- #define LIBAVFORMAT_VERSION_MICRO 100
++#define LIBAVFORMAT_VERSION_MICRO 101
#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
LIBAVFORMAT_VERSION_MINOR, \