Merge remote-tracking branch 'qatar/master'
diff --git a/libavcodec/tiffenc.c b/libavcodec/tiffenc.c
index c061ac9..7361614 100644
@@ -29,6 +29,7 @@
 #include "libavutil/opt.h"
 
 #include "avcodec.h"
+#include "internal.h"
 #if CONFIG_ZLIB
 #include <zlib.h>
 #endif
@@ -76,7 +77,7 @@ typedef struct TiffEncoderContext {
  * @param need Needed bytes
  * @return 0 - ok, 1 - no free space
  */
-inline static int check_size(TiffEncoderContext * s, uint64_t need)
+static inline int check_size(TiffEncoderContext * s, uint64_t need)
 {
     if (s->buf_size < *s->buf - s->buf_start + need) {
         *s->buf = s->buf_start + s->buf_size + 1;
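
The check_size() helper documented above returns 0 when need more bytes still fit into the output buffer and 1 when they do not; the assignment shown moves *s->buf past the end of the buffer, so every later check fails as well. A minimal, hypothetical usage sketch (the wrapper name is invented; check_size() and bytestream_put_le32() are the file's own):

/* Hypothetical wrapper: emit a 32-bit LE value only if it still fits.
 * check_size() returning 1 means "no free space", so we bail out. */
static void put_le32_checked(TiffEncoderContext *s, uint32_t v)
{
    if (check_size(s, 4))
        return;
    bytestream_put_le32(s->buf, v);
}
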
@@ -120,7 +121,7 @@ static void add_entry(TiffEncoderContext * s,
 {
     uint8_t *entries_ptr = s->entries + 12 * s->num_entries;
 
-    assert(s->num_entries < TIFF_MAX_ENTRY);
+    av_assert0(s->num_entries < TIFF_MAX_ENTRY);
 
     bytestream_put_le16(&entries_ptr, tag);
     bytestream_put_le16(&entries_ptr, type);
@@ -192,13 +193,24 @@ static void pack_yuv(TiffEncoderContext * s, uint8_t * dst, int lnum)
     int w = (s->width - 1) / s->subsampling[0] + 1;
     uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
     uint8_t *pv = &p->data[2][lnum / s->subsampling[1] * p->linesize[2]];
-    for (i = 0; i < w; i++){
-        for (j = 0; j < s->subsampling[1]; j++)
-            for (k = 0; k < s->subsampling[0]; k++)
-                *dst++ = p->data[0][(lnum + j) * p->linesize[0] +
-                                    i * s->subsampling[0] + k];
-        *dst++ = *pu++;
-        *dst++ = *pv++;
+    if (s->width % s->subsampling[0] || s->height % s->subsampling[1]) {
+        for (i = 0; i < w; i++){
+            for (j = 0; j < s->subsampling[1]; j++)
+                for (k = 0; k < s->subsampling[0]; k++)
+                    *dst++ = p->data[0][FFMIN(lnum + j, s->height-1) * p->linesize[0] +
+                                        FFMIN(i * s->subsampling[0] + k, s->width-1)];
+            *dst++ = *pu++;
+            *dst++ = *pv++;
+        }
+    } else {
+        for (i = 0; i < w; i++){
+            for (j = 0; j < s->subsampling[1]; j++)
+                for (k = 0; k < s->subsampling[0]; k++)
+                    *dst++ = p->data[0][(lnum + j) * p->linesize[0] +
+                                        i * s->subsampling[0] + k];
+            *dst++ = *pu++;
+            *dst++ = *pv++;
+        }
     }
 }
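
The new branch above handles frames whose width or height is not a multiple of the chroma subsampling factors: luma reads are clamped with FFMIN to the last valid row and column so the packer never reads past the plane, while the else branch keeps the original unclamped loop for evenly divisible sizes. A self-contained sketch of the same clamping idea, with a hypothetical helper name and CLAMP_MIN standing in for FFMIN:

#include <stdint.h>

#define CLAMP_MIN(a, b) ((a) < (b) ? (a) : (b))   /* plays the role of FFMIN */

/* Copy one sub_x x sub_y luma block starting at (x0, y0); coordinates
 * beyond the frame edge are clamped to the last row/column instead of
 * reading outside the plane. */
static void pack_block_clamped(uint8_t *dst, const uint8_t *luma,
                               int linesize, int width, int height,
                               int x0, int y0, int sub_x, int sub_y)
{
    int j, k;

    for (j = 0; j < sub_y; j++)
        for (k = 0; k < sub_x; k++) {
            int y = CLAMP_MIN(y0 + j, height - 1);
            int x = CLAMP_MIN(x0 + k, width  - 1);
            *dst++ = luma[y * linesize + x];
        }
}
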
 
@@ -251,23 +263,28 @@ static int encode_frame(AVCodecContext * avctx, AVPacket *pkt,
         bpp_tab[3] = 16;
         break;
     case PIX_FMT_RGBA:
+        avctx->bits_per_coded_sample =
         s->bpp = 32;
         s->photometric_interpretation = 2;
         break;
     case PIX_FMT_RGB24:
+        avctx->bits_per_coded_sample =
         s->bpp = 24;
         s->photometric_interpretation = 2;
         break;
     case PIX_FMT_GRAY8:
+        avctx->bits_per_coded_sample = 0x28;
         s->bpp = 8;
         s->photometric_interpretation = 1;
         break;
     case PIX_FMT_PAL8:
+        avctx->bits_per_coded_sample =
         s->bpp = 8;
         s->photometric_interpretation = 3;
         break;
     case PIX_FMT_MONOBLACK:
     case PIX_FMT_MONOWHITE:
+        avctx->bits_per_coded_sample =
         s->bpp = 1;
         s->photometric_interpretation = avctx->pix_fmt == PIX_FMT_MONOBLACK;
         bpp_tab[0] = 1;
@@ -303,12 +320,9 @@ static int encode_frame(AVCodecContext * avctx, AVPacket *pkt,
 
     strips = (s->height - 1) / s->rps + 1;
 
-    if (!pkt->data &&
-        (ret = av_new_packet(pkt, avctx->width * avctx->height * s->bpp * 2 +
-                                  avctx->height * 4 + FF_MIN_BUFFER_SIZE)) < 0) {
-        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
+    if ((ret = ff_alloc_packet2(avctx, pkt, avctx->width * avctx->height * s->bpp * 2 +
+                                  avctx->height * 4 + FF_MIN_BUFFER_SIZE)) < 0)
         return ret;
-    }
     ptr          = pkt->data;
     s->buf_start = pkt->data;
     s->buf       = &ptr;
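
The allocation above now goes through ff_alloc_packet2() from internal.h instead of the open-coded av_new_packet() call: the helper either validates a caller-supplied packet buffer or allocates a fresh one of the requested size, and it reports failure itself, so the local av_log() is no longer needed. A rough, paraphrased sketch of that behaviour (an approximation, not the real implementation in libavcodec):

/* Sketch only: reuse the user's buffer when it is large enough, otherwise
 * allocate a new packet; a negative return value means failure. */
static int alloc_packet_sketch(AVCodecContext *avctx, AVPacket *pkt, int size)
{
    if (pkt->data) {
        if (pkt->size < size) {
            av_log(avctx, AV_LOG_ERROR, "Supplied packet is too small.\n");
            return AVERROR(EINVAL);
        }
        pkt->size = size;
        return 0;
    }
    return av_new_packet(pkt, size);
}
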
@@ -463,7 +477,7 @@ fail:
     av_free(strip_sizes);
     av_free(strip_offsets);
     av_free(yuv_line);
-    return ret;
+    return ret < 0 ? ret : 0;
 }
 
 #define OFFSET(x) offsetof(TiffEncoderContext, x)
@@ -493,13 +507,14 @@ AVCodec ff_tiff_encoder = {
     .id             = CODEC_ID_TIFF,
     .priv_data_size = sizeof(TiffEncoderContext),
     .encode2        = encode_frame,
-    .pix_fmts =
-        (const enum PixelFormat[]) {PIX_FMT_RGB24, PIX_FMT_PAL8, PIX_FMT_GRAY8,
-                              PIX_FMT_MONOBLACK, PIX_FMT_MONOWHITE,
-                              PIX_FMT_YUV420P, PIX_FMT_YUV422P,
-                              PIX_FMT_YUV444P, PIX_FMT_YUV410P,
-                              PIX_FMT_YUV411P, PIX_FMT_RGB48LE,
-                              PIX_FMT_RGBA, PIX_FMT_RGBA64LE, PIX_FMT_NONE},
-    .long_name = NULL_IF_CONFIG_SMALL("TIFF image"),
+    .pix_fmts       = (const enum PixelFormat[]) {
+        PIX_FMT_RGB24, PIX_FMT_PAL8, PIX_FMT_GRAY8,
+        PIX_FMT_MONOBLACK, PIX_FMT_MONOWHITE,
+        PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_YUV444P,
+        PIX_FMT_YUV410P, PIX_FMT_YUV411P, PIX_FMT_RGB48LE,
+        PIX_FMT_RGBA, PIX_FMT_RGBA64LE,
+        PIX_FMT_NONE
+    },
+    .long_name      = NULL_IF_CONFIG_SMALL("TIFF image"),
     .priv_class     = &tiffenc_class,
 };
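
The registration block exposes the encoder's private options through .priv_class, so an API user of this FFmpeg era can set them on the context's priv_data with av_opt_set(). A hedged usage sketch; the option name "compression_algo" and its "lzw" value are assumptions for illustration, not taken from this diff:

#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>
#include <libavutil/opt.h>

/* Open a TIFF encoder context for w x h RGB24 input.  The private option
 * name below is assumed for illustration purposes only. */
static AVCodecContext *open_tiff_encoder(int w, int h)
{
    AVCodec *codec;
    AVCodecContext *ctx;

    avcodec_register_all();                     /* required in this API era */
    codec = avcodec_find_encoder(CODEC_ID_TIFF);
    if (!codec)
        return NULL;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return NULL;
    ctx->width     = w;
    ctx->height    = h;
    ctx->pix_fmt   = PIX_FMT_RGB24;             /* one of .pix_fmts above */
    ctx->time_base = (AVRational){ 1, 25 };
    /* Options declared via .priv_class live in ctx->priv_data. */
    av_opt_set(ctx->priv_data, "compression_algo", "lzw", 0);
    if (avcodec_open2(ctx, codec, NULL) < 0) {
        av_free(ctx);
        return NULL;
    }
    return ctx;
}
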