diff --git a/libavcodec/tiff.c b/libavcodec/tiff.c
index 0868a016e3..c5891d4d70 100644
--- a/libavcodec/tiff.c
+++ b/libavcodec/tiff.c
@@ -53,7 +53,7 @@ typedef struct TiffContext {
     int palette_is_set;
     int le;
     enum TiffCompr compr;
-    int invert;
+    enum TiffPhotometric photometric;
     int planar;
     int fax_opts;
     int predictor;
@@ -744,20 +744,31 @@ static int tiff_decode_tag(TiffContext *s, AVFrame *frame)
     case TIFF_PREDICTOR:
         s->predictor = value;
         break;
-    case TIFF_INVERT:
+    case TIFF_PHOTOMETRIC:
         switch (value) {
-        case 0:
-            s->invert = 1;
-            break;
-        case 1:
-            s->invert = 0;
-            break;
-        case 2:
-        case 3:
+        case TIFF_PHOTOMETRIC_WHITE_IS_ZERO:
+        case TIFF_PHOTOMETRIC_BLACK_IS_ZERO:
+        case TIFF_PHOTOMETRIC_RGB:
+        case TIFF_PHOTOMETRIC_PALETTE:
+            s->photometric = value;
             break;
+        case TIFF_PHOTOMETRIC_ALPHA_MASK:
+        case TIFF_PHOTOMETRIC_SEPARATED:
+        case TIFF_PHOTOMETRIC_YCBCR:
+        case TIFF_PHOTOMETRIC_CIE_LAB:
+        case TIFF_PHOTOMETRIC_ICC_LAB:
+        case TIFF_PHOTOMETRIC_ITU_LAB:
+        case TIFF_PHOTOMETRIC_CFA:
+        case TIFF_PHOTOMETRIC_LOG_L:
+        case TIFF_PHOTOMETRIC_LOG_LUV:
+        case TIFF_PHOTOMETRIC_LINEAR_RAW:
+            avpriv_report_missing_feature(s->avctx,
+                                          "PhotometricInterpretation 0x%04X",
+                                          value);
+            return AVERROR_PATCHWELCOME;
         default:
-            av_log(s->avctx, AV_LOG_ERROR, "Color mode %d is not supported\n",
-                   value);
+            av_log(s->avctx, AV_LOG_ERROR, "PhotometricInterpretation %u is "
+                   "unknown\n", value);
             return AVERROR_INVALIDDATA;
         }
         break;
@@ -967,12 +978,12 @@ static int decode_frame(AVCodecContext *avctx,
         av_log(avctx, AV_LOG_ERROR, "IFD offset is greater than image size\n");
         return AVERROR_INVALIDDATA;
     }
-    s->le         = le;
+    s->le          = le;
     // TIFF_BPP is not a required tag and defaults to 1
-    s->bppcount   = s->bpp = 1;
-    s->invert     = 0;
-    s->compr      = TIFF_RAW;
-    s->fill_order = 0;
+    s->bppcount    = s->bpp = 1;
+    s->photometric = TIFF_PHOTOMETRIC_NONE;
+    s->compr       = TIFF_RAW;
+    s->fill_order  = 0;
     free_geotags(s);
 
     // Reset these offsets so we can tell if they were set this frame
@@ -1095,7 +1106,7 @@ static int decode_frame(AVCodecContext *avctx,
         }
     }
 
-    if (s->invert) {
+    if (s->photometric == TIFF_PHOTOMETRIC_WHITE_IS_ZERO) {
         dst = p->data[plane];
         for (i = 0; i < s->height; i++) {
             for (j = 0; j < p->linesize[plane]; j++)
diff --git a/libavcodec/tiff.h b/libavcodec/tiff.h
index ae189b6fdd..11e9f1629d 100644
--- a/libavcodec/tiff.h
+++ b/libavcodec/tiff.h
@@ -40,7 +40,7 @@ enum TiffTags {
     TIFF_HEIGHT,
     TIFF_BPP,
     TIFF_COMPR,
-    TIFF_INVERT             = 0x106,
+    TIFF_PHOTOMETRIC        = 0x106,
     TIFF_FILL_ORDER         = 0x10A,
     TIFF_DOCUMENT_NAME      = 0x10D,
     TIFF_IMAGE_DESCRIPTION  = 0x10E,
@@ -146,6 +146,24 @@ enum TiffGeoTagKey {
     TIFF_VERTICAL_UNITS_GEOKEY     = 4099
 };
 
+enum TiffPhotometric {
+    TIFF_PHOTOMETRIC_NONE       = -1,
+    TIFF_PHOTOMETRIC_WHITE_IS_ZERO,      /* mono or grayscale, 0 is white */
+    TIFF_PHOTOMETRIC_BLACK_IS_ZERO,      /* mono or grayscale, 0 is black */
+    TIFF_PHOTOMETRIC_RGB,                /* RGB or RGBA */
+    TIFF_PHOTOMETRIC_PALETTE,            /* Uses a palette */
+    TIFF_PHOTOMETRIC_ALPHA_MASK,         /* Transparency mask */
+    TIFF_PHOTOMETRIC_SEPARATED,          /* CMYK or some other ink set */
+    TIFF_PHOTOMETRIC_YCBCR,              /* YCbCr */
+    TIFF_PHOTOMETRIC_CIE_LAB    = 8,     /* 1976 CIE L*a*b* */
+    TIFF_PHOTOMETRIC_ICC_LAB,            /* ICC L*a*b* */
+    TIFF_PHOTOMETRIC_ITU_LAB,            /* ITU L*a*b* */
+    TIFF_PHOTOMETRIC_CFA        = 32803, /* Color Filter Array (DNG) */
+    TIFF_PHOTOMETRIC_LOG_L      = 32844, /* CIE Log2(L) */
+    TIFF_PHOTOMETRIC_LOG_LUV,            /* CIE Log L*u*v* */
+    TIFF_PHOTOMETRIC_LINEAR_RAW = 34892, /* Linear Raw (DNG) */
+};
+
 enum TiffGeoTagType {
     GEOTIFF_SHORT  = 0,
     GEOTIFF_DOUBLE = 34736,
diff --git a/libavcodec/tiffenc.c b/libavcodec/tiffenc.c
index 67229a93b1..456b3bf54a 100644
--- a/libavcodec/tiffenc.c
+++ b/libavcodec/tiffenc.c
@@ -58,7 +58,7 @@ typedef struct TiffEncoderContext {
     unsigned int bpp;                       ///< bits per pixel
    int compr;                              ///< compression level
     int bpp_tab_size;                       ///< bpp_tab size
-    int photometric_interpretation;         ///< photometric interpretation
+    enum TiffPhotometric photometric_interpretation;  ///< photometric interpretation
     int strips;                             ///< number of strips
     uint32_t *strip_sizes;
     unsigned int strip_sizes_size;
@@ -254,7 +254,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
         alpha = 1;
     case AV_PIX_FMT_RGB48LE:
     case AV_PIX_FMT_RGB24:
-        s->photometric_interpretation = 2;
+        s->photometric_interpretation = TIFF_PHOTOMETRIC_RGB;
         break;
     case AV_PIX_FMT_GRAY8:
         avctx->bits_per_coded_sample = 0x28;
@@ -262,13 +262,13 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
         alpha = avctx->pix_fmt == AV_PIX_FMT_GRAY8A;
     case AV_PIX_FMT_GRAY16LE:
     case AV_PIX_FMT_MONOBLACK:
-        s->photometric_interpretation = 1;
+        s->photometric_interpretation = TIFF_PHOTOMETRIC_BLACK_IS_ZERO;
         break;
     case AV_PIX_FMT_PAL8:
-        s->photometric_interpretation = 3;
+        s->photometric_interpretation = TIFF_PHOTOMETRIC_PALETTE;
         break;
     case AV_PIX_FMT_MONOWHITE:
-        s->photometric_interpretation = 0;
+        s->photometric_interpretation = TIFF_PHOTOMETRIC_WHITE_IS_ZERO;
         break;
     case AV_PIX_FMT_YUV420P:
     case AV_PIX_FMT_YUV422P:
@@ -277,7 +277,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     case AV_PIX_FMT_YUV410P:
     case AV_PIX_FMT_YUV411P:
         av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &shift_h, &shift_v);
-        s->photometric_interpretation = 6;
+        s->photometric_interpretation = TIFF_PHOTOMETRIC_YCBCR;
         s->subsampling[0]             = 1 << shift_h;
         s->subsampling[1]             = 1 << shift_v;
         is_yuv                        = 1;
@@ -426,9 +426,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     if (s->bpp_tab_size)
         add_entry(s, TIFF_BPP, TIFF_SHORT, s->bpp_tab_size, bpp_tab);
 
-    add_entry1(s, TIFF_COMPR,      TIFF_SHORT, s->compr);
-    add_entry1(s, TIFF_INVERT,     TIFF_SHORT, s->photometric_interpretation);
-    add_entry(s, TIFF_STRIP_OFFS,  TIFF_LONG,  strips, s->strip_offsets);
+    add_entry1(s, TIFF_COMPR,       TIFF_SHORT, s->compr);
+    add_entry1(s, TIFF_PHOTOMETRIC, TIFF_SHORT, s->photometric_interpretation);
+    add_entry(s, TIFF_STRIP_OFFS,   TIFF_LONG,  strips, s->strip_offsets);
 
     if (s->bpp_tab_size)
         add_entry1(s, TIFF_SAMPLES_PER_PIXEL, TIFF_SHORT, s->bpp_tab_size);
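
Below is a minimal, self-contained sketch (not part of the patch) showing how a raw PhotometricInterpretation value from TIFF tag 0x106 maps onto the TiffPhotometric enum added above. The enum names and values are copied from the patch's tiff.h; the describe_photometric() helper and main() driver are hypothetical, added purely for illustration. It mirrors the decoder's new switch: only the first four interpretations are decoded, the remaining named ones are recognized but reported as unsupported.

/* Illustrative sketch; enum copied from libavcodec/tiff.h as added by this
 * patch, helper and main() are hypothetical. Compile with any C99 compiler. */
#include <stdio.h>

enum TiffPhotometric {
    TIFF_PHOTOMETRIC_NONE       = -1,
    TIFF_PHOTOMETRIC_WHITE_IS_ZERO,      /* mono or grayscale, 0 is white */
    TIFF_PHOTOMETRIC_BLACK_IS_ZERO,      /* mono or grayscale, 0 is black */
    TIFF_PHOTOMETRIC_RGB,                /* RGB or RGBA */
    TIFF_PHOTOMETRIC_PALETTE,            /* Uses a palette */
    TIFF_PHOTOMETRIC_ALPHA_MASK,         /* Transparency mask */
    TIFF_PHOTOMETRIC_SEPARATED,          /* CMYK or some other ink set */
    TIFF_PHOTOMETRIC_YCBCR,              /* YCbCr */
    TIFF_PHOTOMETRIC_CIE_LAB    = 8,     /* 1976 CIE L*a*b* */
    TIFF_PHOTOMETRIC_ICC_LAB,            /* ICC L*a*b* */
    TIFF_PHOTOMETRIC_ITU_LAB,            /* ITU L*a*b* */
    TIFF_PHOTOMETRIC_CFA        = 32803, /* Color Filter Array (DNG) */
    TIFF_PHOTOMETRIC_LOG_L      = 32844, /* CIE Log2(L) */
    TIFF_PHOTOMETRIC_LOG_LUV,            /* CIE Log L*u*v* */
    TIFF_PHOTOMETRIC_LINEAR_RAW = 34892, /* Linear Raw (DNG) */
};

/* Mirrors the decoder's switch: the first four interpretations are decoded,
 * everything else is unsupported or unknown. */
static const char *describe_photometric(unsigned value)
{
    switch ((enum TiffPhotometric)value) {
    case TIFF_PHOTOMETRIC_WHITE_IS_ZERO: return "WhiteIsZero (decoded)";
    case TIFF_PHOTOMETRIC_BLACK_IS_ZERO: return "BlackIsZero (decoded)";
    case TIFF_PHOTOMETRIC_RGB:           return "RGB (decoded)";
    case TIFF_PHOTOMETRIC_PALETTE:       return "Palette (decoded)";
    case TIFF_PHOTOMETRIC_YCBCR:         return "YCbCr (recognized, unsupported)";
    case TIFF_PHOTOMETRIC_SEPARATED:     return "Separated/CMYK (recognized, unsupported)";
    default:                             return "other (unsupported or unknown)";
    }
}

int main(void)
{
    /* Example values as they would appear in IFD tag 0x106. */
    unsigned samples[] = { 0, 1, 2, 3, 6, 32803 };
    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("PhotometricInterpretation %u -> %s\n",
               samples[i], describe_photometric(samples[i]));
    return 0;
}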