1 /*===========================================================================*
2 - Copyright 2010 Google Inc.
3 -
4 - This code is licensed under the same terms as WebM:
5 - Software License Agreement: http://www.webmproject.org/license/software/
6 - Additional IP Rights Grant: http://www.webmproject.org/license/additional/
7 *===========================================================================*/
8
9 /*
10 * Encoding/Decoding of WebP still image compression format.
11 *
12 * 1. WebPDecode: Takes an array of bytes (string) corresponding to the WebP
13 * encoded image and generates output in the YUV format with
14 * the color components U, V subsampled to 1/2 resolution along
15 * each dimension.
16 *
17 * 2. YUV420toRGBA: Converts from YUV (with color subsampling) such as produced
18 * by the WebPDecode routine into 32 bits per pixel RGBA data
19 * array. This data array can be directly used by the Leptonica
20 * Pix in-memory image format.
21 *
22 * 3. WebPEncode: Takes a Y, U, V data buffers (with color components U and V
23 * subsampled to 1/2 resolution) and generates the WebP string
24 *
25 * 4. RGBAToYUV420: Generates Y, U, V data (with color subsampling) from 32 bits
26 * per pixel RGBA data buffer. The resulting YUV data can be
27 * directly fed into the WebPEncode routine.
28 *
 * 5. AdjustColorspace: Scales full-range YUV values down to the reduced
 *                      range used by the codec (luma mapped to [16..235]).
 *
 * 6. AdjustColorspaceBack: Inverse of AdjustColorspace; expands the
 *                      reduced-range YUV values back to full range.
32 */
33 #include "gd.h"
34 #ifdef HAVE_LIBVPX
35 #include "webpimg.h"
36
37 #include <math.h>
38 #include <stdio.h>
39 #include <stdlib.h>
40 #include <string.h>
41 #include <sys/stat.h>
42
43 #include "vpx/vpx_decoder.h"
44 #include "vpx/vp8dx.h"
45 #include "vpx/vpx_encoder.h"
46 #include "vpx/vp8cx.h"
47 #include "gd.h"
48
49 /*---------------------------------------------------------------------*
50 * color conversions *
51 *---------------------------------------------------------------------*/
52
53 #ifndef inline
54 # define inline __inline
55 #endif
/* Clamps v to the range [a, b] and truncates the result to int.
 * Bug fix: the previous version ignored the lower bound 'a' and always
 * clamped to 0; it now honors both bounds. (The function is file-static
 * and currently unreferenced, so no caller behavior changes.) */
static inline int clip(float v, int a, int b) {
    return (v > b) ? b : (v < a) ? a : (int)(v);
}
/* Byte position of each channel inside a 32-bit gd truecolor pixel,
 * counted from the most significant byte (0 = high byte = alpha). */
enum {
    COLOR_RED = 1,
    COLOR_GREEN = 2,
    COLOR_BLUE = 3,
    ALPHA_CHANNEL = 0
};

/* endian neutral extractions of ARGB from a 32 bit pixel */
/* Bit shifts that move each channel's byte into the low 8 bits.
 * Assumes sizeof(uint32) == 4, giving the shift values noted below. */
static const uint32 RED_SHIFT =
    8 * (sizeof(uint32) - 1 - COLOR_RED);        /* 16 */
static const uint32 GREEN_SHIFT =
    8 * (sizeof(uint32) - 1 - COLOR_GREEN);      /* 8 */
static const uint32 BLUE_SHIFT =
    8 * (sizeof(uint32) - 1 - COLOR_BLUE);       /* 0 */
static const uint32 ALPHA_SHIFT =
    8 * (sizeof(uint32) - 1 - ALPHA_CHANNEL);    /* 24 */
75
/* Extracts the red component (0..255) from a gd truecolor pixel. */
static inline int GetRed(const uint32* rgba) {
    return gdTrueColorGetRed(*rgba);
}

/* Extracts the green component (0..255) from a gd truecolor pixel. */
static inline int GetGreen(const uint32* rgba) {
    return gdTrueColorGetGreen(*rgba);
}

/* Extracts the blue component (0..255) from a gd truecolor pixel. */
static inline int GetBlue(const uint32* rgba) {
    return gdTrueColorGetBlue(*rgba);
}
87
/* Number of fractional bits in the fixed-point RGB<->YUV coefficients. */
enum { YUV_FRAC = 16 };

/* Descales a chroma accumulator (scaled by 1 << (YUV_FRAC + 2), i.e. the
 * coefficient scale times the 4-pixel block sum), folds in the +128 bias
 * with rounding, and clamps the result to the byte range [0, 255]. */
static inline int clip_uv(int v) {
    const int shift = YUV_FRAC + 2;
    const int scaled = (v + (257 << (shift - 1))) >> shift;
    if (scaled < 0) {
        return 0;
    }
    if (scaled > 255) {
        return 255;
    }
    return scaled;
}
94
95
96 /* YUV <-----> RGB conversions */
97 /* The exact naming is Y'CbCr, following the ITU-R BT.601 standard.
98 * More information at: http://en.wikipedia.org/wiki/YCbCr
99 */
GetLumaY(int r,int g,int b)100 static inline int GetLumaY(int r, int g, int b) {
101 const int kRound = (1 << (YUV_FRAC - 1)) + (16 << YUV_FRAC);
102 // Y = 0.2569 * R + 0.5044 * G + 0.0979 * B + 16
103 const int luma = 16839 * r + 33059 * g + 6420 * b;
104 return (luma + kRound) >> YUV_FRAC;
105 }
106
/* Computes the BT.601 luma value for a single gd truecolor pixel. */
static inline int GetLumaYfromPtr(uint32* rgba) {
    return GetLumaY(GetRed(rgba), GetGreen(rgba), GetBlue(rgba));
}
113
/* Fixed-point BT.601 chroma: U = -0.1483 * R - 0.2911 * G + 0.4394 * B + 128.
 * The +128 bias, descaling and clamping all happen inside clip_uv(). */
static inline int GetChromaU(int r, int g, int b) {
    const int acc = 28800 * b - 9719 * r - 19081 * g;
    return clip_uv(acc);
}

/* Fixed-point BT.601 chroma: V = 0.4394 * R - 0.3679 * G - 0.0715 * B + 128. */
static inline int GetChromaV(int r, int g, int b) {
    const int acc = 28800 * r - 24116 * g - 4684 * b;
    return clip_uv(acc);
}
123
124 /* Converts YUV to RGB and writes into a 32 bit pixel in endian
125 * neutral fashion
126 */
/* Fixed-point scale for the YUV->RGB weights, and the clamp range of the
 * intermediate RGB values before the final lookup in kClip[]. */
enum { RGB_FRAC = 16, RGB_HALF = (1 << RGB_FRAC) / 2,
       RGB_RANGE_MIN = -227, RGB_RANGE_MAX = 256 + 226 };

/* Lazily initialized lookup tables; built once by InitTables(). */
static int init_done = 0;
static int16_t kVToR[256], kUToB[256];   /* descaled V->R and U->B offsets */
static int32_t kVToG[256], kUToG[256];   /* V->G and U->G, still scaled */
static uint8_t kClip[RGB_RANGE_MAX - RGB_RANGE_MIN]; /* clamp-to-byte LUT */
134
InitTables()135 static void InitTables() {
136 int i;
137 for (i = 0; i < 256; ++i) {
138 kVToR[i] = (89858 * (i - 128) + RGB_HALF) >> RGB_FRAC;
139 kUToG[i] = -22014 * (i - 128) + RGB_HALF;
140 kVToG[i] = -45773 * (i - 128);
141 kUToB[i] = (113618 * (i - 128) + RGB_HALF) >> RGB_FRAC;
142 }
143 for (i = RGB_RANGE_MIN; i < RGB_RANGE_MAX; ++i) {
144 const int j = ((i - 16) * 76283 + RGB_HALF) >> RGB_FRAC;
145 kClip[i - RGB_RANGE_MIN] = (j < 0) ? 0 : (j > 255) ? 255 : j;
146 }
147
148 init_done = 1;
149 }
150
/* Converts one (y, u, v) triplet to RGB via the lookup tables and stores it
 * into *dst using the endian-neutral *_SHIFT byte positions (alpha left 0). */
static void ToRGB(int y, int u, int v, uint32* const dst) {
    const int red = kClip[y + kVToR[v] - RGB_RANGE_MIN];
    const int green =
        kClip[y + ((kVToG[v] + kUToG[u]) >> RGB_FRAC) - RGB_RANGE_MIN];
    const int blue = kClip[y + kUToB[u] - RGB_RANGE_MIN];
    *dst = (red << RED_SHIFT) | (green << GREEN_SHIFT) | (blue << BLUE_SHIFT);
}
160
get_le32(const uint8 * const data)161 static inline uint32 get_le32(const uint8* const data) {
162 return data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24);
163 }
164
165 /* Returns the difference (in dB) between two images represented in YUV format
166 *
167 * Input:
168 * Y1/U1/V1: The Y/U/V data of the first image
169 * Y2/U2/V2: The Y/U/V data of the second image
170 *
171 * Returns the PSNR (http://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio)
172 * value computed between the two images
173 */
/* Returns the PSNR (in dB) between two same-sized YUV420 images.
 * Y planes are y_width x y_height; U/V planes are half resolution in each
 * dimension. See http://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio */
double GetPSNRYuv(const uint8* Y1,
                  const uint8* U1,
                  const uint8* V1,
                  const uint8* Y2,
                  const uint8* U2,
                  const uint8* V2,
                  int y_width,
                  int y_height) {
    const int uv_width = (y_width + 1) >> 1;
    const int uv_height = (y_height + 1) >> 1;
    double sse = 0.0;
    double count = 0.0;
    int row, col;

    /* Luma plane. */
    for (row = 0; row < y_height; ++row) {
        const int base = row * y_width;
        count += y_width;
        for (col = 0; col < y_width; ++col) {
            const double d = (double)Y1[base + col] - (double)Y2[base + col];
            sse += d * d;
        }
    }
    /* Chroma planes. */
    for (row = 0; row < uv_height; ++row) {
        const int base = row * uv_width;
        count += 2 * uv_width;
        for (col = 0; col < uv_width; ++col) {
            const double du = (double)U1[base + col] - (double)U2[base + col];
            const double dv = (double)V1[base + col] - (double)V2[base + col];
            sse += du * du + dv * dv;
        }
    }
    /* -4.3429448 * ln(x) == 10 * log10(1 / x), i.e. PSNR over peak 255. */
    return -4.3429448 * log(sse / (255. * 255. * count));
}
205
206 /* Returns the difference (in dB) between two images. One represented
207 * using Y,U,V vectors and the other is webp image data.
208 * Input:
209 * Y1/U1/V1: The Y/U/V data of the first image
210 * imgdata: data buffer containing webp image
211 * imgdata_size: size of the imgdata buffer
212 *
213 * Returns the PSNR value computed between the two images
214 */
/* Returns the PSNR between a YUV420 image (Y1/U1/V1) and a WebP-encoded
 * image (imgdata of imgdata_size bytes), by decoding the latter first.
 * Returns 0 if the WebP data cannot be decoded.
 * Bug fix: the WebPDecode() result used to be ignored; on failure the
 * Y2/U2/V2 pointers stayed NULL and GetPSNRYuv() dereferenced them. */
double WebPGetPSNR(const uint8* Y1,
                   const uint8* U1,
                   const uint8* V1,
                   uint8* imgdata,
                   int imgdata_size) {
    uint8* Y2 = NULL;
    uint8* U2 = NULL;
    uint8* V2 = NULL;
    int w = 0, h = 0;
    double psnr = 0;

    if (WebPDecode(imgdata,
                   imgdata_size,
                   &Y2,
                   &U2,
                   &V2,
                   &w,
                   &h) != webp_success) {
        return 0;
    }

    psnr = GetPSNRYuv(Y1, U1, V1, Y2, U2, V2, w, h);
    /* Y2 owns the single allocation that also backs U2 and V2. */
    free(Y2);

    return psnr;
}
239
240 /*---------------------------------------------------------------------*
241 * Reading WebP *
242 *---------------------------------------------------------------------*/
243
244 /* RIFF layout is:
 * Offset  tag
246 * 0...3 "RIFF" 4-byte tag
247 * 4...7 size of image data (including metadata) starting at offset 8
248 * 8...11 "WEBP" our form-type signature
249 * 12..15 "VP8 " 4-byte tags, describing the raw video format used
250 * 16..19 size of the raw WebP image data, starting at offset 20
251 * 20.... the WebP bytes
252 * There can be extra chunks after the "VP8 " chunk (ICMT, ICOP, ...)
253 * All 32-bits sizes are in little-endian order.
254 * Note: chunk data must be padded to multiple of 2 in size
255 */
256
/* Checks *data_ptr for a RIFF/WEBP container and, when one is present,
 * advances *data_ptr / shrinks *data_size_ptr past the 20-byte container
 * header so that they describe the raw VP8 payload.
 * Returns:
 *   0xffffffff - no RIFF container found (bare VP8 data; pointers untouched)
 *   0          - malformed or unsupported container (error)
 *   otherwise  - the byte size of the "VP8 " chunk payload
 * NOTE(review): riff_size + 8 is computed in uint32 and could wrap for
 * hostile sizes near UINT32_MAX - verify against upstream validation. */
int SkipRiffHeader(const uint8** data_ptr, int *data_size_ptr) {
    /* 20 bytes RIFF header 10 bytes VP8 header */
    const int kHeaderSize = (20 + 10);
    uint32 chunk_size = 0xffffffffu;

    if (*data_size_ptr >= kHeaderSize && !memcmp(*data_ptr, "RIFF", 4)) {
        if (memcmp(*data_ptr + 8, "WEBP", 4)) {
            return 0; /* wrong image file signature */
        } else {
            const uint32 riff_size = get_le32(*data_ptr + 4);
            if (memcmp(*data_ptr + 12, "VP8 ", 4)) {
                return 0; /* invalid compression format */
            }
            chunk_size = get_le32(*data_ptr + 16);
            /* Chunk data must be even-sized and fit inside the RIFF size. */
            if ((chunk_size > riff_size + 8) || (chunk_size & 1)) {
                return 0; /* inconsistent size information. */
            }
            /* We have a RIFF container. Skip it. */
            *data_ptr += 20;
            *data_size_ptr -= 20;
        }
    }
    return chunk_size;
}
281
/* Generates an RGBA row from a YUV row (with width upsampling of chroma data)
 * Input:
 *    1, 2, 3. y_src, u_src, v_src - Pointers to input Y, U, V row data
 *       respectively. We reuse these variables, they iterate over all pixels
 *       in the row.
 *    4. y_width: width of the Y image plane (aka image width)
 * Output:
 *    5. rgb_dst: pointer to the output rgb row. We reuse this variable, it
 *       iterates over all pixels in the row.
 */
/* Expands one YUV row to RGBA: each U/V sample covers a horizontal pair of
 * luma samples, so chroma is upsampled 2x in width here. */
static void YUV420toRGBLine(uint8* y_src,
                            uint8* u_src,
                            uint8* v_src,
                            int y_width,
                            uint32* rgb_dst) {
    const int pairs = y_width >> 1;
    int i;
    for (i = 0; i < pairs; ++i) {
        const int u = *u_src++;
        const int v = *v_src++;
        ToRGB(y_src[0], u, v, rgb_dst);
        ToRGB(y_src[1], u, v, rgb_dst + 1);
        y_src += 2;
        rgb_dst += 2;
    }
    if (y_width & 1) { /* odd width: last pixel has no horizontal partner */
        ToRGB(y_src[0], u_src[0], v_src[0], rgb_dst);
    }
}
312
313 /* Converts from YUV (with color subsampling) such as produced by the WebPDecode
314 * routine into 32 bits per pixel RGBA data array. This data array can be
315 * directly used by the Leptonica Pix in-memory image format.
316 * Input:
317 * 1, 2, 3. Y, U, V: the input data buffers
318 * 4. pixwpl: the desired words per line corresponding to the supplied
319 * output pixdata.
320 * 5. width, height: the dimensions of the image whose data resides in Y,
321 * U, V.
322 * Output:
323 * 6. pixdata: the output data buffer. Caller should allocate
324 * height * pixwpl bytes of memory before calling this routine.
325 */
/* Converts YUV420 planes to a 32-bit RGBA buffer laid out with
 * words_per_line words per output row. The caller must have allocated
 * height * words_per_line words in pixdata. */
void YUV420toRGBA(uint8* Y,
                  uint8* U,
                  uint8* V,
                  int words_per_line,
                  int width,
                  int height,
                  uint32* pixdata) {
    const int y_stride = width;
    const int uv_stride = (width + 1) >> 1;
    int row;

    if (!init_done) {
        InitTables();
    }

    /* Chroma is upsampled 2x vertically here: each U/V row feeds two
     * consecutive output rows. */
    for (row = 0; row < height; ++row) {
        const int uv_row = row >> 1;
        YUV420toRGBLine(Y + row * y_stride,
                        U + uv_row * uv_stride,
                        V + uv_row * uv_stride,
                        width,
                        pixdata + row * words_per_line);
    }
}
353
/* Variant of YUV420toRGBA() that writes straight into the rows of a gd
 * truecolor image. Silently returns if im is not truecolor. */
void gd_YUV420toRGBA(uint8* Y,
                     uint8* U,
                     uint8* V,
                     gdImagePtr im) {
    const int width = im->sx;
    const int height = im->sy;
    const int y_stride = width;
    const int uv_stride = (width + 1) >> 1;
    int row;

    /* output im must be truecolor */
    if (!im->trueColor) {
        return;
    }

    if (!init_done) {
        InitTables();
    }

    /* Chroma is upsampled 2x vertically: each U/V row feeds two
     * consecutive output rows. */
    for (row = 0; row < height; ++row) {
        const int uv_row = row >> 1;
        YUV420toRGBLine(Y + row * y_stride,
                        U + uv_row * uv_stride,
                        V + uv_row * uv_stride,
                        width,
                        im->tpixels[row]);
    }
}
385
/* Decodes a raw VP8 bitstream into newly allocated YUV420 planes.
 * On success, *p_Y points to a single calloc'd buffer that holds the Y
 * plane followed by the U and V planes (*p_U and *p_V point inside it);
 * *p_width / *p_height receive the frame dimensions. The caller owns and
 * must free() *p_Y only. p_Y/p_U/p_V must point to NULL pointers on entry.
 * Returns webp_success or webp_failure. */
static WebPResult VPXDecode(const uint8* data,
                            int data_size,
                            uint8** p_Y,
                            uint8** p_U,
                            uint8** p_V,
                            int* p_width,
                            int* p_height) {
    vpx_codec_ctx_t dec;
    vp8_postproc_cfg_t ppcfg;
    WebPResult result = webp_failure;

    /* 10 bytes is the minimum VP8 keyframe header size (see WebPGetInfo). */
    if (!data || data_size <= 10 || !p_Y || !p_U || !p_V
        || *p_Y != NULL || *p_U != NULL || *p_V != NULL) {
        return webp_failure;
    }

    if (vpx_codec_dec_init(&dec,
                           &vpx_codec_vp8_dx_algo, NULL, 0) != VPX_CODEC_OK) {
        return webp_failure;
    }

    /* Disable post-processing: we want the raw decoded pixels. */
    ppcfg.post_proc_flag = VP8_NOFILTERING;
    vpx_codec_control(&dec, VP8_SET_POSTPROC, &ppcfg);


    if (vpx_codec_decode(&dec, data, data_size, NULL, 0) == VPX_CODEC_OK) {
        vpx_codec_iter_t iter = NULL;
        vpx_image_t* const img = vpx_codec_get_frame(&dec, &iter);
        if (img) {
            int y_width = img->d_w;
            int y_height = img->d_h;
            int y_stride = y_width;
            int uv_width = (y_width + 1) >> 1;
            int uv_stride = uv_width;
            int uv_height = ((y_height + 1) >> 1);
            int y;

            *p_width = y_width;
            *p_height = y_height;
            /* Single allocation for all three planes: Y, then U, then V. */
            if ((*p_Y = (uint8 *)(calloc(y_stride * y_height
                                         + 2 * uv_stride * uv_height,
                                         sizeof(uint8)))) != NULL) {
                *p_U = *p_Y + y_height * y_stride;
                *p_V = *p_U + uv_height * uv_stride;
                /* Copy row by row: the decoder's stride may exceed width. */
                for (y = 0; y < y_height; ++y) {
                    memcpy(*p_Y + y * y_stride,
                           img->planes[0] + y * img->stride[0],
                           y_width);
                }
                for (y = 0; y < uv_height; ++y) {
                    memcpy(*p_U + y * uv_stride,
                           img->planes[1] + y * img->stride[1],
                           uv_width);
                    memcpy(*p_V + y * uv_stride,
                           img->planes[2] + y * img->stride[2],
                           uv_width);
                }
                result = webp_success;
            }
        }
    }
    vpx_codec_destroy(&dec);

    return result;
}
451
WebPDecode(const uint8 * data,int data_size,uint8 ** p_Y,uint8 ** p_U,uint8 ** p_V,int * p_width,int * p_height)452 WebPResult WebPDecode(const uint8* data,
453 int data_size,
454 uint8** p_Y,
455 uint8** p_U,
456 uint8** p_V,
457 int* p_width,
458 int* p_height) {
459
460 const uint32 chunk_size = SkipRiffHeader(&data, &data_size);
461 if (!chunk_size) {
462 return webp_failure; /* unsupported RIFF header */
463 }
464
465 return VPXDecode(data, data_size, p_Y, p_U, p_V, p_width, p_height);
466 }
467
468 /*---------------------------------------------------------------------*
469 * Writing WebP *
470 *---------------------------------------------------------------------*/
471
472 /* Takes a pair of RGBA row data as input and generates 2 rows of Y data and one
473 * row of subsampled U, V data as output
474 * Input:
475 * 1, 2. rgb_line1, rgb_line2 - input rgba rows
476 * 3. width - image width
 * Output:
478 * 4, 5, 6: Output Y, U, V row
479 */
/* Converts a pair of adjacent RGBA rows into two Y rows plus one half-width
 * U and V row (4:2:0 subsampling). Each chroma sample is derived from the
 * sum of the four pixels of a 2x2 block; clip_uv()'s fixed-point scale
 * absorbs the extra factor of 4. For an odd width the rightmost column's
 * two pixels are counted twice (the 2 * sum below) to keep the same scale. */
static void RGBALinepairToYUV420(uint32* rgb_line1,
                                 uint32* rgb_line2,
                                 int width,
                                 uint8* Y_dst1,
                                 uint8* Y_dst2,
                                 uint8* u_dst,
                                 uint8* v_dst) {
    int x;
    for (x = (width >> 1); x > 0; --x) {
        /* 2x2 block sums for the subsampled chroma. */
        const int sum_r =
            GetRed(rgb_line1 + 0) + GetRed(rgb_line1 + 1) +
            GetRed(rgb_line2 + 0) + GetRed(rgb_line2 + 1);
        const int sum_g =
            GetGreen(rgb_line1 + 0) + GetGreen(rgb_line1 + 1) +
            GetGreen(rgb_line2 + 0) + GetGreen(rgb_line2 + 1);
        const int sum_b =
            GetBlue(rgb_line1 + 0) + GetBlue(rgb_line1 + 1) +
            GetBlue(rgb_line2 + 0) + GetBlue(rgb_line2 + 1);

        /* Full-resolution luma for all four pixels. */
        Y_dst1[0] = GetLumaYfromPtr(rgb_line1 + 0);
        Y_dst1[1] = GetLumaYfromPtr(rgb_line1 + 1);
        Y_dst2[0] = GetLumaYfromPtr(rgb_line2 + 0);
        Y_dst2[1] = GetLumaYfromPtr(rgb_line2 + 1);

        *u_dst++ = GetChromaU(sum_r, sum_g, sum_b);
        *v_dst++ = GetChromaV(sum_r, sum_g, sum_b);

        rgb_line1 += 2;
        rgb_line2 += 2;
        Y_dst1 += 2;
        Y_dst2 += 2;
    }

    if (width & 1) { /* rightmost pixel. */
        const int sum_r = GetRed(rgb_line1) + GetRed(rgb_line2);
        const int sum_g = GetGreen(rgb_line1) + GetGreen(rgb_line2);
        const int sum_b = GetBlue(rgb_line1) + GetBlue(rgb_line2);

        Y_dst1[0] = GetLumaYfromPtr(rgb_line1);
        Y_dst2[0] = GetLumaYfromPtr(rgb_line2);
        *u_dst = GetChromaU(2 * sum_r, 2 * sum_g, 2 * sum_b);
        *v_dst = GetChromaV(2 * sum_r, 2 * sum_g, 2 * sum_b);
    }
}
524
/* Generates Y, U, V data (with color subsampling) from a gd image; palette
 * images are first converted to a temporary truecolor copy. The resulting
 * YUV data can be directly fed into the WebPEncode routine.
 * Input:
 *    1. im2: the input gd image
 * Output:
 *    2, 3, 4. Output YUV data buffers, allocated by the caller
 */
/* Converts a gd image (truecolor or palette) to YUV420 planes Y/U/V. */
void gd_RGBAToYUV420(gdImagePtr im2,
                     uint8* Y,
                     uint8* U,
                     uint8* V) {
    const int y_width = im2->sx;
    const int y_height = im2->sy;
    const int y_stride = y_width;
    const int uv_stride = (y_width + 1) >> 1;
    int row;
    gdImagePtr im = NULL;
    int free_im = 0;

    if (im2->trueColor) {
        im = im2;
    } else {
        /* Todo: Replace the color/YUV functions with our own and simplify
           that should boost the conversion a bit as well, not only for
           palette image. */
        im = gdImageCreateTrueColor(im2->sx, im2->sy);
        if (!im) {
            php_gd_error("gd-webp error: cannot convert palette input to truecolor");
            return;
        }
        gdImageCopy(im, im2, 0, 0, 0, 0, im->sx, im->sy);
        free_im = 1;
    }

    /* Rows are processed in pairs; each pair yields one U/V row. */
    for (row = 0; row < (y_height >> 1); ++row) {
        RGBALinepairToYUV420(im->tpixels[2 * row],
                             im->tpixels[2 * row + 1],
                             y_width,
                             Y + 2 * row * y_stride,
                             Y + (2 * row + 1) * y_stride,
                             U + row * uv_stride,
                             V + row * uv_stride);
    }
    if (y_height & 1) { /* odd height: feed the last row in twice */
        RGBALinepairToYUV420(im->tpixels[y_height - 1],
                             im->tpixels[y_height - 1],
                             y_width,
                             Y + (y_height - 1) * y_stride,
                             Y + (y_height - 1) * y_stride,
                             U + (y_height >> 1) * uv_stride,
                             V + (y_height >> 1) * uv_stride);
    }
    if (free_im) {
        gdImageDestroy(im);
    }
}
584
585 /* Generates Y, U, V data (with color subsampling) from 32 bits
586 * per pixel RGBA data buffer. The resulting YUV data can be directly fed into
587 * the WebPEncode routine.
588 * Input:
 *      1. pixdata: input rgba data buffer
590 * 2. words per line corresponding to pixdata
591 * 3, 4. image width and height respectively
592 * Output:
593 * 5, 6, 7. Output YUV data buffers
594 */
/* Converts a 32-bit RGBA buffer (words_per_line words per row) into YUV420
 * planes Y/U/V; the output can be fed directly into WebPEncode(). */
void RGBAToYUV420(uint32* pixdata,
                  int words_per_line,
                  int width,
                  int height,
                  uint8* Y,
                  uint8* U,
                  uint8* V) {
    const int y_stride = width;
    const int uv_stride = (width + 1) >> 1;
    int row;

    /* Rows are processed in pairs; each pair yields one U/V row. */
    for (row = 0; row < (height >> 1); ++row) {
        RGBALinepairToYUV420(pixdata + 2 * row * words_per_line,
                             pixdata + (2 * row + 1) * words_per_line,
                             width,
                             Y + 2 * row * y_stride,
                             Y + (2 * row + 1) * y_stride,
                             U + row * uv_stride,
                             V + row * uv_stride);
    }
    if (height & 1) { /* odd height: feed the last row in twice */
        RGBALinepairToYUV420(pixdata + (height - 1) * words_per_line,
                             pixdata + (height - 1) * words_per_line,
                             width,
                             Y + (height - 1) * y_stride,
                             Y + (height - 1) * y_stride,
                             U + (height >> 1) * uv_stride,
                             V + (height >> 1) * uv_stride);
    }
}
628
/* Thin wrapper around vpx_codec_control_() that maps the libvpx status
 * code onto webp_success / webp_failure. */
static int codec_ctl(vpx_codec_ctx_t *enc,
                     enum vp8e_enc_control_id id,
                     int value) {
    if (vpx_codec_control_(enc, id, value) != VPX_CODEC_OK) {
        return webp_failure;
    }
    return webp_success;
}
638
/* Configures the encoder for still-image use: QP is used as both the
 * minimum and maximum quantizer (fixed quality), keyframe placement is
 * fixed, and two worker threads are requested. */
static void SetupParams(vpx_codec_enc_cfg_t* cfg,
                        int QP) {
    cfg->g_threads = 2;
    cfg->rc_min_quantizer = QP;
    cfg->rc_max_quantizer = QP;
    cfg->kf_mode = VPX_KF_FIXED;
}
646
647 /* VPXEncode: Takes a Y, U, V data buffers (with color components U and V
648 * subsampled to 1/2 resolution) and generates the VPX string.
649 * Output VPX string is placed in the *p_out buffer. container_size
650 * indicates number of bytes to be left blank at the beginning of
651 * *p_out buffer to accommodate for a container header.
652 *
653 * Return: success/failure
654 */
VPXEncode(const uint8 * Y,const uint8 * U,const uint8 * V,int y_width,int y_height,int y_stride,int uv_width,int uv_height,int uv_stride,int QP,int container_size,unsigned char ** p_out,int * p_out_size_bytes)655 static WebPResult VPXEncode(const uint8* Y,
656 const uint8* U,
657 const uint8* V,
658 int y_width,
659 int y_height,
660 int y_stride,
661 int uv_width,
662 int uv_height,
663 int uv_stride,
664 int QP,
665 int container_size,
666 unsigned char** p_out,
667 int* p_out_size_bytes) {
668 vpx_codec_iface_t* iface = &vpx_codec_vp8_cx_algo;
669 vpx_codec_err_t res;
670 vpx_codec_enc_cfg_t cfg;
671 vpx_codec_ctx_t enc;
672 WebPResult result = webp_failure;
673 vpx_image_t img;
674
675 *p_out = NULL;
676 *p_out_size_bytes = 0;
677
678
679 /* validate input parameters. */
680 if (!p_out || !Y || !U || !V
681 || y_width <= 0 || y_height <= 0 || uv_width <= 0 || uv_height <= 0
682 || y_stride < y_width || uv_stride < uv_width
683 || QP < 0 || QP > 63) {
684 return webp_failure;
685 }
686
687 res = vpx_codec_enc_config_default(iface, &cfg, 0);
688 if (res != VPX_CODEC_OK) {
689 return webp_failure;
690 }
691
692 SetupParams(&cfg, QP);
693 cfg.g_w = y_width;
694 cfg.g_h = y_height;
695
696 res = vpx_codec_enc_init(&enc, iface, &cfg, 0);
697
698 if (res == VPX_CODEC_OK) {
699 codec_ctl(&enc, VP8E_SET_CPUUSED, 3);
700 codec_ctl(&enc, VP8E_SET_NOISE_SENSITIVITY, 0);
701 codec_ctl(&enc, VP8E_SET_SHARPNESS, 0);
702 codec_ctl(&enc, VP8E_SET_ENABLEAUTOALTREF, 0);
703 codec_ctl(&enc, VP8E_SET_ARNR_MAXFRAMES, 0);
704 codec_ctl(&enc, VP8E_SET_ARNR_TYPE, 0);
705 codec_ctl(&enc, VP8E_SET_ARNR_STRENGTH, 0);
706 codec_ctl(&enc, VP8E_SET_STATIC_THRESHOLD, 0);
707 codec_ctl(&enc, VP8E_SET_TOKEN_PARTITIONS, 2);
708
709 vpx_img_wrap(&img, VPX_IMG_FMT_I420,
710 y_width, y_height, 16, (uint8*)(Y));
711 img.planes[VPX_PLANE_Y] = (uint8*)(Y);
712 img.planes[VPX_PLANE_U] = (uint8*)(U);
713 img.planes[VPX_PLANE_V] = (uint8*)(V);
714 img.stride[VPX_PLANE_Y] = y_stride;
715 img.stride[VPX_PLANE_U] = uv_stride;
716 img.stride[VPX_PLANE_V] = uv_stride;
717
718 res = vpx_codec_encode(&enc, &img, 0, 1, 0, VPX_DL_BEST_QUALITY);
719
720 if (res == VPX_CODEC_OK) {
721 vpx_codec_iter_t iter = NULL;
722 const vpx_codec_cx_pkt_t* pkt = vpx_codec_get_cx_data(&enc, &iter);
723 if (pkt != NULL) {
724 *p_out = (unsigned char*)(calloc(container_size + pkt->data.frame.sz,
725 1));
726
727 memcpy(*p_out + container_size,
728 (const void*)(pkt->data.frame.buf),
729 pkt->data.frame.sz);
730 *p_out_size_bytes = container_size + pkt->data.frame.sz;
731
732 result = webp_success;
733 }
734 }
735 }
736
737 vpx_codec_destroy(&enc);
738
739 return result;
740 }
741
/* Encodes YUV420 planes as a complete WebP (RIFF-wrapped VP8) byte stream.
 * On success *p_out receives an allocated buffer of *p_out_size_bytes bytes
 * which the caller must free(); if psnr is non-NULL it receives the PSNR
 * between the input and the re-decoded output. QP must be in [0..63].
 * Returns webp_success or webp_failure. */
WebPResult WebPEncode(const uint8* Y,
                      const uint8* U,
                      const uint8* V,
                      int y_width,
                      int y_height,
                      int y_stride,
                      int uv_width,
                      int uv_height,
                      int uv_stride,
                      int QP,
                      unsigned char** p_out,
                      int* p_out_size_bytes,
                      double *psnr) {

    /* VPXEncode leaves this many bytes zeroed at the front of the buffer
     * for the RIFF container header written below. */
    const int kRiffHeaderSize = 20;

    if (VPXEncode(Y, U, V,
                  y_width, y_height, y_stride,
                  uv_width, uv_height, uv_stride,
                  QP, kRiffHeaderSize,
                  p_out, p_out_size_bytes) != webp_success) {
        return webp_failure;
    } else {
        /* Write RIFF header */
        const int img_size_bytes = *p_out_size_bytes - kRiffHeaderSize;
        const int chunk_size = (img_size_bytes + 1) & ~1; /* make size even */
        const int riff_size = chunk_size + 12;
        /* RIFF layout: "RIFF" + riff size + "WEBP" + "VP8 " + chunk size,
         * all sizes 32-bit little-endian. */
        const uint8_t kRiffHeader[20] = { 'R', 'I', 'F', 'F',
                                          (riff_size >> 0) & 255,
                                          (riff_size >> 8) & 255,
                                          (riff_size >> 16) & 255,
                                          (riff_size >> 24) & 255,
                                          'W', 'E', 'B', 'P',
                                          'V', 'P', '8', ' ',
                                          (chunk_size >> 0) & 255,
                                          (chunk_size >> 8) & 255,
                                          (chunk_size >> 16) & 255,
                                          (chunk_size >> 24) & 255 };
        memcpy(*p_out, kRiffHeader, kRiffHeaderSize);

        /* Chunk data must be even-sized; pad with one zero byte if odd. */
        if (img_size_bytes & 1) { /* write a padding byte */
            const int new_size = *p_out_size_bytes + 1;
            unsigned char* p = (unsigned char*)realloc(*p_out, new_size);
            if (p == NULL) {
                free(*p_out);
                *p_out = NULL;
                *p_out_size_bytes = 0;
                return webp_failure;
            }
            p[new_size - 1] = 0;
            *p_out = p;
            *p_out_size_bytes = new_size;
        }

        if (psnr) {
            *psnr = WebPGetPSNR(Y, U, V, *p_out, *p_out_size_bytes);
        }

        return webp_success;
    }
}
803
/* Compresses full-range YUV420 values into the reduced range used by the
 * codec: luma [0..255] -> [16..235], chroma scaled by 7/8 around 128. */
void AdjustColorspace(uint8* Y, uint8* U, uint8* V, int width, int height) {
    const int y_stride = width;
    const int uv_width = (width + 1) >> 1;
    const int uv_height = (height + 1) >> 1;
    const int uv_stride = uv_width;
    int x, y;

    /* convert luma: scale by 55/64 (~219/255) with rounding, then add 16 */
    for (y = 0; y < height; ++y) {
        uint8* const yrow = Y + y * y_stride;
        for (x = 0; x < width; ++x) {
            yrow[x] = ((yrow[x] * 55 + 32) >> 6) + 16;
        }
    }
    /* convert chroma: scale by 7/8 around the midpoint */
    for (y = 0; y < uv_height; ++y) {
        uint8* const urow = U + y * uv_stride;
        uint8* const vrow = V + y * uv_stride;
        for (x = 0; x < uv_width; ++x) {
            urow[x] = (((urow[x] - 127) * 7) >> 3) + 128;
            vrow[x] = (((vrow[x] - 127) * 7) >> 3) + 128;
        }
    }
}
831
/* Inverse of AdjustColorspace: expands reduced-range YUV420 back to full
 * range (luma [16..235] -> [0..255]; chroma stretched by 73/64 around 128),
 * clamping every result to [0..255]. */
void AdjustColorspaceBack(uint8* Y, uint8* U, uint8* V, int width, int height) {
    const int y_stride = width;
    const int uv_width = (width + 1) >> 1;
    const int uv_height = (height + 1) >> 1;
    const int uv_stride = uv_width;
    int x, y;

    /* convert luma: maps [16..235] to [0..255] (149/128 ~= 255/219) */
    for (y = 0; y < height; ++y) {
        uint8* const yrow = Y + y * y_stride;
        for (x = 0; x < width; ++x) {
            const int val = ((yrow[x] - 16) * 149 + 64) >> 7;
            yrow[x] = (val < 0) ? 0 : (val > 255) ? 255 : val;
        }
    }
    /* convert chroma: stretch by 73/64 around the midpoint */
    for (y = 0; y < uv_height; ++y) {
        uint8* const urow = U + y * uv_stride;
        uint8* const vrow = V + y * uv_stride;
        for (x = 0; x < uv_width; ++x) {
            const int u = (((urow[x] - 128) * 73) >> 6) + 128;
            const int v = (((vrow[x] - 128) * 73) >> 6) + 128;
            urow[x] = (u < 0) ? 0 : (u > 255) ? 255 : u;
            vrow[x] = (v < 0) ? 0 : (v > 255) ? 255 : v;
        }
    }
}
862
/* Parses just enough of the WebP/VP8 headers to report the image size.
 * Accepts both RIFF-wrapped and bare VP8 data (via SkipRiffHeader).
 * On success returns webp_success and stores the dimensions through the
 * optional width/height pointers; on any parse error returns webp_failure
 * with *width / *height set to 0. */
WebPResult WebPGetInfo(const uint8* data,
                       int data_size,
                       int *width,
                       int *height) {
    const uint32 chunk_size = SkipRiffHeader(&data, &data_size);

    if (width) *width = 0;
    if (height) *height = 0;

    if (!chunk_size) {
        return webp_failure; /* unsupported RIFF header */
    }

    /* Validate raw video data */
    if (data_size < 10) {
        return webp_failure; /* not enough data */
    }

    /* check signature: bytes 3..5 are the VP8 start code 0x9d 0x01 0x2a */
    if (data[3] != 0x9d || data[4] != 0x01 || data[5] != 0x2a) {
        return webp_failure; /* Wrong signature. */
    } else {
        /* First 3 bytes form the VP8 frame tag: bit 0 = keyframe flag
         * (0 means keyframe), bits 1..3 = profile, bit 4 = show_frame,
         * bits 5..23 = first partition length. */
        const uint32 bits = data[0] | (data[1] << 8) | (data[2] << 16);

        if ((bits & 1)) { /* Not a keyframe. */
            return webp_failure;
        } else {
            const int profile = (bits >> 1) & 7;
            const int show_frame = (bits >> 4) & 1;
            const uint32 partition_length = (bits >> 5);

            if (profile > 3) {
                return webp_failure; /* unknown profile */
            }
            if (!show_frame) {
                return webp_failure; /* first frame is invisible! */
            }
            if (partition_length >= chunk_size) {
                return webp_failure; /* inconsistent size information. */
            } else {
                /* Width and height are 14-bit little-endian fields stored
                 * at bytes 6..7 and 8..9 respectively. */
                const int w = ((data[7] << 8) | data[6]) & 0x3fff;
                const int h = ((data[9] << 8) | data[8]) & 0x3fff;
                if (width) *width = w;
                if (height) *height = h;

                return webp_success;
            }
        }
    }
    return webp_failure; /* not reached */
}
914 #endif /* HAVE_LIBVPX */
915