webpimg.c 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914
  1. /*===========================================================================*
  2. - Copyright 2010 Google Inc.
  3. -
  4. - This code is licensed under the same terms as WebM:
  5. - Software License Agreement: http://www.webmproject.org/license/software/
  6. - Additional IP Rights Grant: http://www.webmproject.org/license/additional/
  7. *===========================================================================*/
  8. /*
  9. * Encoding/Decoding of WebP still image compression format.
  10. *
  11. * 1. WebPDecode: Takes an array of bytes (string) corresponding to the WebP
  12. * encoded image and generates output in the YUV format with
  13. * the color components U, V subsampled to 1/2 resolution along
  14. * each dimension.
  15. *
  16. * 2. YUV420toRGBA: Converts from YUV (with color subsampling) such as produced
  17. * by the WebPDecode routine into 32 bits per pixel RGBA data
  18. * array. This data array can be directly used by the Leptonica
  19. * Pix in-memory image format.
  20. *
  21. * 3. WebPEncode: Takes a Y, U, V data buffers (with color components U and V
  22. * subsampled to 1/2 resolution) and generates the WebP string
  23. *
  24. * 4. RGBAToYUV420: Generates Y, U, V data (with color subsampling) from 32 bits
  25. * per pixel RGBA data buffer. The resulting YUV data can be
  26. * directly fed into the WebPEncode routine.
  27. *
* 5. AdjustColorspace: Remaps Y, U, V plane data in place from full range
*                      to the reduced "studio swing" range used by the codec.
*
* 6. AdjustColorspaceBack: Inverse of AdjustColorspace; expands reduced-range
*                      Y, U, V plane data back to full range in place.
  31. */
  32. #include "gd.h"
  33. #ifdef HAVE_LIBVPX
  34. #include "webpimg.h"
  35. #include <math.h>
  36. #include <stdio.h>
  37. #include <stdlib.h>
  38. #include <string.h>
  39. #include <sys/stat.h>
  40. #include "vpx/vpx_decoder.h"
  41. #include "vpx/vp8dx.h"
  42. #include "vpx/vpx_encoder.h"
  43. #include "vpx/vp8cx.h"
  44. #include "gd.h"
  45. /*---------------------------------------------------------------------*
  46. * color conversions *
  47. *---------------------------------------------------------------------*/
  48. #ifndef inline
  49. # define inline __inline
  50. #endif
  51. static inline int clip(float v, int a, int b) {
  52. return (v > b) ? b : (v < 0) ? 0 : (int)(v);
  53. }
/* Byte positions of each channel within a 32-bit ARGB pixel,
 * counted from the most significant byte. */
enum {
  COLOR_RED = 1,
  COLOR_GREEN = 2,
  COLOR_BLUE = 3,
  ALPHA_CHANNEL = 0
};
/* endian neutral extractions of ARGB from a 32 bit pixel */
static const uint32 RED_SHIFT =
    8 * (sizeof(uint32) - 1 - COLOR_RED);           /* 16 */
static const uint32 GREEN_SHIFT =
    8 * (sizeof(uint32) - 1 - COLOR_GREEN);         /* 8 */
static const uint32 BLUE_SHIFT =
    8 * (sizeof(uint32) - 1 - COLOR_BLUE);          /* 0 */
static const uint32 ALPHA_SHIFT =
    8 * (sizeof(uint32) - 1 - ALPHA_CHANNEL);       /* 24 */
/* Channel accessors for a gd truecolor pixel. They delegate to the
 * gdTrueColor* macros from gd.h, so extraction is endian-neutral. */
static inline int GetRed(const uint32* rgba) {
  return gdTrueColorGetRed(*rgba);
}
static inline int GetGreen(const uint32* rgba) {
  return gdTrueColorGetGreen(*rgba);
}
static inline int GetBlue(const uint32* rgba) {
  return gdTrueColorGetBlue(*rgba);
}
  78. enum { YUV_FRAC = 16 };
  79. static inline int clip_uv(int v) {
  80. v = (v + (257 << (YUV_FRAC + 2 - 1))) >> (YUV_FRAC + 2);
  81. return ((v & ~0xff) == 0) ? v : v < 0 ? 0u : 255u;
  82. }
  83. /* YUV <-----> RGB conversions */
  84. /* The exact naming is Y'CbCr, following the ITU-R BT.601 standard.
  85. * More information at: http://en.wikipedia.org/wiki/YCbCr
  86. */
  87. static inline int GetLumaY(int r, int g, int b) {
  88. const int kRound = (1 << (YUV_FRAC - 1)) + (16 << YUV_FRAC);
  89. // Y = 0.2569 * R + 0.5044 * G + 0.0979 * B + 16
  90. const int luma = 16839 * r + 33059 * g + 6420 * b;
  91. return (luma + kRound) >> YUV_FRAC;
  92. }
  93. static inline int GetLumaYfromPtr(uint32* rgba) {
  94. const int r = GetRed(rgba);
  95. const int g = GetGreen(rgba);
  96. const int b = GetBlue(rgba);
  97. return GetLumaY(r, g, b);
  98. }
  99. static inline int GetChromaU(int r, int g, int b) {
  100. // U = -0.1483 * R - 0.2911 * G + 0.4394 * B + 128
  101. return clip_uv(-9719 * r - 19081 * g + 28800 * b);
  102. }
  103. static inline int GetChromaV(int r, int g, int b) {
  104. // V = 0.4394 * R - 0.3679 * G - 0.0715 * B + 128
  105. return clip_uv(+28800 * r - 24116 * g - 4684 * b);
  106. }
  107. /* Converts YUV to RGB and writes into a 32 bit pixel in endian
  108. * neutral fashion
  109. */
/* Fixed-point scale for the inverse transform, plus the value range of
 * y + chroma_offset before clamping (drives the size of kClip). */
enum { RGB_FRAC = 16, RGB_HALF = (1 << RGB_FRAC) / 2,
       RGB_RANGE_MIN = -227, RGB_RANGE_MAX = 256 + 226 };
static int init_done = 0;   /* set once InitTables() has filled the LUTs */
static int16_t kVToR[256], kUToB[256];   /* V->R, U->B offsets, already >> RGB_FRAC */
static int32_t kVToG[256], kUToG[256];   /* V->G, U->G partial terms, still fixed point */
static uint8_t kClip[RGB_RANGE_MAX - RGB_RANGE_MIN]; /* combined scale + clamp table */
  116. static void InitTables() {
  117. int i;
  118. for (i = 0; i < 256; ++i) {
  119. kVToR[i] = (89858 * (i - 128) + RGB_HALF) >> RGB_FRAC;
  120. kUToG[i] = -22014 * (i - 128) + RGB_HALF;
  121. kVToG[i] = -45773 * (i - 128);
  122. kUToB[i] = (113618 * (i - 128) + RGB_HALF) >> RGB_FRAC;
  123. }
  124. for (i = RGB_RANGE_MIN; i < RGB_RANGE_MAX; ++i) {
  125. const int j = ((i - 16) * 76283 + RGB_HALF) >> RGB_FRAC;
  126. kClip[i - RGB_RANGE_MIN] = (j < 0) ? 0 : (j > 255) ? 255 : j;
  127. }
  128. init_done = 1;
  129. }
/* Convert one (Y, U, V) triple to a packed RGB pixel using the tables
 * built by InitTables(). The alpha byte of *dst is left as 0. */
static void ToRGB(int y, int u, int v, uint32* const dst) {
  const int r_off = kVToR[v];
  const int g_off = (kVToG[v] + kUToG[u]) >> RGB_FRAC; /* G takes both U and V terms */
  const int b_off = kUToB[u];
  /* kClip indexing performs the (Y-16)*1.164 scaling and the clamp in one step */
  const int r = kClip[y + r_off - RGB_RANGE_MIN];
  const int g = kClip[y + g_off - RGB_RANGE_MIN];
  const int b = kClip[y + b_off - RGB_RANGE_MIN];
  *dst = (r << RED_SHIFT) | (g << GREEN_SHIFT) | (b << BLUE_SHIFT);
}
  139. static inline uint32 get_le32(const uint8* const data) {
  140. return data[0] | (data[1] << 8) | (data[2] << 16) | (data[3] << 24);
  141. }
  142. /* Returns the difference (in dB) between two images represented in YUV format
  143. *
  144. * Input:
  145. * Y1/U1/V1: The Y/U/V data of the first image
  146. * Y2/U2/V2: The Y/U/V data of the second image
  147. *
  148. * Returns the PSNR (http://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio)
  149. * value computed between the two images
  150. */
  151. double GetPSNRYuv(const uint8* Y1,
  152. const uint8* U1,
  153. const uint8* V1,
  154. const uint8* Y2,
  155. const uint8* U2,
  156. const uint8* V2,
  157. int y_width,
  158. int y_height) {
  159. int x, y, row_idx;
  160. const int uv_width = ((y_width + 1) >> 1);
  161. const int uv_height = ((y_height + 1) >> 1);
  162. double sse = 0., count = 0.;
  163. for (y = 0; y < y_height; ++y) {
  164. count += y_width;
  165. row_idx = y * y_width;
  166. for (x = 0; x < y_width; ++x) {
  167. double diff = Y1[row_idx + x] - Y2[row_idx + x];
  168. sse += diff * diff;
  169. }
  170. }
  171. for (y = 0; y < uv_height; ++y) {
  172. count += 2 * uv_width;
  173. row_idx = y * uv_width;
  174. for (x = 0; x < uv_width; ++x) {
  175. const double diff_U = U1[row_idx + x] - U2[row_idx + x];
  176. const double diff_V = V1[row_idx + x] - V2[row_idx + x];
  177. sse += diff_U * diff_U + diff_V * diff_V;
  178. }
  179. }
  180. return -4.3429448 * log(sse / (255. * 255. * count));
  181. }
/* Returns the difference (in dB) between two images. One represented
 * using Y,U,V vectors and the other is webp image data.
 * Input:
 *   Y1/U1/V1: The Y/U/V data of the first image
 *   imgdata: data buffer containing webp image
 *   imgdata_size: size of the imgdata buffer
 *
 * Returns the PSNR value computed between the two images
 */
double WebPGetPSNR(const uint8* Y1,
                   const uint8* U1,
                   const uint8* V1,
                   uint8* imgdata,
                   int imgdata_size) {
  uint8* Y2 = NULL;
  uint8* U2 = NULL;
  uint8* V2 = NULL;
  int w = 0, h = 0;
  double psnr = 0;
  /* NOTE(review): WebPDecode's return value is not checked; on decode
   * failure Y2 stays NULL and GetPSNRYuv would dereference it. Callers
   * are expected to pass data freshly produced by WebPEncode. */
  WebPDecode(imgdata,
             imgdata_size,
             &Y2,
             &U2,
             &V2,
             &w,
             &h);
  psnr = GetPSNRYuv(Y1, U1, V1, Y2, U2, V2, w, h);
  /* Y2, U2 and V2 live in the single buffer allocated by the decoder
   * (see VPXDecode), so freeing Y2 releases all three planes. */
  free(Y2);
  return psnr;
}
  212. /*---------------------------------------------------------------------*
  213. * Reading WebP *
  214. *---------------------------------------------------------------------*/
/* RIFF layout is:
 *   0ffset  tag
 *   0...3   "RIFF" 4-byte tag
 *   4...7   size of image data (including metadata) starting at offset 8
 *   8...11  "WEBP"   our form-type signature
 *   12..15  "VP8 " 4-byte tags, describing the raw video format used
 *   16..19  size of the raw WebP image data, starting at offset 20
 *   20....  the WebP bytes
 * There can be extra chunks after the "VP8 " chunk (ICMT, ICOP, ...)
 * All 32-bits sizes are in little-endian order.
 * Note: chunk data must be padded to multiple of 2 in size
 */
/* Advances *data_ptr / *data_size_ptr past a RIFF/WEBP container header if
 * one is present. Returns 0 on a malformed container; otherwise returns the
 * "VP8 " chunk size -- or 0xffffffff (which is -1 as int, still nonzero to
 * callers testing !chunk_size) when there is no RIFF header at all, i.e. a
 * bare VP8 stream. */
int SkipRiffHeader(const uint8** data_ptr, int *data_size_ptr) {
  /* 20 bytes RIFF header 10 bytes VP8 header */
  const int kHeaderSize = (20 + 10);
  uint32 chunk_size = 0xffffffffu;  /* sentinel: "no RIFF header found" */
  if (*data_size_ptr >= kHeaderSize && !memcmp(*data_ptr, "RIFF", 4)) {
    if (memcmp(*data_ptr + 8, "WEBP", 4)) {
      return 0; /* wrong image file signature */
    } else {
      const uint32 riff_size = get_le32(*data_ptr + 4);
      if (memcmp(*data_ptr + 12, "VP8 ", 4)) {
        return 0; /* invalid compression format */
      }
      chunk_size = get_le32(*data_ptr + 16);
      /* Chunk may not exceed the container and must be even (RIFF pads
       * odd-sized chunk data with one byte). */
      if ((chunk_size > riff_size + 8) || (chunk_size & 1)) {
        return 0; /* inconsistent size information. */
      }
      /* We have a RIFF container. Skip it. */
      *data_ptr += 20;
      *data_size_ptr -= 20;
    }
  }
  return chunk_size;
}
  250. /* Generate RGBA row from an YUV row (with width upsampling of chrome data)
  251. * Input:
  252. * 1, 2, 3. y_src, u_src, v_src - Pointers to input Y, U, V row data
  253. * respectively. We reuse these variables, they iterate over all pixels in
  254. * the row.
  255. * 4. y_width: width of the Y image plane (aka image width)
  256. * Output:
  257. * 5. rgb_sat: pointer to the output rgb row. We reuse this variable, it
  258. * iterates over all pixels in the row.
  259. */
  260. static void YUV420toRGBLine(uint8* y_src,
  261. uint8* u_src,
  262. uint8* v_src,
  263. int y_width,
  264. uint32* rgb_dst) {
  265. int x;
  266. for (x = 0; x < (y_width >> 1); ++x) {
  267. const int U = u_src[0];
  268. const int V = v_src[0];
  269. ToRGB(y_src[0], U, V, rgb_dst);
  270. ToRGB(y_src[1], U, V, rgb_dst + 1);
  271. ++u_src;
  272. ++v_src;
  273. y_src += 2;
  274. rgb_dst += 2;
  275. }
  276. if (y_width & 1) { /* Rightmost pixel */
  277. ToRGB(y_src[0], (*u_src), (*v_src), rgb_dst);
  278. }
  279. }
  280. /* Converts from YUV (with color subsampling) such as produced by the WebPDecode
  281. * routine into 32 bits per pixel RGBA data array. This data array can be
  282. * directly used by the Leptonica Pix in-memory image format.
  283. * Input:
  284. * 1, 2, 3. Y, U, V: the input data buffers
  285. * 4. pixwpl: the desired words per line corresponding to the supplied
  286. * output pixdata.
  287. * 5. width, height: the dimensions of the image whose data resides in Y,
  288. * U, V.
  289. * Output:
  290. * 6. pixdata: the output data buffer. Caller should allocate
  291. * height * pixwpl bytes of memory before calling this routine.
  292. */
  293. void YUV420toRGBA(uint8* Y,
  294. uint8* U,
  295. uint8* V,
  296. int words_per_line,
  297. int width,
  298. int height,
  299. uint32* pixdata) {
  300. int y_width = width;
  301. int y_stride = y_width;
  302. int uv_width = ((y_width + 1) >> 1);
  303. int uv_stride = uv_width;
  304. int y;
  305. if (!init_done)
  306. InitTables();
  307. /* note that the U, V upsampling in height is happening here as the U, V
  308. * buffers sent to successive odd-even pair of lines is same.
  309. */
  310. for (y = 0; y < height; ++y) {
  311. YUV420toRGBLine(Y + y * y_stride,
  312. U + (y >> 1) * uv_stride,
  313. V + (y >> 1) * uv_stride,
  314. width,
  315. pixdata + y * words_per_line);
  316. }
  317. }
  318. void gd_YUV420toRGBA(uint8* Y,
  319. uint8* U,
  320. uint8* V,
  321. gdImagePtr im) {
  322. int width = im->sx;
  323. int height = im->sy;
  324. int y_width = width;
  325. int y_stride = y_width;
  326. int uv_width = ((y_width + 1) >> 1);
  327. int uv_stride = uv_width;
  328. int y;
  329. /* output im must be truecolor */
  330. if (!im->trueColor) {
  331. return;
  332. }
  333. if (!init_done)
  334. InitTables();
  335. /* note that the U, V upsampling in height is happening here as the U, V
  336. * buffers sent to successive odd-even pair of lines is same.
  337. */
  338. for (y = 0; y < height; ++y) {
  339. YUV420toRGBLine(Y + y * y_stride,
  340. U + (y >> 1) * uv_stride,
  341. V + (y >> 1) * uv_stride,
  342. width,
  343. im->tpixels[y]);
  344. }
  345. }
  346. static WebPResult VPXDecode(const uint8* data,
  347. int data_size,
  348. uint8** p_Y,
  349. uint8** p_U,
  350. uint8** p_V,
  351. int* p_width,
  352. int* p_height) {
  353. vpx_codec_ctx_t dec;
  354. vp8_postproc_cfg_t ppcfg;
  355. WebPResult result = webp_failure;
  356. if (!data || data_size <= 10 || !p_Y || !p_U || !p_V
  357. || *p_Y != NULL || *p_U != NULL || *p_V != NULL) {
  358. return webp_failure;
  359. }
  360. if (vpx_codec_dec_init(&dec,
  361. &vpx_codec_vp8_dx_algo, NULL, 0) != VPX_CODEC_OK) {
  362. return webp_failure;
  363. }
  364. ppcfg.post_proc_flag = VP8_NOFILTERING;
  365. vpx_codec_control(&dec, VP8_SET_POSTPROC, &ppcfg);
  366. if (vpx_codec_decode(&dec, data, data_size, NULL, 0) == VPX_CODEC_OK) {
  367. vpx_codec_iter_t iter = NULL;
  368. vpx_image_t* const img = vpx_codec_get_frame(&dec, &iter);
  369. if (img) {
  370. int y_width = img->d_w;
  371. int y_height = img->d_h;
  372. int y_stride = y_width;
  373. int uv_width = (y_width + 1) >> 1;
  374. int uv_stride = uv_width;
  375. int uv_height = ((y_height + 1) >> 1);
  376. int y;
  377. *p_width = y_width;
  378. *p_height = y_height;
  379. if ((*p_Y = (uint8 *)(calloc(y_stride * y_height
  380. + 2 * uv_stride * uv_height,
  381. sizeof(uint8)))) != NULL) {
  382. *p_U = *p_Y + y_height * y_stride;
  383. *p_V = *p_U + uv_height * uv_stride;
  384. for (y = 0; y < y_height; ++y) {
  385. memcpy(*p_Y + y * y_stride,
  386. img->planes[0] + y * img->stride[0],
  387. y_width);
  388. }
  389. for (y = 0; y < uv_height; ++y) {
  390. memcpy(*p_U + y * uv_stride,
  391. img->planes[1] + y * img->stride[1],
  392. uv_width);
  393. memcpy(*p_V + y * uv_stride,
  394. img->planes[2] + y * img->stride[2],
  395. uv_width);
  396. }
  397. result = webp_success;
  398. }
  399. }
  400. }
  401. vpx_codec_destroy(&dec);
  402. return result;
  403. }
/* Decode a WebP buffer (RIFF-wrapped, or a bare VP8 stream) into newly
 * allocated Y/U/V planes. *p_Y, *p_U, *p_V must be NULL on entry; on
 * success the caller owns the single buffer returned in *p_Y (U and V
 * point into it -- see VPXDecode). Returns webp_success or webp_failure. */
WebPResult WebPDecode(const uint8* data,
                      int data_size,
                      uint8** p_Y,
                      uint8** p_U,
                      uint8** p_V,
                      int* p_width,
                      int* p_height) {
  /* chunk_size is 0 on a malformed RIFF header, 0xffffffff when there is
   * no RIFF header at all (bare stream) -- only 0 is a failure here. */
  const uint32 chunk_size = SkipRiffHeader(&data, &data_size);
  if (!chunk_size) {
    return webp_failure; /* unsupported RIFF header */
  }
  return VPXDecode(data, data_size, p_Y, p_U, p_V, p_width, p_height);
}
  417. /*---------------------------------------------------------------------*
  418. * Writing WebP *
  419. *---------------------------------------------------------------------*/
/* Takes a pair of RGBA row data as input and generates 2 rows of Y data and one
 * row of subsampled U, V data as output
 * Input:
 *   1, 2. rgb_line1, rgb_line2 - input rgba rows
 *   3. width - image width
 * Outout:
 *   4, 5, 6: Output Y, U, V row
 */
static void RGBALinepairToYUV420(uint32* rgb_line1,
                                 uint32* rgb_line2,
                                 int width,
                                 uint8* Y_dst1,
                                 uint8* Y_dst2,
                                 uint8* u_dst,
                                 uint8* v_dst) {
  int x;
  /* Each iteration consumes a 2x2 pixel block: four luma samples, and one
   * U/V pair computed from the 4-pixel channel sums. The division by 4 is
   * folded into clip_uv(), which shifts by YUV_FRAC + 2. */
  for (x = (width >> 1); x > 0; --x) {
    const int sum_r =
        GetRed(rgb_line1 + 0) + GetRed(rgb_line1 + 1) +
        GetRed(rgb_line2 + 0) + GetRed(rgb_line2 + 1);
    const int sum_g =
        GetGreen(rgb_line1 + 0) + GetGreen(rgb_line1 + 1) +
        GetGreen(rgb_line2 + 0) + GetGreen(rgb_line2 + 1);
    const int sum_b =
        GetBlue(rgb_line1 + 0) + GetBlue(rgb_line1 + 1) +
        GetBlue(rgb_line2 + 0) + GetBlue(rgb_line2 + 1);
    Y_dst1[0] = GetLumaYfromPtr(rgb_line1 + 0);
    Y_dst1[1] = GetLumaYfromPtr(rgb_line1 + 1);
    Y_dst2[0] = GetLumaYfromPtr(rgb_line2 + 0);
    Y_dst2[1] = GetLumaYfromPtr(rgb_line2 + 1);
    *u_dst++ = GetChromaU(sum_r, sum_g, sum_b);
    *v_dst++ = GetChromaV(sum_r, sum_g, sum_b);
    rgb_line1 += 2;
    rgb_line2 += 2;
    Y_dst1 += 2;
    Y_dst2 += 2;
  }
  if (width & 1) { /* rightmost pixel. */
    /* Only a 1x2 block remains; double the 2-pixel sums so they match the
     * 4-pixel scale that clip_uv() expects. */
    const int sum_r = GetRed(rgb_line1) + GetRed(rgb_line2);
    const int sum_g = GetGreen(rgb_line1) + GetGreen(rgb_line2);
    const int sum_b = GetBlue(rgb_line1) + GetBlue(rgb_line2);
    Y_dst1[0] = GetLumaYfromPtr(rgb_line1);
    Y_dst2[0] = GetLumaYfromPtr(rgb_line2);
    *u_dst = GetChromaU(2 * sum_r, 2 * sum_g, 2 * sum_b);
    *v_dst = GetChromaV(2 * sum_r, 2 * sum_g, 2 * sum_b);
  }
}
/* Generates Y, U, V data (with color subsampling) from a gd image.
 * Palette images are first copied into a temporary truecolor image.
 * Input:
 *   1. im2: input gd image (truecolor or palette)
 * Output:
 *   2, 3, 4. Y, U, V: output buffers; the caller must have allocated a
 *   full 4:2:0 plane set for im2's dimensions.
 */
void gd_RGBAToYUV420(gdImagePtr im2,
                     uint8* Y,
                     uint8* U,
                     uint8* V) {
  int y_width = im2->sx;
  int y_height = im2->sy;
  int y_stride = y_width;
  int uv_width = ((y_width + 1) >> 1);
  int uv_stride = uv_width;
  int y;
  gdImagePtr im = NULL;
  int free_im = 0;   /* whether im is a temporary we must destroy */
  if (!im2->trueColor) {
    /* Todo: Replace the color/YUV functions with our own and simplify
       that should boost the conversion a bit as well, not only for
       palette image. */
    im = gdImageCreateTrueColor(im2->sx, im2->sy);
    if (!im) {
      php_gd_error("gd-webp error: cannot convert palette input to truecolor");
      return;
    }
    gdImageCopy(im, im2, 0, 0, 0, 0, im->sx, im->sy);
    free_im = 1;
  } else {
    im = im2;
  }
  /* Convert rows two at a time: each pair yields two Y rows and one
   * subsampled U/V row. */
  for (y = 0; y < (y_height >> 1); ++y) {
    RGBALinepairToYUV420(im->tpixels[2 * y],
                         im->tpixels[2 * y + 1],
                         y_width,
                         Y + 2 * y * y_stride,
                         Y + (2 * y + 1) * y_stride,
                         U + y * uv_stride,
                         V + y * uv_stride);
  }
  if (y_height & 1) {
    /* Odd height: process the last row against itself. */
    RGBALinepairToYUV420(im->tpixels[y_height - 1],
                         im->tpixels[y_height - 1],
                         y_width,
                         Y + (y_height - 1) * y_stride,
                         Y + (y_height - 1) * y_stride,
                         U + (y_height >> 1) * uv_stride,
                         V + (y_height >> 1) * uv_stride);
  }
  if (free_im) {
    gdImageDestroy(im);
  }
}
  525. /* Generates Y, U, V data (with color subsampling) from 32 bits
  526. * per pixel RGBA data buffer. The resulting YUV data can be directly fed into
  527. * the WebPEncode routine.
  528. * Input:
  529. * 1. pixdatainput rgba data buffer
  530. * 2. words per line corresponding to pixdata
  531. * 3, 4. image width and height respectively
  532. * Output:
  533. * 5, 6, 7. Output YUV data buffers
  534. */
  535. void RGBAToYUV420(uint32* pixdata,
  536. int words_per_line,
  537. int width,
  538. int height,
  539. uint8* Y,
  540. uint8* U,
  541. uint8* V) {
  542. int y_width = width;
  543. int y_height = height;
  544. int y_stride = y_width;
  545. int uv_width = ((y_width + 1) >> 1);
  546. int uv_stride = uv_width;
  547. int y;
  548. for (y = 0; y < (y_height >> 1); ++y) {
  549. RGBALinepairToYUV420(pixdata + 2 * y * words_per_line,
  550. pixdata + (2 * y + 1) * words_per_line,
  551. y_width,
  552. Y + 2 * y * y_stride,
  553. Y + (2 * y + 1) * y_stride,
  554. U + y * uv_stride,
  555. V + y * uv_stride);
  556. }
  557. if (y_height & 1) {
  558. RGBALinepairToYUV420(pixdata + (y_height - 1) * words_per_line,
  559. pixdata + (y_height - 1) * words_per_line,
  560. y_width,
  561. Y + (y_height - 1) * y_stride,
  562. Y + (y_height - 1) * y_stride,
  563. U + (y_height >> 1) * uv_stride,
  564. V + (y_height >> 1) * uv_stride);
  565. }
  566. }
/* Thin wrapper over vpx_codec_control_() that maps the libvpx result to
 * webp_success / webp_failure. */
static int codec_ctl(vpx_codec_ctx_t *enc,
                     enum vp8e_enc_control_id id,
                     int value) {
  const vpx_codec_err_t res = vpx_codec_control_(enc, id, value);
  if (res != VPX_CODEC_OK) {
    return webp_failure;
  }
  return webp_success;
}
/* Tune the encoder configuration for still-image use: two worker threads,
 * fixed quality (min and max quantizer both pinned to QP), and fixed
 * keyframe placement (a still image is a single keyframe). */
static void SetupParams(vpx_codec_enc_cfg_t* cfg,
                        int QP) {
  cfg->g_threads = 2;
  cfg->rc_min_quantizer = QP;
  cfg->rc_max_quantizer = QP;
  cfg->kf_mode = VPX_KF_FIXED;
}
  583. /* VPXEncode: Takes a Y, U, V data buffers (with color components U and V
  584. * subsampled to 1/2 resolution) and generates the VPX string.
  585. * Output VPX string is placed in the *p_out buffer. container_size
  586. * indicates number of bytes to be left blank at the beginning of
  587. * *p_out buffer to accommodate for a container header.
  588. *
  589. * Return: success/failure
  590. */
  591. static WebPResult VPXEncode(const uint8* Y,
  592. const uint8* U,
  593. const uint8* V,
  594. int y_width,
  595. int y_height,
  596. int y_stride,
  597. int uv_width,
  598. int uv_height,
  599. int uv_stride,
  600. int QP,
  601. int container_size,
  602. unsigned char** p_out,
  603. int* p_out_size_bytes) {
  604. vpx_codec_iface_t* iface = &vpx_codec_vp8_cx_algo;
  605. vpx_codec_err_t res;
  606. vpx_codec_enc_cfg_t cfg;
  607. vpx_codec_ctx_t enc;
  608. WebPResult result = webp_failure;
  609. vpx_image_t img;
  610. *p_out = NULL;
  611. *p_out_size_bytes = 0;
  612. /* validate input parameters. */
  613. if (!p_out || !Y || !U || !V
  614. || y_width <= 0 || y_height <= 0 || uv_width <= 0 || uv_height <= 0
  615. || y_stride < y_width || uv_stride < uv_width
  616. || QP < 0 || QP > 63) {
  617. return webp_failure;
  618. }
  619. res = vpx_codec_enc_config_default(iface, &cfg, 0);
  620. if (res != VPX_CODEC_OK) {
  621. return webp_failure;
  622. }
  623. SetupParams(&cfg, QP);
  624. cfg.g_w = y_width;
  625. cfg.g_h = y_height;
  626. res = vpx_codec_enc_init(&enc, iface, &cfg, 0);
  627. if (res == VPX_CODEC_OK) {
  628. codec_ctl(&enc, VP8E_SET_CPUUSED, 3);
  629. codec_ctl(&enc, VP8E_SET_NOISE_SENSITIVITY, 0);
  630. codec_ctl(&enc, VP8E_SET_SHARPNESS, 0);
  631. codec_ctl(&enc, VP8E_SET_ENABLEAUTOALTREF, 0);
  632. codec_ctl(&enc, VP8E_SET_ARNR_MAXFRAMES, 0);
  633. codec_ctl(&enc, VP8E_SET_ARNR_TYPE, 0);
  634. codec_ctl(&enc, VP8E_SET_ARNR_STRENGTH, 0);
  635. codec_ctl(&enc, VP8E_SET_STATIC_THRESHOLD, 0);
  636. codec_ctl(&enc, VP8E_SET_TOKEN_PARTITIONS, 2);
  637. vpx_img_wrap(&img, VPX_IMG_FMT_I420,
  638. y_width, y_height, 16, (uint8*)(Y));
  639. img.planes[VPX_PLANE_Y] = (uint8*)(Y);
  640. img.planes[VPX_PLANE_U] = (uint8*)(U);
  641. img.planes[VPX_PLANE_V] = (uint8*)(V);
  642. img.stride[VPX_PLANE_Y] = y_stride;
  643. img.stride[VPX_PLANE_U] = uv_stride;
  644. img.stride[VPX_PLANE_V] = uv_stride;
  645. res = vpx_codec_encode(&enc, &img, 0, 1, 0, VPX_DL_BEST_QUALITY);
  646. if (res == VPX_CODEC_OK) {
  647. vpx_codec_iter_t iter = NULL;
  648. const vpx_codec_cx_pkt_t* pkt = vpx_codec_get_cx_data(&enc, &iter);
  649. if (pkt != NULL) {
  650. *p_out = (unsigned char*)(calloc(container_size + pkt->data.frame.sz,
  651. 1));
  652. memcpy(*p_out + container_size,
  653. (const void*)(pkt->data.frame.buf),
  654. pkt->data.frame.sz);
  655. *p_out_size_bytes = container_size + pkt->data.frame.sz;
  656. result = webp_success;
  657. }
  658. }
  659. }
  660. vpx_codec_destroy(&enc);
  661. return result;
  662. }
/* Encode Y, U, V planes (U/V at 1/2 resolution) to a complete WebP file
 * image: a VP8 payload wrapped in a RIFF/WEBP container. On success the
 * caller owns *p_out (malloc'd); *p_out_size_bytes holds its size. If
 * psnr is non-NULL, the result is decoded again to compute the PSNR
 * against the input planes. Returns webp_success or webp_failure. */
WebPResult WebPEncode(const uint8* Y,
                      const uint8* U,
                      const uint8* V,
                      int y_width,
                      int y_height,
                      int y_stride,
                      int uv_width,
                      int uv_height,
                      int uv_stride,
                      int QP,
                      unsigned char** p_out,
                      int* p_out_size_bytes,
                      double *psnr) {
  const int kRiffHeaderSize = 20;   /* reserved up-front by VPXEncode */
  if (VPXEncode(Y, U, V,
                y_width, y_height, y_stride,
                uv_width, uv_height, uv_stride,
                QP, kRiffHeaderSize,
                p_out, p_out_size_bytes) != webp_success) {
    return webp_failure;
  } else {
    /* Write RIFF header */
    const int img_size_bytes = *p_out_size_bytes - kRiffHeaderSize;
    const int chunk_size = (img_size_bytes + 1) & ~1; /* make size even */
    /* riff_size counts everything after the 8-byte RIFF preamble:
     * "WEBP" + "VP8 " + chunk size field + padded chunk data. */
    const int riff_size = chunk_size + 12;
    const uint8_t kRiffHeader[20] = { 'R', 'I', 'F', 'F',
                                      (riff_size >> 0) & 255,
                                      (riff_size >> 8) & 255,
                                      (riff_size >> 16) & 255,
                                      (riff_size >> 24) & 255,
                                      'W', 'E', 'B', 'P',
                                      'V', 'P', '8', ' ',
                                      (chunk_size >> 0) & 255,
                                      (chunk_size >> 8) & 255,
                                      (chunk_size >> 16) & 255,
                                      (chunk_size >> 24) & 255 };
    memcpy(*p_out, kRiffHeader, kRiffHeaderSize);
    if (img_size_bytes & 1) { /* write a padding byte */
      const int new_size = *p_out_size_bytes + 1;
      unsigned char* p = (unsigned char*)realloc(*p_out, new_size);
      if (p == NULL) {
        /* realloc failed: free the original buffer and report failure */
        free(*p_out);
        *p_out = NULL;
        *p_out_size_bytes = 0;
        return webp_failure;
      }
      p[new_size - 1] = 0;
      *p_out = p;
      *p_out_size_bytes = new_size;
    }
    if (psnr) {
      *psnr = WebPGetPSNR(Y, U, V, *p_out, *p_out_size_bytes);
    }
    return webp_success;
  }
}
  719. void AdjustColorspace(uint8* Y, uint8* U, uint8* V, int width, int height) {
  720. int y_width = width;
  721. int y_height = height;
  722. int y_stride = y_width;
  723. int uv_width = ((y_width + 1) >> 1);
  724. int uv_height = ((y_height + 1) >> 1);
  725. int uv_stride = uv_width;
  726. int x, y;
  727. /* convert luma */
  728. for (y = 0; y < y_height; ++y) {
  729. uint8* const Yrow = Y + y * y_stride;
  730. for (x = 0; x < y_width; ++x) {
  731. /* maps [0..255] to [16..235] */
  732. Yrow[x] = ((Yrow[x] * 55 + 32) >> 6) + 16;
  733. }
  734. }
  735. /* convert chroma */
  736. for (y = 0; y < uv_height; ++y) {
  737. uint8* const Urow = U + y * uv_stride;
  738. uint8* const Vrow = V + y * uv_stride;
  739. for (x = 0; x < uv_width; ++x) {
  740. /* maps [0..255] to [16..240] */
  741. Urow[x] = (((Urow[x] - 127) * 7) >> 3) + 128;
  742. Vrow[x] = (((Vrow[x] - 127) * 7) >> 3) + 128;
  743. }
  744. }
  745. }
  746. void AdjustColorspaceBack(uint8* Y, uint8* U, uint8* V, int width, int height) {
  747. int y_width = width;
  748. int y_height = height;
  749. int y_stride = y_width;
  750. int uv_width = ((y_width + 1) >> 1);
  751. int uv_height = ((y_height + 1) >> 1);
  752. int uv_stride = uv_width;
  753. int x, y;
  754. /* convert luma */
  755. for (y = 0; y < y_height; ++y) {
  756. uint8* const Yrow = Y + y * y_stride;
  757. for (x = 0; x < y_width; ++x) {
  758. /* maps [16..235] to [0..255] */
  759. const int v = ((Yrow[x] - 16) * 149 + 64) >> 7;
  760. Yrow[x] = (v < 0) ? 0 : (v > 255) ? 255u : v;
  761. }
  762. }
  763. /* convert chroma */
  764. for (y = 0; y < uv_height; ++y) {
  765. uint8* const Urow = U + y * uv_stride;
  766. uint8* const Vrow = V + y * uv_stride;
  767. for (x = 0; x < uv_width; ++x) {
  768. /* maps [0..255] to [16..240] */
  769. const int ru = (((Urow[x] - 128) * 73) >> 6) + 128;
  770. const int rv = (((Vrow[x] - 128) * 73) >> 6) + 128;
  771. Urow[x] = (ru < 0) ? 0 : (ru > 255) ? 255u : ru;
  772. Vrow[x] = (rv < 0) ? 0 : (rv > 255) ? 255u : rv;
  773. }
  774. }
  775. }
/* Parse just enough of a WebP buffer to report its dimensions without
 * decoding. Skips any RIFF container, then reads the VP8 uncompressed
 * frame header. *width/*height are zeroed first and only filled on
 * success. Returns webp_success or webp_failure. */
WebPResult WebPGetInfo(const uint8* data,
                       int data_size,
                       int *width,
                       int *height) {
  const uint32 chunk_size = SkipRiffHeader(&data, &data_size);
  if (width) *width = 0;
  if (height) *height = 0;
  if (!chunk_size) {
    return webp_failure; /* unsupported RIFF header */
  }
  /* Validate raw video data */
  if (data_size < 10) {
    return webp_failure; /* not enough data */
  }
  /* check signature: bytes 3..5 of a VP8 keyframe are the start code
   * 0x9d 0x01 0x2a */
  if (data[3] != 0x9d || data[4] != 0x01 || data[5] != 0x2a) {
    return webp_failure; /* Wrong signature. */
  } else {
    /* First three bytes are the little-endian frame tag. */
    const uint32 bits = data[0] | (data[1] << 8) | (data[2] << 16);
    if ((bits & 1)) { /* Not a keyframe. */
      return webp_failure;
    } else {
      const int profile = (bits >> 1) & 7;
      const int show_frame = (bits >> 4) & 1;
      const uint32 partition_length = (bits >> 5);
      if (profile > 3) {
        return webp_failure; /* unknown profile */
      }
      if (!show_frame) {
        return webp_failure; /* first frame is invisible! */
      }
      if (partition_length >= chunk_size) {
        return webp_failure; /* inconsistent size information. */
      } else {
        /* Dimensions are 14-bit values at bytes 6..9 (low bits of the
         * upper 2 bits carry the scaling code, masked off here). */
        const int w = ((data[7] << 8) | data[6]) & 0x3fff;
        const int h = ((data[9] << 8) | data[8]) & 0x3fff;
        if (width) *width = w;
        if (height) *height = h;
        return webp_success;
      }
    }
  }
  return webp_failure; /* not reached; kept for defensive completeness */
}
  820. #endif /* HAVE_LIBVPX */