2 Routines for handling h.263 video tags
4 Part of the swftools package.
6 Copyright (c) 2003 Matthias Kramm <kramm@quiss.org>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
26 #include "../rfxswf.h"
27 #include "h263tables.h"
31 - use prepare* / write* in encode_IFrame_block
32 - check whether mvd steps of 2 lead to (much) smaller results
/* Initialize *stream and write the body of a DEFINEVIDEOSTREAM tag:
   frame count, dimensions, smoothing flag, codec id (2 = Sorenson Spark,
   the SWF flavour of H.263).
   NOTE(review): partial listing -- braces and some statements are missing
   (e.g. a width rounding step implied by the (width&15)==0 assert below
   is presumably among the absent lines -- TODO confirm against original). */
38 void swf_SetVideoStreamDefine(TAG*tag, VIDEOSTREAM*stream, U16 frames, U16 width, U16 height)
43 memset(stream, 0, sizeof(VIDEOSTREAM));
/* o* fields keep the caller-visible ("original") geometry before rounding */
44 stream->olinex = width;
45 stream->owidth = width;
46 stream->oheight = height;
/* round height up to the next multiple of 16 (macroblock size) */
48 height+=15;height&=~15;
49 stream->linex = width;
50 stream->width = width;
51 stream->height = height;
/* macroblock counts in x and y */
52 stream->bbx = width/16;
53 stream->bby = height/16;
/* current frame, previous reconstructed frame, per-macroblock motion vectors */
54 stream->current = (YUV*)malloc(width*height*sizeof(YUV));
55 stream->oldpic = (YUV*)malloc(width*height*sizeof(YUV));
56 stream->mvdx = (int*)malloc(stream->bbx*stream->bby*sizeof(int));
57 stream->mvdy = (int*)malloc(stream->bbx*stream->bby*sizeof(int));
58 stream->do_motion = 0;
60 memset(stream->oldpic, 0, width*height*sizeof(YUV));
61 memset(stream->current, 0, width*height*sizeof(YUV));
63 assert((stream->width&15) == 0);
64 assert((stream->height&15) == 0);
65 assert((stream->bbx*16) == stream->width);
66 assert((stream->bby*16) == stream->height);
68 swf_SetU16(tag, frames);
69 swf_SetU16(tag, width);
70 swf_SetU16(tag, height);
71 //swf_SetU8(tag, 1); /* smoothing on */
72 swf_SetU8(tag, 0); /* smoothing off */
73 swf_SetU8(tag, 2); /* codec = h.263 sorenson spark */
/* Release all buffers owned by the stream; pointers are nulled so a
   double call is harmless (free(NULL) is a no-op). */
76 void swf_VideoStreamClear(VIDEOSTREAM*stream)
78 free(stream->oldpic);stream->oldpic = 0;
79 free(stream->current);stream->current = 0;
80 free(stream->mvdx);stream->mvdx=0;
81 free(stream->mvdy);stream->mvdy=0;
84 typedef struct _block_t
94 static inline int truncate256(int a)
/* Copy the 16x16 macroblock at pixel position (posx,posy) from pic into bb:
   four 8x8 luma planes y1..y4 plus 2:1-subsampled chroma u,v (each chroma
   sample is the average of a 2x2 pixel group).
   NOTE(review): loop headers, p2 setup, the y1 assignment and closing braces
   are missing from this listing. */
101 static void getregion(block_t* bb, YUV*pic, int posx, int posy, int linex)
109 p1 = &pic[posy*linex+posx];
113 bb->u[i] = (p2[x*2].u + p2[x*2+1].u + p2[linex+x*2].u + p2[linex+x*2+1].u)/4;
114 bb->v[i] = (p2[x*2].v + p2[x*2+1].v + p2[linex+x*2].v + p2[linex+x*2+1].v)/4;
116 bb->y2[i] = p1[x+8].y;
117 bb->y3[i] = p1[linex*8+x].y;
118 bb->y4[i] = p1[linex*8+x+8].y;
126 /* This function is pretty complex. Let's hope it works correctly */
/* Motion-compensated fetch: like getregion(), but at macroblock (posx,posy)
   displaced by the half-pel motion vector (mvdx,mvdy).  yhp/uvhp encode which
   of the x/y components need half-pel interpolation; the four branches below
   handle full-pel, horizontal, vertical and diagonal averaging respectively.
   NOTE(review): partial listing -- loop headers, declarations of p1/p2/yhp/
   uvhp and several closing braces are absent. */
127 static void getmvdregion(block_t* bb, YUV*pic, int posx, int posy, int mvdx, int mvdy, int linex)
/* mvd units are half pixels; (mvdx&~1)/2 rounds toward -inf for negatives */
135 posx = posx*16 + ((mvdx&~1)/2); //works also for negative mvdx (unlike mvdx/2)
136 posy = posy*16 + ((mvdy&~1)/2);
137 p1 = &pic[posy*linex+posx];
/* chroma is sampled on even coordinates */
138 p2 = &pic[(posy&~1)*linex+(posx&~1)];
139 uvhp = ((mvdx&1)|((mvdx>>1)&1))|((mvdy&2)|((mvdy&1)<<1));
140 yhp = ((mvdy&1)<<1|(mvdx&1));
/* --- luma --- */
143 if(yhp==0 || yhp==2) {
/* full-pel in x; yhp==2 additionally averages with the row below (missing
   lines presumably advance p1 by one line between these two passes) */
146 bb->y1[yy] = p1[x].y;
147 bb->y2[yy] = p1[x+8].y;
148 bb->y3[yy] = p1[linex*8+x].y;
149 bb->y4[yy] = p1[linex*8+x+8].y;
157 bb->y1[yy] += p1[x].y; bb->y1[yy] /= 2;
158 bb->y2[yy] += p1[x+8].y; bb->y2[yy] /= 2;
159 bb->y3[yy] += p1[linex*8+x].y; bb->y3[yy] /= 2;
160 bb->y4[yy] += p1[linex*8+x+8].y; bb->y4[yy] /= 2;
165 } else if(yhp==1 || yhp==3) {
/* half-pel in x: horizontal pair averages; yhp==3 also averages vertically */
168 bb->y1[yy] = (p1[x].y + p1[x+1].y);
169 bb->y2[yy] = (p1[x+8].y + p1[x+8+1].y);
170 bb->y3[yy] = (p1[linex*8+x].y + p1[linex*8+x+1].y);
171 bb->y4[yy] = (p1[linex*8+x+8].y + p1[linex*8+x+8+1].y);
178 bb->y1[yy] += (p1[x].y + p1[x+1].y); bb->y1[yy]/=4;
179 bb->y2[yy] += (p1[x+8].y + p1[x+8+1].y); bb->y2[yy]/=4;
180 bb->y3[yy] += (p1[linex*8+x].y + p1[linex*8+x+1].y); bb->y3[yy]/=4;
181 bb->y4[yy] += (p1[linex*8+x+8].y + p1[linex*8+x+8+1].y); bb->y4[yy]/=4;
186 bb->y1[yy]/=2; bb->y2[yy]/=2; bb->y3[yy]/=2; bb->y4[yy]/=2;
/* --- chroma (2x2 box average first, then optional half-pel average) --- */
194 if(uvhp==0 || uvhp==2) {
197 bb->u[uv] = (p2[x*2].u + p2[x*2+1].u + p2[linex+x*2].u + p2[linex+x*2+1].u)/4;
198 bb->v[uv] = (p2[x*2].v + p2[x*2+1].v + p2[linex+x*2].v + p2[linex+x*2+1].v)/4;
205 bb->u[uv] += (p2[x*2].u + p2[x*2+1].u + p2[linex+x*2].u + p2[linex+x*2+1].u)/4;
206 bb->v[uv] += (p2[x*2].v + p2[x*2+1].v + p2[linex+x*2].v + p2[linex+x*2+1].v)/4;
213 } else /* uvhp==1 || uvhp==3 */ {
/* half-pel in x for chroma: average the 2x2 group with its right neighbour */
216 bb->u[uv] = ((p2[x*2].u + p2[x*2+1].u + p2[linex+x*2].u + p2[linex+x*2+1].u)/4+
217 (p2[x*2+2].u + p2[x*2+1+2].u + p2[linex+x*2+2].u + p2[linex+x*2+1+2].u)/4);
218 bb->v[uv] = ((p2[x*2].v + p2[x*2+1].v + p2[linex+x*2].v + p2[linex+x*2+1].v)/4+
219 (p2[x*2+2].v + p2[x*2+1+2].v + p2[linex+x*2+2].v + p2[linex+x*2+1+2].v)/4);
226 bb->u[uv] += ((p2[x*2].u + p2[x*2+1].u + p2[linex+x*2].u + p2[linex+x*2+1].u)/4+
227 (p2[x*2+2].u + p2[x*2+1+2].u + p2[linex+x*2+2].u + p2[linex+x*2+1+2].u)/4);
228 bb->v[uv] += ((p2[x*2].v + p2[x*2+1].v + p2[linex+x*2].v + p2[linex+x*2+1].v)/4+
229 (p2[x*2+2].v + p2[x*2+1+2].v + p2[linex+x*2+2].v + p2[linex+x*2+1+2].v)/4);
/* Fixed-point (8-bit fractional) RGB -> YUV conversion; separate source and
   destination strides allow converting the caller's picture into the padded
   stream buffer.  The commented float version documents the coefficients. */
245 static void rgb2yuv(YUV*dest, RGBA*src, int dlinex, int slinex, int width, int height)
248 for(y=0;y<height;y++) {
249 for(x=0;x<width;x++) {
251 r = src[y*slinex+x].r;
252 g = src[y*slinex+x].g;
253 b = src[y*slinex+x].b;
254 /*dest[y*dlinex+x].y = (r*0.299 + g*0.587 + b*0.114);
255 dest[y*dlinex+x].u = (r*-0.169 + g*-0.332 + b*0.500 + 128.0);
256 dest[y*dlinex+x].v = (r*0.500 + g*-0.419 + b*-0.0813 + 128.0);*/
258 //dest[y*dlinex+x].y = 128;//(r*((int)( 0.299*256)) + g*((int)( 0.587*256)) + b*((int)( 0.114 *256)))>>8;
260 dest[y*dlinex+x].y = (r*((int)( 0.299*256)) + g*((int)( 0.587*256)) + b*((int)( 0.114 *256)))>>8;
261 dest[y*dlinex+x].u = (r*((int)(-0.169*256)) + g*((int)(-0.332*256)) + b*((int)( 0.500 *256))+ 128*256)>>8;
262 dest[y*dlinex+x].v = (r*((int)( 0.500*256)) + g*((int)(-0.419*256)) + b*((int)(-0.0813*256))+ 128*256)>>8;
/* Copy one 16x16 macroblock region (bx,by) from src to dest, row by row.
   NOTE(review): the 16-iteration loop wrapping the memcpy is missing from
   this listing. */
267 static void copyregion(VIDEOSTREAM*s, YUV*dest, YUV*src, int bx, int by)
269 YUV*p1 = &dest[by*s->linex*16+bx*16];
270 YUV*p2 = &src[by*s->linex*16+bx*16];
273 memcpy(p1, p2, 16*sizeof(YUV));
274 p1+=s->linex;p2+=s->linex;
/* Inverse of rgb2yuv: fixed-point YUV -> RGB with clamping to 0..255
   via truncate256().  Used for the debug-picture path. */
278 static void yuv2rgb(RGBA*dest, YUV*src, int linex, int width, int height)
281 for(y=0;y<height;y++) {
282 for(x=0;x<width;x++) {
284 u = src[y*linex+x].u;
285 v = src[y*linex+x].v;
286 yy = src[y*linex+x].y;
287 dest[y*linex+x].r = truncate256(yy + ((360*(v-128))>>8));
288 dest[y*linex+x].g = truncate256(yy - ((88*(u-128)+183*(v-128))>>8));
289 dest[y*linex+x].b = truncate256(yy + ((455 * (u-128))>>8));
/* Write a block_t back into picture dest at macroblock (bx,by): the four
   8x8 luma planes go to their quadrants, each chroma sample is replicated
   over its 2x2 pixel group (inverse of getregion's subsampling).
   NOTE(review): the x/y loop headers are missing from this listing. */
293 static void copy_block_pic(VIDEOSTREAM*s, YUV*dest, block_t*b, int bx, int by)
295 YUV*p1 = &dest[(by*16)*s->linex+bx*16];
296 YUV*p2 = &dest[(by*16+8)*s->linex+bx*16];
301 p1[x+0].u = b->u[(y/2)*8+(x/2)];
302 p1[x+0].v = b->v[(y/2)*8+(x/2)];
303 p1[x+0].y = b->y1[y*8+x];
304 p1[x+8].u = b->u[(y/2)*8+(x/2)+4];
305 p1[x+8].v = b->v[(y/2)*8+(x/2)+4];
306 p1[x+8].y = b->y2[y*8+x];
307 p2[x+0].u = b->u[(y/2+4)*8+(x/2)];
308 p2[x+0].v = b->v[(y/2+4)*8+(x/2)];
309 p2[x+0].y = b->y3[y*8+x];
310 p2[x+8].u = b->u[(y/2+4)*8+(x/2)+4];
311 p2[x+8].v = b->v[(y/2+4)*8+(x/2)+4];
312 p2[x+8].y = b->y4[y*8+x];
/* Sum-of-absolute-differences between macroblock (bx,by) of two pictures.
   Chroma error is weighted 1/4 relative to luma in the returned score.
   NOTE(review): loops and the u/v difference computation are missing here. */
319 static int compare_pic_pic(VIDEOSTREAM*s, YUV*pp1, YUV*pp2, int bx, int by)
321 int linex = s->width;
322 YUV*p1 = &pp1[by*linex*16+bx*16];
323 YUV*p2 = &pp2[by*linex*16+bx*16];
324 int diffy=0, diffuv = 0;
334 diffuv += abs(u)+abs(v);
339 return diffy + diffuv/4;
/* SAD between a (subsampled) block_t and macroblock (bx,by) of a full
   picture.  Each chroma sample of the block is compared against all four
   picture pixels it covers; same luma/chroma weighting as compare_pic_pic
   so scores are directly comparable.
   NOTE(review): loop headers, uv1 setup and the diffy accumulation lines
   are missing from this listing. */
342 static int compare_pic_block(VIDEOSTREAM*s, block_t* b, YUV*pic, int bx, int by)
344 int linex = s->width;
345 YUV*y1 = &pic[(by*2)*linex*8+bx*16];
346 YUV*y2 = &pic[(by*2)*linex*8+bx*16+8];
347 YUV*y3 = &pic[(by*2+1)*linex*8+bx*16];
348 YUV*y4 = &pic[(by*2+1)*linex*8+bx*16+8];
350 YUV*uv2 = &y1[linex];
351 int diffy=0, diffuv = 0;
355 int yy,u1,v1,u2,v2,u3,v3,u4,v4;
357 yy = y1[x].y - b->y1[y8x];
359 yy = y2[x].y - b->y2[y8x];
361 yy = y3[x].y - b->y3[y8x];
363 yy = y4[x].y - b->y4[y8x];
365 u1 = uv1[x*2].u - b->u[y8x];
366 v1 = uv1[x*2].v - b->v[y8x];
367 u2 = uv1[x*2+1].u - b->u[y8x];
368 v2 = uv1[x*2+1].v - b->v[y8x];
369 u3 = uv2[x*2].u - b->u[y8x];
370 v3 = uv2[x*2].v - b->v[y8x];
371 u4 = uv2[x*2+1].u - b->u[y8x];
372 v4 = uv2[x*2+1].v - b->v[y8x];
373 diffuv += (abs(u1)+abs(v1));
374 diffuv += (abs(u2)+abs(v2));
375 diffuv += (abs(u3)+abs(v3));
376 diffuv += (abs(u4)+abs(v4));
385 return diffy + diffuv/4;
388 static inline int valtodc(int val)
396 /* TODO: what to do for zero values? skip the block? */
405 static int dctoval(int dc)
418 /* TODO: we could also just let the caller pass only the string table[index] here */
/* Emit the VLC for table[index] bit by bit; the code is stored as a
   '0'/'1' character string.  Presumably returns the number of bits written
   (callers accumulate the result) -- the return statement is among the
   missing lines, TODO confirm. */
419 static int codehuffman(TAG*tag, struct huffcode*table, int index)
421 /* TODO: !optimize! */
423 while(table[index].code[i]) {
424 if(table[index].code[i]=='0')
425 swf_SetBits(tag, 0, 1);
427 swf_SetBits(tag, 1, 1);
/* Quantize one 8x8 coefficient block: DC (when has_dc) via valtodc(),
   AC coefficients divided by 2*quant, clamped to the encodable
   -127..127 range (values outside would corrupt the bitstream). */
433 static void quantize8x8(int*src, int*dest, int has_dc, int quant)
436 double q = 1.0/(quant*2);
438 dest[0] = valtodc((int)src[0]); /*DC*/
443 //dest[t] = (int)src[t];
444 /* exact: if(quant&1){dest[t] = (dest[t]/quant - 1)/2;}else{dest[t] = ((dest[t]+1)/quant - 1)/2;} */
445 //if(quant&1){dest[t] = (dest[t]/quant - 1)/2;}else{dest[t] = ((dest[t]+1)/quant - 1)/2;}
446 //dest[t] = dest[t]/(quant*2);
447 dest[t] = (int)(src[t]*q);
448 /* TODO: warn if this happens- the video will be buggy */
449 if(dest[t]>127) dest[t]=127;
450 if(dest[t]<-127) dest[t]=-127;
/* Inverse quantization per H.263: reconstruction level depends on the
   parity of quant (odd/even branches), then clipped to the 12-bit
   coefficient range required by the spec. */
454 static void dequantize8x8(int*b, int has_dc, int quant)
458 b[0] = dctoval(b[0]); //DC
461 for(t=pos;t<64;t++) {
470 b[t] = quant*(2*b[t]+1); //-7,8,24,40
472 b[t] = quant*(2*b[t]+1)-1; //-8,7,23,39
479 /* paragraph 6.2.2, "clipping of reconstruction levels": */
480 if(b[t]>2047) b[t]=2047;
481 if(b[t]<-2048) b[t]=-2048;
/* Returns nonzero iff the 8x8 block has any nonzero (AC) coefficient;
   used to build the cbpy/cbpc coded-block patterns.  Body largely missing
   from this listing. */
485 static int hascoef(int*b, int has_dc)
491 for(t=pos;t<64;t++) {
/* Count (without writing) how many bits encode8x8() would emit for this
   block: run/level/last triples looked up in the RLE VLC table, with the
   22-bit (VLC+1+6+8) escape for combinations not in the table.  Used by the
   mode-decision code to compare I vs. MVD encodings. */
498 static int coefbits8x8(int*bb, int has_dc)
/* find the last nonzero coefficient */
509 for(last=63;last>=pos;last--) {
516 int run=0, level=0, islast=0,t;
517 while(!bb[pos] && pos<last) {
524 if(level<0) level=-level;
526 for(t=0;t<RLE_ESCAPE;t++) {
527 if(rle_params[t].run == run &&
528 rle_params[t].level == level &&
529 rle_params[t].last == islast) {
/* +1 for the sign bit */
530 bits += rle[t].len + 1;
/* escape: VLC + 1 last + 6 run + 8 level */
535 bits += rle[RLE_ESCAPE].len + 1 + 6 + 8;
/* Emit one 8x8 coefficient block: optional 8-bit DC (INTRA), then
   run/level/last events coded with the RLE VLC table, escaping to the
   fixed-length form (1+6+8 bits) when no table entry matches.  Returns the
   bit count (mirrors coefbits8x8).  NOTE(review): partial listing -- loop
   headers, sign extraction and several braces are missing. */
544 static int encode8x8(TAG*tag, int*bb, int has_dc, int has_tcoef)
551 swf_SetBits(tag, bb[0], 8);
558 /* determine last non-null coefficient */
559 for(last=63;last>=pos;last--) {
560 /* TODO: we could leave out small coefficients
561 after a certain point (32?) */
565 /* blocks without coefficients should not be included
566 in the cbpy/cbpc patterns: */
575 while(!bb[pos] && pos<last) {
587 for(t=0;t<RLE_ESCAPE;t++) {
588 /* TODO: lookup table */
589 if(rle_params[t].run == run &&
590 rle_params[t].level == level &&
591 rle_params[t].last == islast) {
592 bits += codehuffman(tag, rle, t);
593 swf_SetBits(tag, sign, 1);
/* escape sequence for run/level/last combinations outside the table */
599 bits += codehuffman(tag, rle, RLE_ESCAPE);
602 if(!level || level<-127 || level>127) {
603 fprintf(stderr, "Warning: Overflow- Level %d at pos %d\n", level, pos);
604 if(level<-127) level=-127;
605 if(level>127) level=127;
610 assert(level<=127); //TODO: known to fail for pos=0 (with custom frames?)
612 swf_SetBits(tag, islast, 1);
613 swf_SetBits(tag, run, 6);
614 swf_SetBits(tag, level, 8); //FIXME: fixme??
/* Quantize all six 8x8 planes of a macroblock (4 luma + 2 chroma). */
626 static void quantize(block_t*fb, block_t*b, int has_dc, int quant)
628 quantize8x8(fb->y1, b->y1, has_dc, quant);
629 quantize8x8(fb->y2, b->y2, has_dc, quant);
630 quantize8x8(fb->y3, b->y3, has_dc, quant);
631 quantize8x8(fb->y4, b->y4, has_dc, quant);
632 quantize8x8(fb->u, b->u, has_dc, quant);
633 quantize8x8(fb->v, b->v, has_dc, quant);
/* Forward DCT, in place, on all six planes of the macroblock. */
636 static void dodct(block_t*fb)
638 dct(fb->y1); dct(fb->y2); dct(fb->y3); dct(fb->y4);
639 dct(fb->u); dct(fb->v);
/* Combined DCT + quantization of fb into b (dct2 presumably fuses both
   steps; an alternative path calls quantize() separately -- the selecting
   code is among the missing lines, TODO confirm).  Afterwards all values
   are clamped to -127..127, the only range the VLC coder can represent
   for non-DC coefficients. */
648 static void dodctandquant(block_t*fb, block_t*b, int has_dc, int quant)
653 quantize(fb,b,has_dc,quant);
657 dct2(fb->y1,b->y1); dct2(fb->y2,b->y2); dct2(fb->y3,b->y3); dct2(fb->y4,b->y4);
658 dct2(fb->u,b->u); dct2(fb->v,b->v);
661 /* prepare for encoding (only values in (-127..-1,1..127) are
662 allowed as non-zero, non-dc values */
663 if(b->y1[t]<-127) b->y1[t]=-127;
664 if(b->y2[t]<-127) b->y2[t]=-127;
665 if(b->y3[t]<-127) b->y3[t]=-127;
666 if(b->y4[t]<-127) b->y4[t]=-127;
667 if(b->u[t]<-127) b->u[t]=-127;
668 if(b->v[t]<-127) b->v[t]=-127;
670 if(b->y1[t]>127) b->y1[t]=127;
671 if(b->y2[t]>127) b->y2[t]=127;
672 if(b->y3[t]>127) b->y3[t]=127;
673 if(b->y4[t]>127) b->y4[t]=127;
674 if(b->u[t]>127) b->u[t]=127;
675 if(b->v[t]>127) b->v[t]=127;
/* Inverse DCT: undo the zigzag coefficient ordering into a temporary
   block, run idct() on each plane, copy the result back into b. */
679 static void doidct(block_t*b)
684 fb.y1[t] = b->y1[zigzagtable[t]];
685 fb.y2[t] = b->y2[zigzagtable[t]];
686 fb.y3[t] = b->y3[zigzagtable[t]];
687 fb.y4[t] = b->y4[zigzagtable[t]];
688 fb.u[t] = b->u[zigzagtable[t]];
689 fb.v[t] = b->v[zigzagtable[t]];
691 idct(fb.y1); idct(fb.y2); idct(fb.y3); idct(fb.y4);
692 idct(fb.u); idct(fb.v);
694 memcpy(b, &fb, sizeof(block_t));
/* Clamp every sample of the macroblock to 0..255 (truncate256). */
697 static void truncateblock(block_t*b)
701 b->y1[t] = truncate256(b->y1[t]);
702 b->y2[t] = truncate256(b->y2[t]);
703 b->y3[t] = truncate256(b->y3[t]);
704 b->y4[t] = truncate256(b->y4[t]);
705 b->u[t] = truncate256(b->u[t]);
706 b->v[t] = truncate256(b->v[t]);
/* Dequantize all six 8x8 planes of a macroblock (inverse of quantize). */
710 static void dequantize(block_t*b, int has_dc, int quant)
712 dequantize8x8(b->y1, has_dc, quant);
713 dequantize8x8(b->y2, has_dc, quant);
714 dequantize8x8(b->y3, has_dc, quant);
715 dequantize8x8(b->y4, has_dc, quant);
716 dequantize8x8(b->u, has_dc, quant);
717 dequantize8x8(b->v, has_dc, quant);
/* Build the coded-block patterns: cbpy gets one bit per luma block
   (y1=8 .. y4=1), cbpc one bit per chroma block (u=2, v=1); a bit is set
   when the corresponding 8x8 block has coefficients to transmit. */
720 static void getblockpatterns(block_t*b, int*cbpybits,int*cbpcbits, int has_dc)
725 *cbpybits|=hascoef(b->y1, has_dc)*8;
726 *cbpybits|=hascoef(b->y2, has_dc)*4;
727 *cbpybits|=hascoef(b->y3, has_dc)*2;
728 *cbpybits|=hascoef(b->y4, has_dc)*1;
730 *cbpcbits|=hascoef(b->u, has_dc)*2;
731 *cbpcbits|=hascoef(b->v, has_dc)*1;
/* Write the 2-bit DQUANT field; only quantizer deltas of -1,-2,+1,+2 are
   representable (NOTE(review): the dquant==-1 branch writing code 0x0 is
   presumably among the missing lines). */
734 static void setQuant(TAG*tag, int dquant)
741 swf_SetBits(tag, 0x0, 2);
742 } else if(dquant == -2) {
743 swf_SetBits(tag, 0x1, 2);
744 } else if(dquant == +1) {
745 swf_SetBits(tag, 0x2, 2);
746 } else if(dquant == +2) {
747 swf_SetBits(tag, 0x3, 2);
/* strlen(...)>0, so this is assert(0) with an attached message */
749 assert(0*strlen("invalid dquant"));
753 static void change_quant(int quant, int*dquant)
/* a -= b, element-wise over all six planes: forms the motion-compensated
   residual that gets DCT-coded for P-frame blocks. */
759 static void yuvdiff(block_t*a, block_t*b)
763 a->y1[t] = (a->y1[t] - b->y1[t]);
764 a->y2[t] = (a->y2[t] - b->y2[t]);
765 a->y3[t] = (a->y3[t] - b->y3[t]);
766 a->y4[t] = (a->y4[t] - b->y4[t]);
767 a->u[t] = (a->u[t] - b->u[t]);
768 a->v[t] = (a->v[t] - b->v[t]);
/* H.263 motion-vector predictor for macroblock (bx,by): take the median of
   the vectors of the left (1), above (2) and above-right (3) neighbours
   (border cases substitute defaults -- partly in the missing lines).
   The chained comparisons below implement a branch-based median of three
   for x and y independently; result goes to *px,*py (the x4/y4 assignments
   are among the missing lines, TODO confirm). */
772 static void predictmvd(VIDEOSTREAM*s, int bx, int by, int*px, int*py)
775 int x1,y1,x2,y2,x3,y3;
777 if(bx) {x1=s->mvdx[by*s->bbx+bx-1];
778 y1=s->mvdy[by*s->bbx+bx-1];
781 if(by) {x2=s->mvdx[(by-1)*s->bbx+bx];
782 y2=s->mvdy[(by-1)*s->bbx+bx];
784 x3=s->mvdx[(by-1)*s->bbx+bx+1];
785 y3=s->mvdy[(by-1)*s->bbx+bx+1];
/* top row: both upper predictors fall back to the left neighbour */
790 else {x2=x3=x1;y2=y3=y1;}
792 if((x1 <= x2 && x2 <= x3) ||
793 (x3 <= x2 && x2 <= x1)) {
795 } else if((x2 <= x1 && x1 <= x3) ||
796 (x3 <= x1 && x1 <= x2)) {
798 } else if((x1 <= x3 && x3 <= x2) ||
799 (x2 <= x3 && x3 <= x1)) {
806 if((y1 <= y2 && y2 <= y3) ||
807 (y3 <= y2 && y2 <= y1)) {
809 } else if((y2 <= y1 && y1 <= y3) ||
810 (y3 <= y1 && y1 <= y2)) {
812 } else if((y1 <= y3 && y3 <= y2) ||
813 (y2 <= y3 && y3 <= y1)) {
/* predicted vector must fit the representable half-pel range */
822 assert((x4>=-32 && x4<=31) && (y4>=-32 && y4<=31));
/* Map the MVD component (x when xy==0, y when xy==1) minus its prediction
   (px,py) into an index of the mvd VLC table; the difference is reduced to
   the 0..63 table range (reduction arithmetic is in the missing lines). */
825 static inline int mvd2index(int px, int py, int x, int y, int xy)
/* NOTE: '&&' makes this debug condition unsatisfiable -- '||' was
   presumably intended; left as-is since only the diagnostic is affected */
828 if((x<-32 && x>31) || (y<-32 && y>31))
829 fprintf(stderr, "(%d,%d)\n", x,y);
830 assert((x>=-32 && x<=31) && (y>=-32 && y<=31));
831 //assert((x&1)==0 && (y&1)==0);//for now
832 //assert((x&2)==0 && (y&2)==0);//for now(2)
847 assert(x>=0 && x<64);
851 typedef struct _iblockdata_t
853 block_t b; //transformed quantized coefficients
854 block_t reconstruction;
857 struct huffcode*ctable; //table to use for chrominance encoding (different for i-frames)
858 int iframe; // 1 if this is part of an iframe
861 typedef struct _mvdblockdata_t
865 block_t reconstruction;
/* Evaluate encoding macroblock (bx,by) as an INTRA block: DCT+quantize the
   source block, count the bits it would cost (mode VLC + cbpy + six
   coefficient blocks; fixed-cost terms like COD/DC are partly in missing
   lines), and build the decoder-side reconstruction for later comparison.
   In P-frames the INTRA mode code comes from the mcbpc_inter table (index
   3*4 = mode 3), in I-frames from mcbpc_intra. */
874 void prepareIBlock(VIDEOSTREAM*s, iblockdata_t*data, int bx, int by, block_t* fb, int*bits, int iframe)
876 /* consider I-block */
880 struct huffcode*ctable;
885 data->iframe = iframe;
887 data->ctable = &mcbpc_inter[3*4];
889 data->ctable = &mcbpc_intra[0];
/* work on a copy; fb is still needed by the MVD candidate */
892 memcpy(&fb_i, fb, sizeof(block_t));
893 dodctandquant(&fb_i, &data->b, 1, s->quant);
894 getblockpatterns(&data->b, &y, &c, 1);
899 *bits += data->ctable[c].len;
900 *bits += cbpy[y].len;
901 *bits += coefbits8x8(data->b.y1, 1);
902 *bits += coefbits8x8(data->b.y2, 1);
903 *bits += coefbits8x8(data->b.y3, 1);
904 *bits += coefbits8x8(data->b.y4, 1);
905 *bits += coefbits8x8(data->b.u, 1);
906 *bits += coefbits8x8(data->b.v, 1);
909 /* -- reconstruction -- */
910 memcpy(&data->reconstruction,&data->b,sizeof(block_t));
911 dequantize(&data->reconstruction, 1, s->quant);
912 doidct(&data->reconstruction);
913 truncateblock(&data->reconstruction);
/* Actually emit the INTRA block prepared by prepareIBlock: COD=0, mode/cbpc
   VLC, cbpy VLC, then the six coefficient blocks (with DC).  Copies the
   reconstruction into s->current and asserts the bit count matches the
   prediction made during preparation. */
916 int writeIBlock(VIDEOSTREAM*s, TAG*tag, iblockdata_t*data)
923 getblockpatterns(&data->b, &y, &c, has_dc);
925 swf_SetBits(tag,0,1); bits += 1; // COD
927 bits += codehuffman(tag, data->ctable, c);
928 bits += codehuffman(tag, cbpy, y);
931 bits += encode8x8(tag, data->b.y1, has_dc, y&8);
932 bits += encode8x8(tag, data->b.y2, has_dc, y&4);
933 bits += encode8x8(tag, data->b.y3, has_dc, y&2);
934 bits += encode8x8(tag, data->b.y4, has_dc, y&1);
937 bits += encode8x8(tag, data->b.u, has_dc, c&2);
938 bits += encode8x8(tag, data->b.v, has_dc, c&1);
940 copy_block_pic(s, s->current, &data->reconstruction, data->bx, data->by);
941 assert(data->bits == bits);
/* Cost probe for motion search: coefficient-bit count of the residual at
   candidate vector (hx,hy) for macroblock (bx,by); cheaper terms (mode,
   cbpy, mvd VLC lengths) are not included here. */
945 int getmvdbits(VIDEOSTREAM*s,block_t*fb, int bx,int by,int hx,int hy)
951 memcpy(&fbdiff, fb, sizeof(block_t));
952 getmvdregion(&fbold, s->oldpic, bx, by, hx, hy, s->linex);
953 yuvdiff(&fbdiff, &fbold);
954 dodctandquant(&fbdiff, &b, 0, s->quant);
955 bits += coefbits8x8(b.y1, 0);
956 bits += coefbits8x8(b.y2, 0);
957 bits += coefbits8x8(b.y3, 0);
958 bits += coefbits8x8(b.y4, 0);
959 bits += coefbits8x8(b.u, 0);
960 bits += coefbits8x8(b.v, 0);
/* Evaluate encoding macroblock (bx,by) as an INTER block with a motion
   vector: two-stage search (coarse grid in steps of 4 over the clamped
   -32..31 half-pel range, then a fine +-3 refinement around the best
   coarse hit), followed by residual DCT/quantization, bit counting, and
   decoder-side reconstruction (residual IDCT added back onto the
   motion-compensated prediction).
   NOTE(review): partial listing -- border clamping for bx==0/by==0, the
   best-candidate bookkeeping inside the search loops and the final t-loop
   header are among the missing lines. */
964 void prepareMVDBlock(VIDEOSTREAM*s, mvdblockdata_t*data, int bx, int by, block_t* fb, int*bits)
965 { /* consider mvd(x,y)-block */
975 predictmvd(s,bx,by,&predictmvdx,&predictmvdy);
983 int bestx=0,besty=0,bestbits=65536;
984 int startx=-32,endx=31;
985 int starty=-32,endy=31;
/* don't search past the right/bottom picture border */
989 if(bx==s->bbx-1) endx=0;
990 if(by==s->bby-1) endy=0;
/* coarse pass, step 4 */
992 for(hx=startx;hx<=endx;hx+=4)
993 for(hy=starty;hy<=endy;hy+=4)
996 bits = getmvdbits(s,fb,bx,by,hx,hy);
/* fine pass: shrink the window to +-3 around the coarse optimum */
1004 if(bestx-3 > startx) startx = bestx-3;
1005 if(besty-3 > starty) starty = besty-3;
1006 if(bestx+3 < endx) endx = bestx+3;
1007 if(besty+3 < endy) endy = besty+3;
1009 for(hx=startx;hx<=endx;hx++)
1010 for(hy=starty;hy<=endy;hy++)
1013 bits = getmvdbits(s,fb,bx,by,hx,hy);
1020 data->movex = bestx;
1021 data->movey = besty;
/* re-derive the residual for the winning vector */
1024 memcpy(&fbdiff, fb, sizeof(block_t));
1025 getmvdregion(&data->fbold, s->oldpic, bx, by, data->movex, data->movey, s->linex);
1026 yuvdiff(&fbdiff, &data->fbold);
1027 dodctandquant(&fbdiff, &data->b, 0, s->quant);
1028 getblockpatterns(&data->b, &y, &c, 0);
1030 data->xindex = mvd2index(predictmvdx, predictmvdy, data->movex, data->movey, 0);
1031 data->yindex = mvd2index(predictmvdx, predictmvdy, data->movex, data->movey, 1);
/* total bit cost: mode VLC (inter, mode 0), inverted cbpy, two mvd VLCs,
   six coefficient blocks */
1034 *bits += mcbpc_inter[0*4+c].len;
1035 *bits += cbpy[y^15].len;
1036 *bits += mvd[data->xindex].len; // (0,0)
1037 *bits += mvd[data->yindex].len;
1038 *bits += coefbits8x8(data->b.y1, 0);
1039 *bits += coefbits8x8(data->b.y2, 0);
1040 *bits += coefbits8x8(data->b.y3, 0);
1041 *bits += coefbits8x8(data->b.y4, 0);
1042 *bits += coefbits8x8(data->b.u, 0);
1043 *bits += coefbits8x8(data->b.v, 0);
1046 /* -- reconstruction -- */
1047 memcpy(&data->reconstruction, &data->b, sizeof(block_t));
1048 dequantize(&data->reconstruction, 0, s->quant);
1049 doidct(&data->reconstruction);
/* add the decoded residual back onto the motion-compensated prediction */
1051 data->reconstruction.y1[t] =
1052 truncate256(data->reconstruction.y1[t] + (int)data->fbold.y1[t]);
1053 data->reconstruction.y2[t] =
1054 truncate256(data->reconstruction.y2[t] + (int)data->fbold.y2[t]);
1055 data->reconstruction.y3[t] =
1056 truncate256(data->reconstruction.y3[t] + (int)data->fbold.y3[t]);
1057 data->reconstruction.y4[t] =
1058 truncate256(data->reconstruction.y4[t] + (int)data->fbold.y4[t]);
1059 data->reconstruction.u[t] =
1060 truncate256(data->reconstruction.u[t] + (int)data->fbold.u[t]);
1061 data->reconstruction.v[t] =
1062 truncate256(data->reconstruction.v[t] + (int)data->fbold.v[t]);
/* Emit the INTER block prepared by prepareMVDBlock: COD=0, mode/cbpc VLC,
   inverted cbpy, the two mvd VLCs, then six coefficient blocks (no DC).
   Records the chosen vector in s->mvdx/mvdy (it becomes a predictor for
   neighbouring blocks), copies the reconstruction into s->current, and
   asserts the actual bit count matches the prepared estimate. */
1066 int writeMVDBlock(VIDEOSTREAM*s, TAG*tag, mvdblockdata_t*data)
1070 int has_dc=0; // mvd w/o mvd24
1071 /* mvd (0,0) block (mode=0) */
1077 getblockpatterns(&data->b, &y, &c, has_dc);
1078 swf_SetBits(tag,0,1); bits += 1; // COD
1079 bits += codehuffman(tag, mcbpc_inter, mode*4+c);
1080 bits += codehuffman(tag, cbpy, y^15);
1083 bits += codehuffman(tag, mvd, data->xindex);
1084 bits += codehuffman(tag, mvd, data->yindex);
1087 bits += encode8x8(tag, data->b.y1, has_dc, y&8);
1088 bits += encode8x8(tag, data->b.y2, has_dc, y&4);
1089 bits += encode8x8(tag, data->b.y3, has_dc, y&2);
1090 bits += encode8x8(tag, data->b.y4, has_dc, y&1);
1093 bits += encode8x8(tag, data->b.u, has_dc, c&2);
1094 bits += encode8x8(tag, data->b.v, has_dc, c&1);
1096 s->mvdx[by*s->bbx+bx] = data->movex;
1097 s->mvdy[by*s->bbx+bx] = data->movey;
1099 copy_block_pic(s, s->current, &data->reconstruction, data->bx, data->by);
1100 assert(data->bits == bits);
/* Mode decision for one P-frame macroblock: skip (COD=1) when the previous
   reconstructed frame is already at least as close to the source as the
   best INTRA coding; otherwise pick whichever of INTRA / motion-compensated
   INTER costs fewer bits and emit it. */
1104 static int encode_PFrame_block(TAG*tag, VIDEOSTREAM*s, int bx, int by)
1111 iblockdata_t iblock;
1112 mvdblockdata_t mvdblock;
1114 getregion(&fb, s->current, bx, by, s->linex);
1115 prepareIBlock(s, &iblock, bx, by, &fb, &bits_i, 0);
1117 /* encoded last frame <=> original current block: */
1118 diff1 = compare_pic_pic(s, s->current, s->oldpic, bx, by);
1119 /* encoded current frame <=> original current block: */
1120 diff2 = compare_pic_block(s, &iblock.reconstruction, s->current, bx, by);
1122 if(diff1 <= diff2) {
1123 swf_SetBits(tag, 1,1); /* cod=1, block skipped */
1124 /* copy the region from the last frame so that we have a complete reconstruction */
1125 copyregion(s, s->current, s->oldpic, bx, by);
1128 prepareMVDBlock(s, &mvdblock, bx, by, &fb, &bits_vxy);
1130 if(bits_i > bits_vxy) {
1131 return writeMVDBlock(s, tag, &mvdblock);
1133 return writeIBlock(s, tag, &iblock);
1137 /* should be called encode_IFrameBlock */
/* I-frame macroblock: no mode decision, always prepare and emit INTRA.
   Note the stride passed is s->width here (s->linex in the P-frame path;
   both appear to hold the padded width -- TODO confirm they are equal). */
1138 static void encode_IFrame_block(TAG*tag, VIDEOSTREAM*s, int bx, int by)
1144 getregion(&fb, s->current, bx, by, s->width);
1145 prepareIBlock(s, &data, bx, by, &fb, &bits, 1);
1146 writeIBlock(s, tag, &data);
/* Debug-only helper: appends tags that display the given RGBA picture as a
   lossless bitmap shape (ids offset by bmid) at depth 133, replacing the
   previous debug object.  Not part of the encoding path. */
1150 static int bmid = 0;
1152 void setdbgpic(TAG*tag, RGBA*pic, int width, int height)
1157 tag = swf_InsertTag(tag,ST_REMOVEOBJECT2);
1158 swf_SetU16(tag, 133);
1160 tag = swf_InsertTag(tag, ST_DEFINEBITSLOSSLESS);
1161 swf_SetU16(tag, 1000+bmid);
1162 swf_SetLosslessBits(tag, width, height, (void*)pic, BMF_32BIT);
1164 tag = swf_InsertTag(tag, ST_DEFINESHAPE);
1165 swf_SetU16(tag, 2000+bmid);
1166 swf_ShapeSetBitmapRect(tag, 1000+bmid, width, height);
1168 tag = swf_InsertTag(tag,ST_PLACEOBJECT2);
1169 swf_GetMatrix(0,&m);
1171 swf_ObjectPlace(tag, 2000+bmid, 133, &m, 0, 0);
1177 #define TYPE_IFRAME 0
1178 #define TYPE_PFRAME 1
/* Write the Sorenson Spark picture header: 16-bit frame number, 17-bit
   picture start code, version, 8-bit temporal reference, dimensions
   (3-bit code for predefined sizes, otherwise explicit 8- or 16-bit
   width/height), picture type, deblocking flag, quantizer, extra-info
   terminator.  NOTE(review): the switch() head and some case lines of the
   predefined-size table are missing from this listing. */
1180 static void writeHeader(TAG*tag, int width, int height, int frame, int quant, int type)
1183 swf_SetU16(tag, frame);
1184 swf_SetBits(tag, 1, 17); /* picture start code*/
1185 swf_SetBits(tag, 0, 5); /* version=0, version 1 would optimize rle behaviour*/
1186 swf_SetBits(tag, frame, 8); /* time reference */
1188 /* write dimensions, taking advantage of some predefined sizes
1189 if the opportunity presents itself */
1190 i32 = width<<16|height;
1193 case 352<<16|288: swf_SetBits(tag, 2, 3);break;
1194 case 176<<16|144: swf_SetBits(tag, 3, 3);break;
1195 case 128<<16|96: swf_SetBits(tag, 4, 3);break;
1196 case 320<<16|240: swf_SetBits(tag, 5, 3);break;
1197 case 160<<16|120: swf_SetBits(tag, 6, 3);break;
1199 if(width>255 || height>255) {
1200 swf_SetBits(tag, 1, 3);
1201 swf_SetBits(tag, width, 16);
1202 swf_SetBits(tag, height, 16);
1204 swf_SetBits(tag, 0, 3);
1205 swf_SetBits(tag, width, 8);
1206 swf_SetBits(tag, height, 8);
1210 swf_SetBits(tag, type, 2); /* I-Frame or P-Frame */
1211 swf_SetBits(tag, 0, 1); /* No deblock filter */
1214 swf_SetBits(tag, quant, 5); /* quantizer (1-31), may be updated later on*/
1215 swf_SetBits(tag, 0, 1); /* No extra info */
/* Encode one complete I-frame into a VIDEOFRAME tag body: clamp quant to
   1..31, write the picture header, convert the RGBA input to YUV into
   s->current, INTRA-encode every macroblock, then keep the reconstruction
   as s->oldpic for the next P-frame. */
1218 void swf_SetVideoStreamIFrame(TAG*tag, VIDEOSTREAM*s, RGBA*pic, int quant)
1222 if(quant<1) quant=1;
1223 if(quant>31) quant=31;
1226 writeHeader(tag, s->width, s->height, s->frame, quant, TYPE_IFRAME);
1228 /* fixme: should fill with 0,128,128, not 0,0,0 */
1229 memset(s->current, 0, s->linex*s->height*sizeof(YUV));
1231 rgb2yuv(s->current, pic, s->linex, s->olinex, s->owidth, s->oheight);
1233 for(by=0;by<s->bby;by++)
1235 for(bx=0;bx<s->bbx;bx++)
1237 encode_IFrame_block(tag, s, bx, by);
/* reconstruction of this frame becomes the reference for the next one */
1241 memcpy(s->oldpic, s->current, s->width*s->height*sizeof(YUV));
/* Emit an I-frame of a synthetic test pattern (flat y=0 background, a
   second region set to y=64 -- the loop bounds selecting that region are
   among the missing lines, TODO confirm).  u=v=128 means zero chroma. */
1243 void swf_SetVideoStreamBlackFrame(TAG*tag, VIDEOSTREAM*s)
1250 writeHeader(tag, s->width, s->height, s->frame, quant, TYPE_IFRAME);
1252 for(y=0;y<s->height;y++)
1253 for(x=0;x<s->width;x++) {
1254 s->current[y*s->width+x].y = 0;
1255 s->current[y*s->width+x].u = 128;
1256 s->current[y*s->width+x].v = 128;
1260 s->current[y*s->width+x].y = 64;
1261 s->current[y*s->width+x].u = 128;
1262 s->current[y*s->width+x].v = 128;
1265 for(by=0;by<s->bby;by++)
1267 for(bx=0;bx<s->bbx;bx++)
1269 encode_IFrame_block(tag, s, bx, by);
1273 memcpy(s->oldpic, s->current, s->width*s->height*sizeof(YUV));
/* Encode one P-frame: header, RGB->YUV conversion, reset of the
   motion-vector predictor arrays, per-macroblock skip/INTRA/INTER coding,
   then store the reconstruction as the next reference frame.  The trailing
   yuv2rgb/setdbgpic calls are a debug path (presumably conditionally
   compiled -- the guard lines are missing, TODO confirm). */
1276 void swf_SetVideoStreamPFrame(TAG*tag, VIDEOSTREAM*s, RGBA*pic, int quant)
1280 if(quant<1) quant=1;
1281 if(quant>31) quant=31;
1284 writeHeader(tag, s->width, s->height, s->frame, quant, TYPE_PFRAME);
1286 /* fixme: should fill with 0,128,128, not 0,0,0 */
1287 memset(s->current, 0, s->linex*s->height*sizeof(YUV));
1289 rgb2yuv(s->current, pic, s->linex, s->olinex, s->owidth, s->oheight);
1290 memset(s->mvdx, 0, s->bbx*s->bby*sizeof(int));
1291 memset(s->mvdy, 0, s->bbx*s->bby*sizeof(int));
1293 for(by=0;by<s->bby;by++)
1295 for(bx=0;bx<s->bbx;bx++)
1297 encode_PFrame_block(tag, s, bx, by);
1301 memcpy(s->oldpic, s->current, s->width*s->height*sizeof(YUV));
1305 yuv2rgb(pic, s->current, s->linex, s->width, s->height);
1306 setdbgpic(tag, pic, s->width, s->height);
/* Special-purpose P-frame writer driven by caller-supplied data instead of
   motion search: movex/movey give a vector per macroblock, pictures (when
   non-NULL) gives a 16x16 RGBA replacement block.  Vector-(0,0) blocks
   with no picture are skipped (COD=1); otherwise the block is coded with
   the given vector and/or INTRA picture data.
   NOTE(review): heavily subsampled -- mode/has_dc selection, clamping of
   the upper vector bounds and several branch bodies are missing. */
1311 void swf_SetVideoStreamMover(TAG*tag, VIDEOSTREAM*s, signed char* movex, signed char* movey, void**pictures, int quant)
1316 if(quant<1) quant=1;
1317 if(quant>31) quant=31;
1320 writeHeader(tag, s->width, s->height, s->frame, quant, TYPE_PFRAME);
1322 memset(s->mvdx, 0, s->bbx*s->bby*sizeof(int));
1323 memset(s->mvdy, 0, s->bbx*s->bby*sizeof(int));
1325 for(by=0;by<s->bby;by++)
1327 for(bx=0;bx<s->bbx;bx++)
1329 int predictmvdx=0, predictmvdy=0;
1330 int mvx=movex[by*s->bbx+bx];
1331 int mvy=movey[by*s->bbx+bx];
1332 void*picture = pictures?pictures[by*s->bbx+bx]:0;
1334 if(mvx<-32) mvx=-32;
1336 if(mvy<-32) mvy=-32;
1339 if(mvx == 0 && mvy == 0 && picture == 0) {
1340 swf_SetBits(tag,1,1); // COD skip
1348 swf_SetBits(tag,0,1); // COD
1350 if(mvx==0 && mvy==0 && picture) { // only picture
/* convert the caller's 16x16 RGBA tile and INTRA-code it */
1356 RGBA* picblock = (RGBA*)picture;
1357 rgb2yuv(pic, picblock,16,16,16,16);
1358 /* TODO: if has_dc!=1, subtract 128 from rgb values */
1359 getregion(&b, pic, 0,0,16);
1360 dodctandquant(&b, &b2, 1, s->quant);
1361 getblockpatterns(&b2, &y, &c, 1);
1366 codehuffman(tag, mcbpc_inter, mode*4+c);
1367 codehuffman(tag, cbpy, mode==3?y:y^15);
1370 /* has motion vector */
1371 predictmvd(s,bx,by,&predictmvdx,&predictmvdy);
1372 codehuffman(tag, mvd, mvd2index(predictmvdx, predictmvdy, mvx, mvy, 0));
1373 codehuffman(tag, mvd, mvd2index(predictmvdx, predictmvdy, mvx, mvy, 1));
1374 s->mvdx[by*s->bbx+bx] = mvx;
1375 s->mvdy[by*s->bbx+bx] = mvy;
1379 encode8x8(tag, b2.y1, has_dc, y&8);
1380 encode8x8(tag, b2.y2, has_dc, y&4);
1381 encode8x8(tag, b2.y3, has_dc, y&2);
1382 encode8x8(tag, b2.y4, has_dc, y&1);
1383 encode8x8(tag, b2.u, has_dc, c&2);
1384 encode8x8(tag, b2.v, has_dc, c&1);
/* Self-test: builds a synthetic 256x256 picture and asserts that
   (1) compare_pic_pic of a picture with itself is zero,
   (2) a block round-tripped through getregion/copy_block_pic diffs to zero,
   (3) compare_pic_block and compare_pic_pic agree on the same data.
   NOTE(review): several asserts and loop lines are missing from this
   listing; pic is also never freed here (test code). */
1394 void test_copy_diff()
1397 VIDEOSTREAM* s = &stream;
1399 RGBA*pic = malloc(256*256*sizeof(RGBA));
1404 for(y=0;y<256;y++) {
1405 pic[y*256+x].r = x*y;
1406 pic[y*256+x].g = x+y;
1407 pic[y*256+x].b = (x+1)%(y+1);
1409 tag = swf_InsertTag(0, ST_DEFINEVIDEOSTREAM);
1410 swf_SetU16(tag, 33);
1411 swf_SetVideoStreamDefine(tag, s, 10, 256, 256);
1413 rgb2yuv(s->current, pic, s->linex, s->olinex, s->owidth, s->oheight);
1414 for(by=0;by<16;by++)
1415 for(bx=0;bx<16;bx++) {
1417 /* test1: does compare pic pic return zero for identical blocks? */
1418 diff1 = compare_pic_pic(s, s->current, s->current, bx, by);
1420 /* test2: do blocks which are copied back return zero diff? */
1421 getregion(&fb, s->current, bx, by, s->linex);
1422 copy_block_pic(s, s->oldpic, &fb, bx, by);
1423 diff1 = compare_pic_block(s, &fb, s->oldpic, bx, by);
1425 /* test3: does compare_pic_block return the same result as compare_pic_pic? */
1426 getregion(&fb, s->current, 15-bx, 15-by, s->linex);
1427 copy_block_pic(s, s->oldpic, &fb, bx, by);
1428 diff1 = compare_pic_block(s, &fb, s->current, bx, by);
1429 diff2 = compare_pic_pic(s, s->current, s->oldpic, bx, by);
1430 assert(diff1 == diff2);
/* Stub to satisfy the linker for the standalone test build; real
   implementation lives elsewhere in swftools. */
1439 int compileSWFActionCode(const char *script, int version, void**data, int*len) {return 0;}
1454 pic = malloc(width*height*4);
1455 memset(pic, 0, width*height*4);
1457 memset(&swf,0,sizeof(SWF));
1458 memset(&obj,0,sizeof(obj));
1460 swf.fileVersion = 6;
1461 swf.frameRate = 15*256;
1462 swf.movieSize.xmax = 20*width;
1463 swf.movieSize.ymax = 20*height;
1465 swf.firstTag = swf_InsertTag(NULL,ST_SETBACKGROUNDCOLOR);
1467 rgb.r = 0x00;rgb.g = 0x30;rgb.b = 0xff;
1468 swf_SetRGB(tag,&rgb);
1470 tag = swf_InsertTag(tag, ST_DEFINEVIDEOSTREAM);
1472 swf_SetVideoStreamDefine(tag, &stream, frames, width, height);
1473 stream.do_motion = 0;
1475 for(y=0;y<height;y++) {
1476 for(x=0;x<width;x++) {
1480 pic[y*width+x].r = 0;
1481 pic[y*width+x].g = 0;
1482 pic[y*width+x].b = 0;
1483 pic[y*width+x].a = 0;
1486 tag = swf_InsertTag(tag, ST_VIDEOFRAME);
1489 swf_SetVideoStreamIFrame(tag, &stream, pic, 7);
1491 tag = swf_InsertTag(tag, ST_PLACEOBJECT2);
1492 swf_GetPlaceObject(0, &obj);
1497 swf_SetPlaceObject(tag,&obj);
1499 tag = swf_InsertTag(tag, ST_SHOWFRAME);
1501 swf_VideoStreamClear(&stream);
1503 tag = swf_InsertTag(tag, ST_END);
1505 int fi = open("black.swf", O_WRONLY|O_CREAT|O_TRUNC, 0644);
1506 if(swf_WriteSWC(fi,&swf)<0) {
1507 fprintf(stderr,"WriteSWF() failed.\n");
1513 int main(int argn, char*argv[])
1519 RGBA* pic, *pic2, rgb;
1526 char* fname = "/home/kramm/pics/peppers_fromjpg.png";
1527 //char* fname = "/home/kramm/pics/baboon.png";
1537 memset(&stream, 0, sizeof(stream));
1539 getPNG(fname, &width, &height, &data);
1540 pic = (RGBA*)malloc(width*height*sizeof(RGBA));
1541 pic2 = (RGBA*)malloc(width*height*sizeof(RGBA));
1542 memcpy(pic, data, width*height*sizeof(RGBA));
1545 printf("Compressing %s, size %dx%d\n", fname, width, height);
1547 memset(&swf,0,sizeof(SWF));
1548 memset(&obj,0,sizeof(obj));
1550 swf.fileVersion = 6;
1551 swf.frameRate = framerate*256;
1552 swf.movieSize.xmax = 20*width*2;
1553 swf.movieSize.ymax = 20*height;
1555 swf.firstTag = swf_InsertTag(NULL,ST_SETBACKGROUNDCOLOR);
1557 rgb.r = 0x00;rgb.g = 0x30;rgb.b = 0xff;
1558 swf_SetRGB(tag,&rgb);
1560 tag = swf_InsertTag(tag, ST_DEFINEVIDEOSTREAM);
1561 swf_SetU16(tag, 33);
1562 swf_SetVideoStreamDefine(tag, &stream, frames, width, height);
1563 stream.do_motion = 0;
1567 for(t=0;t<frames;t++)
1571 for(y=0,yy=0;y<height;y++,yy+=d) {
1572 RGBA*line = &pic[((int)yy)*width];
1573 for(x=0,xx=0;x<width;x++,xx+=d) {
1576 if(dx==0 && dy==0) {
1577 pic2[y*width+x] = line[((int)xx)];
1578 pic2[y*width+x].r+=2;
1579 pic2[y*width+x].g+=2;
1580 pic2[y*width+x].b+=2;
1582 //pic2[y*width+x] = line[((int)xx)];
1583 //pic2[y*width+x].r = lrand48();//line[((int)xx)];
1584 //pic2[y*width+x].g = lrand48();//line[((int)xx)];
1585 //pic2[y*width+x].b = lrand48();//line[((int)xx)];
1586 pic2[y*width+x].r = 0;
1587 pic2[y*width+x].g = 0;
1588 pic2[y*width+x].b = 0;
1590 /*if(dx==16 && dy==16)
1591 pic2[y*width+x] = pic[(y-16*16)*width+(x-16*16)];*/
1592 /*if(dx<=0 && dy<=0) {
1593 pic2[y*width+x] = line[((int)xx)];*/
1594 /*if(x==0 && y==0) {
1596 memset(&color, 0, sizeof(RGBA));
1597 pic2[y*width+x] = color;*/
1600 color.r = lrand48();
1601 color.g = lrand48();
1602 color.b = lrand48();
1604 pic2[y*width+x] = color;
1608 printf("frame:%d\n", t);fflush(stdout);
1613 tag = swf_InsertTag(tag, ST_VIDEOFRAME);
1614 swf_SetU16(tag, 33);
1616 swf_SetVideoStreamIFrame(tag, &stream, pic2, 7);
1618 swf_SetVideoStreamPFrame(tag, &stream, pic2, 7);
1621 tag = swf_InsertTag(tag, ST_PLACEOBJECT2);
1622 swf_GetPlaceObject(0, &obj);
1631 swf_SetPlaceObject(tag,&obj);
1633 tag = swf_InsertTag(tag, ST_SHOWFRAME);
1636 swf_VideoStreamClear(&stream);
1638 tag = swf_InsertTag(tag, ST_END);
1640 fi = open("video3.swf", O_WRONLY|O_CREAT|O_TRUNC, 0644);
1641 if(swf_WriteSWC(fi,&swf)<0) {
1642 fprintf(stderr,"WriteSWF() failed.\n");