2 Routines for handling h.263 video tags
4 Part of the swftools package.
6 Copyright (c) 2003 Matthias Kramm <kramm@quiss.org>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
26 #include "../rfxswf.h"
27 #include "h263tables.h"
31 - use prepare* / write* in encode_IFrame_block
32 - check whether mvd steps of 2 lead to (much) smaller results
/* Write the body of a DEFINEVIDEOSTREAM tag (frame count, dimensions, smoothing
   off, codec id 2 = Sorenson Spark / H.263) and initialize the VIDEOSTREAM
   encoder state. Internal dimensions are rounded up to multiples of 16 (one
   macroblock); current/old picture buffers and per-macroblock motion vector
   arrays are allocated and zeroed.
   NOTE(review): excerpt — intermediate source lines are missing from this view;
   malloc results are not checked here (at least not on the visible lines). */
38 void swf_SetVideoStreamDefine(TAG*tag, VIDEOSTREAM*stream, U16 frames, U16 width, U16 height)
40     swf_SetU16(tag, frames);
41     swf_SetU16(tag, width);
42     swf_SetU16(tag, height);
43     //swf_SetU8(tag, 1); /* smoothing on */
44     swf_SetU8(tag, 0); /* smoothing off */
45     swf_SetU8(tag, 2); /* codec = h.263 sorenson spark */
50     memset(stream, 0, sizeof(VIDEOSTREAM));
/* o* fields keep the caller-visible ("original") geometry before rounding */
51     stream->olinex = width;
52     stream->owidth = width;
53     stream->oheight = height;
/* round up to a multiple of 16; presumably width gets the same treatment on a
   line missing from this view — TODO confirm */
55     height+=15;height&=~15;
56     stream->linex = width;
57     stream->width = width;
58     stream->height = height;
59     stream->bbx = width/16;
60     stream->bby = height/16;
61     stream->current = (YUV*)malloc(width*height*sizeof(YUV));
62     stream->oldpic = (YUV*)malloc(width*height*sizeof(YUV));
63     stream->mvdx = (int*)malloc(stream->bbx*stream->bby*sizeof(int));
64     stream->mvdy = (int*)malloc(stream->bbx*stream->bby*sizeof(int));
65     stream->do_motion = 0;
67     memset(stream->oldpic, 0, width*height*sizeof(YUV));
68     memset(stream->current, 0, width*height*sizeof(YUV));
/* sanity: the rounded dimensions must be exact multiples of the block grid */
70     assert((stream->width&15) == 0);
71     assert((stream->height&15) == 0);
72     assert((stream->bbx*16) == stream->width);
73     assert((stream->bby*16) == stream->height);
/* Release all buffers owned by a VIDEOSTREAM and null the pointers so a
   double call (or later accidental use) is harmless. */
75 void swf_VideoStreamClear(VIDEOSTREAM*stream)
77     free(stream->oldpic);stream->oldpic = 0;
78     free(stream->current);stream->current = 0;
79     free(stream->mvdx);stream->mvdx=0;
80     free(stream->mvdy);stream->mvdy=0;
83 typedef struct _block_t
93 static inline int truncate256(int a)
/* Extract one 16x16 macroblock at block position (posx,posy) from pic into bb:
   four 8x8 luma planes (y1..y4) and 2:1-subsampled chroma (u/v averaged over
   each 2x2 pixel group).
   NOTE(review): excerpt — the loop headers and p2 setup are missing from this view. */
100 static void getregion(block_t* bb, YUV*pic, int posx, int posy, int linex)
108     p1 = &pic[posy*linex+posx];
/* chroma: average the 2x2 neighborhood to subsample to 8x8 per macroblock */
112 	    bb->u[i] = (p2[x*2].u + p2[x*2+1].u + p2[linex+x*2].u + p2[linex+x*2+1].u)/4;
113 	    bb->v[i] = (p2[x*2].v + p2[x*2+1].v + p2[linex+x*2].v + p2[linex+x*2+1].v)/4;
/* luma: y2/y3/y4 are the right, lower and lower-right 8x8 quadrants */
115 	    bb->y2[i] = p1[x+8].y;
116 	    bb->y3[i] = p1[linex*8+x].y;
117 	    bb->y4[i] = p1[linex*8+x+8].y;
125 /* This function is pretty complex. Let's hope it works correctly */
/* Like getregion(), but fetches the 16x16 block displaced by the motion vector
   (mvdx,mvdy), given in half-pel units. yhp/uvhp encode which half-pel
   interpolations (horizontal, vertical, both) are needed for luma and chroma.
   NOTE(review): excerpt — loop headers and several interpolation branches are
   missing from this view. */
126 static void getmvdregion(block_t* bb, YUV*pic, int posx, int posy, int mvdx, int mvdy, int linex)
/* integer part of the motion vector; (mvdx&~1)/2 rounds toward -inf, which
   plain mvdx/2 would not do for negative values */
134     posx = posx*16 + ((mvdx&~1)/2); //works also for negative mvdx (unlike mvdx/2)
135     posy = posy*16 + ((mvdy&~1)/2);
136     p1 = &pic[posy*linex+posx];
/* chroma is sampled on the even grid */
137     p2 = &pic[(posy&~1)*linex+(posx&~1)];
138     uvhp = ((mvdx&1)|((mvdx>>1)&1))|((mvdy&2)|((mvdy&1)<<1));
139     yhp = ((mvdy&1)<<1|(mvdx&1));
/* luma, no horizontal half-pel: copy (yhp==0) or average two rows (yhp==2) */
142     if(yhp==0 || yhp==2) {
145 		bb->y1[yy] = p1[x].y;
146 		bb->y2[yy] = p1[x+8].y;
147 		bb->y3[yy] = p1[linex*8+x].y;
148 		bb->y4[yy] = p1[linex*8+x+8].y;
/* second row contribution for the vertical half-pel case */
156 		bb->y1[yy] += p1[x].y;             bb->y1[yy] /= 2;
157 		bb->y2[yy] += p1[x+8].y;           bb->y2[yy] /= 2;
158 		bb->y3[yy] += p1[linex*8+x].y;     bb->y3[yy] /= 2;
159 		bb->y4[yy] += p1[linex*8+x+8].y;   bb->y4[yy] /= 2;
/* luma with horizontal half-pel: sum horizontal pairs, divide later */
164     } else if(yhp==1 || yhp==3) {
167 		bb->y1[yy] = (p1[x].y + p1[x+1].y);
168 		bb->y2[yy] = (p1[x+8].y + p1[x+8+1].y);
169 		bb->y3[yy] = (p1[linex*8+x].y + p1[linex*8+x+1].y);
170 		bb->y4[yy] = (p1[linex*8+x+8].y + p1[linex*8+x+8+1].y);
/* both half-pels: second row of horizontal pairs, total of 4 samples each */
177 		bb->y1[yy] += (p1[x].y + p1[x+1].y);                   bb->y1[yy]/=4;
178 		bb->y2[yy] += (p1[x+8].y + p1[x+8+1].y);               bb->y2[yy]/=4;
179 		bb->y3[yy] += (p1[linex*8+x].y + p1[linex*8+x+1].y);   bb->y3[yy]/=4;
180 		bb->y4[yy] += (p1[linex*8+x+8].y + p1[linex*8+x+8+1].y); bb->y4[yy]/=4;
185 		bb->y1[yy]/=2; bb->y2[yy]/=2; bb->y3[yy]/=2; bb->y4[yy]/=2;
/* chroma: always averages a 2x2 group; extra terms below add the horizontally
   or vertically shifted group for chroma half-pel positions */
193     if(uvhp==0 || uvhp==2) {
196 		bb->u[uv] = (p2[x*2].u + p2[x*2+1].u + p2[linex+x*2].u + p2[linex+x*2+1].u)/4;
197 		bb->v[uv] = (p2[x*2].v + p2[x*2+1].v + p2[linex+x*2].v + p2[linex+x*2+1].v)/4;
204 		bb->u[uv] += (p2[x*2].u + p2[x*2+1].u + p2[linex+x*2].u + p2[linex+x*2+1].u)/4;
205 		bb->v[uv] += (p2[x*2].v + p2[x*2+1].v + p2[linex+x*2].v + p2[linex+x*2+1].v)/4;
212     } else /* uvhp==1 || uvhp==3 */ {
215 		bb->u[uv] = ((p2[x*2].u + p2[x*2+1].u + p2[linex+x*2].u + p2[linex+x*2+1].u)/4+
216 			     (p2[x*2+2].u + p2[x*2+1+2].u + p2[linex+x*2+2].u + p2[linex+x*2+1+2].u)/4);
217 		bb->v[uv] = ((p2[x*2].v + p2[x*2+1].v + p2[linex+x*2].v + p2[linex+x*2+1].v)/4+
218 			     (p2[x*2+2].v + p2[x*2+1+2].v + p2[linex+x*2+2].v + p2[linex+x*2+1+2].v)/4);
225 		bb->u[uv] += ((p2[x*2].u + p2[x*2+1].u + p2[linex+x*2].u + p2[linex+x*2+1].u)/4+
226 			     (p2[x*2+2].u + p2[x*2+1+2].u + p2[linex+x*2+2].u + p2[linex+x*2+1+2].u)/4);
227 		bb->v[uv] += ((p2[x*2].v + p2[x*2+1].v + p2[linex+x*2].v + p2[linex+x*2+1].v)/4+
228 			     (p2[x*2+2].v + p2[x*2+1+2].v + p2[linex+x*2+2].v + p2[linex+x*2+1+2].v)/4);
/* Fixed-point RGB -> YUV conversion (8.8 arithmetic, coefficients from the
   floating-point formulas kept in the comment below). dlinex/slinex allow
   destination and source to have different strides. */
244 static void rgb2yuv(YUV*dest, RGBA*src, int dlinex, int slinex, int width, int height)
247     for(y=0;y<height;y++) {
248 	for(x=0;x<width;x++) {
250 	    r = src[y*slinex+x].r;
251 	    g = src[y*slinex+x].g;
252 	    b = src[y*slinex+x].b;
253 	    /*dest[y*dlinex+x].y = (r*0.299 + g*0.587 + b*0.114);
254 	    dest[y*dlinex+x].u = (r*-0.169 + g*-0.332 + b*0.500 + 128.0);
255 	    dest[y*dlinex+x].v = (r*0.500 + g*-0.419 + b*-0.0813 + 128.0);*/
257 	    //dest[y*dlinex+x].y = 128;//(r*((int)( 0.299*256)) + g*((int)( 0.587*256)) + b*((int)( 0.114 *256)))>>8;
258 	    dest[y*dlinex+x].y = (r*((int)( 0.299*256)) + g*((int)( 0.587*256)) + b*((int)( 0.114 *256)))>>8;
259 	    dest[y*dlinex+x].u = (r*((int)(-0.169*256)) + g*((int)(-0.332*256)) + b*((int)( 0.500 *256))+ 128*256)>>8;
260 	    dest[y*dlinex+x].v = (r*((int)( 0.500*256)) + g*((int)(-0.419*256)) + b*((int)(-0.0813*256))+ 128*256)>>8;
/* Copy the 16x16 macroblock at block position (bx,by) from src to dest.
   NOTE(review): excerpt — the row loop around the memcpy is missing from this view. */
265 static void copyregion(VIDEOSTREAM*s, YUV*dest, YUV*src, int bx, int by)
267     YUV*p1 = &dest[by*s->linex*16+bx*16];
268     YUV*p2 = &src[by*s->linex*16+bx*16];
/* one 16-pixel row per iteration */
271 	memcpy(p1, p2, 16*sizeof(YUV));
272 	p1+=s->linex;p2+=s->linex;
/* Fixed-point YUV -> RGB conversion; results are clamped to 0..255 via
   truncate256(). Source and destination share the same stride (linex). */
276 static void yuv2rgb(RGBA*dest, YUV*src, int linex, int width, int height)
279     for(y=0;y<height;y++) {
280 	for(x=0;x<width;x++) {
282 	    u = src[y*linex+x].u;
283 	    v = src[y*linex+x].v;
284 	    yy = src[y*linex+x].y;
285 	    dest[y*linex+x].r = truncate256(yy + ((360*(v-128))>>8));
286 	    dest[y*linex+x].g = truncate256(yy - ((88*(u-128)+183*(v-128))>>8));
287 	    dest[y*linex+x].b = truncate256(yy + ((455 * (u-128))>>8));
/* Write a block_t back into a YUV picture at block position (bx,by): the four
   8x8 luma quadrants go to their pixel positions, and each chroma sample is
   replicated over its 2x2 pixel group (inverse of the subsampling in getregion).
   NOTE(review): excerpt — the x/y loop headers are missing from this view. */
291 static void copy_block_pic(VIDEOSTREAM*s, YUV*dest, block_t*b, int bx, int by)
293     YUV*p1 = &dest[(by*16)*s->linex+bx*16];
294     YUV*p2 = &dest[(by*16+8)*s->linex+bx*16];
299 	    p1[x+0].u = b->u[(y/2)*8+(x/2)];
300 	    p1[x+0].v = b->v[(y/2)*8+(x/2)];
301 	    p1[x+0].y = b->y1[y*8+x];
302 	    p1[x+8].u = b->u[(y/2)*8+(x/2)+4];
303 	    p1[x+8].v = b->v[(y/2)*8+(x/2)+4];
304 	    p1[x+8].y = b->y2[y*8+x];
305 	    p2[x+0].u = b->u[(y/2+4)*8+(x/2)];
306 	    p2[x+0].v = b->v[(y/2+4)*8+(x/2)];
307 	    p2[x+0].y = b->y3[y*8+x];
308 	    p2[x+8].u = b->u[(y/2+4)*8+(x/2)+4];
309 	    p2[x+8].v = b->v[(y/2+4)*8+(x/2)+4];
310 	    p2[x+8].y = b->y4[y*8+x];
/* Sum-of-absolute-differences between the macroblock (bx,by) of two pictures.
   Chroma differences are weighted at 1/4 of luma in the returned score.
   NOTE(review): excerpt — the pixel loops are missing from this view. */
317 static int compare_pic_pic(VIDEOSTREAM*s, YUV*pp1, YUV*pp2, int bx, int by)
319     int linex = s->width;
320     YUV*p1 = &pp1[by*linex*16+bx*16];
321     YUV*p2 = &pp2[by*linex*16+bx*16];
322     int diffy=0, diffuv = 0;
332 	    diffuv += abs(u)+abs(v);
337     return diffy + diffuv/4;
/* Sum-of-absolute-differences between a block_t and the macroblock (bx,by) of a
   picture. Each subsampled chroma value in b is compared against all four
   pixels of its 2x2 group; chroma is weighted at 1/4 as in compare_pic_pic().
   NOTE(review): excerpt — the loop headers and luma accumulation lines are
   missing from this view. */
340 static int compare_pic_block(VIDEOSTREAM*s, block_t* b, YUV*pic, int bx, int by)
342     int linex = s->width;
343     YUV*y1 = &pic[(by*2)*linex*8+bx*16];
344     YUV*y2 = &pic[(by*2)*linex*8+bx*16+8];
345     YUV*y3 = &pic[(by*2+1)*linex*8+bx*16];
346     YUV*y4 = &pic[(by*2+1)*linex*8+bx*16+8];
348     YUV*uv2 = &y1[linex];
349     int diffy=0, diffuv = 0;
353 	    int yy,u1,v1,u2,v2,u3,v3,u4,v4;
355 	    yy = y1[x].y - b->y1[y8x];
357 	    yy = y2[x].y - b->y2[y8x];
359 	    yy = y3[x].y - b->y3[y8x];
361 	    yy = y4[x].y - b->y4[y8x];
/* one subsampled chroma value vs. its 2x2 pixel group */
363 	    u1 = uv1[x*2].u - b->u[y8x];
364 	    v1 = uv1[x*2].v - b->v[y8x];
365 	    u2 = uv1[x*2+1].u - b->u[y8x];
366 	    v2 = uv1[x*2+1].v - b->v[y8x];
367 	    u3 = uv2[x*2].u - b->u[y8x];
368 	    v3 = uv2[x*2].v - b->v[y8x];
369 	    u4 = uv2[x*2+1].u - b->u[y8x];
370 	    v4 = uv2[x*2+1].v - b->v[y8x];
371 	    diffuv += (abs(u1)+abs(v1));
372 	    diffuv += (abs(u2)+abs(v2));
373 	    diffuv += (abs(u3)+abs(v3));
374 	    diffuv += (abs(u4)+abs(v4));
383     return diffy + diffuv/4;
386 static inline int valtodc(int val)
394 /* TODO: what to do for zero values? skip the block? */
403 static int dctoval(int dc)
416 /* TODO: we could also just let the caller pass only the string table[index] here */
/* Emit the variable-length code table[index] bit by bit; the code is stored as
   an ASCII string of '0'/'1' characters. Presumably returns the number of bits
   written (callers accumulate the return value) — the return statement is
   missing from this view. */
417 static int codehuffman(TAG*tag, struct huffcode*table, int index)
419     /* TODO: !optimize! */
421     while(table[index].code[i]) {
422 	if(table[index].code[i]=='0')
423 	    swf_SetBits(tag, 0, 1);
425 	    swf_SetBits(tag, 1, 1);
/* Quantize one 8x8 coefficient block: AC values are divided by 2*quant (via the
   precomputed reciprocal q) and clamped to the encodable range -127..127; the
   DC coefficient (when has_dc) is mapped through valtodc().
   NOTE(review): excerpt — the coefficient loop header is missing from this view. */
431 static void quantize8x8(int*src, int*dest, int has_dc, int quant)
434     double q = 1.0/(quant*2);
436 	dest[0] = valtodc((int)src[0]); /*DC*/
441 	//dest[t] = (int)src[t];
442 	/* exact: if(quant&1){dest[t] = (dest[t]/quant - 1)/2;}else{dest[t] = ((dest[t]+1)/quant - 1)/2;} */
443 	//if(quant&1){dest[t] = (dest[t]/quant - 1)/2;}else{dest[t] = ((dest[t]+1)/quant - 1)/2;}
444 	//dest[t] = dest[t]/(quant*2);
445 	dest[t] = (int)(src[t]*q);
446 	/* TODO: warn if this happens- the video will be buggy */
447 	if(dest[t]>127) dest[t]=127;
448 	if(dest[t]<-127) dest[t]=-127;
/* Inverse-quantize one 8x8 block in place, per the H.263 reconstruction-level
   formulas (even quant subtracts 1 for odd symmetry), then clip levels to the
   spec range -2048..2047 (paragraph 6.2.2). DC goes through dctoval().
   NOTE(review): excerpt — the sign/zero branches are missing from this view. */
452 static void dequantize8x8(int*b, int has_dc, int quant)
456 	b[0] = dctoval(b[0]); //DC
459     for(t=pos;t<64;t++) {
468 		b[t] = quant*(2*b[t]+1); //-7,8,24,40
470 		b[t] = quant*(2*b[t]+1)-1; //-8,7,23,39
477 	    /* paragraph 6.2.2, "clipping of reconstruction levels": */
478 	    if(b[t]>2047) b[t]=2047;
479 	    if(b[t]<-2048) b[t]=-2048;
/* Return whether the 8x8 block contains any nonzero (AC, and DC if has_dc is
   excluded via the starting position) coefficient.
   NOTE(review): excerpt — the body is mostly missing from this view. */
483 static int hascoef(int*b, int has_dc)
489     for(t=pos;t<64;t++) {
/* Count (without emitting) the number of bits needed to run-length/huffman
   encode one 8x8 coefficient block: find the last nonzero coefficient, then
   for each (run,level,last) triple either use the RLE table entry or the
   escape code (fixed 1+6+8 payload bits).
   NOTE(review): excerpt — several loop/branch lines are missing from this view. */
496 static int coefbits8x8(int*bb, int has_dc)
/* locate last nonzero coefficient */
507     for(last=63;last>=pos;last--) {
514 	int run=0, level=0, islast=0,t;
515 	while(!bb[pos] && pos<last) {
522 	if(level<0) level=-level;
524 	for(t=0;t<RLE_ESCAPE;t++) {
525 	    if(rle_params[t].run == run &&
526 	       rle_params[t].level == level &&
527 	       rle_params[t].last == islast) {
528 		bits += rle[t].len + 1;
/* no table entry: escape code + islast(1) + run(6) + level(8) */
533 	    bits += rle[RLE_ESCAPE].len + 1 + 6 + 8;
/* Emit one 8x8 coefficient block into the tag: optional 8-bit DC, then the
   run-length/huffman coded (run,level,last) triples, falling back to the
   escape code for combinations not in the table. Returns the bit count
   (mirrors coefbits8x8).
   NOTE(review): excerpt — several loop/branch lines are missing from this view. */
542 static int encode8x8(TAG*tag, int*bb, int has_dc, int has_tcoef)
549 	swf_SetBits(tag, bb[0], 8);
556 	/* determine last non-null coefficient */
557 	for(last=63;last>=pos;last--) {
558 	    /* TODO: we could leave out small coefficients
559 		     after a certain point (32?) */
563 	/* blocks without coefficients should not be included
564 	   in the cbpy/cbpc patterns: */
573 	    while(!bb[pos] && pos<last) {
585 	    for(t=0;t<RLE_ESCAPE;t++) {
586 		/* TODO: lookup table */
587 		if(rle_params[t].run == run &&
588 		   rle_params[t].level == level &&
589 		   rle_params[t].last == islast) {
590 		    bits += codehuffman(tag, rle, t);
591 		    swf_SetBits(tag, sign, 1);
/* escape path: explicit islast/run/level fields */
597 		bits += codehuffman(tag, rle, RLE_ESCAPE);
604 		swf_SetBits(tag, islast, 1);
605 		swf_SetBits(tag, run, 6);
606 		swf_SetBits(tag, level, 8); //FIXME: fixme??
/* Quantize all six 8x8 sub-blocks (4 luma + 2 chroma) of a macroblock. */
618 static void quantize(block_t*fb, block_t*b, int has_dc, int quant)
620     quantize8x8(fb->y1, b->y1, has_dc, quant);
621     quantize8x8(fb->y2, b->y2, has_dc, quant);
622     quantize8x8(fb->y3, b->y3, has_dc, quant);
623     quantize8x8(fb->y4, b->y4, has_dc, quant);
624     quantize8x8(fb->u, b->u, has_dc, quant);
625     quantize8x8(fb->v, b->v, has_dc, quant);
/* Forward-DCT all six 8x8 sub-blocks of a macroblock in place. */
628 static void dodct(block_t*fb)
630     dct(fb->y1); dct(fb->y2); dct(fb->y3); dct(fb->y4);
631     dct(fb->u); dct(fb->v);
/* Combined DCT + quantization for a macroblock; one path uses the fused dct2()
   transform, the other quantize() — presumably selected by a branch missing
   from this view. Afterwards all coefficients are clamped to the encodable
   range -127..127 (DC excepted elsewhere). */
640 static void dodctandquant(block_t*fb, block_t*b, int has_dc, int quant)
645 	quantize(fb,b,has_dc,quant);
649     dct2(fb->y1,b->y1); dct2(fb->y2,b->y2); dct2(fb->y3,b->y3); dct2(fb->y4,b->y4);
650     dct2(fb->u,b->u); dct2(fb->v,b->v);
653     /* prepare for encoding (only values in (-127..-1,1..127) are
654        allowed as non-zero, non-dc values */
655 	if(b->y1[t]<-127) b->y1[t]=-127;
656 	if(b->y2[t]<-127) b->y2[t]=-127;
657 	if(b->y3[t]<-127) b->y3[t]=-127;
658 	if(b->y4[t]<-127) b->y4[t]=-127;
659 	if(b->u[t]<-127) b->u[t]=-127;
660 	if(b->v[t]<-127) b->v[t]=-127;
662 	if(b->y1[t]>127) b->y1[t]=127;
663 	if(b->y2[t]>127) b->y2[t]=127;
664 	if(b->y3[t]>127) b->y3[t]=127;
665 	if(b->y4[t]>127) b->y4[t]=127;
666 	if(b->u[t]>127) b->u[t]=127;
667 	if(b->v[t]>127) b->v[t]=127;
/* Inverse transform a macroblock in place: undo the zigzag coefficient order
   into a temporary, run the IDCT on all six sub-blocks, and copy the result
   back. NOTE(review): excerpt — the loop header is missing from this view. */
671 static void doidct(block_t*b)
676 	fb.y1[t] = b->y1[zigzagtable[t]];
677 	fb.y2[t] = b->y2[zigzagtable[t]];
678 	fb.y3[t] = b->y3[zigzagtable[t]];
679 	fb.y4[t] = b->y4[zigzagtable[t]];
680 	fb.u[t] = b->u[zigzagtable[t]];
681 	fb.v[t] = b->v[zigzagtable[t]];
683     idct(fb.y1); idct(fb.y2); idct(fb.y3); idct(fb.y4);
684     idct(fb.u); idct(fb.v);
686     memcpy(b, &fb, sizeof(block_t));
/* Clamp every sample of a macroblock to the 0..255 range (after IDCT the
   reconstruction can slightly over/undershoot). */
689 static void truncateblock(block_t*b)
693 	b->y1[t] = truncate256(b->y1[t]);
694 	b->y2[t] = truncate256(b->y2[t]);
695 	b->y3[t] = truncate256(b->y3[t]);
696 	b->y4[t] = truncate256(b->y4[t]);
697 	b->u[t] = truncate256(b->u[t]);
698 	b->v[t] = truncate256(b->v[t]);
/* Inverse-quantize all six 8x8 sub-blocks of a macroblock. */
702 static void dequantize(block_t*b, int has_dc, int quant)
704     dequantize8x8(b->y1, has_dc, quant);
705     dequantize8x8(b->y2, has_dc, quant);
706     dequantize8x8(b->y3, has_dc, quant);
707     dequantize8x8(b->y4, has_dc, quant);
708     dequantize8x8(b->u, has_dc, quant);
709     dequantize8x8(b->v, has_dc, quant);
/* Build the CBPY (4 bits, one per luma sub-block) and CBPC (2 bits, u/v)
   coded-block-pattern masks: a bit is set iff the sub-block has at least one
   coefficient to transmit. */
712 static void getblockpatterns(block_t*b, int*cbpybits,int*cbpcbits, int has_dc)
717     *cbpybits|=hascoef(b->y1, has_dc)*8;
718     *cbpybits|=hascoef(b->y2, has_dc)*4;
719     *cbpybits|=hascoef(b->y3, has_dc)*2;
720     *cbpybits|=hascoef(b->y4, has_dc)*1;
722     *cbpcbits|=hascoef(b->u, has_dc)*2;
723     *cbpcbits|=hascoef(b->v, has_dc)*1;
/* Emit the 2-bit DQUANT field for a quantizer delta of -1/-2/+1/+2; any other
   value is a programming error (the assert below always fires: strlen()>0 so
   0*strlen(...) == 0). NOTE(review): excerpt — the dquant==-1 branch header is
   missing from this view. */
726 static void setQuant(TAG*tag, int dquant)
733 	swf_SetBits(tag, 0x0, 2);
734     } else if(dquant == -2) {
735 	swf_SetBits(tag, 0x1, 2);
736     } else if(dquant == +1) {
737 	swf_SetBits(tag, 0x2, 2);
738     } else if(dquant == +2) {
739 	swf_SetBits(tag, 0x3, 2);
741 	assert(0*strlen("invalid dquant"));
745 static void change_quant(int quant, int*dquant)
/* In-place macroblock subtraction: a -= b, component-wise. Used to form the
   motion-compensated residual before the DCT. */
751 static void yuvdiff(block_t*a, block_t*b)
755 	a->y1[t] = (a->y1[t] - b->y1[t]);
756 	a->y2[t] = (a->y2[t] - b->y2[t]);
757 	a->y3[t] = (a->y3[t] - b->y3[t]);
758 	a->y4[t] = (a->y4[t] - b->y4[t]);
759 	a->u[t] = (a->u[t] - b->u[t]);
760 	a->v[t] = (a->v[t] - b->v[t]);
/* H.263 motion vector prediction: the predictor for block (bx,by) is the
   component-wise median of the left, top and top-right neighbors' motion
   vectors (with border substitution rules when neighbors don't exist). The
   chained <= comparisons below pick the median without sorting.
   NOTE(review): excerpt — some border-case and assignment lines are missing
   from this view. */
764 static void predictmvd(VIDEOSTREAM*s, int bx, int by, int*px, int*py)
767     int x1,y1,x2,y2,x3,y3;
/* left neighbor */
769     if(bx) {x1=s->mvdx[by*s->bbx+bx-1];
770 	    y1=s->mvdy[by*s->bbx+bx-1];
/* top neighbor */
773     if(by) {x2=s->mvdx[(by-1)*s->bbx+bx];
774 	    y2=s->mvdy[(by-1)*s->bbx+bx];
/* top-right neighbor */
776 		x3=s->mvdx[(by-1)*s->bbx+bx+1];
777 		y3=s->mvdy[(by-1)*s->bbx+bx+1];
782     else {x2=x3=x1;y2=y3=y1;}
/* median of x1,x2,x3 */
784     if((x1 <= x2 && x2 <= x3) ||
785        (x3 <= x2 && x2 <= x1)) {
787     } else if((x2 <= x1 && x1 <= x3) ||
788 	      (x3 <= x1 && x1 <= x2)) {
790     } else if((x1 <= x3 && x3 <= x2) ||
791 	      (x2 <= x3 && x3 <= x1)) {
/* median of y1,y2,y3 */
798     if((y1 <= y2 && y2 <= y3) ||
799        (y3 <= y2 && y2 <= y1)) {
801     } else if((y2 <= y1 && y1 <= y3) ||
802 	      (y3 <= y1 && y1 <= y2)) {
804     } else if((y1 <= y3 && y3 <= y2) ||
805 	      (y2 <= y3 && y3 <= y1)) {
/* predictors must stay in the legal half-pel vector range */
814     assert((x4>=-32 && x4<=31) && (y4>=-32 && y4<=31));
/* Map a motion vector component (x or y, selected by xy) relative to its
   predictor (px,py) into an index of the mvd huffman table.
   NOTE(review): excerpt — the differencing/wrapping lines are missing from
   this view. */
817 static inline int mvd2index(int px, int py, int x, int y, int xy)
/* Fix: the original guard used (x<-32 && x>31), which is impossible — the
   out-of-range diagnostic could never fire. The assert right below shows the
   intended legal range is -32..31, so the debug print must use ||. */
820     if((x<-32 || x>31) || (y<-32 || y>31))
821 	fprintf(stderr, "(%d,%d)\n", x,y);
822     assert((x>=-32 && x<=31) && (y>=-32 && y<=31));
823     //assert((x&1)==0 && (y&1)==0);//for now
824     //assert((x&2)==0 && (y&2)==0);//for now(2)
/* after predictor subtraction and wrapping, the table index is 0..63 */
839     assert(x>=0 && x<64);
843 typedef struct _iblockdata_t
845 block_t b; //transformed quantized coefficients
846 block_t reconstruction;
849 struct huffcode*ctable; //table to use for chrominance encoding (different for i-frames)
850 int iframe; // 1 if this is part of an iframe
853 typedef struct _mvdblockdata_t
857 block_t reconstruction;
/* Evaluate encoding block (bx,by) as an intra block: DCT+quantize the source
   macroblock fb, compute the exact bit cost into *bits, and build the decoder's
   reconstruction (dequantize, IDCT, clamp) so later mode decisions can compare
   against it. The chrominance mcbpc table differs between I- and P-frames.
   NOTE(review): excerpt — some lines (e.g. COD/base-cost setup) are missing
   from this view. */
866 void prepareIBlock(VIDEOSTREAM*s, iblockdata_t*data, int bx, int by, block_t* fb, int*bits, int iframe)
868     /* consider I-block */
872     struct huffcode*ctable;
877     data->iframe = iframe;
/* intra block inside a P-frame uses mcbpc_inter with mode 3 (intra) */
879 	data->ctable = &mcbpc_inter[3*4];
881 	data->ctable = &mcbpc_intra[0];
884     memcpy(&fb_i, fb, sizeof(block_t));
885     dodctandquant(&fb_i, &data->b, 1, s->quant);
886     getblockpatterns(&data->b, &y, &c, 1);
/* exact cost of the headers + six coefficient blocks */
891     *bits += data->ctable[c].len;
892     *bits += cbpy[y].len;
893     *bits += coefbits8x8(data->b.y1, 1);
894     *bits += coefbits8x8(data->b.y2, 1);
895     *bits += coefbits8x8(data->b.y3, 1);
896     *bits += coefbits8x8(data->b.y4, 1);
897     *bits += coefbits8x8(data->b.u, 1);
898     *bits += coefbits8x8(data->b.v, 1);
901     /* -- reconstruction -- */
902     memcpy(&data->reconstruction,&data->b,sizeof(block_t));
903     dequantize(&data->reconstruction, 1, s->quant);
904     doidct(&data->reconstruction);
905     truncateblock(&data->reconstruction);
/* Emit the intra block prepared by prepareIBlock(): COD=0, mcbpc, cbpy, then
   the six 8x8 coefficient blocks. Copies the reconstruction into s->current
   and asserts the actual bit count matches the prepared estimate. */
908 int writeIBlock(VIDEOSTREAM*s, TAG*tag, iblockdata_t*data)
915     getblockpatterns(&data->b, &y, &c, has_dc);
917     swf_SetBits(tag,0,1); bits += 1; // COD
919     bits += codehuffman(tag, data->ctable, c);
920     bits += codehuffman(tag, cbpy, y);
923     bits += encode8x8(tag, data->b.y1, has_dc, y&8);
924     bits += encode8x8(tag, data->b.y2, has_dc, y&4);
925     bits += encode8x8(tag, data->b.y3, has_dc, y&2);
926     bits += encode8x8(tag, data->b.y4, has_dc, y&1);
929     bits += encode8x8(tag, data->b.u, has_dc, c&2);
930     bits += encode8x8(tag, data->b.v, has_dc, c&1);
932     copy_block_pic(s, s->current, &data->reconstruction, data->bx, data->by);
933     assert(data->bits == bits);
/* Cost probe for the motion search: fetch the reference block displaced by
   (hx,hy) half-pels, form the residual against fb, transform+quantize it and
   return the coefficient bit cost (headers not included). */
937 int getmvdbits(VIDEOSTREAM*s,block_t*fb, int bx,int by,int hx,int hy)
943     memcpy(&fbdiff, fb, sizeof(block_t));
944     getmvdregion(&fbold, s->oldpic, bx, by, hx, hy, s->linex);
945     yuvdiff(&fbdiff, &fbold);
946     dodctandquant(&fbdiff, &b, 0, s->quant);
947     bits += coefbits8x8(b.y1, 0);
948     bits += coefbits8x8(b.y2, 0);
949     bits += coefbits8x8(b.y3, 0);
950     bits += coefbits8x8(b.y4, 0);
951     bits += coefbits8x8(b.u, 0);
952     bits += coefbits8x8(b.v, 0);
/* Evaluate encoding block (bx,by) as a motion-compensated (inter) block:
   two-stage motion search (coarse step 4 over the legal -32..31 half-pel
   range, then exhaustive +/-3 refinement around the best candidate), residual
   transform, exact bit-cost accounting into *bits, and decoder reconstruction
   (residual IDCT added back onto the reference block, clamped).
   NOTE(review): excerpt — some loop bodies/bookkeeping lines are missing from
   this view. */
956 void prepareMVDBlock(VIDEOSTREAM*s, mvdblockdata_t*data, int bx, int by, block_t* fb, int*bits)
957 { /* consider mvd(x,y)-block */
967     predictmvd(s,bx,by,&predictmvdx,&predictmvdy);
975     int bestx=0,besty=0,bestbits=65536;
976     int startx=-32,endx=31;
977     int starty=-32,endy=31;
/* don't search past the right/bottom picture border */
981     if(bx==s->bbx-1) endx=0;
982     if(by==s->bby-1) endy=0;
/* coarse search, step 4 */
984     for(hx=startx;hx<=endx;hx+=4)
985     for(hy=starty;hy<=endy;hy+=4)
988 	bits = getmvdbits(s,fb,bx,by,hx,hy);
/* refine: narrow the window to +/-3 around the coarse optimum */
996     if(bestx-3 > startx) startx = bestx-3;
997     if(besty-3 > starty) starty = besty-3;
998     if(bestx+3 < endx) endx = bestx+3;
999     if(besty+3 < endy) endy = besty+3;
1001     for(hx=startx;hx<=endx;hx++)
1002     for(hy=starty;hy<=endy;hy++)
1005 	bits = getmvdbits(s,fb,bx,by,hx,hy);
1012     data->movex = bestx;
1013     data->movey = besty;
/* final residual with the chosen vector */
1016     memcpy(&fbdiff, fb, sizeof(block_t));
1017     getmvdregion(&data->fbold, s->oldpic, bx, by, data->movex, data->movey, s->linex);
1018     yuvdiff(&fbdiff, &data->fbold);
1019     dodctandquant(&fbdiff, &data->b, 0, s->quant);
1020     getblockpatterns(&data->b, &y, &c, 0);
1022     data->xindex = mvd2index(predictmvdx, predictmvdy, data->movex, data->movey, 0);
1023     data->yindex = mvd2index(predictmvdx, predictmvdy, data->movex, data->movey, 1);
/* exact bit cost: headers, motion vector codes, coefficients */
1026     *bits += mcbpc_inter[0*4+c].len;
1027     *bits += cbpy[y^15].len;
1028     *bits += mvd[data->xindex].len; // (0,0)
1029     *bits += mvd[data->yindex].len;
1030     *bits += coefbits8x8(data->b.y1, 0);
1031     *bits += coefbits8x8(data->b.y2, 0);
1032     *bits += coefbits8x8(data->b.y3, 0);
1033     *bits += coefbits8x8(data->b.y4, 0);
1034     *bits += coefbits8x8(data->b.u, 0);
1035     *bits += coefbits8x8(data->b.v, 0);
1038     /* -- reconstruction -- */
1039     memcpy(&data->reconstruction, &data->b, sizeof(block_t));
1040     dequantize(&data->reconstruction, 0, s->quant);
1041     doidct(&data->reconstruction);
/* add residual back onto the motion-compensated reference, clamp to 0..255 */
1043 	data->reconstruction.y1[t] =
1044 	    truncate256(data->reconstruction.y1[t] + (int)data->fbold.y1[t]);
1045 	data->reconstruction.y2[t] =
1046 	    truncate256(data->reconstruction.y2[t] + (int)data->fbold.y2[t]);
1047 	data->reconstruction.y3[t] =
1048 	    truncate256(data->reconstruction.y3[t] + (int)data->fbold.y3[t]);
1049 	data->reconstruction.y4[t] =
1050 	    truncate256(data->reconstruction.y4[t] + (int)data->fbold.y4[t]);
1051 	data->reconstruction.u[t] =
1052 	    truncate256(data->reconstruction.u[t] + (int)data->fbold.u[t]);
1053 	data->reconstruction.v[t] =
1054 	    truncate256(data->reconstruction.v[t] + (int)data->fbold.v[t]);
/* Emit the inter block prepared by prepareMVDBlock(): COD=0, mcbpc, inverted
   cbpy, the two motion-vector codes, then the six coefficient blocks. Records
   the chosen vector in s->mvdx/mvdy (needed by neighbors' predictors), copies
   the reconstruction into s->current, and asserts the bit count matches the
   estimate. */
1058 int writeMVDBlock(VIDEOSTREAM*s, TAG*tag, mvdblockdata_t*data)
1062     int has_dc=0; // mvd w/o mvd24
1063     /* mvd (0,0) block (mode=0) */
1069     getblockpatterns(&data->b, &y, &c, has_dc);
1070     swf_SetBits(tag,0,1); bits += 1; // COD
1071     bits += codehuffman(tag, mcbpc_inter, mode*4+c);
1072     bits += codehuffman(tag, cbpy, y^15);
1075     bits += codehuffman(tag, mvd, data->xindex);
1076     bits += codehuffman(tag, mvd, data->yindex);
1079     bits += encode8x8(tag, data->b.y1, has_dc, y&8);
1080     bits += encode8x8(tag, data->b.y2, has_dc, y&4);
1081     bits += encode8x8(tag, data->b.y3, has_dc, y&2);
1082     bits += encode8x8(tag, data->b.y4, has_dc, y&1);
1085     bits += encode8x8(tag, data->b.u, has_dc, c&2);
1086     bits += encode8x8(tag, data->b.v, has_dc, c&1);
1088     s->mvdx[by*s->bbx+bx] = data->movex;
1089     s->mvdy[by*s->bbx+bx] = data->movey;
1091     copy_block_pic(s, s->current, &data->reconstruction, data->bx, data->by);
1092     assert(data->bits == bits);
/* Mode decision + emission for one P-frame macroblock:
   1) if reusing the previous frame's block is at least as good as the intra
      reconstruction (SAD comparison), emit COD=1 (skip);
   2) otherwise encode whichever of intra / motion-compensated costs fewer bits. */
1096 static int encode_PFrame_block(TAG*tag, VIDEOSTREAM*s, int bx, int by)
1103     iblockdata_t iblock;
1104     mvdblockdata_t mvdblock;
1106     getregion(&fb, s->current, bx, by, s->linex);
1107     prepareIBlock(s, &iblock, bx, by, &fb, &bits_i, 0);
1109     /* encoded last frame <=> original current block: */
1110     diff1 = compare_pic_pic(s, s->current, s->oldpic, bx, by);
1111     /* encoded current frame <=> original current block: */
1112     diff2 = compare_pic_block(s, &iblock.reconstruction, s->current, bx, by);
1114     if(diff1 <= diff2) {
1115 	swf_SetBits(tag, 1,1); /* cod=1, block skipped */
1116 	/* copy the region from the last frame so that we have a complete reconstruction */
1117 	copyregion(s, s->current, s->oldpic, bx, by);
1120     prepareMVDBlock(s, &mvdblock, bx, by, &fb, &bits_vxy);
/* pick the cheaper of inter vs. intra coding */
1122     if(bits_i > bits_vxy) {
1123 	return writeMVDBlock(s, tag, &mvdblock);
1125     return writeIBlock(s, tag, &iblock);
1129 /* should be called encode_IFrameBlock */
/* Encode one macroblock of an I-frame: intra mode only, no mode decision. */
1130 static void encode_IFrame_block(TAG*tag, VIDEOSTREAM*s, int bx, int by)
1136     getregion(&fb, s->current, bx, by, s->width);
1137     prepareIBlock(s, &data, bx, by, &fb, &bits, 1);
1138     writeIBlock(s, tag, &data);
1142 static int bmid = 0;
/* Debug helper: append tags that display the given RGBA picture in the SWF —
   remove the previous debug object (depth 133), define a lossless bitmap and a
   shape wrapping it (ids 1000+bmid / 2000+bmid), and place the shape.
   NOTE(review): excerpt — bmid increment and matrix setup lines are missing
   from this view. */
1144 void setdbgpic(TAG*tag, RGBA*pic, int width, int height)
1149     tag = swf_InsertTag(tag,ST_REMOVEOBJECT2);
1150     swf_SetU16(tag, 133);
1152     tag = swf_InsertTag(tag, ST_DEFINEBITSLOSSLESS);
1153     swf_SetU16(tag, 1000+bmid);
1154     swf_SetLosslessBits(tag, width, height, (void*)pic, BMF_32BIT);
1156     tag = swf_InsertTag(tag, ST_DEFINESHAPE);
1157     swf_SetU16(tag, 2000+bmid);
1158     swf_ShapeSetBitmapRect(tag, 1000+bmid, width, height);
1160     tag = swf_InsertTag(tag,ST_PLACEOBJECT2);
1161     swf_GetMatrix(0,&m);
1163     swf_ObjectPlace(tag, 2000+bmid, 133, &m, 0, 0);
1169 #define TYPE_IFRAME 0
1170 #define TYPE_PFRAME 1
/* Emit the Sorenson-Spark/H.263 picture header: frame number (tag payload),
   17-bit picture start code, 5-bit version, 8-bit time reference, picture
   dimensions (3-bit code for predefined sizes, otherwise explicit 8- or 16-bit
   width/height), picture type (I/P), deblocking flag, 5-bit quantizer and the
   extra-info terminator.
   NOTE(review): excerpt — the switch header and two size cases are missing
   from this view. */
1172 static void writeHeader(TAG*tag, int width, int height, int frame, int quant, int type)
1175     swf_SetU16(tag, frame);
1176     swf_SetBits(tag, 1, 17); /* picture start code*/
1177     swf_SetBits(tag, 0, 5); /* version=0, version 1 would optimize rle behaviour*/
1178     swf_SetBits(tag, frame, 8); /* time reference */
1180     /* write dimensions, taking advantage of some predefined sizes
1181        if the opportunity presents itself */
1182     i32 = width<<16|height;
1185 	case 352<<16|288: swf_SetBits(tag, 2, 3);break;
1186 	case 176<<16|144: swf_SetBits(tag, 3, 3);break;
1187 	case 128<<16|96: swf_SetBits(tag, 4, 3);break;
1188 	case 320<<16|240: swf_SetBits(tag, 5, 3);break;
1189 	case 160<<16|120: swf_SetBits(tag, 6, 3);break;
/* default: explicit dimensions, 16 bit if either side exceeds 255 */
1191 	    if(width>255 || height>255) {
1192 		swf_SetBits(tag, 1, 3);
1193 		swf_SetBits(tag, width, 16);
1194 		swf_SetBits(tag, height, 16);
1196 		swf_SetBits(tag, 0, 3);
1197 		swf_SetBits(tag, width, 8);
1198 		swf_SetBits(tag, height, 8);
1202     swf_SetBits(tag, type, 2); /* I-Frame or P-Frame */
1203     swf_SetBits(tag, 0, 1); /* No deblock filter */
1206     swf_SetBits(tag, quant, 5); /* quantizer (1-31), may be updated later on*/
1207     swf_SetBits(tag, 0, 1); /* No extra info */
/* Encode one complete I-frame into a VIDEOFRAME tag: clamp quant to 1..31,
   write the picture header, convert the RGBA input to YUV, intra-encode every
   macroblock, and keep the reconstruction as reference for the next P-frame. */
1210 void swf_SetVideoStreamIFrame(TAG*tag, VIDEOSTREAM*s, RGBA*pic, int quant)
1214     if(quant<1) quant=1;
1215     if(quant>31) quant=31;
1218     writeHeader(tag, s->width, s->height, s->frame, quant, TYPE_IFRAME);
1220     /* fixme: should fill with 0,128,128, not 0,0,0 */
1221     memset(s->current, 0, s->linex*s->height*sizeof(YUV));
1223     rgb2yuv(s->current, pic, s->linex, s->olinex, s->owidth, s->oheight);
1225     for(by=0;by<s->bby;by++)
1227 	for(bx=0;bx<s->bbx;bx++)
1229 	    encode_IFrame_block(tag, s, bx, by);
/* the reconstructed frame becomes the reference for the next frame */
1233     memcpy(s->oldpic, s->current, s->width*s->height*sizeof(YUV));
/* Encode one P-frame into a VIDEOFRAME tag: header, RGB->YUV conversion,
   per-macroblock mode decision via encode_PFrame_block(), then store the
   reconstruction as the next reference frame. Trailing debug lines re-render
   the reconstruction back into the caller's pic and insert it via setdbgpic()
   — presumably guarded by a condition missing from this view. */
1236 void swf_SetVideoStreamPFrame(TAG*tag, VIDEOSTREAM*s, RGBA*pic, int quant)
1240     if(quant<1) quant=1;
1241     if(quant>31) quant=31;
1244     writeHeader(tag, s->width, s->height, s->frame, quant, TYPE_PFRAME);
1246     /* fixme: should fill with 0,128,128, not 0,0,0 */
1247     memset(s->current, 0, s->linex*s->height*sizeof(YUV));
1249     rgb2yuv(s->current, pic, s->linex, s->olinex, s->owidth, s->oheight);
/* reset the motion vector grid used by predictmvd() */
1250     memset(s->mvdx, 0, s->bbx*s->bby*sizeof(int));
1251     memset(s->mvdy, 0, s->bbx*s->bby*sizeof(int));
1253     for(by=0;by<s->bby;by++)
1255 	for(bx=0;bx<s->bbx;bx++)
1257 	    encode_PFrame_block(tag, s, bx, by);
1261     memcpy(s->oldpic, s->current, s->width*s->height*sizeof(YUV));
1265     yuv2rgb(pic, s->current, s->linex, s->width, s->height);
1266     setdbgpic(tag, pic, s->width, s->height);
/* Build a synthetic P-frame directly from caller-supplied per-macroblock motion
   vectors (movex/movey, clamped to -32..31): blocks with zero motion and no
   picture data are skipped (COD=1); otherwise an inter macroblock with the
   given vector (and, for the picture path, DCT'd block data) is emitted.
   Unlike the real encoder, no reconstruction/search is performed.
   NOTE(review): excerpt — several setup and branch lines are missing from this
   view; the "store picture in b" path is an acknowledged TODO. */
1271 void swf_SetVideoStreamMover(TAG*tag, VIDEOSTREAM*s, signed char* movex, signed char* movey, void**picture, int quant)
1275     if(quant<1) quant=1;
1276     if(quant>31) quant=31;
1279     writeHeader(tag, s->width, s->height, s->frame, quant, TYPE_PFRAME);
1281     memset(s->mvdx, 0, s->bbx*s->bby*sizeof(int));
1282     memset(s->mvdy, 0, s->bbx*s->bby*sizeof(int));
1284     for(by=0;by<s->bby;by++)
1286 	for(bx=0;bx<s->bbx;bx++)
1288 	    int predictmvdx=0, predictmvdy=0;
1289 	    int mvx=movex[by*s->bbx+bx];
1290 	    int mvy=movey[by*s->bbx+bx];
/* clamp to the legal half-pel vector range */
1292 	    if(mvx<-32) mvx=-32;
1294 	    if(mvy<-32) mvy=-32;
1297 	    if(mvx == 0 && mvy == 0 && picture == 0) {
1298 		swf_SetBits(tag,1,1); // COD skip
1306 		swf_SetBits(tag,0,1); // COD
1308 		if(mvx==0 && mvy==0) { // only picture
1314 		    /* todo: store picture in b */
1315 		    dodctandquant(&b, &b2, 1, s->quant);
1316 		    getblockpatterns(&b2, &y, &c, 1);
1321 		codehuffman(tag, mcbpc_inter, mode*4+c);
1322 		codehuffman(tag, cbpy, y^15);
1325 		    /* has motion vector */
1326 		    predictmvd(s,bx,by,&predictmvdx,&predictmvdy);
1327 		    codehuffman(tag, mvd, mvd2index(predictmvdx, predictmvdy, mvx, mvy, 0));
1328 		    codehuffman(tag, mvd, mvd2index(predictmvdx, predictmvdy, mvx, mvy, 1));
1329 		    s->mvdx[by*s->bbx+bx] = mvx;
1330 		    s->mvdy[by*s->bbx+bx] = mvy;
1334 		    encode8x8(tag, b2.y1, has_dc, y&8);
1335 		    encode8x8(tag, b2.y2, has_dc, y&4);
1336 		    encode8x8(tag, b2.y3, has_dc, y&2);
1337 		    encode8x8(tag, b2.y4, has_dc, y&1);
1338 		    encode8x8(tag, b2.u, has_dc, c&2);
1339 		    encode8x8(tag, b2.v, has_dc, c&1);
/* Self-test for the block copy/compare helpers on a synthetic 256x256 picture:
   (1) compare_pic_pic of a picture with itself is zero; (2) a block copied out
   via getregion and back via copy_block_pic compares equal; (3)
   compare_pic_block and compare_pic_pic agree on the same data.
   NOTE(review): excerpt — loop headers and the asserts for tests 1 and 2 are
   missing from this view. */
1349 void test_copy_diff()
1352     VIDEOSTREAM* s = &stream;
1354     RGBA*pic = malloc(256*256*sizeof(RGBA));
1359     for(y=0;y<256;y++) {
1360 	pic[y*256+x].r = x*y;
1361 	pic[y*256+x].g = x+y;
1362 	pic[y*256+x].b = (x+1)%(y+1);
1364     tag = swf_InsertTag(0, ST_DEFINEVIDEOSTREAM);
1365     swf_SetU16(tag, 33);
1366     swf_SetVideoStreamDefine(tag, s, 10, 256, 256);
1368     rgb2yuv(s->current, pic, s->linex, s->olinex, s->owidth, s->oheight);
1369     for(by=0;by<16;by++)
1370     for(bx=0;bx<16;bx++) {
1372 	/* test1: does compare pic pic return zero for identical blocks? */
1373 	diff1 = compare_pic_pic(s, s->current, s->current, bx, by);
1375 	/* test2: do blocks which are copied back return zero diff? */
1376 	getregion(&fb, s->current, bx, by, s->linex);
1377 	copy_block_pic(s, s->oldpic, &fb, bx, by);
1378 	diff1 = compare_pic_block(s, &fb, s->oldpic, bx, by);
1380 	/* test3: does compare_pic_block return the same result as compare_pic_pic? */
1381 	getregion(&fb, s->current, 15-bx, 15-by, s->linex);
1382 	copy_block_pic(s, s->oldpic, &fb, bx, by);
1383 	diff1 = compare_pic_block(s, &fb, s->current, bx, by);
1384 	diff2 = compare_pic_pic(s, s->current, s->oldpic, bx, by);
1385 	assert(diff1 == diff2);
1394 int compileSWFActionCode(const char *script, int version, void**data, int*len) {return 0;}
1409 pic = malloc(width*height*4);
1410 memset(pic, 0, width*height*4);
1412 memset(&swf,0,sizeof(SWF));
1413 memset(&obj,0,sizeof(obj));
1415 swf.fileVersion = 6;
1416 swf.frameRate = 15*256;
1417 swf.movieSize.xmax = 20*width;
1418 swf.movieSize.ymax = 20*height;
1420 swf.firstTag = swf_InsertTag(NULL,ST_SETBACKGROUNDCOLOR);
1422 rgb.r = 0x00;rgb.g = 0x30;rgb.b = 0xff;
1423 swf_SetRGB(tag,&rgb);
1425 tag = swf_InsertTag(tag, ST_DEFINEVIDEOSTREAM);
1427 swf_SetVideoStreamDefine(tag, &stream, frames, width, height);
1428 stream.do_motion = 0;
1430 for(y=0;y<height;y++) {
1431 for(x=0;x<width;x++) {
1435 pic[y*width+x].r = 0;
1436 pic[y*width+x].g = 0;
1437 pic[y*width+x].b = 0;
1438 pic[y*width+x].a = 0;
1441 tag = swf_InsertTag(tag, ST_VIDEOFRAME);
1444 swf_SetVideoStreamIFrame(tag, &stream, pic, 7);
1446 tag = swf_InsertTag(tag, ST_PLACEOBJECT2);
1447 swf_GetPlaceObject(0, &obj);
1452 swf_SetPlaceObject(tag,&obj);
1454 tag = swf_InsertTag(tag, ST_SHOWFRAME);
1456 swf_VideoStreamClear(&stream);
1458 tag = swf_InsertTag(tag, ST_END);
1460 int fi = open("black.swf", O_WRONLY|O_CREAT|O_TRUNC, 0644);
1461 if(swf_WriteSWC(fi,&swf)<0) {
1462 fprintf(stderr,"WriteSWF() failed.\n");
1468 int main(int argn, char*argv[])
1474 RGBA* pic, *pic2, rgb;
1481 char* fname = "/home/kramm/pics/peppers_fromjpg.png";
1482 //char* fname = "/home/kramm/pics/baboon.png";
1492 memset(&stream, 0, sizeof(stream));
1494 getPNG(fname, &width, &height, &data);
1495 pic = (RGBA*)malloc(width*height*sizeof(RGBA));
1496 pic2 = (RGBA*)malloc(width*height*sizeof(RGBA));
1497 memcpy(pic, data, width*height*sizeof(RGBA));
1500 printf("Compressing %s, size %dx%d\n", fname, width, height);
1502 memset(&swf,0,sizeof(SWF));
1503 memset(&obj,0,sizeof(obj));
1505 swf.fileVersion = 6;
1506 swf.frameRate = framerate*256;
1507 swf.movieSize.xmax = 20*width*2;
1508 swf.movieSize.ymax = 20*height;
1510 swf.firstTag = swf_InsertTag(NULL,ST_SETBACKGROUNDCOLOR);
1512 rgb.r = 0x00;rgb.g = 0x30;rgb.b = 0xff;
1513 swf_SetRGB(tag,&rgb);
1515 tag = swf_InsertTag(tag, ST_DEFINEVIDEOSTREAM);
1516 swf_SetU16(tag, 33);
1517 swf_SetVideoStreamDefine(tag, &stream, frames, width, height);
1518 stream.do_motion = 0;
1522 for(t=0;t<frames;t++)
1526 for(y=0,yy=0;y<height;y++,yy+=d) {
1527 RGBA*line = &pic[((int)yy)*width];
1528 for(x=0,xx=0;x<width;x++,xx+=d) {
1531 if(dx==0 && dy==0) {
1532 pic2[y*width+x] = line[((int)xx)];
1533 pic2[y*width+x].r+=2;
1534 pic2[y*width+x].g+=2;
1535 pic2[y*width+x].b+=2;
1537 //pic2[y*width+x] = line[((int)xx)];
1538 //pic2[y*width+x].r = lrand48();//line[((int)xx)];
1539 //pic2[y*width+x].g = lrand48();//line[((int)xx)];
1540 //pic2[y*width+x].b = lrand48();//line[((int)xx)];
1541 pic2[y*width+x].r = 0;
1542 pic2[y*width+x].g = 0;
1543 pic2[y*width+x].b = 0;
1545 /*if(dx==16 && dy==16)
1546 pic2[y*width+x] = pic[(y-16*16)*width+(x-16*16)];*/
1547 /*if(dx<=0 && dy<=0) {
1548 pic2[y*width+x] = line[((int)xx)];*/
1549 /*if(x==0 && y==0) {
1551 memset(&color, 0, sizeof(RGBA));
1552 pic2[y*width+x] = color;*/
1555 color.r = lrand48();
1556 color.g = lrand48();
1557 color.b = lrand48();
1559 pic2[y*width+x] = color;
1563 printf("frame:%d\n", t);fflush(stdout);
1568 tag = swf_InsertTag(tag, ST_VIDEOFRAME);
1569 swf_SetU16(tag, 33);
1571 swf_SetVideoStreamIFrame(tag, &stream, pic2, 7);
1573 swf_SetVideoStreamPFrame(tag, &stream, pic2, 7);
1576 tag = swf_InsertTag(tag, ST_PLACEOBJECT2);
1577 swf_GetPlaceObject(0, &obj);
1586 swf_SetPlaceObject(tag,&obj);
1588 tag = swf_InsertTag(tag, ST_SHOWFRAME);
1591 swf_VideoStreamClear(&stream);
1593 tag = swf_InsertTag(tag, ST_END);
1595 fi = open("video3.swf", O_WRONLY|O_CREAT|O_TRUNC, 0644);
1596 if(swf_WriteSWC(fi,&swf)<0) {
1597 fprintf(stderr,"WriteSWF() failed.\n");