2 Routines for handling h.263 video tags
4 Part of the swftools package.
6 Copyright (c) 2003 Matthias Kramm <kramm@quiss.org>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
26 #include "../rfxswf.h"
27 #include "h263tables.h"
31 - use prepare* / write* in encode_IFrame_block
32 - check whether mvd steps of 2 lead to (much) smaller results
38 void swf_SetVideoStreamDefine(TAG*tag, VIDEOSTREAM*stream, U16 frames, U16 width, U16 height)
40 swf_SetU16(tag, frames);
41 swf_SetU16(tag, width);
42 swf_SetU16(tag, height);
43 //swf_SetU8(tag, 1); /* smoothing on */
44 swf_SetU8(tag, 0); /* smoothing off */
45 swf_SetU8(tag, 2); /* codec = h.263 sorenson spark */
50 memset(stream, 0, sizeof(VIDEOSTREAM));
51 stream->olinex = width;
52 stream->owidth = width;
53 stream->oheight = height;
55 height+=15;height&=~15;
56 stream->linex = width;
57 stream->width = width;
58 stream->height = height;
59 stream->bbx = width/16;
60 stream->bby = height/16;
61 stream->current = (YUV*)malloc(width*height*sizeof(YUV));
62 stream->oldpic = (YUV*)malloc(width*height*sizeof(YUV));
63 stream->mvdx = (int*)malloc(stream->bbx*stream->bby*sizeof(int));
64 stream->mvdy = (int*)malloc(stream->bbx*stream->bby*sizeof(int));
65 stream->do_motion = 0;
67 memset(stream->oldpic, 0, width*height*sizeof(YUV));
68 memset(stream->current, 0, width*height*sizeof(YUV));
70 assert((stream->width&15) == 0);
71 assert((stream->height&15) == 0);
72 assert((stream->bbx*16) == stream->width);
73 assert((stream->bby*16) == stream->height);
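/* Example: a 200x200 input keeps owidth = oheight = 200, while the coded
   size is rounded up to 208x208, i.e. bbx = bby = 13 macroblocks of 16x16. */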
75 void swf_VideoStreamClear(VIDEOSTREAM*stream)
77 free(stream->oldpic);stream->oldpic = 0;
78 free(stream->current);stream->current = 0;
79 free(stream->mvdx);stream->mvdx=0;
80 free(stream->mvdy);stream->mvdy=0;
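/* Illustrative usage sketch (not called anywhere): it mirrors the test main()
   at the bottom of this file. One DEFINEVIDEOSTREAM tag declares the stream,
   then every picture becomes one VIDEOFRAME tag. The function name, the
   character id 33 and the quantizer 7 are just example values; placement and
   ST_SHOWFRAME tags are omitted here. */
static void example_write_video(TAG*tag, RGBA*frame, int nframes, int width, int height)
{
    VIDEOSTREAM stream;
    int t;
    tag = swf_InsertTag(tag, ST_DEFINEVIDEOSTREAM);
    swf_SetU16(tag, 33); /* character id of the stream */
    swf_SetVideoStreamDefine(tag, &stream, nframes, width, height);
    for(t=0;t<nframes;t++) {
        tag = swf_InsertTag(tag, ST_VIDEOFRAME);
        swf_SetU16(tag, 33); /* id of the stream this frame belongs to */
        if(t==0)
            swf_SetVideoStreamIFrame(tag, &stream, frame, 7);
        else
            swf_SetVideoStreamPFrame(tag, &stream, frame, 7);
    }
    swf_VideoStreamClear(&stream);
}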
83 typedef struct _block_t
93 static inline int truncate256(int a)
100 static void getregion(block_t* bb, YUV*pic, int posx, int posy, int linex)
108 p1 = &pic[posy*linex+posx];
112 bb->u[i] = (p2[x*2].u + p2[x*2+1].u + p2[linex+x*2].u + p2[linex+x*2+1].u)/4;
113 bb->v[i] = (p2[x*2].v + p2[x*2+1].v + p2[linex+x*2].v + p2[linex+x*2+1].v)/4;
115 bb->y2[i] = p1[x+8].y;
116 bb->y3[i] = p1[linex*8+x].y;
117 bb->y4[i] = p1[linex*8+x+8].y;
125 /* Fetch the 16x16 region at block position (posx,posy), displaced by the half-pel motion vector (mvdx,mvdy); half-pel positions are interpolated by averaging neighbouring samples. */
126 static void getmvdregion(block_t* bb, YUV*pic, int posx, int posy, int mvdx, int mvdy, int linex)
134 posx = posx*16 + ((mvdx&~1)/2); //works also for negative mvdx (unlike mvdx/2)
135 posy = posy*16 + ((mvdy&~1)/2);
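/* Example of why the &~1 matters for negative vectors, e.g. mvdx = -3
   (half-pel units): -3/2 == -1 (C truncates toward zero), whereas
   (-3&~1)/2 == -4/2 == -2, i.e. rounding toward minus infinity. */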
136 p1 = &pic[posy*linex+posx];
137 p2 = &pic[(posy&~1)*linex+(posx&~1)];
138 uvhp = ((mvdx&1)|((mvdx>>1)&1))|((mvdy&2)|((mvdy&1)<<1));
139 yhp = ((mvdy&1)<<1|(mvdx&1));
142 if(yhp==0 || yhp==2) {
145 bb->y1[yy] = p1[x].y;
146 bb->y2[yy] = p1[x+8].y;
147 bb->y3[yy] = p1[linex*8+x].y;
148 bb->y4[yy] = p1[linex*8+x+8].y;
156 bb->y1[yy] += p1[x].y; bb->y1[yy] /= 2;
157 bb->y2[yy] += p1[x+8].y; bb->y2[yy] /= 2;
158 bb->y3[yy] += p1[linex*8+x].y; bb->y3[yy] /= 2;
159 bb->y4[yy] += p1[linex*8+x+8].y; bb->y4[yy] /= 2;
164 } else if(yhp==1 || yhp==3) {
167 bb->y1[yy] = (p1[x].y + p1[x+1].y);
168 bb->y2[yy] = (p1[x+8].y + p1[x+8+1].y);
169 bb->y3[yy] = (p1[linex*8+x].y + p1[linex*8+x+1].y);
170 bb->y4[yy] = (p1[linex*8+x+8].y + p1[linex*8+x+8+1].y);
177 bb->y1[yy] += (p1[x].y + p1[x+1].y); bb->y1[yy]/=4;
178 bb->y2[yy] += (p1[x+8].y + p1[x+8+1].y); bb->y2[yy]/=4;
179 bb->y3[yy] += (p1[linex*8+x].y + p1[linex*8+x+1].y); bb->y3[yy]/=4;
180 bb->y4[yy] += (p1[linex*8+x+8].y + p1[linex*8+x+8+1].y); bb->y4[yy]/=4;
185 bb->y1[yy]/=2; bb->y2[yy]/=2; bb->y3[yy]/=2; bb->y4[yy]/=2;
193 if(uvhp==0 || uvhp==2) {
196 bb->u[uv] = (p2[x*2].u + p2[x*2+1].u + p2[linex+x*2].u + p2[linex+x*2+1].u)/4;
197 bb->v[uv] = (p2[x*2].v + p2[x*2+1].v + p2[linex+x*2].v + p2[linex+x*2+1].v)/4;
204 bb->u[uv] += (p2[x*2].u + p2[x*2+1].u + p2[linex+x*2].u + p2[linex+x*2+1].u)/4;
205 bb->v[uv] += (p2[x*2].v + p2[x*2+1].v + p2[linex+x*2].v + p2[linex+x*2+1].v)/4;
212 } else /* uvhp==1 || uvhp==3 */ {
215 bb->u[uv] = ((p2[x*2].u + p2[x*2+1].u + p2[linex+x*2].u + p2[linex+x*2+1].u)/4+
216 (p2[x*2+2].u + p2[x*2+1+2].u + p2[linex+x*2+2].u + p2[linex+x*2+1+2].u)/4);
217 bb->v[uv] = ((p2[x*2].v + p2[x*2+1].v + p2[linex+x*2].v + p2[linex+x*2+1].v)/4+
218 (p2[x*2+2].v + p2[x*2+1+2].v + p2[linex+x*2+2].v + p2[linex+x*2+1+2].v)/4);
225 bb->u[uv] += ((p2[x*2].u + p2[x*2+1].u + p2[linex+x*2].u + p2[linex+x*2+1].u)/4+
226 (p2[x*2+2].u + p2[x*2+1+2].u + p2[linex+x*2+2].u + p2[linex+x*2+1+2].u)/4);
227 bb->v[uv] += ((p2[x*2].v + p2[x*2+1].v + p2[linex+x*2].v + p2[linex+x*2+1].v)/4+
228 (p2[x*2+2].v + p2[x*2+1+2].v + p2[linex+x*2+2].v + p2[linex+x*2+1+2].v)/4);
244 static void rgb2yuv(YUV*dest, RGBA*src, int dlinex, int slinex, int width, int height)
247 for(y=0;y<height;y++) {
248 for(x=0;x<width;x++) {
250 r = src[y*slinex+x].r;
251 g = src[y*slinex+x].g;
252 b = src[y*slinex+x].b;
253 /*dest[y*dlinex+x].y = (r*0.299 + g*0.587 + b*0.114);
254 dest[y*dlinex+x].u = (r*-0.169 + g*-0.332 + b*0.500 + 128.0);
255 dest[y*dlinex+x].v = (r*0.500 + g*-0.419 + b*-0.0813 + 128.0);*/
259 dest[y*dlinex+x].y = (r*((int)( 0.299*256)) + g*((int)( 0.587*256)) + b*((int)( 0.114 *256)))>>8;
260 dest[y*dlinex+x].u = (r*((int)(-0.169*256)) + g*((int)(-0.332*256)) + b*((int)( 0.500 *256))+ 128*256)>>8;
261 dest[y*dlinex+x].v = (r*((int)( 0.500*256)) + g*((int)(-0.419*256)) + b*((int)(-0.0813*256))+ 128*256)>>8;
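/* Sanity check of the fixed point coefficients: a white pixel (r=g=b=255)
   yields y = (255*(76+150+29))>>8 = 254 and u = v = 128, matching the
   floating point formulas commented out above. */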
266 static void copyregion(VIDEOSTREAM*s, YUV*dest, YUV*src, int bx, int by)
268 YUV*p1 = &dest[by*s->linex*16+bx*16];
269 YUV*p2 = &src[by*s->linex*16+bx*16];
272 memcpy(p1, p2, 16*sizeof(YUV));
273 p1+=s->linex;p2+=s->linex;
277 static void yuv2rgb(RGBA*dest, YUV*src, int linex, int width, int height)
280 for(y=0;y<height;y++) {
281 for(x=0;x<width;x++) {
283 u = src[y*linex+x].u;
284 v = src[y*linex+x].v;
285 yy = src[y*linex+x].y;
286 dest[y*linex+x].r = truncate256(yy + ((360*(v-128))>>8));
287 dest[y*linex+x].g = truncate256(yy - ((88*(u-128)+183*(v-128))>>8));
288 dest[y*linex+x].b = truncate256(yy + ((455 * (u-128))>>8));
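/* Example: (y,u,v) = (254,128,128) maps back to r = g = b = 254;
   for u = v = 128 all chroma terms vanish. */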
292 static void copy_block_pic(VIDEOSTREAM*s, YUV*dest, block_t*b, int bx, int by)
294 YUV*p1 = &dest[(by*16)*s->linex+bx*16];
295 YUV*p2 = &dest[(by*16+8)*s->linex+bx*16];
300 p1[x+0].u = b->u[(y/2)*8+(x/2)];
301 p1[x+0].v = b->v[(y/2)*8+(x/2)];
302 p1[x+0].y = b->y1[y*8+x];
303 p1[x+8].u = b->u[(y/2)*8+(x/2)+4];
304 p1[x+8].v = b->v[(y/2)*8+(x/2)+4];
305 p1[x+8].y = b->y2[y*8+x];
306 p2[x+0].u = b->u[(y/2+4)*8+(x/2)];
307 p2[x+0].v = b->v[(y/2+4)*8+(x/2)];
308 p2[x+0].y = b->y3[y*8+x];
309 p2[x+8].u = b->u[(y/2+4)*8+(x/2)+4];
310 p2[x+8].v = b->v[(y/2+4)*8+(x/2)+4];
311 p2[x+8].y = b->y4[y*8+x];
318 static int compare_pic_pic(VIDEOSTREAM*s, YUV*pp1, YUV*pp2, int bx, int by)
320 int linex = s->width;
321 YUV*p1 = &pp1[by*linex*16+bx*16];
322 YUV*p2 = &pp2[by*linex*16+bx*16];
323 int diffy=0, diffuv = 0;
333 diffuv += abs(u)+abs(v);
338 return diffy + diffuv/4;
341 static int compare_pic_block(VIDEOSTREAM*s, block_t* b, YUV*pic, int bx, int by)
343 int linex = s->width;
344 YUV*y1 = &pic[(by*2)*linex*8+bx*16];
345 YUV*y2 = &pic[(by*2)*linex*8+bx*16+8];
346 YUV*y3 = &pic[(by*2+1)*linex*8+bx*16];
347 YUV*y4 = &pic[(by*2+1)*linex*8+bx*16+8];
349 YUV*uv2 = &y1[linex];
350 int diffy=0, diffuv = 0;
354 int yy,u1,v1,u2,v2,u3,v3,u4,v4;
356 yy = y1[x].y - b->y1[y8x];
358 yy = y2[x].y - b->y2[y8x];
360 yy = y3[x].y - b->y3[y8x];
362 yy = y4[x].y - b->y4[y8x];
364 u1 = uv1[x*2].u - b->u[y8x];
365 v1 = uv1[x*2].v - b->v[y8x];
366 u2 = uv1[x*2+1].u - b->u[y8x];
367 v2 = uv1[x*2+1].v - b->v[y8x];
368 u3 = uv2[x*2].u - b->u[y8x];
369 v3 = uv2[x*2].v - b->v[y8x];
370 u4 = uv2[x*2+1].u - b->u[y8x];
371 v4 = uv2[x*2+1].v - b->v[y8x];
372 diffuv += (abs(u1)+abs(v1));
373 diffuv += (abs(u2)+abs(v2));
374 diffuv += (abs(u3)+abs(v3));
375 diffuv += (abs(u4)+abs(v4));
384 return diffy + diffuv/4;
387 static inline int valtodc(int val)
395 /* TODO: what to do for zero values? skip the block? */
404 static int dctoval(int dc)
417 /* TODO: we could also just let the caller pass only the string table[index] here */
418 static int codehuffman(TAG*tag, struct huffcode*table, int index)
420 /* TODO: !optimize! */
422 while(table[index].code[i]) {
423 if(table[index].code[i]=='0')
424 swf_SetBits(tag, 0, 1);
426 swf_SetBits(tag, 1, 1);
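/* Illustrative sketch (not used by the encoder): the TODO above asks for a
   faster variant. One option, assuming no huffman code string is longer than
   32 bits (the H.263 tables here stay well below that), is to convert each
   code string to an integer once and emit it with a single swf_SetBits()
   call. The struct and function names below are hypothetical. */
typedef struct _packedcode_t {
    U32 bits; /* code bits, msb first */
    int len;  /* number of bits */
} packedcode_t;

static void packhuffman(struct huffcode*table, packedcode_t*cache, int size)
{
    int t;
    for(t=0;t<size;t++) {
        int i = 0;
        cache[t].bits = 0;
        while(table[t].code[i]) {
            cache[t].bits = (cache[t].bits<<1) | (table[t].code[i]=='1');
            i++;
        }
        cache[t].len = i;
    }
}

static int codehuffman_packed(TAG*tag, packedcode_t*cache, int index)
{
    swf_SetBits(tag, cache[index].bits, cache[index].len);
    return cache[index].len;
}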
432 static void quantize8x8(int*src, int*dest, int has_dc, int quant)
435 double q = 1.0/(quant*2);
437 dest[0] = valtodc((int)src[0]); /*DC*/
442 //dest[t] = (int)src[t];
443 /* exact: if(quant&1){dest[t] = (dest[t]/quant - 1)/2;}else{dest[t] = ((dest[t]+1)/quant - 1)/2;} */
444 //if(quant&1){dest[t] = (dest[t]/quant - 1)/2;}else{dest[t] = ((dest[t]+1)/quant - 1)/2;}
445 //dest[t] = dest[t]/(quant*2);
446 dest[t] = (int)(src[t]*q);
447 /* TODO: warn if this happens - the resulting video will be broken */
448 if(dest[t]>127) dest[t]=127;
449 if(dest[t]<-127) dest[t]=-127;
453 static void dequantize8x8(int*b, int has_dc, int quant)
457 b[0] = dctoval(b[0]); //DC
460 for(t=pos;t<64;t++) {
469 b[t] = quant*(2*b[t]+1); //-7,8,24,40
471 b[t] = quant*(2*b[t]+1)-1; //-8,7,23,39
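/* e.g. quant=5 (odd):  level 3 -> 5*(2*3+1)   = 35
        quant=4 (even): level 3 -> 4*(2*3+1)-1 = 27 */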
478 /* paragraph 6.2.2, "clipping of reconstruction levels": */
479 if(b[t]>2047) b[t]=2047;
480 if(b[t]<-2048) b[t]=-2048;
484 static int hascoef(int*b, int has_dc)
490 for(t=pos;t<64;t++) {
497 static int coefbits8x8(int*bb, int has_dc)
508 for(last=63;last>=pos;last--) {
515 int run=0, level=0, islast=0,t;
516 while(!bb[pos] && pos<last) {
523 if(level<0) level=-level;
525 for(t=0;t<RLE_ESCAPE;t++) {
526 if(rle_params[t].run == run &&
527 rle_params[t].level == level &&
528 rle_params[t].last == islast) {
529 bits += rle[t].len + 1;
534 bits += rle[RLE_ESCAPE].len + 1 + 6 + 8;
543 static int encode8x8(TAG*tag, int*bb, int has_dc, int has_tcoef)
550 swf_SetBits(tag, bb[0], 8);
557 /* determine last non-null coefficient */
558 for(last=63;last>=pos;last--) {
559 /* TODO: we could leave out small coefficients
560 after a certain point (32?) */
564 /* blocks without coefficients should not be included
565 in the cbpy/cbpc patterns: */
574 while(!bb[pos] && pos<last) {
586 for(t=0;t<RLE_ESCAPE;t++) {
587 /* TODO: lookup table */
588 if(rle_params[t].run == run &&
589 rle_params[t].level == level &&
590 rle_params[t].last == islast) {
591 bits += codehuffman(tag, rle, t);
592 swf_SetBits(tag, sign, 1);
598 bits += codehuffman(tag, rle, RLE_ESCAPE);
601 if(!level || level<-127 || level>127) {
602 fprintf(stderr, "Warning: Overflow- Level %d at pos %d\n", level, pos);
603 if(level<-127) level=-127;
604 if(level>127) level=127;
609 assert(level<=127); //TODO: known to fail for pos=0 (with custom frames?)
611 swf_SetBits(tag, islast, 1);
612 swf_SetBits(tag, run, 6);
613 swf_SetBits(tag, level, 8); //FIXME: check that negative levels are written correctly as 8 bit two's complement
625 static void quantize(block_t*fb, block_t*b, int has_dc, int quant)
627 quantize8x8(fb->y1, b->y1, has_dc, quant);
628 quantize8x8(fb->y2, b->y2, has_dc, quant);
629 quantize8x8(fb->y3, b->y3, has_dc, quant);
630 quantize8x8(fb->y4, b->y4, has_dc, quant);
631 quantize8x8(fb->u, b->u, has_dc, quant);
632 quantize8x8(fb->v, b->v, has_dc, quant);
635 static void dodct(block_t*fb)
637 dct(fb->y1); dct(fb->y2); dct(fb->y3); dct(fb->y4);
638 dct(fb->u); dct(fb->v);
647 static void dodctandquant(block_t*fb, block_t*b, int has_dc, int quant)
652 quantize(fb,b,has_dc,quant);
656 dct2(fb->y1,b->y1); dct2(fb->y2,b->y2); dct2(fb->y3,b->y3); dct2(fb->y4,b->y4);
657 dct2(fb->u,b->u); dct2(fb->v,b->v);
660 /* prepare for encoding (only values in (-127..-1,1..127) are
661 allowed as non-zero, non-dc values) */
662 if(b->y1[t]<-127) b->y1[t]=-127;
663 if(b->y2[t]<-127) b->y2[t]=-127;
664 if(b->y3[t]<-127) b->y3[t]=-127;
665 if(b->y4[t]<-127) b->y4[t]=-127;
666 if(b->u[t]<-127) b->u[t]=-127;
667 if(b->v[t]<-127) b->v[t]=-127;
669 if(b->y1[t]>127) b->y1[t]=127;
670 if(b->y2[t]>127) b->y2[t]=127;
671 if(b->y3[t]>127) b->y3[t]=127;
672 if(b->y4[t]>127) b->y4[t]=127;
673 if(b->u[t]>127) b->u[t]=127;
674 if(b->v[t]>127) b->v[t]=127;
678 static void doidct(block_t*b)
683 fb.y1[t] = b->y1[zigzagtable[t]];
684 fb.y2[t] = b->y2[zigzagtable[t]];
685 fb.y3[t] = b->y3[zigzagtable[t]];
686 fb.y4[t] = b->y4[zigzagtable[t]];
687 fb.u[t] = b->u[zigzagtable[t]];
688 fb.v[t] = b->v[zigzagtable[t]];
690 idct(fb.y1); idct(fb.y2); idct(fb.y3); idct(fb.y4);
691 idct(fb.u); idct(fb.v);
693 memcpy(b, &fb, sizeof(block_t));
696 static void truncateblock(block_t*b)
700 b->y1[t] = truncate256(b->y1[t]);
701 b->y2[t] = truncate256(b->y2[t]);
702 b->y3[t] = truncate256(b->y3[t]);
703 b->y4[t] = truncate256(b->y4[t]);
704 b->u[t] = truncate256(b->u[t]);
705 b->v[t] = truncate256(b->v[t]);
709 static void dequantize(block_t*b, int has_dc, int quant)
711 dequantize8x8(b->y1, has_dc, quant);
712 dequantize8x8(b->y2, has_dc, quant);
713 dequantize8x8(b->y3, has_dc, quant);
714 dequantize8x8(b->y4, has_dc, quant);
715 dequantize8x8(b->u, has_dc, quant);
716 dequantize8x8(b->v, has_dc, quant);
719 static void getblockpatterns(block_t*b, int*cbpybits,int*cbpcbits, int has_dc)
724 *cbpybits|=hascoef(b->y1, has_dc)*8;
725 *cbpybits|=hascoef(b->y2, has_dc)*4;
726 *cbpybits|=hascoef(b->y3, has_dc)*2;
727 *cbpybits|=hascoef(b->y4, has_dc)*1;
729 *cbpcbits|=hascoef(b->u, has_dc)*2;
730 *cbpcbits|=hascoef(b->v, has_dc)*1;
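/* Example: if only y1 and v contain coefficients, cbpybits = 8 (binary 1000,
   one bit per luma block y1..y4) and cbpcbits = 1 (binary 01 for u,v). */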
733 static void setQuant(TAG*tag, int dquant)
740 swf_SetBits(tag, 0x0, 2);
741 } else if(dquant == -2) {
742 swf_SetBits(tag, 0x1, 2);
743 } else if(dquant == +1) {
744 swf_SetBits(tag, 0x2, 2);
745 } else if(dquant == +2) {
746 swf_SetBits(tag, 0x3, 2);
748 assert(0 && "invalid dquant");
752 static void change_quant(int quant, int*dquant)
758 static void yuvdiff(block_t*a, block_t*b)
762 a->y1[t] = (a->y1[t] - b->y1[t]);
763 a->y2[t] = (a->y2[t] - b->y2[t]);
764 a->y3[t] = (a->y3[t] - b->y3[t]);
765 a->y4[t] = (a->y4[t] - b->y4[t]);
766 a->u[t] = (a->u[t] - b->u[t]);
767 a->v[t] = (a->v[t] - b->v[t]);
771 static void predictmvd(VIDEOSTREAM*s, int bx, int by, int*px, int*py)
774 int x1,y1,x2,y2,x3,y3;
776 if(bx) {x1=s->mvdx[by*s->bbx+bx-1];
777 y1=s->mvdy[by*s->bbx+bx-1];
780 if(by) {x2=s->mvdx[(by-1)*s->bbx+bx];
781 y2=s->mvdy[(by-1)*s->bbx+bx];
783 x3=s->mvdx[(by-1)*s->bbx+bx+1];
784 y3=s->mvdy[(by-1)*s->bbx+bx+1];
789 else {x2=x3=x1;y2=y3=y1;}
791 if((x1 <= x2 && x2 <= x3) ||
792 (x3 <= x2 && x2 <= x1)) {
794 } else if((x2 <= x1 && x1 <= x3) ||
795 (x3 <= x1 && x1 <= x2)) {
797 } else if((x1 <= x3 && x3 <= x2) ||
798 (x2 <= x3 && x3 <= x1)) {
805 if((y1 <= y2 && y2 <= y3) ||
806 (y3 <= y2 && y2 <= y1)) {
808 } else if((y2 <= y1 && y1 <= y3) ||
809 (y3 <= y1 && y1 <= y2)) {
811 } else if((y1 <= y3 && y3 <= y2) ||
812 (y2 <= y3 && y3 <= y1)) {
821 assert((x4>=-32 && x4<=31) && (y4>=-32 && y4<=31));
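/* The predictor is the component-wise median of the three candidate vectors,
   e.g. x1=2, x2=6, x3=4 gives x4=4. */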
824 static inline int mvd2index(int px, int py, int x, int y, int xy)
827 if(x<-32 || x>31 || y<-32 || y>31)
828 fprintf(stderr, "(%d,%d)\n", x,y);
829 assert((x>=-32 && x<=31) && (y>=-32 && y<=31));
830 //assert((x&1)==0 && (y&1)==0);//for now
831 //assert((x&2)==0 && (y&2)==0);//for now(2)
846 assert(x>=0 && x<64);
850 typedef struct _iblockdata_t
852 block_t b; //transformed quantized coefficients
853 block_t reconstruction;
856 struct huffcode*ctable; //table to use for chrominance encoding (different for i-frames)
857 int iframe; // 1 if this is part of an iframe
860 typedef struct _mvdblockdata_t
864 block_t reconstruction;
873 void prepareIBlock(VIDEOSTREAM*s, iblockdata_t*data, int bx, int by, block_t* fb, int*bits, int iframe)
875 /* consider I-block */
879 struct huffcode*ctable;
884 data->iframe = iframe;
886 data->ctable = &mcbpc_inter[3*4];
888 data->ctable = &mcbpc_intra[0];
891 memcpy(&fb_i, fb, sizeof(block_t));
892 dodctandquant(&fb_i, &data->b, 1, s->quant);
893 getblockpatterns(&data->b, &y, &c, 1);
898 *bits += data->ctable[c].len;
899 *bits += cbpy[y].len;
900 *bits += coefbits8x8(data->b.y1, 1);
901 *bits += coefbits8x8(data->b.y2, 1);
902 *bits += coefbits8x8(data->b.y3, 1);
903 *bits += coefbits8x8(data->b.y4, 1);
904 *bits += coefbits8x8(data->b.u, 1);
905 *bits += coefbits8x8(data->b.v, 1);
908 /* -- reconstruction -- */
909 memcpy(&data->reconstruction,&data->b,sizeof(block_t));
910 dequantize(&data->reconstruction, 1, s->quant);
911 doidct(&data->reconstruction);
912 truncateblock(&data->reconstruction);
915 int writeIBlock(VIDEOSTREAM*s, TAG*tag, iblockdata_t*data)
922 getblockpatterns(&data->b, &y, &c, has_dc);
924 swf_SetBits(tag,0,1); bits += 1; // COD
926 bits += codehuffman(tag, data->ctable, c);
927 bits += codehuffman(tag, cbpy, y);
930 bits += encode8x8(tag, data->b.y1, has_dc, y&8);
931 bits += encode8x8(tag, data->b.y2, has_dc, y&4);
932 bits += encode8x8(tag, data->b.y3, has_dc, y&2);
933 bits += encode8x8(tag, data->b.y4, has_dc, y&1);
936 bits += encode8x8(tag, data->b.u, has_dc, c&2);
937 bits += encode8x8(tag, data->b.v, has_dc, c&1);
939 copy_block_pic(s, s->current, &data->reconstruction, data->bx, data->by);
940 assert(data->bits == bits);
944 int getmvdbits(VIDEOSTREAM*s,block_t*fb, int bx,int by,int hx,int hy)
950 memcpy(&fbdiff, fb, sizeof(block_t));
951 getmvdregion(&fbold, s->oldpic, bx, by, hx, hy, s->linex);
952 yuvdiff(&fbdiff, &fbold);
953 dodctandquant(&fbdiff, &b, 0, s->quant);
954 bits += coefbits8x8(b.y1, 0);
955 bits += coefbits8x8(b.y2, 0);
956 bits += coefbits8x8(b.y3, 0);
957 bits += coefbits8x8(b.y4, 0);
958 bits += coefbits8x8(b.u, 0);
959 bits += coefbits8x8(b.v, 0);
963 void prepareMVDBlock(VIDEOSTREAM*s, mvdblockdata_t*data, int bx, int by, block_t* fb, int*bits)
964 { /* consider mvd(x,y)-block */
974 predictmvd(s,bx,by,&predictmvdx,&predictmvdy);
982 int bestx=0,besty=0,bestbits=65536;
983 int startx=-32,endx=31;
984 int starty=-32,endy=31;
988 if(bx==s->bbx-1) endx=0;
989 if(by==s->bby-1) endy=0;
991 for(hx=startx;hx<=endx;hx+=4)
992 for(hy=starty;hy<=endy;hy+=4)
995 bits = getmvdbits(s,fb,bx,by,hx,hy);
1003 if(bestx-3 > startx) startx = bestx-3;
1004 if(besty-3 > starty) starty = besty-3;
1005 if(bestx+3 < endx) endx = bestx+3;
1006 if(besty+3 < endy) endy = besty+3;
1008 for(hx=startx;hx<=endx;hx++)
1009 for(hy=starty;hy<=endy;hy++)
1012 bits = getmvdbits(s,fb,bx,by,hx,hy);
1019 data->movex = bestx;
1020 data->movey = besty;
1023 memcpy(&fbdiff, fb, sizeof(block_t));
1024 getmvdregion(&data->fbold, s->oldpic, bx, by, data->movex, data->movey, s->linex);
1025 yuvdiff(&fbdiff, &data->fbold);
1026 dodctandquant(&fbdiff, &data->b, 0, s->quant);
1027 getblockpatterns(&data->b, &y, &c, 0);
1029 data->xindex = mvd2index(predictmvdx, predictmvdy, data->movex, data->movey, 0);
1030 data->yindex = mvd2index(predictmvdx, predictmvdy, data->movex, data->movey, 1);
1033 *bits += mcbpc_inter[0*4+c].len;
1034 *bits += cbpy[y^15].len;
1035 *bits += mvd[data->xindex].len;
1036 *bits += mvd[data->yindex].len;
1037 *bits += coefbits8x8(data->b.y1, 0);
1038 *bits += coefbits8x8(data->b.y2, 0);
1039 *bits += coefbits8x8(data->b.y3, 0);
1040 *bits += coefbits8x8(data->b.y4, 0);
1041 *bits += coefbits8x8(data->b.u, 0);
1042 *bits += coefbits8x8(data->b.v, 0);
1045 /* -- reconstruction -- */
1046 memcpy(&data->reconstruction, &data->b, sizeof(block_t));
1047 dequantize(&data->reconstruction, 0, s->quant);
1048 doidct(&data->reconstruction);
1050 data->reconstruction.y1[t] =
1051 truncate256(data->reconstruction.y1[t] + (int)data->fbold.y1[t]);
1052 data->reconstruction.y2[t] =
1053 truncate256(data->reconstruction.y2[t] + (int)data->fbold.y2[t]);
1054 data->reconstruction.y3[t] =
1055 truncate256(data->reconstruction.y3[t] + (int)data->fbold.y3[t]);
1056 data->reconstruction.y4[t] =
1057 truncate256(data->reconstruction.y4[t] + (int)data->fbold.y4[t]);
1058 data->reconstruction.u[t] =
1059 truncate256(data->reconstruction.u[t] + (int)data->fbold.u[t]);
1060 data->reconstruction.v[t] =
1061 truncate256(data->reconstruction.v[t] + (int)data->fbold.v[t]);
1065 int writeMVDBlock(VIDEOSTREAM*s, TAG*tag, mvdblockdata_t*data)
1069 int has_dc=0; // mvd w/o mvd24
1070 /* mvd (0,0) block (mode=0) */
1076 getblockpatterns(&data->b, &y, &c, has_dc);
1077 swf_SetBits(tag,0,1); bits += 1; // COD
1078 bits += codehuffman(tag, mcbpc_inter, mode*4+c);
1079 bits += codehuffman(tag, cbpy, y^15);
1082 bits += codehuffman(tag, mvd, data->xindex);
1083 bits += codehuffman(tag, mvd, data->yindex);
1086 bits += encode8x8(tag, data->b.y1, has_dc, y&8);
1087 bits += encode8x8(tag, data->b.y2, has_dc, y&4);
1088 bits += encode8x8(tag, data->b.y3, has_dc, y&2);
1089 bits += encode8x8(tag, data->b.y4, has_dc, y&1);
1092 bits += encode8x8(tag, data->b.u, has_dc, c&2);
1093 bits += encode8x8(tag, data->b.v, has_dc, c&1);
1095 s->mvdx[by*s->bbx+bx] = data->movex;
1096 s->mvdy[by*s->bbx+bx] = data->movey;
1098 copy_block_pic(s, s->current, &data->reconstruction, data->bx, data->by);
1099 assert(data->bits == bits);
1103 static int encode_PFrame_block(TAG*tag, VIDEOSTREAM*s, int bx, int by)
1110 iblockdata_t iblock;
1111 mvdblockdata_t mvdblock;
1113 getregion(&fb, s->current, bx, by, s->linex);
1114 prepareIBlock(s, &iblock, bx, by, &fb, &bits_i, 0);
1116 /* encoded last frame <=> original current block: */
1117 diff1 = compare_pic_pic(s, s->current, s->oldpic, bx, by);
1118 /* encoded current frame <=> original current block: */
1119 diff2 = compare_pic_block(s, &iblock.reconstruction, s->current, bx, by);
1121 if(diff1 <= diff2) {
1122 swf_SetBits(tag, 1,1); /* cod=1, block skipped */
1123 /* copy the region from the last frame so that we have a complete reconstruction */
1124 copyregion(s, s->current, s->oldpic, bx, by);
1127 prepareMVDBlock(s, &mvdblock, bx, by, &fb, &bits_vxy);
1129 if(bits_i > bits_vxy) {
1130 return writeMVDBlock(s, tag, &mvdblock);
1132 return writeIBlock(s, tag, &iblock);
1136 /* should be called encode_IFrameBlock */
1137 static void encode_IFrame_block(TAG*tag, VIDEOSTREAM*s, int bx, int by)
1143 getregion(&fb, s->current, bx, by, s->width);
1144 prepareIBlock(s, &data, bx, by, &fb, &bits, 1);
1145 writeIBlock(s, tag, &data);
1149 static int bmid = 0;
1151 void setdbgpic(TAG*tag, RGBA*pic, int width, int height)
1156 tag = swf_InsertTag(tag,ST_REMOVEOBJECT2);
1157 swf_SetU16(tag, 133);
1159 tag = swf_InsertTag(tag, ST_DEFINEBITSLOSSLESS);
1160 swf_SetU16(tag, 1000+bmid);
1161 swf_SetLosslessBits(tag, width, height, (void*)pic, BMF_32BIT);
1163 tag = swf_InsertTag(tag, ST_DEFINESHAPE);
1164 swf_SetU16(tag, 2000+bmid);
1165 swf_ShapeSetBitmapRect(tag, 1000+bmid, width, height);
1167 tag = swf_InsertTag(tag,ST_PLACEOBJECT2);
1168 swf_GetMatrix(0,&m);
1170 swf_ObjectPlace(tag, 2000+bmid, 133, &m, 0, 0);
1176 #define TYPE_IFRAME 0
1177 #define TYPE_PFRAME 1
1179 static void writeHeader(TAG*tag, int width, int height, int frame, int quant, int type)
1182 swf_SetU16(tag, frame);
1183 swf_SetBits(tag, 1, 17); /* picture start code*/
1184 swf_SetBits(tag, 0, 5); /* version=0, version 1 would optimize rle behaviour*/
1185 swf_SetBits(tag, frame, 8); /* time reference */
1187 /* write dimensions, using one of the predefined size codes
1188 when the width/height match; otherwise write them explicitly */
1189 i32 = width<<16|height;
1192 case 352<<16|288: swf_SetBits(tag, 2, 3);break;
1193 case 176<<16|144: swf_SetBits(tag, 3, 3);break;
1194 case 128<<16|96: swf_SetBits(tag, 4, 3);break;
1195 case 320<<16|240: swf_SetBits(tag, 5, 3);break;
1196 case 160<<16|120: swf_SetBits(tag, 6, 3);break;
1198 if(width>255 || height>255) {
1199 swf_SetBits(tag, 1, 3);
1200 swf_SetBits(tag, width, 16);
1201 swf_SetBits(tag, height, 16);
1203 swf_SetBits(tag, 0, 3);
1204 swf_SetBits(tag, width, 8);
1205 swf_SetBits(tag, height, 8);
1209 swf_SetBits(tag, type, 2); /* I-Frame or P-Frame */
1210 swf_SetBits(tag, 0, 1); /* No deblock filter */
1213 swf_SetBits(tag, quant, 5); /* quantizer (1-31), may be updated later on*/
1214 swf_SetBits(tag, 0, 1); /* No extra info */
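/* Layout of the picture header written above (after the 16 bit frame number
   of the VIDEOFRAME tag):
     17 bits picture start code (1)
      5 bits version (0)
      8 bits temporal reference (frame & 0xff)
      3 bits size code; 0/1 mean explicit 8/16 bit width+height,
             2..6 select the predefined sizes above
      2 bits picture type (0 = I-frame, 1 = P-frame)
      1 bit  deblocking filter flag (off)
      5 bits quantizer (1..31)
      1 bit  extra information flag (none) */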
1217 void swf_SetVideoStreamIFrame(TAG*tag, VIDEOSTREAM*s, RGBA*pic, int quant)
1221 if(quant<1) quant=1;
1222 if(quant>31) quant=31;
1225 writeHeader(tag, s->width, s->height, s->frame, quant, TYPE_IFRAME);
1227 /* fixme: should fill with 0,128,128, not 0,0,0 */
1228 memset(s->current, 0, s->linex*s->height*sizeof(YUV));
1230 rgb2yuv(s->current, pic, s->linex, s->olinex, s->owidth, s->oheight);
1232 for(by=0;by<s->bby;by++)
1234 for(bx=0;bx<s->bbx;bx++)
1236 encode_IFrame_block(tag, s, bx, by);
1240 memcpy(s->oldpic, s->current, s->width*s->height*sizeof(YUV));
1242 void swf_SetVideoStreamBlackFrame(TAG*tag, VIDEOSTREAM*s)
1248 writeHeader(tag, s->width, s->height, s->frame, quant, TYPE_IFRAME);
1251 for(y=0;y<s->height;y++)
1252 for(x=0;x<s->width;x++) {
1253 s->current[y*s->width+x].y = 0;
1254 s->current[y*s->width+x].u = 128;
1255 s->current[y*s->width+x].v = 128;
1259 s->current[y*s->width+x].y = 64;
1260 s->current[y*s->width+x].u = 128;
1261 s->current[y*s->width+x].v = 128;
1264 for(by=0;by<s->bby;by++)
1266 for(bx=0;bx<s->bbx;bx++)
1268 encode_IFrame_block(tag, s, bx, by);
1272 memcpy(s->oldpic, s->current, s->width*s->height*sizeof(YUV));
1275 void swf_SetVideoStreamPFrame(TAG*tag, VIDEOSTREAM*s, RGBA*pic, int quant)
1279 if(quant<1) quant=1;
1280 if(quant>31) quant=31;
1283 writeHeader(tag, s->width, s->height, s->frame, quant, TYPE_PFRAME);
1285 /* fixme: should fill with 0,128,128, not 0,0,0 */
1286 memset(s->current, 0, s->linex*s->height*sizeof(YUV));
1288 rgb2yuv(s->current, pic, s->linex, s->olinex, s->owidth, s->oheight);
1289 memset(s->mvdx, 0, s->bbx*s->bby*sizeof(int));
1290 memset(s->mvdy, 0, s->bbx*s->bby*sizeof(int));
1292 for(by=0;by<s->bby;by++)
1294 for(bx=0;bx<s->bbx;bx++)
1296 encode_PFrame_block(tag, s, bx, by);
1300 memcpy(s->oldpic, s->current, s->width*s->height*sizeof(YUV));
1304 yuv2rgb(pic, s->current, s->linex, s->width, s->height);
1305 setdbgpic(tag, pic, s->width, s->height);
1310 void swf_SetVideoStreamMover(TAG*tag, VIDEOSTREAM*s, signed char* movex, signed char* movey, void**pictures, int quant)
1315 if(quant<1) quant=1;
1316 if(quant>31) quant=31;
1319 writeHeader(tag, s->width, s->height, s->frame, quant, TYPE_PFRAME);
1321 memset(s->mvdx, 0, s->bbx*s->bby*sizeof(int));
1322 memset(s->mvdy, 0, s->bbx*s->bby*sizeof(int));
1324 for(by=0;by<s->bby;by++)
1326 for(bx=0;bx<s->bbx;bx++)
1328 int predictmvdx=0, predictmvdy=0;
1329 int mvx=movex[by*s->bbx+bx];
1330 int mvy=movey[by*s->bbx+bx];
1331 void*picture = pictures?pictures[by*s->bbx+bx]:0;
1333 if(mvx<-32) mvx=-32;
1335 if(mvy<-32) mvy=-32;
1338 if(mvx == 0 && mvy == 0 && picture == 0) {
1339 swf_SetBits(tag,1,1); // COD skip
1347 swf_SetBits(tag,0,1); // COD
1349 if(mvx==0 && mvy==0 && picture) { // only picture
1355 RGBA* picblock = (RGBA*)picture;
1356 rgb2yuv(pic, picblock,16,16,16,16);
1357 /* TODO: if has_dc!=1, subtract 128 from rgb values */
1358 getregion(&b, pic, 0,0,16);
1359 dodctandquant(&b, &b2, 1, s->quant);
1360 getblockpatterns(&b2, &y, &c, 1);
1365 codehuffman(tag, mcbpc_inter, mode*4+c);
1366 codehuffman(tag, cbpy, mode==3?y:y^15);
1369 /* has motion vector */
1370 predictmvd(s,bx,by,&predictmvdx,&predictmvdy);
1371 codehuffman(tag, mvd, mvd2index(predictmvdx, predictmvdy, mvx, mvy, 0));
1372 codehuffman(tag, mvd, mvd2index(predictmvdx, predictmvdy, mvx, mvy, 1));
1373 s->mvdx[by*s->bbx+bx] = mvx;
1374 s->mvdy[by*s->bbx+bx] = mvy;
1378 encode8x8(tag, b2.y1, has_dc, y&8);
1379 encode8x8(tag, b2.y2, has_dc, y&4);
1380 encode8x8(tag, b2.y3, has_dc, y&2);
1381 encode8x8(tag, b2.y4, has_dc, y&1);
1382 encode8x8(tag, b2.u, has_dc, c&2);
1383 encode8x8(tag, b2.v, has_dc, c&1);
1393 void test_copy_diff()
1396 VIDEOSTREAM* s = &stream;
1398 RGBA*pic = malloc(256*256*sizeof(RGBA));
1403 for(y=0;y<256;y++) {
1404 pic[y*256+x].r = x*y;
1405 pic[y*256+x].g = x+y;
1406 pic[y*256+x].b = (x+1)%(y+1);
1408 tag = swf_InsertTag(0, ST_DEFINEVIDEOSTREAM);
1409 swf_SetU16(tag, 33);
1410 swf_SetVideoStreamDefine(tag, s, 10, 256, 256);
1412 rgb2yuv(s->current, pic, s->linex, s->olinex, s->owidth, s->oheight);
1413 for(by=0;by<16;by++)
1414 for(bx=0;bx<16;bx++) {
1416 /* test1: does compare_pic_pic return zero for identical blocks? */
1417 diff1 = compare_pic_pic(s, s->current, s->current, bx, by);
1419 /* test2: do blocks which are copied back return zero diff? */
1420 getregion(&fb, s->current, bx, by, s->linex);
1421 copy_block_pic(s, s->oldpic, &fb, bx, by);
1422 diff1 = compare_pic_block(s, &fb, s->oldpic, bx, by);
1424 /* test3: does compare_pic_block return the same result as compare_pic_pic? */
1425 getregion(&fb, s->current, 15-bx, 15-by, s->linex);
1426 copy_block_pic(s, s->oldpic, &fb, bx, by);
1427 diff1 = compare_pic_block(s, &fb, s->current, bx, by);
1428 diff2 = compare_pic_pic(s, s->current, s->oldpic, bx, by);
1429 assert(diff1 == diff2);
1438 int compileSWFActionCode(const char *script, int version, void**data, int*len) {return 0;}
1453 pic = malloc(width*height*4);
1454 memset(pic, 0, width*height*4);
1456 memset(&swf,0,sizeof(SWF));
1457 memset(&obj,0,sizeof(obj));
1459 swf.fileVersion = 6;
1460 swf.frameRate = 15*256;
1461 swf.movieSize.xmax = 20*width;
1462 swf.movieSize.ymax = 20*height;
1464 swf.firstTag = swf_InsertTag(NULL,ST_SETBACKGROUNDCOLOR);
1466 rgb.r = 0x00;rgb.g = 0x30;rgb.b = 0xff;
1467 swf_SetRGB(tag,&rgb);
1469 tag = swf_InsertTag(tag, ST_DEFINEVIDEOSTREAM);
1471 swf_SetVideoStreamDefine(tag, &stream, frames, width, height);
1472 stream.do_motion = 0;
1474 for(y=0;y<height;y++) {
1475 for(x=0;x<width;x++) {
1479 pic[y*width+x].r = 0;
1480 pic[y*width+x].g = 0;
1481 pic[y*width+x].b = 0;
1482 pic[y*width+x].a = 0;
1485 tag = swf_InsertTag(tag, ST_VIDEOFRAME);
1488 swf_SetVideoStreamIFrame(tag, &stream, pic, 7);
1490 tag = swf_InsertTag(tag, ST_PLACEOBJECT2);
1491 swf_GetPlaceObject(0, &obj);
1496 swf_SetPlaceObject(tag,&obj);
1498 tag = swf_InsertTag(tag, ST_SHOWFRAME);
1500 swf_VideoStreamClear(&stream);
1502 tag = swf_InsertTag(tag, ST_END);
1504 int fi = open("black.swf", O_WRONLY|O_CREAT|O_TRUNC, 0644);
1505 if(swf_WriteSWC(fi,&swf)<0) {
1506 fprintf(stderr,"swf_WriteSWC() failed.\n");
1512 int main(int argn, char*argv[])
1518 RGBA* pic, *pic2, rgb;
1525 char* fname = "/home/kramm/pics/peppers_fromjpg.png";
1526 //char* fname = "/home/kramm/pics/baboon.png";
1536 memset(&stream, 0, sizeof(stream));
1538 getPNG(fname, &width, &height, &data);
1539 pic = (RGBA*)malloc(width*height*sizeof(RGBA));
1540 pic2 = (RGBA*)malloc(width*height*sizeof(RGBA));
1541 memcpy(pic, data, width*height*sizeof(RGBA));
1544 printf("Compressing %s, size %dx%d\n", fname, width, height);
1546 memset(&swf,0,sizeof(SWF));
1547 memset(&obj,0,sizeof(obj));
1549 swf.fileVersion = 6;
1550 swf.frameRate = framerate*256;
1551 swf.movieSize.xmax = 20*width*2;
1552 swf.movieSize.ymax = 20*height;
1554 swf.firstTag = swf_InsertTag(NULL,ST_SETBACKGROUNDCOLOR);
1556 rgb.r = 0x00;rgb.g = 0x30;rgb.b = 0xff;
1557 swf_SetRGB(tag,&rgb);
1559 tag = swf_InsertTag(tag, ST_DEFINEVIDEOSTREAM);
1560 swf_SetU16(tag, 33);
1561 swf_SetVideoStreamDefine(tag, &stream, frames, width, height);
1562 stream.do_motion = 0;
1566 for(t=0;t<frames;t++)
1570 for(y=0,yy=0;y<height;y++,yy+=d) {
1571 RGBA*line = &pic[((int)yy)*width];
1572 for(x=0,xx=0;x<width;x++,xx+=d) {
1575 if(dx==0 && dy==0) {
1576 pic2[y*width+x] = line[((int)xx)];
1577 pic2[y*width+x].r+=2;
1578 pic2[y*width+x].g+=2;
1579 pic2[y*width+x].b+=2;
1581 //pic2[y*width+x] = line[((int)xx)];
1582 //pic2[y*width+x].r = lrand48();//line[((int)xx)];
1583 //pic2[y*width+x].g = lrand48();//line[((int)xx)];
1584 //pic2[y*width+x].b = lrand48();//line[((int)xx)];
1585 pic2[y*width+x].r = 0;
1586 pic2[y*width+x].g = 0;
1587 pic2[y*width+x].b = 0;
1589 /*if(dx==16 && dy==16)
1590 pic2[y*width+x] = pic[(y-16*16)*width+(x-16*16)];*/
1591 /*if(dx<=0 && dy<=0) {
1592 pic2[y*width+x] = line[((int)xx)];*/
1593 /*if(x==0 && y==0) {
1595 memset(&color, 0, sizeof(RGBA));
1596 pic2[y*width+x] = color;*/
1599 color.r = lrand48();
1600 color.g = lrand48();
1601 color.b = lrand48();
1603 pic2[y*width+x] = color;
1607 printf("frame:%d\n", t);fflush(stdout);
1612 tag = swf_InsertTag(tag, ST_VIDEOFRAME);
1613 swf_SetU16(tag, 33);
1615 swf_SetVideoStreamIFrame(tag, &stream, pic2, 7);
1617 swf_SetVideoStreamPFrame(tag, &stream, pic2, 7);
1620 tag = swf_InsertTag(tag, ST_PLACEOBJECT2);
1621 swf_GetPlaceObject(0, &obj);
1630 swf_SetPlaceObject(tag,&obj);
1632 tag = swf_InsertTag(tag, ST_SHOWFRAME);
1635 swf_VideoStreamClear(&stream);
1637 tag = swf_InsertTag(tag, ST_END);
1639 fi = open("video3.swf", O_WRONLY|O_CREAT|O_TRUNC, 0644);
1640 if(swf_WriteSWC(fi,&swf)<0) {
1641 fprintf(stderr,"swf_WriteSWC() failed.\n");