4 Part of the swftools package.
6 Copyright (c) 2003 Matthias Kramm <kramm@quiss.org> */
12 #include "../rfxswf.h"
13 #include "h263tables.c"
/* Write the body of a DEFINEVIDEOSTREAM tag and initialize the encoder state.
   Emits frame count, dimensions, smoothing flag and codec id
   (2 = Sorenson Spark, the SWF flavour of H.263) into the tag, then
   allocates the working buffers: current/previous YUV frame and one
   motion vector (x,y) per 16x16 macroblock. */
void swf_SetVideoStreamDefine(TAG*tag, VIDEOSTREAM*stream, U16 frames, U16 width, U16 height)
    /* the codec operates on 16x16 macroblocks - truncate to a multiple of 16 */
    width=width&~15; height=height&~15;
    swf_SetU16(tag, frames);
    swf_SetU16(tag, width);
    swf_SetU16(tag, height);
    swf_SetU8(tag, 1); /* smoothing on */
    swf_SetU8(tag, 2); /* codec = h.263 sorenson spark */
    memset(stream, 0, sizeof(VIDEOSTREAM));
    stream->olinex = width; /* line stride of the caller-supplied RGBA input */
    /* round the internal buffer height up to a whole macroblock row.
       NOTE(review): width is presumably rounded the same way in a line not
       visible in this view - confirm against the full source. */
    height+=15;height&=~15;
    stream->linex = width;   /* line stride of the internal YUV buffers */
    stream->width = width;
    stream->height = height;
    stream->bbx = width/16;  /* macroblocks per row */
    stream->bby = height/16; /* macroblock rows */
    stream->current = (YUV*)malloc(width*height*sizeof(YUV));
    stream->oldpic = (YUV*)malloc(width*height*sizeof(YUV));
    stream->mvdx = (int*)malloc(stream->bbx*stream->bby*sizeof(int));
    stream->mvdy = (int*)malloc(stream->bbx*stream->bby*sizeof(int));
    stream->do_motion = 0;
    /* start from black frames so the first P-frame diff is well-defined */
    memset(stream->oldpic, 0, width*height*sizeof(YUV));
    memset(stream->current, 0, width*height*sizeof(YUV));
43 void swf_VideoStreamClear(VIDEOSTREAM*stream)
45 free(stream->oldpic);stream->oldpic = 0;
46 free(stream->current);stream->current = 0;
47 free(stream->mvdx);stream->mvdx=0;
48 free(stream->mvdy);stream->mvdy=0;
/* One 16x16 macroblock in planar form.  Judging from its use below it holds
   four 8x8 luma blocks (y1..y4) and two 8x8 subsampled chroma blocks (u,v)
   as int[64] arrays - the field list itself is not visible in this view. */
typedef struct _block_t
/* Classic 8x8 zigzag scan order (as in JPEG/H.263):
   zigzagtable[i] is the position in the scan sequence of the
   row-major coefficient i. */
static int zigzagtable[64] = {
    0, 1, 5, 6, 14, 15, 27, 28,
    2, 4, 7, 13, 16, 26, 29, 42,
    3, 8, 12, 17, 25, 30, 41, 43,
    9, 11, 18, 24, 31, 40, 44, 53,
    10, 19, 23, 32, 39, 45, 52, 54,
    20, 22, 33, 38, 46, 51, 55, 60,
    21, 34, 37, 47, 50, 56, 59, 61,
    35, 36, 48, 49, 57, 58, 62, 63};
/* Reorder the 64 coefficients of src into zigzag scan order, in place
   (via a temporary).  The loop scaffolding around the first statement is
   not visible in this view. */
static void zigzag(int*src)
    tmp[zigzagtable[t]] = src[t];
    memcpy(src, tmp, sizeof(int)*64);
81 #define PI 3.14159265358979
82 #define SQRT2 1.414214
83 #define RSQRT2 (1.0/1.414214)
/* DCT-II basis matrix: table[u][x] = cos((2x+1)*u*PI/16), with row 0
   carrying the 1/sqrt(2) normalization (all entries 0.7071...). */
static double table[8][8] =
    {0.707106781186548,0.707106781186548,0.707106781186548,0.707106781186548,0.707106781186548,0.707106781186548,0.707106781186548,0.707106781186548},
    {0.980785280403230,0.831469612302545,0.555570233019602,0.195090322016128,-0.195090322016128,-0.555570233019602,-0.831469612302545,-0.980785280403230},
    {0.923879532511287,0.382683432365090,-0.382683432365090,-0.923879532511287,-0.923879532511287,-0.382683432365090,0.382683432365090,0.923879532511287},
    {0.831469612302545,-0.195090322016128,-0.980785280403230,-0.555570233019602,0.555570233019602,0.980785280403230,0.195090322016129,-0.831469612302545},
    {0.707106781186548,-0.707106781186547,-0.707106781186548,0.707106781186547,0.707106781186548,-0.707106781186547,-0.707106781186547,0.707106781186547},
    {0.555570233019602,-0.980785280403230,0.195090322016128,0.831469612302545,-0.831469612302545,-0.195090322016128,0.980785280403231,-0.555570233019602},
    {0.382683432365090,-0.923879532511287,0.923879532511287,-0.382683432365090,-0.382683432365091,0.923879532511287,-0.923879532511286,0.382683432365090},
    {0.195090322016128,-0.555570233019602,0.831469612302545,-0.980785280403231,0.980785280403230,-0.831469612302545,0.555570233019602,-0.195090322016129}
/* 8x8 forward DCT of src, in place: rows first (into tmp), then columns,
   with a final 0.25 scale.  Only the accumulation/store statements are
   visible here - the u/v/x/y loops and tmp are in lines missing from
   this view. */
static void dct(int*src)
        c+=table[u][x]*src[v*8+x];
        c+=table[v][y]*tmp[y*8+u];
        src[v*8+u] = (int)(c*0.25);
/* 8x8 inverse DCT of src, in place - mirror image of dct() above,
   again with a 0.25 scale.  Loop scaffolding not visible in this view. */
static void idct(int*src)
        c+=table[u][x]*src[y*8+u];
        c+=table[v][y]*tmp[v*8+x];
        src[y*8+x] = (int)(c*0.25);
/* First column of the DCT basis: c[k] = cos(PI*k/16); used by the fast
   dct2()/innerdct() path and scaled by the quantizer in preparequant(). */
static double c[8] = {1.0,
    0.980785280403230, // cos(Pi*1/16), sin(Pi*7/16)
    0.923879532511287, // cos(Pi*2/16), sin(Pi*6/16)
    0.831469612302545, // cos(Pi*3/16), sin(Pi*5/16)
    0.707106781186548, // cos(Pi*4/16), sin(Pi*4/16), 1/sqrt(2)
    0.555570233019602, // cos(Pi*5/16), sin(Pi*3/16)
    0.382683432365090, // cos(Pi*6/16), sin(Pi*2/16)
    0.195090322016128  // cos(Pi*7/16), sin(Pi*1/16)
/* Fold the quantizer into the DCT constants: cc[k] = c[k] divided by the
   quantization step (quant*2) and the DCT normalization (4), so dct2()
   produces already-quantized coefficients in one pass. */
static void preparequant(int quant)
    cc[0] = c[0]/(quant*2*4);
    cc[1] = c[1]/(quant*2*4);
    cc[2] = c[2]/(quant*2*4);
    cc[3] = c[3]/(quant*2*4);
    cc[4] = c[4]/(quant*2*4);
    cc[5] = c[5]/(quant*2*4);
    cc[6] = c[6]/(quant*2*4);
    cc[7] = c[7]/(quant*2*4);
/* One 8-point DCT butterfly; reads 8 input samples and writes the
   transformed values at stride 8 (b[k*8]), i.e. down a column of the
   8x8 block, using the cosine factors in c.  The commented matrices
   below document the odd-part sign pattern. */
inline static void innerdct(double*a,double*b, double*c)
    //{ 1, 3, 5, 7, -7, -5, -3, -1},
    //{ 3, -7, -1, -5, 5, 1, 7, -3},
    //{ 5, -1, 7, 3, -3, -7, 1, -5},
    //{ 7, -5, 3, -1, 1, -3, 5, -7}
    double b0,b1,b2,b3,b4,b5;
    /* even part (b2/b6 rows) - the remaining even-part lines are not
       visible in this view */
    b[2*8] = (b2-b5)*c[2] + (b3-b4)*c[6];
    b[6*8] = (b2-b5)*c[6] + (b4-b3)*c[2];
    /* odd part (b1/b3/b5/b7 rows) */
    b[1*8] = b0*c[1] + b1*c[3] + b2*c[5] + b3*c[7];
    b[3*8] = b0*c[3] - b1*c[7] - b2*c[1] - b3*c[5];
    b[5*8] = b0*c[5] - b1*c[1] + b2*c[7] + b3*c[3];
    b[7*8] = b0*c[7] - b1*c[5] + b2*c[3] - b3*c[1];
/* Fast combined DCT + quantization: transforms src via innerdct() (rows,
   then columns, through tmp/tmp2) and stores the result into dest already
   in zigzag scan order. */
static void dct2(int*src, int*dest)
    double tmp[64], tmp2[64];
        double* a=&tmp2[v*8];
    dest[zigzagtable[t]] = (int)(tmp2[t]);
/* Clamp an int to the byte range [0,255].
   Fixed: the visible original only handled the upper bound, so for values
   <= 255 control could fall off the end of a value-returning function,
   which is undefined behavior; both bounds are now handled and a value is
   always returned. */
static inline int truncate256(int a)
{
    if(a>255) return 255;
    if(a<0) return 0;
    return a;
}
/* Extract one macroblock at (posx,posy) from pic into bb: four 8x8 luma
   blocks and 2x2-averaged (subsampled) chroma.  Callers pass coordinates
   as bx*2*16(+hx), i.e. in half-pixel units: hp captures the half-pel
   phase from the low bits, and the two loop bodies below are the
   full-pel and the horizontally-interpolated variants.
   NOTE(review): the conversion of posx/posy from half-pel to full-pel
   before the p1 assignment is in lines not visible here - confirm. */
static void getregion(block_t* bb, YUV*pic, int posx, int posy, int linex)
    int y1=0, y2=0, y3=0, y4=0;
    int hp = (posy&1)<<1|(posx&1); /* half-pel phase: bit1=y, bit0=x */
    p1 = &pic[posy*linex+posx];
    /* full-pel path: chroma averaged over 2x2, luma copied directly */
    bb->u[u++] = (p2[x*2].u + p2[x*2+1].u + p2[linex+x*2].u + p2[linex+x*2+1].u)/4;
    bb->v[v++] = (p2[x*2].v + p2[x*2+1].v + p2[linex+x*2].v + p2[linex+x*2+1].v)/4;
    bb->y1[y1++] = p1[x].y;
    bb->y2[y2++] = p1[x+8].y;
    bb->y3[y3++] = p1[linex*8+x].y;
    bb->y4[y4++] = p1[linex*8+x+8].y;
    /* horizontal half-pel path: luma averaged with its right neighbour */
    bb->u[u++] = (p2[x*2].u + p2[x*2+1].u + p2[linex+x*2].u + p2[linex+x*2+1].u)/4;
    bb->v[v++] = (p2[x*2].v + p2[x*2+1].v + p2[linex+x*2].v + p2[linex+x*2+1].v)/4;
    bb->y1[y1++] = (p1[x].y + p1[x+1].y)/2;
    bb->y2[y2++] = (p1[x+8].y + p1[x+8+1].y)/2;
    bb->y3[y3++] = (p1[linex*8+x].y + p1[linex*8+x+1].y)/2;
    bb->y4[y4++] = (p1[linex*8+x+8].y + p1[linex*8+x+8+1].y)/2;
/* Convert a width x height RGBA region (stride slinex) to YUV (stride
   dlinex) using an 8.8 fixed-point approximation of the float transform
   kept in the comment below; u and v are offset by 128. */
static void rgb2yuv(YUV*dest, RGBA*src, int dlinex, int slinex, int width, int height)
    for(y=0;y<height;y++) {
        for(x=0;x<width;x++) {
            r = src[y*slinex+x].r;
            g = src[y*slinex+x].g;
            b = src[y*slinex+x].b;
            /*dest[y*dlinex+x].y = (r*0.299 + g*0.587 + b*0.114);
            dest[y*dlinex+x].u = (r*-0.169 + g*-0.332 + b*0.500 + 128.0);
            dest[y*dlinex+x].v = (r*0.500 + g*-0.419 + b*-0.0813 + 128.0);*/
            dest[y*dlinex+x].y = (r*((int)( 0.299*256)) + g*((int)( 0.587*256)) + b*((int)( 0.114 *256)))>>8;
            dest[y*dlinex+x].u = (r*((int)(-0.169*256)) + g*((int)(-0.332*256)) + b*((int)( 0.500 *256))+ 128*256)>>8;
            dest[y*dlinex+x].v = (r*((int)( 0.500*256)) + g*((int)(-0.419*256)) + b*((int)(-0.0813*256))+ 128*256)>>8;
307 static void copyregion(VIDEOSTREAM*s, YUV*dest, YUV*src, int bx, int by)
309 YUV*p1 = &src[by*s->linex*16+bx*16];
310 YUV*p2 = &dest[by*s->linex*16+bx*16];
313 memcpy(p1, p2, 16*sizeof(YUV));
314 p1+=s->linex;p2+=s->linex;
/* Convert YUV back to RGBA (shared stride linex), using integer
   approximations of the inverse transform (e.g. 360/256 ~ 1.40 for the
   V contribution to red) and clamping each channel to [0,255]. */
static void yuv2rgb(RGBA*dest, YUV*src, int linex, int width, int height)
    for(y=0;y<height;y++) {
        for(x=0;x<width;x++) {
            u = src[y*linex+x].u;
            v = src[y*linex+x].v;
            yy = src[y*linex+x].y;
            dest[y*linex+x].r = truncate256(yy + ((360*(v-128))>>8));
            dest[y*linex+x].g = truncate256(yy - ((88*(u-128)+183*(v-128))>>8));
            dest[y*linex+x].b = truncate256(yy + ((455 * (u-128))>>8));
/* Write a reconstructed macroblock b back into the frame buffer dest at
   block coordinates (bx,by): p1 covers the top half (y1|y2), p2 the
   bottom half (y3|y4); the 8x8 chroma planes are pixel-doubled back up
   to 16x16 via the (y/2, x/2) indexing. */
static void copyblock(VIDEOSTREAM*s, YUV*dest, block_t*b, int bx, int by)
    YUV*p1 = &dest[(by*16)*s->linex+bx*16];
    YUV*p2 = &dest[(by*16+8)*s->linex+bx*16];
            p1[x+0].u = b->u[(y/2)*8+(x/2)];
            p1[x+0].v = b->v[(y/2)*8+(x/2)];
            p1[x+0].y = b->y1[y*8+x];
            p1[x+8].u = b->u[(y/2)*8+(x/2)+4];
            p1[x+8].v = b->v[(y/2)*8+(x/2)+4];
            p1[x+8].y = b->y2[y*8+x];
            p2[x+0].u = b->u[(y/2+4)*8+(x/2)];
            p2[x+0].v = b->v[(y/2+4)*8+(x/2)];
            p2[x+0].y = b->y3[y*8+x];
            p2[x+8].u = b->u[(y/2+4)*8+(x/2)+4];
            p2[x+8].v = b->v[(y/2+4)*8+(x/2)+4];
            p2[x+8].y = b->y4[y*8+x];
/* Sum-of-squared-differences between the macroblock at (bx,by) in the
   current frame and the same position in the previous frame; chroma is
   weighted at 1/4 relative to luma.  Used to decide whether a P-frame
   block can be skipped (cod=1). */
static int compareregions(VIDEOSTREAM*s, int bx, int by)
    int linex = s->width;
    YUV*p1 = &s->current[by*linex*16+bx*16];
    YUV*p2 = &s->oldpic[by*linex*16+bx*16];
    diff += y*y+(u*u+v*v)/4;
/* Map a DC coefficient to the level stored in the bitstream.
   Body not visible in this view. */
static inline int valtodc(int val)
    /* TODO: what to do for zero values? skip the block? */
/* Inverse of valtodc(): expand a stored DC level back into a coefficient
   value.  Body not visible in this view. */
static int dctoval(int dc)
/* Emit variable-length code number `index` from `table` into the tag,
   one bit at a time: the code is stored as a '0'/'1' character string. */
static int codehuffman(TAG*tag, struct huffcode*table, int index)
    /* TODO: !optimize! */
    while(table[index].code[i]) {
        if(table[index].code[i]=='0')
            swf_SetBits(tag, 0, 1);
        else
            swf_SetBits(tag, 1, 1);
/* Quantize one 8x8 coefficient block: AC coefficients are divided by
   2*quant (via multiplication with q); the DC coefficient, when present
   (has_dc, intra blocks), goes through valtodc() instead.  Levels are
   clipped to [-127,127], the range the TCOEF escape code can carry. */
static void quantize8x8(int*src, int*dest, int has_dc, int quant)
    double q = 1.0/(quant*2);
    dest[0] = valtodc((int)src[0]); /*DC*/
    //dest[t] = (int)src[t];
    /* exact: if(quant&1){dest[t] = (dest[t]/quant - 1)/2;}else{dest[t] = ((dest[t]+1)/quant - 1)/2;} */
    //if(quant&1){dest[t] = (dest[t]/quant - 1)/2;}else{dest[t] = ((dest[t]+1)/quant - 1)/2;}
    //dest[t] = dest[t]/(quant*2);
    dest[t] = (int)(src[t]*q);
    /* TODO: warn if this happens- the video will be buggy */
    if(dest[t]>127) dest[t]=127;
    if(dest[t]<-127) dest[t]=-127;
/* Reconstruct one 8x8 block from quantized levels, per the H.263
   inverse-quantization formula quant*(2*level+1) (minus 1 for even
   quantizers), with the DC term restored through dctoval() when
   has_dc is set. */
static void dequantize8x8(int*b, int has_dc, int quant)
    b[0] = dctoval(b[0]); //DC
    for(t=pos;t<64;t++) {
        b[t] = quant*(2*b[t]+1); //-7,8,24,40
        b[t] = quant*(2*b[t]+1)-1; //-8,7,23,39
    /* paragraph 6.2.2, "clipping of reconstruction levels": */
    if(b[t]>2047) b[t]=2047;
    if(b[t]<-2048) b[t]=-2048;
/* Return whether the block has any non-zero coefficient (scanning from
   position 1 or 0 depending on has_dc); feeds the CBPY/CBPC patterns. */
static int hascoef(int*b, int has_dc)
    for(t=pos;t<64;t++) {
/* Count how many bits encode8x8() would need for this block without
   actually writing anything - used for I/P mode decision.  Finds the
   last non-zero coefficient, then walks (run,level,last) triples and
   adds the VLC length (plus sign bit), or the escape length
   (+1 sign +6 run +8 level) when no table entry matches. */
static int coefbits8x8(int*bb, int has_dc)
    for(last=63;last>=pos;last--) {
    int run=0, level=0, islast=0,t;
    while(!bb[pos] && pos<last) {
    if(level<0) level=-level;
    for(t=0;t<RLE_ESCAPE;t++) {
        if(rle_params[t].run == run &&
           rle_params[t].level == level &&
           rle_params[t].last == islast) {
            bits += rle[t].len + 1;
    bits += rle[RLE_ESCAPE].len + 1 + 6 + 8;
/* Write one 8x8 block into the bitstream: the 8-bit DC value first when
   has_dc is set, then (if has_tcoef) the TCOEF (run,level,last) triples
   as VLCs, falling back to the 22-bit escape form (ESC + last + 6-bit
   run + 8-bit level) when no table entry matches. */
static void encode8x8(TAG*tag, int*bb, int has_dc, int has_tcoef)
    swf_SetBits(tag, bb[0], 8);
    /* determine last non-null coefficient */
    for(last=63;last>=pos;last--) {
    /* TODO: we could leave out small coefficients
       after a certain point (32?) */
    /* blocks without coefficients should not be included
       in the cbpy/cbpc patterns: */
    while(!bb[pos] && pos<last) {
    for(t=0;t<RLE_ESCAPE;t++) {
        /* TODO: lookup table */
        if(rle_params[t].run == run &&
           rle_params[t].level == level &&
           rle_params[t].last == islast) {
            codehuffman(tag, rle, t);
            swf_SetBits(tag, sign, 1);
    /* escape coding for (run,level,last) triples with no VLC entry */
    codehuffman(tag, rle, RLE_ESCAPE);
    swf_SetBits(tag, islast, 1);
    swf_SetBits(tag, run, 6);
    swf_SetBits(tag, level, 8); //FIXME: fixme??
608 static void quantize(block_t*fb, block_t*b, int has_dc, int quant)
610 quantize8x8(fb->y1, b->y1, has_dc, quant);
611 quantize8x8(fb->y2, b->y2, has_dc, quant);
612 quantize8x8(fb->y3, b->y3, has_dc, quant);
613 quantize8x8(fb->y4, b->y4, has_dc, quant);
614 quantize8x8(fb->u, b->u, has_dc, quant);
615 quantize8x8(fb->v, b->v, has_dc, quant);
/* Run the (slow, float) forward DCT over all six sub-blocks of a
   macroblock, in place. */
static void dodct(block_t*fb)
    dct(fb->y1); dct(fb->y2); dct(fb->y3); dct(fb->y4);
    dct(fb->u); dct(fb->v);
/* Transform + quantize a macroblock from fb into b.  Two code paths are
   visible: the exact dodct()+quantize() pair, and the fast combined
   dct2() path prepared by preparequant() - the switch between them is in
   lines not visible in this view. */
static void dodctandquant(block_t*fb, block_t*b, int has_dc, int quant)
    quantize(fb,b,has_dc,quant);
    dct2(fb->y1,b->y1); dct2(fb->y2,b->y2); dct2(fb->y3,b->y3); dct2(fb->y4,b->y4);
    dct2(fb->u,b->u); dct2(fb->v,b->v);
/* Inverse transform a macroblock: undo the zigzag ordering into a
   temporary block fb, then run idct() on every sub-block.  (The copy of
   the result back into b, if any, is in lines not visible here.) */
static void doidct(block_t*b)
    fb.y1[t] = b->y1[zigzagtable[t]];
    fb.y2[t] = b->y2[zigzagtable[t]];
    fb.y3[t] = b->y3[zigzagtable[t]];
    fb.y4[t] = b->y4[zigzagtable[t]];
    fb.u[t] = b->u[zigzagtable[t]];
    fb.v[t] = b->v[zigzagtable[t]];
    idct(fb.y1); idct(fb.y2); idct(fb.y3); idct(fb.y4);
    idct(fb.u); idct(fb.v);
/* Clamp every sample of every sub-block to [0,255] via truncate256(). */
static void truncateblock(block_t*b)
    b->y1[t] = truncate256(b->y1[t]);
    b->y2[t] = truncate256(b->y2[t]);
    b->y3[t] = truncate256(b->y3[t]);
    b->y4[t] = truncate256(b->y4[t]);
    b->u[t] = truncate256(b->u[t]);
    b->v[t] = truncate256(b->v[t]);
678 static void dequantize(block_t*b, int has_dc, int quant)
680 dequantize8x8(b->y1, has_dc, quant);
681 dequantize8x8(b->y2, has_dc, quant);
682 dequantize8x8(b->y3, has_dc, quant);
683 dequantize8x8(b->y4, has_dc, quant);
684 dequantize8x8(b->u, has_dc, quant);
685 dequantize8x8(b->v, has_dc, quant);
/* Build the CBPY (4 bits, one per luma block) and CBPC (2 bits, u and v)
   coded-block patterns: a bit is set when the corresponding sub-block
   has at least one coefficient to transmit. */
static void getblockpatterns(block_t*b, int*cbpybits,int*cbpcbits, int has_dc)
    *cbpybits|=hascoef(b->y1, has_dc)*8;
    *cbpybits|=hascoef(b->y2, has_dc)*4;
    *cbpybits|=hascoef(b->y3, has_dc)*2;
    *cbpybits|=hascoef(b->y4, has_dc)*1;
    *cbpcbits|=hascoef(b->u, has_dc)*2;
    *cbpcbits|=hascoef(b->v, has_dc)*1;
/* Write the 2-bit DQUANT field encoding a quantizer delta of -1/-2/+1/+2.
   Any other delta is a caller bug: the assert(0*strlen(...)) idiom is
   just assert(0) with the message visible in the assertion text. */
static void setQuant(TAG*tag, int dquant)
    swf_SetBits(tag, 0x0, 2);
    } else if(dquant == -2) {
        swf_SetBits(tag, 0x1, 2);
    } else if(dquant == +1) {
        swf_SetBits(tag, 0x2, 2);
    } else if(dquant == +2) {
        swf_SetBits(tag, 0x3, 2);
    assert(0*strlen("invalid dquant"));
/* Decide on a quantizer delta (*dquant) for the current block.
   Body not visible in this view. */
static void change_quant(int quant, int*dquant)
/* Encode one intra macroblock at (bx,by): fetch the region, DCT+quantize
   with DC (has_dc=1), write MCBPC/CBPY (and DQUANT when the quantizer
   changes), then the six sub-blocks.  Finally dequantize and write the
   reconstruction back into s->current so P-frames predict from exactly
   what a decoder will see. */
static void encode_blockI(TAG*tag, VIDEOSTREAM*s, int bx, int by, int*quant)
    int cbpcbits = 0, cbpybits=0;
    getregion(&fb, s->current, bx*2*16, by*2*16, s->width);
    change_quant(*quant, &dquant);
    dodctandquant(&fb, &b, 1, *quant);
    //quantize(&fb, &b, 1, *quant);
    //decode_blockI(s, &b, bx, by);
    getblockpatterns(&b, &cbpybits, &cbpcbits, 1);
    /* MCBPC: intra table, +4 selects the variant carrying DQUANT */
    codehuffman(tag, mcbpc_intra, 4+cbpcbits);
    codehuffman(tag, mcbpc_intra, 0+cbpcbits);
    codehuffman(tag, cbpy, cbpybits);
    setQuant(tag, dquant);
    encode8x8(tag, b.y1, 1, cbpybits&8);
    encode8x8(tag, b.y2, 1, cbpybits&4);
    encode8x8(tag, b.y3, 1, cbpybits&2);
    encode8x8(tag, b.y4, 1, cbpybits&1);
    encode8x8(tag, b.u, 1, cbpcbits&2);
    encode8x8(tag, b.v, 1, cbpcbits&1);
    /* reconstruction - keep the encoder's reference identical to the
       decoder's output */
    dequantize(&b, 1, *quant);
    copyblock(s, s->current, &b, bx, by);
/* Component-wise difference a -= b over all six sub-blocks: builds the
   prediction residual for P-frame coding. */
static void yuvdiff(block_t*a, block_t*b)
    a->y1[t] = (a->y1[t] - b->y1[t]);
    a->y2[t] = (a->y2[t] - b->y2[t]);
    a->y3[t] = (a->y3[t] - b->y3[t]);
    a->y4[t] = (a->y4[t] - b->y4[t]);
    a->u[t] = (a->u[t] - b->u[t]);
    a->v[t] = (a->v[t] - b->v[t]);
/* Motion vector predictor for block (bx,by): the median of the three
   neighbouring vectors (left, above, above-right), with the usual H.263
   border substitutions (missing neighbours replaced as visible below).
   The median is found by the chains of ordering tests; the final
   predictor is range-checked to the legal [-32,31] half-pel range. */
static void predictmvd(VIDEOSTREAM*s, int bx, int by, int*px, int*py)
    int x1,y1,x2,y2,x3,y3;
    if(bx) {x1=s->mvdx[by*s->bbx+bx-1];
            y1=s->mvdy[by*s->bbx+bx-1];
    if(by) {x2=s->mvdx[(by-1)*s->bbx+bx];
            y2=s->mvdy[(by-1)*s->bbx+bx];
            x3=s->mvdx[(by-1)*s->bbx+bx+1];
            y3=s->mvdy[(by-1)*s->bbx+bx+1];
    else {x2=x3=x1;y2=y3=y1;}
    /* median of x1,x2,x3 */
    if((x1 <= x2 && x2 <= x3) ||
       (x3 <= x2 && x2 <= x1)) {
    } else if((x2 <= x1 && x1 <= x3) ||
              (x3 <= x1 && x1 <= x2)) {
    } else if((x1 <= x3 && x3 <= x2) ||
              (x2 <= x3 && x3 <= x1)) {
    /* median of y1,y2,y3 */
    if((y1 <= y2 && y2 <= y3) ||
       (y3 <= y2 && y2 <= y1)) {
    } else if((y2 <= y1 && y1 <= y3) ||
              (y3 <= y1 && y1 <= y2)) {
    } else if((y1 <= y3 && y3 <= y2) ||
              (y2 <= y3 && y3 <= y1)) {
    assert((x4>=-32 && x4<=31) && (y4>=-32 && y4<=31));
/* Map a motion vector component (x or y, selected by xy) minus its
   predictor (px,py) to an index into the MVD huffman table.  The asserts
   pin the currently supported subset: full-pel, even vectors in
   [-32,31], producing a table index in [0,64). */
static inline int mvd2index(int px, int py, int x, int y, int xy)
    assert((x>=-32 && x<=31) && (y>=-32 && y<=31));
    assert((x&1)==0 && (y&1)==0);//for now
    assert((x&2)==0 && (y&2)==0);//for now(2)
    assert(x>=0 && x<64);
/* Encode one P-frame macroblock at (bx,by).  Strategy:
   1. If the block barely changed (SSD below threshold), emit cod=1 and
      skip it, copying the old region so the reconstruction stays exact.
   2. Otherwise estimate the bit cost of (a) coding it intra and (b)
      coding it as a motion-compensated residual (with a small +/-8
      half-pel motion search when enabled), and emit whichever is cheaper.
   In both coded cases the block is dequantized and written back into
   s->current so the encoder's reference matches the decoder's. */
static int encode_blockP(TAG*tag, VIDEOSTREAM*s, int bx, int by, int*quant)
    int cbpcbits = 0, cbpybits=0;
    int bits_v00 = 65535;
    /* --- 1: skip test --- */
    diff = compareregions(s, bx, by);
    if(diff < 20 /*TODO: should be a parameter- good values are between 32 and 48 */) {
        swf_SetBits(tag, 1,1); /* cod=1, block skipped */
        /* copy the region from the last frame so that we have a complete reconstruction */
        copyregion(s, s->current, s->oldpic, bx, by);
    predictmvd(s,bx,by,&predictmvdx,&predictmvdy);
    getregion(&fb, s->current, bx*2*16, by*2*16, s->width);
    { /* consider I-block */
        memcpy(&fb_i, &fb, sizeof(block_t));
        dodctandquant(&fb_i, &b_i, 1, *quant);
        //quantize(&fb_i, &b_i, 1, *quant);
        getblockpatterns(&b_i, &y, &c, 1);
        bits_i += mcbpc_inter[3*4+c].len;
        bits_i += cbpy[y].len;
        bits_i += coefbits8x8(b_i.y1, 1);
        bits_i += coefbits8x8(b_i.y2, 1);
        bits_i += coefbits8x8(b_i.y3, 1);
        bits_i += coefbits8x8(b_i.y4, 1);
        bits_i += coefbits8x8(b_i.u, 1);
        bits_i += coefbits8x8(b_i.v, 1);
    { /* consider mvd(x,y)-block */
        /* small motion search around (0,0), clamped at the right/bottom
           frame border */
        int bestx=0,besty=0,bestbits=65536;
        int startx=-8,endx=8;
        int starty=-8,endy=8;
        if(bx==s->bbx-1) endx=0;
        if(by==s->bby-1) endy=0;
        for(hx=startx;hx<=endx;hx+=4)
        for(hy=starty;hy<=endy;hy+=4)
            memcpy(&fbdiff, &fb, sizeof(block_t));
            getregion(&fbold, s->oldpic, bx*2*16+hx, by*2*16+hy, s->linex);
            yuvdiff(&fbdiff, &fbold);
            dodctandquant(&fbdiff, &b, 0, *quant);
            //quantize(&fbdiff, &b, 0, *quant);
            bits += coefbits8x8(b.y1, 0);
            bits += coefbits8x8(b.y2, 0);
            bits += coefbits8x8(b.y3, 0);
            bits += coefbits8x8(b.y4, 0);
            bits += coefbits8x8(b.u, 0);
            bits += coefbits8x8(b.v, 0);
        /* re-derive the residual for the winning vector (x_v00,y_v00)
           and count its exact cost including MCBPC/CBPY/MVD fields */
        memcpy(&fbdiff, &fb, sizeof(block_t));
        getregion(&fbold_v00, s->oldpic, bx*2*16+x_v00, by*2*16+y_v00, s->linex);
        yuvdiff(&fbdiff, &fbold_v00);
        dodctandquant(&fbdiff, &b_v00, 0, *quant);
        //quantize(&fbdiff, &b_v00, 0, *quant);
        getblockpatterns(&b_v00, &y, &c, 0);
        bits_v00 += mcbpc_inter[0*4+c].len;
        bits_v00 += cbpy[y^15].len;
        bits_v00 += mvd[mvd2index(predictmvdx, predictmvdy, x_v00, y_v00, 0)].len; // (0,0)
        bits_v00 += mvd[mvd2index(predictmvdx, predictmvdy, x_v00, y_v00, 1)].len;
        bits_v00 += coefbits8x8(b_v00.y1, 0);
        bits_v00 += coefbits8x8(b_v00.y2, 0);
        bits_v00 += coefbits8x8(b_v00.y3, 0);
        bits_v00 += coefbits8x8(b_v00.y4, 0);
        bits_v00 += coefbits8x8(b_v00.u, 0);
        bits_v00 += coefbits8x8(b_v00.v, 0);
    /* --- 2: emit the cheaper of inter (mode 0) and intra (mode 3) --- */
    if(bits_i > bits_v00)
        /* mvd (0,0) block (mode=0) */
        mode = 0; // mvd w/o mvd24
        memcpy(&b, &b_v00, sizeof(block_t));
        getblockpatterns(&b, &cbpybits, &cbpcbits, has_dc);
        swf_SetBits(tag,0,1); // COD
        codehuffman(tag, mcbpc_inter, mode*4+cbpcbits);
        codehuffman(tag, cbpy, cbpybits^15); /* CBPY is inverted for inter */
        codehuffman(tag, mvd, mvd2index(predictmvdx, predictmvdy, x_v00, y_v00, 0));
        codehuffman(tag, mvd, mvd2index(predictmvdx, predictmvdy, x_v00, y_v00, 1));
        /* remember this block's vector for the neighbours' prediction */
        s->mvdx[by*s->bbx+bx] = x_v00;
        s->mvdy[by*s->bbx+bx] = y_v00;
        encode8x8(tag, b.y1, has_dc, cbpybits&8);
        encode8x8(tag, b.y2, has_dc, cbpybits&4);
        encode8x8(tag, b.y3, has_dc, cbpybits&2);
        encode8x8(tag, b.y4, has_dc, cbpybits&1);
        encode8x8(tag, b.u, has_dc, cbpcbits&2);
        encode8x8(tag, b.v, has_dc, cbpcbits&1);
        /* -- reconstruction -- */
        dequantize(&b, 0, *quant);
        /* add the residual back onto the motion-compensated prediction */
        b.y1[t] = truncate256(b.y1[t] + (int)fbold_v00.y1[t]);
        b.y2[t] = truncate256(b.y2[t] + (int)fbold_v00.y2[t]);
        b.y3[t] = truncate256(b.y3[t] + (int)fbold_v00.y3[t]);
        b.y4[t] = truncate256(b.y4[t] + (int)fbold_v00.y4[t]);
        b.u[t] = truncate256(b.u[t] + (int)fbold_v00.u[t]);
        b.v[t] = truncate256(b.v[t] + (int)fbold_v00.v[t]);
        copyblock(s, s->current, &b, bx, by);
        /* i block (mode=3) */
        memcpy(&b, &b_i, sizeof(block_t));
        getblockpatterns(&b, &cbpybits, &cbpcbits, has_dc);
        swf_SetBits(tag,0,1); // COD
        codehuffman(tag, mcbpc_inter, mode*4+cbpcbits);
        codehuffman(tag, cbpy, cbpybits);
        encode8x8(tag, b.y1, has_dc, cbpybits&8);
        encode8x8(tag, b.y2, has_dc, cbpybits&4);
        encode8x8(tag, b.y3, has_dc, cbpybits&2);
        encode8x8(tag, b.y4, has_dc, cbpybits&1);
        encode8x8(tag, b.u, has_dc, cbpcbits&2);
        encode8x8(tag, b.v, has_dc, cbpcbits&1);
        /* -- reconstruction -- */
        dequantize(&b, 1, *quant);
        copyblock(s, s->current, &b, bx, by);
    /* NOTE(review): the path below (generic mode selection with DQUANT
       and MVD(0,0)) appears to belong to a separate branch of this
       function whose surrounding lines are not visible in this view. */
    quantize(&fb, &b, has_dc, *quant);
    getblockpatterns(&b, &cbpybits, &cbpcbits, has_dc);
    if(!dquant && has_mvd && !has_mvd24 && !has_dc) mode = 0;
    else if(dquant && has_mvd && !has_mvd24 && !has_dc) mode = 1;
    else if(!dquant && has_mvd && has_mvd24 && !has_dc) mode = 2;
    else if(!dquant && !has_mvd && !has_mvd24 && has_dc) mode = 3;
    else if(dquant && !has_mvd && !has_mvd24 && has_dc) mode = 4;
    swf_SetBits(tag,0,1); /* cod - 1 if we're not going to code this block*/
    codehuffman(tag, mcbpc_inter, mode*4+cbpcbits);
    codehuffman(tag, cbpy, (mode==3 || mode==4)?cbpybits:cbpybits^15);
    setQuant(tag, dquant);
    /* index 32 is the zero motion vector delta */
    codehuffman(tag, mvd, 32);
    codehuffman(tag, mvd, 32);
    encode8x8(tag, b.y1, has_dc, cbpybits&8);
    encode8x8(tag, b.y2, has_dc, cbpybits&4);
    encode8x8(tag, b.y3, has_dc, cbpybits&2);
    encode8x8(tag, b.y4, has_dc, cbpybits&1);
    encode8x8(tag, b.u, has_dc, cbpcbits&2);
    encode8x8(tag, b.v, has_dc, cbpcbits&1);
1087 #define TYPE_IFRAME 0
1088 #define TYPE_PFRAME 1
/* Write the VIDEOFRAME tag prefix and the Sorenson/H.263 picture header:
   stream id, 17-bit picture start code, version, 8-bit time reference,
   dimensions (using one of the predefined-size codes when the frame
   matches a standard format, otherwise explicit 8- or 16-bit values),
   picture type (I/P), deblocking flag and 5-bit quantizer. */
static void writeHeader(TAG*tag, int width, int height, int frame, int quant, int type)
    swf_SetU16(tag, frame);
    swf_SetBits(tag, 1, 17); /* picture start code*/
    swf_SetBits(tag, 0, 5); /* version=0, version 1 would optimize rle behaviour*/
    swf_SetBits(tag, frame, 8); /* time reference */
    /* write dimensions, taking advantage of some predefined sizes
       if the opportunity presents itself */
    i32 = width<<16|height;
    case 352<<16|288: swf_SetBits(tag, 2, 3);break;
    case 176<<16|144: swf_SetBits(tag, 3, 3);break;
    case 128<<16|96: swf_SetBits(tag, 4, 3);break;
    case 320<<16|240: swf_SetBits(tag, 5, 3);break;
    case 160<<16|120: swf_SetBits(tag, 6, 3);break;
    if(width>255 || height>255) {
        swf_SetBits(tag, 1, 3);
        swf_SetBits(tag, width, 16);
        swf_SetBits(tag, height, 16);
        swf_SetBits(tag, 0, 3);
        swf_SetBits(tag, width, 8);
        swf_SetBits(tag, height, 8);
    swf_SetBits(tag, type, 2); /* I-Frame or P-Frame */
    swf_SetBits(tag, 0, 1); /* No deblock filter */
    swf_SetBits(tag, quant, 5); /* quantizer (1-31), may be updated later on*/
    swf_SetBits(tag, 0, 1); /* No extra info */
/* Encode one complete intra frame into a VIDEOFRAME tag: clamp the
   quantizer to the legal 1..31 range, write the picture header, convert
   the RGBA input to YUV, intra-code every macroblock, and keep the
   reconstructed frame as the reference for subsequent P-frames. */
void swf_SetVideoStreamIFrame(TAG*tag, VIDEOSTREAM*s, RGBA*pic, int quant)
    if(quant<1) quant=1;
    if(quant>31) quant=31;
    writeHeader(tag, s->width, s->height, s->frame, quant, TYPE_IFRAME);
    rgb2yuv(s->current, pic, s->linex, s->olinex, s->width, s->height);
    for(by=0;by<s->bby;by++)
        for(bx=0;bx<s->bbx;bx++)
            encode_blockI(tag, s, bx, by, &quant);
    /* reconstructed frame becomes the next frame's reference */
    memcpy(s->oldpic, s->current, s->width*s->height*sizeof(YUV));
/* Encode one predicted (P) frame into a VIDEOFRAME tag: clamp the
   quantizer, write the header, convert input to YUV, clear the motion
   vector arrays, P-code every macroblock, and store the reconstruction
   as the new reference. */
void swf_SetVideoStreamPFrame(TAG*tag, VIDEOSTREAM*s, RGBA*pic, int quant)
    if(quant<1) quant=1;
    if(quant>31) quant=31;
    writeHeader(tag, s->width, s->height, s->frame, quant, TYPE_PFRAME);
    rgb2yuv(s->current, pic, s->linex, s->olinex, s->width, s->height);
    /* reset motion vectors - predictmvd() reads neighbours from these */
    memset(s->mvdx, 0, s->bbx*s->bby*sizeof(int));
    memset(s->mvdy, 0, s->bbx*s->bby*sizeof(int));
    for(by=0;by<s->bby;by++)
        for(bx=0;bx<s->bbx;bx++)
            encode_blockP(tag, s, bx, by, &quant);
    memcpy(s->oldpic, s->current, s->width*s->height*sizeof(YUV));
    /* debug dump of the reconstructed frame as a PPM.
       NOTE(review): the fclose(fi) for this stream is not visible in
       this view - confirm the file is closed in the full source. */
    FILE*fi = fopen("test.ppm", "wb");
    yuv2rgb(pic, s->current, s->linex, s->width, s->height);
    fprintf(fi, "P6\n%d %d\n255\n", s->width, s->height);
    for(t=0;t<s->width*s->height;t++)
        fwrite(&pic[t].r, 1, 1, fi);
        fwrite(&pic[t].g, 1, 1, fi);
        fwrite(&pic[t].b, 1, 1, fi);
1191 int main(int argn, char*argv[])
1197 RGBA* pic, *pic2, rgb;
1204 char* fname = "/home/kramm/pics/peppers.png";
1208 memset(&stream, 0, sizeof(stream));
1210 getPNG(fname, &width, &height, &data);
1211 pic = (RGBA*)malloc(width*height*sizeof(RGBA));
1212 pic2 = (RGBA*)malloc(width*height*sizeof(RGBA));
1213 memcpy(pic, data, width*height*sizeof(RGBA));
1216 printf("Compressing %s, size %dx%d\n", fname, width, height);
1218 memset(&swf,0,sizeof(SWF));
1219 memset(&obj,0,sizeof(obj));
1221 swf.fileVersion = 6;
1222 swf.frameRate = framerate*256;
1223 swf.movieSize.xmax = 20*width;
1224 swf.movieSize.ymax = 20*height;
1226 swf.firstTag = swf_InsertTag(NULL,ST_SETBACKGROUNDCOLOR);
1228 rgb.r = 0x00;rgb.g = 0x00;rgb.b = 0x00;
1229 swf_SetRGB(tag,&rgb);
1231 tag = swf_InsertTag(tag, ST_DEFINEVIDEOSTREAM);
1232 swf_SetU16(tag, 33);
1233 swf_SetVideoStreamDefine(tag, &stream, frames, width, height);
1234 stream.do_motion = 1;
1236 for(t=0;t<frames;t++)
1240 for(y=0,yy=0;y<height;y++,yy+=d) {
1241 RGBA*line = &pic[((int)yy)*width];
1242 for(x=0,xx=0;x<width;x++,xx+=d) {
1243 pic2[y*width+x] = line[((int)xx)];
1246 printf("frame:%d\n", t);fflush(stdout);
1248 tag = swf_InsertTag(tag, ST_VIDEOFRAME);
1249 swf_SetU16(tag, 33);
1251 swf_SetVideoStreamIFrame(tag, &stream, pic2, 9);
1253 swf_SetVideoStreamPFrame(tag, &stream, pic2, 9);
1255 tag = swf_InsertTag(tag, ST_PLACEOBJECT2);
1256 swf_GetPlaceObject(0, &obj);
1265 swf_SetPlaceObject(tag,&obj);
1267 tag = swf_InsertTag(tag, ST_SHOWFRAME);
1271 swf_VideoStreamClear(&stream);
1273 tag = swf_InsertTag(tag, ST_END);
1275 fi = open("video3.swf", O_WRONLY|O_CREAT|O_TRUNC, 0644);
1276 if(swf_WriteSWC(fi,&swf)<0) {
1277 fprintf(stderr,"WriteSWF() failed.\n");