4 * Copyright (c) 1999 Mark Taylor
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Library General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Library General Public License for more details.
16 * You should have received a copy of the GNU Library General Public
17 * License along with this library; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA.
22 /* $Id: vbrquantize.c,v 1.2 2006/02/09 16:56:23 kramm Exp $ */
25 #include "config_static.h"
30 #include "reservoir.h"
31 #include "quantize_pvt.h"
32 #include "vbrquantize.h"
45 #define MAGIC_FLOAT (65536*(128))
46 #define MAGIC_INT 0x4b000000
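/* MAGIC_FLOAT is 2^23 = 8388608.0: for 0 <= x < 2^23, the float (x + MAGIC_FLOAT)
 * carries x (rounded to an integer) in its low mantissa bits, and MAGIC_INT
 * (0x4b000000) is the bit pattern of MAGIC_FLOAT itself, so subtracting
 * MAGIC_INT from the raw bits recovers the integer.  (adj43asm - MAGIC_INT)[i]
 * below is simply adj43asm[i - MAGIC_INT] written as a pointer offset, so the
 * MAGIC_INT bias does not have to be removed before indexing. */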
48 #ifdef TAKEHIRO_IEEE754_HACK
50 #define DUFFBLOCKMQ() do { \
51 xp = xr34[0] * sfpow34_p1; \
52 xe = xr34[0] * sfpow34_eq; \
53 xm = xr34[0] * sfpow34_m1; \
62 fi[0].f = xp + (adj43asm - MAGIC_INT)[fi[0].i]; \
63 fi[1].f = xe + (adj43asm - MAGIC_INT)[fi[1].i]; \
64 fi[2].f = xm + (adj43asm - MAGIC_INT)[fi[2].i]; \
65 fi[0].i -= MAGIC_INT; \
66 fi[1].i -= MAGIC_INT; \
67 fi[2].i -= MAGIC_INT; \
69 xp = x0 - pow43[fi[0].i] * sfpow_p1; \
70 xe = x0 - pow43[fi[1].i] * sfpow_eq; \
71 xm = x0 - pow43[fi[2].i] * sfpow_m1; \
75 xfsf_eq = Max(xfsf_eq, xe); \
76 xfsf_p1 = Max(xfsf_p1, xp); \
77 xfsf_m1 = Max(xfsf_m1, xm); \
82 #define DUFFBLOCK() do { \
83 xp = xr34[0] * sfpow34_p1; \
84 xe = xr34[0] * sfpow34_eq; \
85 xm = xr34[0] * sfpow34_m1; \
94 fi[0].f = xp + (adj43asm - MAGIC_INT)[fi[0].i]; \
95 fi[1].f = xe + (adj43asm - MAGIC_INT)[fi[1].i]; \
96 fi[2].f = xm + (adj43asm - MAGIC_INT)[fi[2].i]; \
97 fi[0].i -= MAGIC_INT; \
98 fi[1].i -= MAGIC_INT; \
99 fi[2].i -= MAGIC_INT; \
101 xp = x0 - pow43[fi[0].i] * sfpow_p1; \
102 xe = x0 - pow43[fi[1].i] * sfpow_eq; \
103 xm = x0 - pow43[fi[2].i] * sfpow_m1; \
104 xfsf_p1 += xp * xp; \
105 xfsf_eq += xe * xe; \
106 xfsf_m1 += xm * xm; \
113 /*********************************************************************
114 * XRPOW_FTOI is a macro to convert floats to ints.
115 * if XRPOW_FTOI(x) = nearest_int(x), then QUANTFAC(x)=adj43asm[x]
118 * if XRPOW_FTOI(x) = floor(x), then QUANTFAC(x)=adj43[x]
120 *********************************************************************/
121 # define QUANTFAC(rx) adj43[rx]
122 # define ROUNDFAC 0.4054
123 # define XRPOW_FTOI(src,dest) ((dest) = (int)(src))
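/* For reference: a straightforward (slow) version of the quantization step
 * that the QUANTFAC/adj43 table and the IEEE754 hack above implement more
 * efficiently.  Illustrative sketch only (hence #if 0), not used by the
 * encoder; xr is one spectral line and sf the scalefactor as used by
 * calc_sfb_noise() below, i.e. the quantizer step size is 2^(sf/4).
 */
#if 0
static int quantize_line_reference(FLOAT8 xr, int sf)
{
    FLOAT8 step = pow(2.0, sf / 4.0);          /* quantizer step size           */
    FLOAT8 x34  = pow(fabs(xr) / step, 0.75);  /* |xr/step|^(3/4)               */
    return (int)(x34 + ROUNDFAC);              /* ISO rounding: nint(x - 0.0946) */
}
#endif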
128 /* caution: a[] will be reordered (partially sorted) in place!
130 FLOAT8 select_kth(FLOAT8 a[], int N, int k)
142 while (a[++i] < v) /*empty*/;
143 while (a[--j] > v) /*empty*/;
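/* (the two scans above are Hoare's partition: values smaller than the pivot v
 * gather left of i, larger ones right of j; the search then continues only in
 * the part that contains index k, yielding the k-th smallest element without
 * a full sort) */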
167 calc_sfb_noise(const FLOAT8 *xr, const FLOAT8 *xr34, int bw, int sf)
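/* quantizes the bw spectral lines of one scalefactor band with scalefactor sf
 * and returns the resulting noise (sum of squared quantization errors), or -1
 * if any quantized value would exceed IXMAX_VAL */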
173 FLOAT8 sfpow, sfpow34;
175 sfpow = POW20(sf+210); /*pow(2.0,sf/4.0); */
176 sfpow34 = IPOW20(sf+210); /*pow(sfpow,-3.0/4.0);*/
178 for ( j = 0; j < bw ; ++j ) {
179 if ( xr34[j]*sfpow34 > IXMAX_VAL ) return -1;
181 #ifdef TAKEHIRO_IEEE754_HACK
182 temp = sfpow34*xr34[j];
185 fi.f = temp + (adj43asm - MAGIC_INT)[fi.i];
188 temp = xr34[j]*sfpow34;
189 XRPOW_FTOI(temp, fi.i);
190 XRPOW_FTOI(temp + QUANTFAC(fi.i), fi.i);
193 temp = fabs(xr[j])- pow43[fi.i]*sfpow;
203 calc_sfb_noise_mq( const FLOAT8 *xr, const FLOAT8 *xr34, int bw, int sf,
204 int mq, FLOAT8 *scratch )
209 FLOAT8 sfpow, sfpow34, xfsfm = 0, xfsf = 0;
211 sfpow = POW20(sf+210); /*pow(2.0,sf/4.0); */
212 sfpow34 = IPOW20(sf+210); /*pow(sfpow,-3.0/4.0);*/
214 for ( j = 0; j < bw; ++j ) {
215 if ( xr34[j]*sfpow34 > IXMAX_VAL ) return -1;
217 #ifdef TAKEHIRO_IEEE754_HACK
218 temp = sfpow34*xr34[j];
221 fi.f = temp + (adj43asm - MAGIC_INT)[fi.i];
224 temp = xr34[j]*sfpow34;
225 XRPOW_FTOI(temp, fi.i);
226 XRPOW_FTOI(temp + QUANTFAC(fi.i), fi.i);
229 temp = fabs(xr[j])- pow43[fi.i]*sfpow;
233 if ( xfsfm < temp ) xfsfm = temp;
236 if ( mq == 1 ) return bw*select_kth(scratch,bw,bw*13/16);
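/* mq == 1: use the 13/16 quantile of the per-line errors collected in
 * scratch[] (scaled by bw) as the band's noise estimate, which is less
 * sensitive to a single badly quantized line; mq == 0 falls through to the
 * code below. */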
239 for ( k = 1, j = 0; j < bw; ++j ) {
240 if ( scratch[j] > xfsf ) {
252 calc_sfb_noise_ave(const FLOAT8 *xr, const FLOAT8 *xr34, int bw, int sf)
257 #ifdef TAKEHIRO_IEEE754_HACK
261 fi_union *fi = (fi_union *)xx;
262 FLOAT8 sfpow34_eq, sfpow34_p1, sfpow34_m1;
263 FLOAT8 sfpow_eq, sfpow_p1, sfpow_m1;
264 FLOAT8 xfsf_eq = 0, xfsf_p1 = 0, xfsf_m1 = 0;
266 sfpow_eq = POW20(sf + 210); /*pow(2.0,sf/4.0); */
267 sfpow_m1 = sfpow_eq * .8408964153; /* pow(2,(sf-1)/4.0) */
268 sfpow_p1 = sfpow_eq * 1.189207115;
270 sfpow34_eq = IPOW20(sf + 210); /*pow(sfpow,-3.0/4.0);*/
271 sfpow34_m1 = sfpow34_eq * 1.13878863476; /* .84089 ^ -3/4 */
272 sfpow34_p1 = sfpow34_eq * 0.878126080187;
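/* the noise is accumulated for three candidate scalefactors at once
 * (sf-1, sf and sf+1, hence the 2^(+-1/4) factors above); the maximum of
 * the three totals is returned (see the end of this function), so a
 * scalefactor is only accepted if its immediate neighbours also hold up. */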
274 #ifdef TAKEHIRO_IEEE754_HACK
276 * loop unrolled into "Duff's Device". Robert Hegemann
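 * (the switch labels are interleaved with the do/while body, so the
 * remainder iterations fall straight through before the fully unrolled
 * passes; the result is identical to quantizing the lines one by one in
 * a plain loop, just with less loop overhead)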
281 case 0: do{ DUFFBLOCK();
284 case 1: DUFFBLOCK(); } while (--j);
287 for (j = 0; j < bw; ++j) {
289 if (xr34[j]*sfpow34_m1 > IXMAX_VAL) return -1;
291 xe = xr34[j]*sfpow34_eq;
292 XRPOW_FTOI(xe, fi[0].i);
293 XRPOW_FTOI(xe + QUANTFAC(fi[0].i), fi[0].i);
294 xe = fabs(xr[j])- pow43[fi[0].i]*sfpow_eq;
297 xp = xr34[j]*sfpow34_p1;
298 XRPOW_FTOI(xp, fi[0].i);
299 XRPOW_FTOI(xp + QUANTFAC(fi[0].i), fi[0].i);
300 xp = fabs(xr[j])- pow43[fi[0].i]*sfpow_p1;
303 xm = xr34[j]*sfpow34_m1;
304 XRPOW_FTOI(xm, fi[0].i);
305 XRPOW_FTOI(xm + QUANTFAC(fi[0].i), fi[0].i);
306 xm = fabs(xr[j])- pow43[fi[0].i]*sfpow_m1;
315 if ( xfsf_eq < xfsf_p1 ) xfsf_eq = xfsf_p1;
316 if ( xfsf_eq < xfsf_m1 ) xfsf_eq = xfsf_m1;
323 find_scalefac( const FLOAT8 *xr, const FLOAT8 *xr34, FLOAT8 l3_xmin, int bw )
326 int i, sf, sf_ok, delsf;
328 /* search will range from sf: -209 -> 45 */
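/* 7-step bisection over that range: each iteration halves the step delsf and
 * moves sf up (quantizer overflow, or noise still below the allowed masking)
 * or down (audible distortion); sf_ok remembers the last scalefactor that
 * produced no distortion and is preferred in the return statement below. */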
333 for (i = 0; i < 7; ++i) {
335 xfsf = calc_sfb_noise( xr, xr34, bw, sf );
338 /* scalefactors too small */
342 if (xfsf > l3_xmin) {
343 /* distortion. try a smaller scalefactor */
353 /* returning a scalefac without distortion, if possible
355 return sf_ok > 45 ? sf : sf_ok;
359 find_scalefac_mq( const FLOAT8 *xr, const FLOAT8 *xr34, FLOAT8 l3_xmin,
360 int bw, int mq, FLOAT8 *scratch )
363 int i, sf, sf_ok, delsf;
365 /* search will range from sf: -209 -> 45 */
370 for (i = 0; i < 7; ++i) {
372 xfsf = calc_sfb_noise_mq( xr, xr34, bw, sf, mq, scratch );
375 /* scalefactors too small */
379 if (xfsf > l3_xmin) {
380 /* distortion. try a smaller scalefactor */
390 /* returning a scalefac without distortion, if possible
392 return sf_ok > 45 ? sf : sf_ok;
396 find_scalefac_ave( const FLOAT8 *xr, const FLOAT8 *xr34, FLOAT8 l3_xmin, int bw )
399 int i, sf, sf_ok, delsf;
401 /* search will range from sf: -209 -> 45 */
406 for (i = 0; i < 7; ++i) {
408 xfsf = calc_sfb_noise_ave( xr, xr34, bw, sf );
411 /* scalefactors too small */
415 if (xfsf > l3_xmin) {
416 /* distortion. try a smaller scalefactor */
426 /* returning a scalefac without distortion, if possible
428 return sf_ok > 45 ? sf : sf_ok;
433 * Robert Hegemann 2001-05-01
434 * calculates quantization step size determined by allowed masking
437 calc_scalefac( FLOAT8 l3_xmin, int bw, FLOAT8 preset_tune )
439 FLOAT8 const c = (preset_tune > 0 ? preset_tune
440 : 5.799142446); // 10 * 10^(2/3) * log10(4/3)
441 return (int)(c*log10(l3_xmin/bw)-.5);
446 static const int max_range_short[SBMAX_s] =
447 {15, 15, 15, 15, 15, 15, 7, 7, 7, 7, 7, 7, 0 };
449 static const int max_range_long[SBMAX_l] =
450 {15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 0};
452 static const int max_range_long_lsf_pretab[SBMAX_l] =
453 { 7,7,7,7,7,7, 3,3,3,3,3, 0,0,0,0, 0,0,0, 0,0,0, 0 };
458 sfb=0..5 scalefac < 16
461 ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
462 ol_sf = (cod_info->global_gain-210.0);
463 ol_sf -= 8*cod_info->subblock_gain[i];
464 ol_sf -= ifqstep*scalefac[gr][ch].s[sfb][i];
468 compute_scalefacs_short(
469 int sf[][3], const gr_info *cod_info, int scalefac[][3], int *sbg )
471 const int maxrange1 = 15, maxrange2 = 7;
472 int maxrange, maxover = 0;
474 int ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
476 for (i = 0; i < 3; ++i) {
477 int maxsf1 = 0, maxsf2 = 0, minsf = 1000;
478 /* see if we should use subblock gain */
479 for (sfb = 0; sfb < 6; ++sfb) { /* part 1 */
480 if (maxsf1 < -sf[sfb][i]) maxsf1 = -sf[sfb][i];
481 if (minsf > -sf[sfb][i]) minsf = -sf[sfb][i];
483 for (; sfb < SBPSY_s; ++sfb) { /* part 2 */
484 if (maxsf2 < -sf[sfb][i]) maxsf2 = -sf[sfb][i];
485 if (minsf > -sf[sfb][i]) minsf = -sf[sfb][i];
488 /* boost subblock gain as little as possible so we can
489 * reach maxsf1 with scalefactors
492 maxsf1 = Max (maxsf1-maxrange1*ifqstep, maxsf2-maxrange2*ifqstep);
494 if (minsf > 0) sbg[i] = floor (.125*minsf + .001);
495 if (maxsf1 > 0) sbg[i] = Max (sbg[i], (maxsf1/8 + (maxsf1 % 8 != 0)));
496 if (sbg[i] > 7) sbg[i] = 7;
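/* maxsf1/8 + (maxsf1 % 8 != 0) is ceil(maxsf1/8): one step of subblock gain
 * is worth 8 in the sf domain (see the 8*sbg[i] term below), and the value is
 * clamped to 7 because subblock_gain is a 3 bit field in the bitstream */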
498 for (sfb = 0; sfb < SBPSY_s; ++sfb) {
499 sf[sfb][i] += 8*sbg[i];
501 if (sf[sfb][i] < 0) {
502 maxrange = sfb < 6 ? maxrange1 : maxrange2;
505 = -sf[sfb][i]/ifqstep + (-sf[sfb][i]%ifqstep != 0);
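/* i.e. scalefac = ceil(-sf/ifqstep) in integer arithmetic, so that
 * ifqstep*scalefac >= -sf */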
507 if (scalefac[sfb][i] > maxrange)
508 scalefac[sfb][i] = maxrange;
510 if (maxover < -(sf[sfb][i] + scalefac[sfb][i]*ifqstep))
511 maxover = -(sf[sfb][i] + scalefac[sfb][i]*ifqstep);
514 scalefac[sfb][i] = 0;
523 ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
524 ol_sf = (cod_info->global_gain-210.0);
525 ol_sf -= ifqstep*scalefac[gr][ch].l[sfb];
526 if (cod_info->preflag && sfb>=11)
527 ol_sf -= ifqstep*pretab[sfb];
530 compute_scalefacs_long_lsf( int *sf, const gr_info * cod_info, int *scalefac )
532 const int * max_range = max_range_long;
533 int ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
537 if (cod_info->preflag) {
538 max_range = max_range_long_lsf_pretab;
539 for (sfb = 11; sfb < SBPSY_l; ++sfb)
540 sf[sfb] += pretab[sfb] * ifqstep;
544 for (sfb = 0; sfb < SBPSY_l; ++sfb) {
547 /* ifqstep*scalefac >= -sf[sfb], so round UP */
548 scalefac[sfb] = -sf[sfb]/ifqstep + (-sf[sfb] % ifqstep != 0);
549 if (scalefac[sfb] > max_range[sfb])
550 scalefac[sfb] = max_range[sfb];
552 /* sf[sfb] should now be positive: */
553 if (-(sf[sfb] + scalefac[sfb]*ifqstep) > maxover) {
554 maxover = -(sf[sfb] + scalefac[sfb]*ifqstep);
558 scalefac[sfb] = 0; /* sfb21 */
568 ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
569 ol_sf = (cod_info->global_gain-210.0);
570 ol_sf -= ifqstep*scalefac[gr][ch].l[sfb];
571 if (cod_info->preflag && sfb>=11)
572 ol_sf -= ifqstep*pretab[sfb];
575 compute_scalefacs_long( int *sf, const gr_info * cod_info, int *scalefac )
577 int ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
581 if (cod_info->preflag) {
582 for (sfb = 11; sfb < SBPSY_l; ++sfb)
583 sf[sfb] += pretab[sfb] * ifqstep;
587 for (sfb = 0; sfb < SBPSY_l; ++sfb) {
590 /* ifqstep*scalefac >= -sf[sfb], so round UP */
591 scalefac[sfb] = -sf[sfb]/ifqstep + (-sf[sfb] % ifqstep != 0);
592 if (scalefac[sfb] > max_range_long[sfb])
593 scalefac[sfb] = max_range_long[sfb];
595 /* sf[sfb] should now be positive: */
596 if (-(sf[sfb] + scalefac[sfb]*ifqstep) > maxover) {
597 maxover = -(sf[sfb] + scalefac[sfb]*ifqstep);
601 scalefac[sfb] = 0; /* sfb21 */
613 /************************************************************************
615 * quantize and encode with the given scalefacs and global gain
617 * compute scalefactors, l3_enc, and return number of bits needed to encode
620 ************************************************************************/
623 VBR_quantize_granule(
624 lame_internal_flags * gfc,
627 III_scalefac_t * scalefac,
631 III_side_info_t * l3_side = & gfc->l3_side;
632 gr_info * cod_info = & l3_side->gr[gr].ch[ch].tt;
634 /* encode scalefacs */
636 status = scale_bitcount( scalefac, cod_info );
638 status = scale_bitcount_lsf( gfc, scalefac, cod_info );
645 cod_info->part2_3_length = count_bits(gfc,l3_enc,xr34,cod_info);
646 if (cod_info->part2_3_length >= LARGE_BITS) return -2;
647 cod_info->part2_3_length += cod_info->part2_length;
650 if (gfc->use_best_huffman == 1) {
651 best_huffman_divide(gfc, cod_info, l3_enc);
658 /***********************************************************************
660 * short_block_sf()
661 * long_block_sf()
663 * Mark Taylor 2000-??-??
664 * Robert Hegemann 2000-10-25 made functions of it
666 ***********************************************************************/
667 static const int MAX_SF_DELTA = 4;
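/* scalefactors of neighbouring bands may not differ by more than MAX_SF_DELTA;
 * the smoothing passes below clamp any band that sticks out further than that
 * above either of its neighbours */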
671 const lame_internal_flags * gfc,
672 const III_psy_xmin * l3_xmin,
673 const FLOAT8 * xr34_orig,
675 III_scalefac_t * vbrsf,
681 int sf_cache[SBMAX_s];
682 int scalefac_criteria;
684 if (gfc->presetTune.use) {
685 /* map experimentalX settings to internal selections */
686 static char const map[] = {2,1,0,3,6};
687 scalefac_criteria = map[gfc->presetTune.quantcomp_current];
690 scalefac_criteria = gfc->VBR->quality;
693 for (j = 0, sfb = 0; sfb < SBMAX_s; ++sfb) {
694 for (b = 0; b < 3; ++b) {
695 const int start = gfc->scalefac_band.s[ sfb ];
696 const int end = gfc->scalefac_band.s[ sfb+1 ];
697 const int width = end - start;
699 switch( scalefac_criteria ) {
703 vbrsf->s[sfb][b] = calc_scalefac( l3_xmin->s[sfb][b], width,
704 gfc->presetTune.quantcomp_adjust_mtrh );
709 /* the faster and sloppier mode to use at lower quality
711 vbrsf->s[sfb][b] = find_scalefac (&xr34[j], &xr34_orig[j],
712 l3_xmin->s[sfb][b], width);
715 /* the slower and better mode to use at higher quality
717 vbrsf->s[sfb][b] = find_scalefac_ave (&xr34[j], &xr34_orig[j],
718 l3_xmin->s[sfb][b], width);
721 /* maxnoise mode to use at higher quality
723 vbrsf->s[sfb][b] = find_scalefac_mq (&xr34[j], &xr34_orig[j],
724 l3_xmin->s[sfb][b], width,
725 1, gfc->VBR->scratch);
728 /* maxnoise mode to use at higher quality
730 vbrsf->s[sfb][b] = find_scalefac_mq (&xr34[j], &xr34_orig[j],
731 l3_xmin->s[sfb][b], width,
732 0, gfc->VBR->scratch);
742 for (b = 0; b < 3; ++b) {
746 switch( gfc->VBR->smooth ) {
752 /* make working copy, get min value, select_kth_int will reorder!
754 for (vbrmn = +10000, sfb = 0; sfb < SBMAX_s; ++sfb) {
755 sf_cache[sfb] = vbrsf->s[sfb][b];
756 if (vbrmn > vbrsf->s[sfb][b])
757 vbrmn = vbrsf->s[sfb][b];
760 /* find median value, take it as mean
762 vbrmean = select_kth_int (sf_cache, SBMAX_s, (SBMAX_s+1)/2);
766 for (sfb = 0; sfb < SBMAX_s; ++sfb) {
767 if (vbrsf->s[sfb][b] > vbrmean+(vbrmean-vbrmn))
768 vbrsf->s[sfb][b] = vbrmean+(vbrmean-vbrmn);
773 for (sfb = 0; sfb < SBMAX_s; ++sfb) {
775 if (vbrsf->s[sfb][b] > vbrsf->s[sfb-1][b]+MAX_SF_DELTA)
776 vbrsf->s[sfb][b] = vbrsf->s[sfb-1][b]+MAX_SF_DELTA;
778 if (vbrsf->s[sfb][b] > vbrsf->s[sfb+1][b]+MAX_SF_DELTA)
779 vbrsf->s[sfb][b] = vbrsf->s[sfb+1][b]+MAX_SF_DELTA;
786 for (sfb = 0; sfb < SBMAX_s; ++sfb) {
787 if (*vbrmax < vbrsf->s[sfb][b])
788 *vbrmax = vbrsf->s[sfb][b];
789 if (*vbrmin > vbrsf->s[sfb][b])
790 *vbrmin = vbrsf->s[sfb][b];
796 /* a variation for vbr-mtrh */
799 const lame_internal_flags * gfc,
800 const III_psy_xmin * l3_xmin,
801 const FLOAT8 * xr34_orig,
803 III_scalefac_t * vbrsf,
809 int sf_cache[SBMAX_l];
810 int scalefac_criteria;
812 if (gfc->presetTune.use) {
813 /* map experimentalX settings to internal selections */
814 static char const map[] = {2,1,0,3,6};
815 scalefac_criteria = map[gfc->presetTune.quantcomp_current];
818 scalefac_criteria = gfc->VBR->quality;
821 for (sfb = 0; sfb < SBMAX_l; ++sfb) {
822 const int start = gfc->scalefac_band.l[ sfb ];
823 const int end = gfc->scalefac_band.l[ sfb+1 ];
824 const int width = end - start;
826 switch( scalefac_criteria ) {
830 vbrsf->l[sfb] = calc_scalefac( l3_xmin->l[sfb], width,
831 gfc->presetTune.quantcomp_adjust_mtrh );
836 /* the faster and sloppier mode to use at lower quality
838 vbrsf->l[sfb] = find_scalefac (&xr34[start], &xr34_orig[start],
839 l3_xmin->l[sfb], width);
842 /* the slower and better mode to use at higher quality
844 vbrsf->l[sfb] = find_scalefac_ave (&xr34[start], &xr34_orig[start],
845 l3_xmin->l[sfb], width);
848 /* maxnoise mode to use at higher quality
850 vbrsf->l[sfb] = find_scalefac_mq (&xr34[start], &xr34_orig[start],
851 l3_xmin->l[sfb], width,
852 1, gfc->VBR->scratch);
855 /* maxnoise mode to use at higher quality
857 vbrsf->l[sfb] = find_scalefac_mq (&xr34[start], &xr34_orig[start],
858 l3_xmin->l[sfb], width,
859 0, gfc->VBR->scratch);
864 switch( gfc->VBR->smooth ) {
870 /* make working copy, get min value, select_kth_int will reorder!
872 for (vbrmn = +10000, sfb = 0; sfb < SBMAX_l; ++sfb) {
873 sf_cache[sfb] = vbrsf->l[sfb];
874 if (vbrmn > vbrsf->l[sfb])
875 vbrmn = vbrsf->l[sfb];
877 /* find median value, take it as mean
879 vbrmean = select_kth_int (sf_cache, SBMAX_l, (SBMAX_l+1)/2);
883 for (sfb = 0; sfb < SBMAX_l; ++sfb) {
884 if (vbrsf->l[sfb] > vbrmean+(vbrmean-vbrmn))
885 vbrsf->l[sfb] = vbrmean+(vbrmean-vbrmn);
890 for (sfb = 0; sfb < SBMAX_l; ++sfb) {
892 if (vbrsf->l[sfb] > vbrsf->l[sfb-1]+MAX_SF_DELTA)
893 vbrsf->l[sfb] = vbrsf->l[sfb-1]+MAX_SF_DELTA;
895 if (vbrsf->l[sfb] > vbrsf->l[sfb+1]+MAX_SF_DELTA)
896 vbrsf->l[sfb] = vbrsf->l[sfb+1]+MAX_SF_DELTA;
903 for (*vbrmin = +10000, *vbrmax = -10000, sfb = 0; sfb < SBMAX_l; ++sfb) {
904 if (*vbrmax < vbrsf->l[sfb])
905 *vbrmax = vbrsf->l[sfb];
906 if (*vbrmin > vbrsf->l[sfb])
907 *vbrmin = vbrsf->l[sfb];
913 /******************************************************************
915 * short block scalefacs
917 ******************************************************************/
920 short_block_scalefacs (
921 const lame_internal_flags * gfc,
923 III_scalefac_t * scalefac,
924 III_scalefac_t * vbrsf,
928 int maxover, maxover0, maxover1, mover;
931 int vbrmax = *VBRmax;
935 maxsfb = gfc->sfb21_extra ? SBMAX_s : SBPSY_s;
936 for (sfb = 0; sfb < maxsfb; ++sfb) {
937 for (b = 0; b < 3; ++b) {
938 v0 = (vbrmax - vbrsf->s[sfb][b]) - (4*14 + 2*max_range_short[sfb]);
939 v1 = (vbrmax - vbrsf->s[sfb][b]) - (4*14 + 4*max_range_short[sfb]);
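/* v0/v1: how much the reduction this band needs (vbrmax - vbrsf) exceeds what
 * maximum subblock gain (7*8 = 4*14 steps) plus transmitted scalefactors can
 * absorb, for scalefac_scale = 0 (step 2) resp. 1 (step 4); the worst case
 * over all bands decides below whether scalefac_scale gets enabled */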
947 if ((gfc->noise_shaping == 2) && (gfc->presetTune.use && !(gfc->presetTune.athadjust_safe_noiseshaping || gfc->ATH->adjust < 1.0)))
948 /* allow scalefac_scale=1 */
949 mover = Min (maxover0, maxover1);
958 cod_info->scalefac_scale = 0;
959 else if (maxover1 == 0)
960 cod_info->scalefac_scale = 1;
962 /* sf = (cod_info->global_gain-210.0) */
963 cod_info->global_gain = vbrmax + 210;
964 assert(cod_info->global_gain < 256);
966 if (cod_info->global_gain < 0) {
967 cod_info->global_gain = 0;
970 if (cod_info->global_gain > 255) {
971 cod_info->global_gain = 255;
973 for (sfb = 0; sfb < SBMAX_s; ++sfb) {
974 for (b = 0; b < 3; ++b) {
975 vbrsf->s[sfb][b] -= vbrmax;
978 maxover = compute_scalefacs_short (vbrsf->s, cod_info, scalefac->s,
979 cod_info->subblock_gain);
981 assert (maxover <= 0);
983 /* adjust global_gain so at least 1 subblock gain = 0 */
984 minsfb = 999; /* prepare for minimum search */
985 for (b = 0; b < 3; ++b)
986 if (minsfb > cod_info->subblock_gain[b])
987 minsfb = cod_info->subblock_gain[b];
989 if (minsfb > cod_info->global_gain/8)
990 minsfb = cod_info->global_gain/8;
993 cod_info->global_gain -= 8*minsfb;
995 for (b = 0; b < 3; ++b)
996 cod_info->subblock_gain[b] -= minsfb;
1003 /******************************************************************
1005 * long block scalefacs
1007 ******************************************************************/
1010 long_block_scalefacs (
1011 const lame_internal_flags * gfc,
1013 III_scalefac_t * scalefac,
1014 III_scalefac_t * vbrsf,
1017 const int * max_rangep;
1019 int maxover, maxover0, maxover1, maxover0p, maxover1p, mover;
1020 int v0, v1, v0p, v1p;
1021 int vbrmax = *VBRmax;
1023 max_rangep = gfc->is_mpeg1 ? max_range_long : max_range_long_lsf_pretab;
1027 maxover0p = 0; /* pretab */
1028 maxover1p = 0; /* pretab */
1030 maxsfb = gfc->sfb21_extra ? SBMAX_l : SBPSY_l;
1031 for ( sfb = 0; sfb < maxsfb; ++sfb ) {
1032 v0 = (vbrmax - vbrsf->l[sfb]) - 2*max_range_long[sfb];
1033 v1 = (vbrmax - vbrsf->l[sfb]) - 4*max_range_long[sfb];
1034 v0p = (vbrmax - vbrsf->l[sfb]) - 2*(max_rangep[sfb]+pretab[sfb]);
1035 v1p = (vbrmax - vbrsf->l[sfb]) - 4*(max_rangep[sfb]+pretab[sfb]);
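/* same overshoot measure as in short_block_scalefacs(), but long blocks have
 * no subblock gain; v0/v1 are without preflag, v0p/v1p with preflag (pretab
 * adds to what the scalefactors can absorb), again for scalefac_scale = 0
 * resp. 1 */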
1040 if (maxover0p < v0p)
1042 if (maxover1p < v1p)
1046 mover = Min (maxover0, maxover0p);
1047 if ((gfc->noise_shaping == 2) && (gfc->presetTune.use && !(gfc->presetTune.athadjust_safe_noiseshaping || gfc->ATH->adjust < 1.0))) {
1048 /* allow scalefac_scale=1 */
1049 mover = Min (mover, maxover1);
1050 mover = Min (mover, maxover1p);
1059 if (maxover0 <= 0) {
1060 cod_info->scalefac_scale = 0;
1061 cod_info->preflag = 0;
1063 } else if (maxover0p <= 0) {
1064 cod_info->scalefac_scale = 0;
1065 cod_info->preflag = 1;
1066 vbrmax -= maxover0p;
1067 } else if (maxover1 == 0) {
1068 cod_info->scalefac_scale = 1;
1069 cod_info->preflag = 0;
1070 } else if (maxover1p == 0) {
1071 cod_info->scalefac_scale = 1;
1072 cod_info->preflag = 1;
1074 assert(0); /* this should not happen */
1077 /* sf = (cod_info->global_gain-210.0) */
1078 cod_info->global_gain = vbrmax + 210;
1079 assert (cod_info->global_gain < 256);
1081 if (cod_info->global_gain < 0) {
1082 cod_info->global_gain = 0;
1085 if (cod_info->global_gain > 255)
1086 cod_info->global_gain = 255;
1088 for (sfb = 0; sfb < SBMAX_l; ++sfb)
1089 vbrsf->l[sfb] -= vbrmax;
1091 if ( gfc->is_mpeg1 == 1 )
1092 maxover = compute_scalefacs_long (vbrsf->l, cod_info, scalefac->l);
1094 maxover = compute_scalefacs_long_lsf (vbrsf->l, cod_info, scalefac->l);
1096 assert (maxover <= 0);
1103 /***********************************************************************
1105 * quantize xr34 based on scalefactors
1107 * calc_short_block_xr34
1108 * calc_long_block_xr34
1110 * Mark Taylor 2000-??-??
1111 * Robert Hegemann 2000-10-20 made functions of them
1113 ***********************************************************************/
1117 const lame_internal_flags * gfc,
1118 const gr_info * cod_info,
1119 const III_scalefac_t * scalefac,
1120 const FLOAT8 * xr34_orig,
1124 int ifac, ifqstep, start, end;
1127 /* even though there is no scalefactor for sfb12
1128 * subblock gain affects upper frequencies too, that's why
1129 * we have to go up to SBMAX_s
1131 ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
1132 for ( j = 0, sfb = 0; sfb < SBMAX_s; ++sfb ) {
1133 start = gfc->scalefac_band.s[ sfb ];
1134 end = gfc->scalefac_band.s[ sfb+1 ];
1135 for (b = 0; b < 3; ++b) {
1136 ifac = 8*cod_info->subblock_gain[b]+ifqstep*scalefac->s[sfb][b];
1138 if ( ifac == 0 ) { /* just copy */
1139 l = (end-start+7) / 8;
1140 switch ((end-start) % 8) {
1142 case 0: do{ xr34[j] = xr34_orig[j]; ++j;
1143 case 7: xr34[j] = xr34_orig[j]; ++j;
1144 case 6: xr34[j] = xr34_orig[j]; ++j;
1145 case 5: xr34[j] = xr34_orig[j]; ++j;
1146 case 4: xr34[j] = xr34_orig[j]; ++j;
1147 case 3: xr34[j] = xr34_orig[j]; ++j;
1148 case 2: xr34[j] = xr34_orig[j]; ++j;
1149 case 1: xr34[j] = xr34_orig[j]; ++j; } while (--l);
1153 if (ifac < Q_MAX-210)
1154 fac = IIPOW20_(ifac);
1156 fac = pow (2.0, 0.1875*ifac);
1159 * loop unrolled into "Duff's Device". Robert Hegemann
1161 l = (end-start+7) / 8;
1162 switch ((end-start) % 8) {
1164 case 0: do{ xr34[j] = xr34_orig[j]*fac; ++j;
1165 case 7: xr34[j] = xr34_orig[j]*fac; ++j;
1166 case 6: xr34[j] = xr34_orig[j]*fac; ++j;
1167 case 5: xr34[j] = xr34_orig[j]*fac; ++j;
1168 case 4: xr34[j] = xr34_orig[j]*fac; ++j;
1169 case 3: xr34[j] = xr34_orig[j]*fac; ++j;
1170 case 2: xr34[j] = xr34_orig[j]*fac; ++j;
1171 case 1: xr34[j] = xr34_orig[j]*fac; ++j; } while (--l);
1181 const lame_internal_flags * gfc,
1182 const gr_info * cod_info,
1183 const III_scalefac_t * scalefac,
1184 const FLOAT8 * xr34_orig,
1188 int ifac, ifqstep, start, end;
1191 ifqstep = ( cod_info->scalefac_scale == 0 ) ? 2 : 4;
1192 for ( sfb = 0; sfb < SBMAX_l; ++sfb ) {
1194 ifac = ifqstep*scalefac->l[sfb];
1195 if (cod_info->preflag)
1196 ifac += ifqstep*pretab[sfb];
1198 start = gfc->scalefac_band.l[ sfb ];
1199 end = gfc->scalefac_band.l[ sfb+1 ];
1201 if ( ifac == 0 ) { /* just copy */
1203 l = (end-start+7) / 8;
1204 switch ((end-start) % 8) {
1206 case 0: do{ xr34[j] = xr34_orig[j]; ++j;
1207 case 7: xr34[j] = xr34_orig[j]; ++j;
1208 case 6: xr34[j] = xr34_orig[j]; ++j;
1209 case 5: xr34[j] = xr34_orig[j]; ++j;
1210 case 4: xr34[j] = xr34_orig[j]; ++j;
1211 case 3: xr34[j] = xr34_orig[j]; ++j;
1212 case 2: xr34[j] = xr34_orig[j]; ++j;
1213 case 1: xr34[j] = xr34_orig[j]; ++j; } while (--l);
1217 if (ifac < Q_MAX-210)
1218 fac = IIPOW20_(ifac);
1220 fac = pow (2.0, 0.1875*ifac);
1223 * loop unrolled into "Duff's Device". Robert Hegemann
1226 l = (end-start+7) / 8;
1227 switch ((end-start) % 8) {
1229 case 0: do{ xr34[j] = xr34_orig[j]*fac; ++j;
1230 case 7: xr34[j] = xr34_orig[j]*fac; ++j;
1231 case 6: xr34[j] = xr34_orig[j]*fac; ++j;
1232 case 5: xr34[j] = xr34_orig[j]*fac; ++j;
1233 case 4: xr34[j] = xr34_orig[j]*fac; ++j;
1234 case 3: xr34[j] = xr34_orig[j]*fac; ++j;
1235 case 2: xr34[j] = xr34_orig[j]*fac; ++j;
1236 case 1: xr34[j] = xr34_orig[j]*fac; ++j; } while (--l);
1247 /************************************************************************
1249 * VBR_noise_shaping2()
1251 * may need more bits than allowed; in that case, fall back to CBR-like quantization
1253 * Robert Hegemann 2000-10-25
1255 ***********************************************************************/
1258 VBR_noise_shaping2 (
1259 lame_global_flags * gfp,
1265 III_scalefac_t * scalefac,
1266 III_psy_xmin * l3_xmin,
1270 lame_internal_flags *gfc = gfp->internal_flags;
1271 III_scalefac_t vbrsf;
1272 III_scalefac_t vbrsf2;
1275 int shortblock, ret, bits, huffbits;
1276 int vbrmin, vbrmax, vbrmin2, vbrmax2;
1277 int best_huffman = gfc->use_best_huffman;
1280 gfc->use_best_huffman = 0; /* we will do it later */
1282 cod_info = &gfc->l3_side.gr[gr].ch[ch].tt;
1283 shortblock = (cod_info->block_type == SHORT_TYPE);
1286 short_block_sf (gfc, l3_xmin, xr34orig, xr, &vbrsf2, &vbrmin2, &vbrmax2);
1288 long_block_sf (gfc, l3_xmin, xr34orig, xr, &vbrsf2, &vbrmin2, &vbrmax2);
1298 short_block_scalefacs (gfc, cod_info, scalefac, &vbrsf, &vbrmax);
1299 short_block_xr34 (gfc, cod_info, scalefac, xr34orig, xr34);
1301 long_block_scalefacs (gfc, cod_info, scalefac, &vbrsf, &vbrmax);
1302 long_block_xr34 (gfc, cod_info, scalefac, xr34orig, xr34);
1305 ret = VBR_quantize_granule (gfc, xr34, l3_enc, scalefac, gr, ch);
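/* if the granule used fewer than minbits, lower the ceiling on the
 * scalefactors (finer quantization -> more bits); if it used more than
 * maxbits, raise the floor (coarser quantization -> fewer bits); the
 * count/6 factor interpolates between the vbrmin2/vbrmax2 extremes found
 * by the initial scalefactor search over successive passes */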
1307 if (vbrmin == vbrmax) break;
1308 else if (cod_info->part2_3_length < minbits) {
1310 vbrmax = vbrmin2 + (vbrmax2-vbrmin2) * count/6;
1313 for (i = 0; i < SBMAX_s; ++i) {
1314 //vbrsf.s[i][0] = vbrmin2 + (vbrsf2.s[i][0]-vbrmin2) * count/6;
1315 //vbrsf.s[i][1] = vbrmin2 + (vbrsf2.s[i][1]-vbrmin2) * count/6;
1316 //vbrsf.s[i][2] = vbrmin2 + (vbrsf2.s[i][2]-vbrmin2) * count/6;
1317 vbrsf.s[i][0] = Min(vbrsf2.s[i][0], vbrmax);
1318 vbrsf.s[i][1] = Min(vbrsf2.s[i][1], vbrmax);
1319 vbrsf.s[i][2] = Min(vbrsf2.s[i][2], vbrmax);
1323 for (i = 0; i < SBMAX_l; ++i) {
1324 //vbrsf.l[i] = vbrmin2 + (vbrsf2.l[i]-vbrmin2) * count/6;
1325 vbrsf.l[i] = Min(vbrsf2.l[i], vbrmax);
1329 else if (cod_info->part2_3_length > maxbits) {
1332 vbrmin = vbrmax2 + (vbrmin2-vbrmax2) * count/6;
1334 for (i = 0; i < SBMAX_s; ++i) {
1335 //vbrsf.s[i][0] = vbrmax2 + (vbrsf2.s[i][0]-vbrmax2) * count/6;
1336 //vbrsf.s[i][1] = vbrmax2 + (vbrsf2.s[i][1]-vbrmax2) * count/6;
1337 //vbrsf.s[i][2] = vbrmax2 + (vbrsf2.s[i][2]-vbrmax2) * count/6;
1338 vbrsf.s[i][0] = Max(vbrsf2.s[i][0], vbrmin);
1339 vbrsf.s[i][1] = Max(vbrsf2.s[i][1], vbrmin);
1340 vbrsf.s[i][2] = Max(vbrsf2.s[i][2], vbrmin);
1344 for (i = 0; i < SBMAX_l; ++i) {
1345 //vbrsf.l[i] = vbrmax2 + (vbrsf2.l[i]-vbrmax2) * count/6;
1346 vbrsf.l[i] = Max(vbrsf2.l[i], vbrmin);
1351 } while (ret != -1);
1354 gfc->use_best_huffman = best_huffman;
1356 if (ret == -1) /* Houston, we have a problem */
1359 if (cod_info->part2_3_length < minbits) {
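/* CBR-style fallback: bin_search_StepSize() adjusts global_gain by binary
 * search until the Huffman coded part comes close to the requested number
 * of bits (here: spend at least about minbits), ignoring the per-band
 * noise targets */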
1360 huffbits = minbits - cod_info->part2_length;
1361 bits = bin_search_StepSize (gfc, cod_info, huffbits,
1362 gfc->OldValue[ch], xr34, l3_enc);
1363 gfc->OldValue[ch] = cod_info->global_gain;
1364 cod_info->part2_3_length = bits + cod_info->part2_length;
1366 if (cod_info->part2_3_length > maxbits) {
1367 huffbits = maxbits - cod_info->part2_length;
1368 if (huffbits < 0) huffbits = 0;
1369 bits = bin_search_StepSize (gfc, cod_info, huffbits,
1370 gfc->OldValue[ch], xr34, l3_enc);
1371 gfc->OldValue[ch] = cod_info->global_gain;
1372 cod_info->part2_3_length = bits;
1373 if (bits > huffbits) {
1374 bits = inner_loop (gfc, cod_info, huffbits, xr34, l3_enc);
1375 cod_info->part2_3_length = bits;
1377 if (bits >= LARGE_BITS) /* Houston, we have a problem */
1379 cod_info->part2_3_length += cod_info->part2_length;
1382 if (cod_info->part2_length >= LARGE_BITS) /* Houston, we have a problem */
1385 assert (cod_info->global_gain < 256);
1395 /************************************************************************
1397 * VBR_noise_shaping()
1399 * compute scalefactors, l3_enc, and return number of bits needed to encode
1401 * return code: 0 scalefactors were found with all noise < masking
1403 * n>0 scalefactors required too many bits. global gain
1404 * was increased by n
1405 * If n is large, we should probably recompute scalefacs
1406 * with a lower quality.
1408 * n<0 scalefactors used less than minbits.
1409 * global gain was decreased by -n.
1410 * If n is large, might want to recompute scalefacs
1411 * with a higher quality setting?
1413 ************************************************************************/
1416 lame_global_flags *gfp,
1418 FLOAT8 xr34orig [576],
1420 int digital_silence,
1423 III_scalefac_t *scalefac,
1424 III_psy_xmin *l3_xmin,
1428 lame_internal_flags *gfc=gfp->internal_flags;
1429 III_scalefac_t save_sf;
1430 III_scalefac_t vbrsf;
1435 int global_gain_adjust = 0;
1437 cod_info = &gfc->l3_side.gr[gr].ch[ch].tt;
1438 shortblock = (cod_info->block_type == SHORT_TYPE);
1441 short_block_sf (gfc, l3_xmin, xr34orig, xr, &vbrsf, &vbrmin, &vbrmax);
1443 long_block_sf (gfc, l3_xmin, xr34orig, xr, &vbrsf, &vbrmin, &vbrmax);
1445 /* save a copy of vbrsf, in case we have to recompute scalefacs */
1446 memcpy (&save_sf, &vbrsf, sizeof(III_scalefac_t));
1449 memset (scalefac, 0, sizeof(III_scalefac_t));
1452 short_block_scalefacs (gfc, cod_info, scalefac, &vbrsf, &vbrmax);
1453 short_block_xr34 (gfc, cod_info, scalefac, xr34orig, xr34);
1455 long_block_scalefacs (gfc, cod_info, scalefac, &vbrsf, &vbrmax);
1456 long_block_xr34 (gfc, cod_info, scalefac, xr34orig, xr34);
1458 VBR_quantize_granule (gfc, xr34, l3_enc, scalefac, gr, ch);
1461 /* decrease noise until we use at least minbits
1463 if (cod_info->part2_3_length < minbits) {
1464 if (digital_silence) break;
1465 //if (cod_info->part2_3_length == cod_info->part2_length) break;
1466 if (vbrmax+210 == 0) break;
1468 /* decrease global gain, recompute scale factors */
1470 --global_gain_adjust;
1471 memcpy (&vbrsf, &save_sf, sizeof(III_scalefac_t));
1474 } while (cod_info->part2_3_length < minbits);
1476 /* inject noise until we meet our bit limit
1478 while (cod_info->part2_3_length > Min (maxbits, MAX_BITS)) {
1479 /* increase global gain, keep existing scale factors */
1480 ++cod_info->global_gain;
1481 if (cod_info->global_gain > 255)
1482 ERRORF (gfc,"%ld impossible to encode ??? frame! bits=%d\n",
1483 // gfp->frameNum, cod_info->part2_3_length);
1484 -1, cod_info->part2_3_length);
1485 VBR_quantize_granule (gfc, xr34, l3_enc, scalefac, gr, ch);
1487 ++global_gain_adjust;
1490 return global_gain_adjust;
1496 VBR_quantize(lame_global_flags *gfp,
1497 FLOAT8 pe[2][2], FLOAT8 ms_ener_ratio[2],
1498 FLOAT8 xr[2][2][576], III_psy_ratio ratio[2][2],
1499 int l3_enc[2][2][576],
1500 III_scalefac_t scalefac[2][2])
1502 lame_internal_flags *gfc=gfp->internal_flags;
1503 III_psy_xmin l3_xmin[2][2];
1504 int minbits,maxbits,max_frame_bits,totbits,gr,ch,i,bits_ok;
1505 int bitsPerFrame,mean_bits;
1508 III_side_info_t * l3_side;
1510 int digital_silence[2][2];
1511 FLOAT8 masking_lower_db=0;
1512 FLOAT8 xr34[2][2][576];
1514 qadjust=0; /* start with -1 db quality improvement over quantize.c VBR */
1516 l3_side = &gfc->l3_side;
1518 /* now find out: if the frame can be considered analog silent
1519 * if each granule can be considered digital silent
1520 * and calculate l3_xmin and the fresh xr34 array
1523 assert( gfp->VBR_q <= 9 );
1524 assert( gfp->VBR_q >= 0 );
1526 for (gr = 0; gr < gfc->mode_gr; ++gr) {
1527 /* copy data to be quantized into xr */
1528 if (gfc->mode_ext==MPG_MD_MS_LR) {
1529 ms_convert(xr[gr],xr[gr]);
1531 for (ch = 0; ch < gfc->channels_out; ++ch) {
1532 /* if in the following sections the quality would not be adjusted
1533 * then we would only have to call calc_xmin once here and
1534 * could drop the subsequent calls (rh 2000/07/17)
1537 cod_info = &l3_side->gr[gr].ch[ch].tt;
1538 cod_info->part2_3_length=LARGE_BITS;
1540 if (cod_info->block_type == SHORT_TYPE) {
1541 cod_info->sfb_lmax = 0; /* No sb*/
1542 cod_info->sfb_smin = 0;
1544 /* MPEG 1 doesn't use the last scalefactor band */
1545 cod_info->sfb_lmax = SBPSY_l;
1546 cod_info->sfb_smin = SBPSY_s; /* No sb */
1547 if (cod_info->mixed_block_flag) {
1548 cod_info->sfb_lmax = gfc->is_mpeg1 ? 8 : 6;
1549 cod_info->sfb_smin = 3;
1553 /* quality setting */
1554 masking_lower_db = gfc->VBR->mask_adjust;
1555 if (pe[gr][ch]>750) {
1556 masking_lower_db -= Min(10,4*(pe[gr][ch]-750.)/750.);
1558 gfc->masking_lower = pow(10.0,masking_lower_db/10);
1560 /* masking thresholds */
1561 over_ath = calc_xmin(gfp,xr[gr][ch],&ratio[gr][ch],cod_info,&l3_xmin[gr][ch]);
1563 /* if there are bands with more energy than the ATH
1564 * then we say the frame is not analog silent */
1569 /* if there is no line with more energy than 1e-20
1570 * then this granule is considered digitally silent;
1571 * xr34 is computed in the same pass */
1572 digital_silence[gr][ch] = 1;
1573 for(i=0;i<576;++i) {
1574 FLOAT8 temp=fabs(xr[gr][ch][i]);
1575 xr34[gr][ch][i]=sqrt(sqrt(temp)*temp);
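/* sqrt(sqrt(x)*x) == x^(3/4): the absolute spectrum raised to the 3/4 power
 * required by the MP3 quantizer, computed with two sqrt() calls instead of
 * one pow() per line */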
1576 digital_silence[gr][ch] &= temp < 1E-20;
1582 /* compute minimum allowed bits from minimum allowed bitrate */
1583 if (analog_silence) {
1584 gfc->bitrate_index=1;
1586 gfc->bitrate_index=gfc->VBR_min_bitrate;
1588 getframebits(gfp, &bitsPerFrame, &mean_bits);
1589 minbits = (mean_bits/gfc->channels_out);
1591 /* compute maximum allowed bits from max allowed bitrate */
1592 gfc->bitrate_index=gfc->VBR_max_bitrate;
1593 getframebits(gfp, &bitsPerFrame, &mean_bits);
1594 max_frame_bits = ResvFrameBegin(gfp, l3_side, mean_bits, bitsPerFrame);
1595 maxbits=2.5*(mean_bits/gfc->channels_out);
1598 /* compute a target mean_bits based on compression ratio
1599 * which was set based on VBR_q
1601 int bit_rate = gfp->out_samplerate*16*gfc->channels_out/(1000.0*gfp->compression_ratio);
1602 bitsPerFrame = (bit_rate*gfp->framesize*1000)/gfp->out_samplerate;
1603 mean_bits = (bitsPerFrame - 8*gfc->sideinfo_len) / gfc->mode_gr;
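/* e.g. 44.1 kHz stereo with compression_ratio 11.025 gives
 * bit_rate = 44100*16*2 / (1000*11.025) = 128 (kbps); the per-channel
 * minbits/maxbits limits below are then derived from that frame's mean_bits */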
1607 minbits = Max(minbits,125);
1608 minbits=Max(minbits,.40*(mean_bits/gfc->channels_out));
1609 maxbits=Min(maxbits,2.5*(mean_bits/gfc->channels_out));
1618 * loop over all ch,gr, encoding anything with bits > .5*(max_frame_bits/4)
1620 * If a particular granule uses way too many bits, it will be re-encoded
1621 * on the next iteration of the loop (with a lower quality setting).
1622 * But granules which don't
1623 * use too many bits will not be re-encoded.
1625 * minbits: minimum allowed bits for 1 granule 1 channel
1626 * maxbits: maximum allowed bits for 1 granule 1 channel
1627 * max_frame_bits: maximum allowed bits for entire frame
1628 * (max_frame_bits/4) estimate of average bits per granule per channel
1635 for (gr = 0; gr < gfc->mode_gr; ++gr) {
1637 minbits_lr[0]=minbits;
1638 minbits_lr[1]=minbits;
1641 if (gfc->mode_ext==MPG_MD_MS_LR) {
1643 fac = .33*(.5-ms_ener_ratio[gr])/.5;
1646 minbits_lr[0] = (1+fac)*minbits;
1647 minbits_lr[1] = Max(125,(1-fac)*minbits);
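/* in mid/side mode, shift up to 33% of the per-channel minimum from the side
 * channel (ch 1) to the mid channel (ch 0), depending on how little energy
 * the side channel carries (ms_ener_ratio near 0 means a weak side channel);
 * the side channel still gets at least 125 bits */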
1652 for (ch = 0; ch < gfc->channels_out; ++ch) {
1653 int adjusted,shortblock;
1654 cod_info = &l3_side->gr[gr].ch[ch].tt;
1656 /* ENCODE this data on the first pass, and on later passes unless it used
1657 * only a very small percentage of the max_frame_bits */
1658 if (cod_info->part2_3_length > (max_frame_bits/(2*gfc->channels_out*gfc->mode_gr))) {
1660 shortblock = (cod_info->block_type == SHORT_TYPE);
1662 /* Adjust allowed masking based on quality setting */
1663 if (qadjust!=0 /*|| shortblock*/) {
1664 masking_lower_db = gfc->VBR->mask_adjust + qadjust;
1667 if (shortblock) masking_lower_db -= 4;
1671 masking_lower_db -= Min(10,4*(pe[gr][ch]-750.)/750.);
1672 gfc->masking_lower = pow(10.0,masking_lower_db/10);
1673 calc_xmin( gfp, xr[gr][ch], ratio[gr]+ch, cod_info, l3_xmin[gr]+ch);
1676 /* digitally silent granules do not need the full round trip,
1677 * but this can be optimized later on
1679 adjusted = VBR_noise_shaping (gfp,xr[gr][ch],xr34[gr][ch],
1681 digital_silence[gr][ch],
1683 maxbits,scalefac[gr]+ch,
1684 l3_xmin[gr]+ch,gr,ch);
1686 /* global_gain was changed by a large amount to get bits < maxbits */
1687 /* the quality setting is too high. we could set bits = LARGE_BITS
1688 * to force re-encoding. But most likely the other channels/granules
1689 * will also use too many bits, and the entire frame will
1690 * be > max_frame_bits, forcing re-encoding below.
1692 // cod_info->part2_3_bits = LARGE_BITS;
1695 totbits += cod_info->part2_3_length;
1699 if (totbits>max_frame_bits) {
1701 qadjust += Max(.125,Min(1,(totbits-max_frame_bits)/300.0));
1702 /* adjusting minbits and maxbits is necessary too,
1703 * because lowering the quality alone is not enough: in rare cases
1704 * where each granule still needs almost maxbits, the frame won't fit */
1705 minbits = Max(125,minbits*0.975);
1706 maxbits = Max(minbits,maxbits*0.975);
1707 // DEBUGF("%i totbits>max_frame_bits totbits=%i maxbits=%i \n",gfp->frameNum,totbits,max_frame_bits);
1708 // DEBUGF("next masking_lower_db = %f \n",masking_lower_db + qadjust);
1716 /* find optimal scalefac storage. Can't be done above because it
1717 * might enable scfsi, which breaks the iteration loops */
1719 for (gr = 0; gr < gfc->mode_gr; ++gr) {
1720 for (ch = 0; ch < gfc->channels_out; ++ch) {
1721 best_scalefac_store(gfc, gr, ch, l3_enc, l3_side, scalefac);
1722 totbits += l3_side->gr[gr].ch[ch].tt.part2_3_length;
1729 if (analog_silence && !gfp->VBR_hard_min) {
1730 gfc->bitrate_index = 1;
1732 gfc->bitrate_index = gfc->VBR_min_bitrate;
1734 for( ; gfc->bitrate_index < gfc->VBR_max_bitrate; ++gfc->bitrate_index ) {
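/* walk up from the smallest allowed bitrate and stop at the first one whose
 * frame, plus whatever the bit reservoir can contribute, holds the bits
 * actually used; otherwise stay at VBR_max_bitrate (handled below) */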
1736 getframebits (gfp, &bitsPerFrame, &mean_bits);
1737 maxbits = ResvFrameBegin(gfp, l3_side, mean_bits, bitsPerFrame);
1738 if (totbits <= maxbits) break;
1740 if (gfc->bitrate_index == gfc->VBR_max_bitrate) {
1741 getframebits (gfp, &bitsPerFrame, &mean_bits);
1742 maxbits = ResvFrameBegin(gfp, l3_side, mean_bits, bitsPerFrame);
1745 // DEBUGF("%i total_bits=%i max_frame_bits=%i index=%i \n",gfp->frameNum,totbits,max_frame_bits,gfc->bitrate_index);
1747 for (gr = 0; gr < gfc->mode_gr; ++gr) {
1748 for (ch = 0; ch < gfc->channels_out; ++ch) {
1749 cod_info = &l3_side->gr[gr].ch[ch].tt;
1752 ResvAdjust (gfc, cod_info, l3_side, mean_bits);
1754 /*******************************************************************
1755 * set the sign of l3_enc from the sign of xr
1756 *******************************************************************/
1757 for ( i = 0; i < 576; ++i) {
1758 if (xr[gr][ch][i] < 0) l3_enc[gr][ch][i] *= -1;
1762 ResvFrameEnd (gfc, l3_side, mean_bits);