diff --git a/src/libFLAC/include/private/stream_encoder.h b/src/libFLAC/include/private/stream_encoder.h
index 96d3135..3c11b7e 100644
--- a/src/libFLAC/include/private/stream_encoder.h
+++ b/src/libFLAC/include/private/stream_encoder.h
@@ -41,7 +41,7 @@
  * This is used to avoid overflow with unusual signals in 32-bit
  * accumulator in the *precompute_partition_info_sums_* functions.
  */
-#define FLAC__MAX_EXTRA_RESIDUAL_BPS 4
+#define FLAC__MAX_EXTRA_RESIDUAL_BPS 5
 
 #if (defined FLAC__CPU_IA32 || defined FLAC__CPU_X86_64) && defined FLAC__HAS_X86INTRIN
 #include "private/cpu.h"
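
The constant grows from 4 to 5, but the checks in the files below change from "<" to "<=" at the same time, so the condition for taking the plain 32-bit path is the same as before:

	old:  FLAC__bitmath_ilog2(default_partition_samples) + bps + 4 <  32
	new:  bps + 5 <= threshold,  where threshold = 32 - FLAC__bitmath_ilog2(default_partition_samples)

Both reduce to FLAC__bitmath_ilog2(default_partition_samples) + bps <= 27; only the form of the test changes.
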
diff --git a/src/libFLAC/stream_encoder.c b/src/libFLAC/stream_encoder.c
index 6380ce6..402dfa0 100644
--- a/src/libFLAC/stream_encoder.c
+++ b/src/libFLAC/stream_encoder.c
@@ -3978,10 +3978,10 @@ void precompute_partition_info_sums_(
 
 	/* first do max_partition_order */
 	{
+		const unsigned threshold = 32 - FLAC__bitmath_ilog2(default_partition_samples);
 		unsigned partition, residual_sample, end = (unsigned)(-(int)predictor_order);
-		/* WATCHOUT: "+ bps + FLAC__MAX_EXTRA_RESIDUAL_BPS" is the maximum
-		 * assumed size of the average residual magnitude */
-		if(FLAC__bitmath_ilog2(default_partition_samples) + bps + FLAC__MAX_EXTRA_RESIDUAL_BPS < 32) {
+		/* WATCHOUT: "bps + FLAC__MAX_EXTRA_RESIDUAL_BPS" is the assumed maximum size, in bits, of the average residual magnitude */
+		if(bps + FLAC__MAX_EXTRA_RESIDUAL_BPS <= threshold) { /* i.e. FLAC__bitmath_ilog2(default_partition_samples) + bps + FLAC__MAX_EXTRA_RESIDUAL_BPS <= 32 */
 			FLAC__uint32 abs_residual_partition_sum;
 
 			for(partition = residual_sample = 0; partition < partitions; partition++) {
@@ -3992,15 +3992,32 @@ void precompute_partition_info_sums_(
 				abs_residual_partition_sums[partition] = abs_residual_partition_sum;
 			}
 		}
-		else { /* have to pessimistically use 64 bits for accumulator */
-			FLAC__uint64 abs_residual_partition_sum;
+		else { /* still try to use 32-bit math */
+			FLAC__uint32 abs_residual_partition_sum;
+			FLAC__uint32 r_bits, r_abs;
+			unsigned r_sample_init;
 
 			for(partition = residual_sample = 0; partition < partitions; partition++) {
 				end += default_partition_samples;
-				abs_residual_partition_sum = 0;
-				for( ; residual_sample < end; residual_sample++)
-					abs_residual_partition_sum += abs(residual[residual_sample]); /* abs(INT_MIN) is undefined, but if the residual is INT_MIN we have bigger problems */
-				abs_residual_partition_sums[partition] = abs_residual_partition_sum;
+				abs_residual_partition_sum = 0; r_bits = 0;
+				r_sample_init = residual_sample; /* save initial position */
+				for( ; residual_sample < end; residual_sample++) {
+					r_abs = abs(residual[residual_sample]); /* abs(INT_MIN) is undefined, but if the residual is INT_MIN we have bigger problems */
+					abs_residual_partition_sum += r_abs;
+					r_bits |= r_abs;
+				}
+
+				if(FLAC__bitmath_ilog2(r_bits|1) < threshold) { /* actually the condition is (r_bits==0 ? 0 : FLAC__bitmath_ilog2(r_bits)+1) <= threshold */
+					abs_residual_partition_sums[partition] = abs_residual_partition_sum; /* no overflow */
+				}
+				else { /* have to pessimistically use 64 bits for accumulator */
+					FLAC__uint64 abs_residual_partition_sum64 = 0;
+
+					residual_sample = r_sample_init; /* rewind and repeat summation */
+					for( ; residual_sample < end; residual_sample++)
+						abs_residual_partition_sum64 += (FLAC__uint32)abs(residual[residual_sample]); /* abs(INT_MIN) is undefined, but if the residual is INT_MIN we have bigger problems */
+					abs_residual_partition_sums[partition] = abs_residual_partition_sum64;
+				}
 			}
 		}
 	}
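
A worked example of when the new else path runs (the numbers are illustrative): with the default blocksize of 4096 and partition order 4, default_partition_samples = 256 and threshold = 32 - 8 = 24. For 24-bit input the side subframe has bps = 25, so bps + 5 = 30 > 24 and the old code dropped straight to the 64-bit accumulator (8 + 25 + 4 = 37). The new code first sums in 32 bits while OR-ing the magnitudes into r_bits; the 32-bit sum is kept whenever every |residual| in the partition fits in threshold (here 24) bits, and only the partitions that fail that test are re-summed with a 64-bit accumulator.
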
diff --git a/src/libFLAC/stream_encoder_intrin_avx2.c b/src/libFLAC/stream_encoder_intrin_avx2.c
index 3aa3197..cc84e72 100644
--- a/src/libFLAC/stream_encoder_intrin_avx2.c
+++ b/src/libFLAC/stream_encoder_intrin_avx2.c
@@ -55,13 +55,15 @@ void FLAC__precompute_partition_info_sums_intrin_avx2(const FLAC__int32 residual
 
 	/* first do max_partition_order */
 	{
+		const unsigned threshold = 32 - FLAC__bitmath_ilog2(default_partition_samples);
 		unsigned partition, residual_sample, end = (unsigned)(-(int)predictor_order);
 		__m256i res256, sum256;
 		__m128i res128, sum128;
 
-		if(FLAC__bitmath_ilog2(default_partition_samples) + bps + FLAC__MAX_EXTRA_RESIDUAL_BPS < 32) {
+		if(bps + FLAC__MAX_EXTRA_RESIDUAL_BPS <= threshold) {
 			for(partition = residual_sample = 0; partition < partitions; partition++) {
 				end += default_partition_samples;
+
 				sum256 = _mm256_setzero_si256();
 
 				for( ; (int)residual_sample < (int)end-7; residual_sample+=8) {
@@ -87,34 +89,75 @@ void FLAC__precompute_partition_info_sums_intrin_avx2(const FLAC__int32 residual
 				abs_residual_partition_sums[partition] = (FLAC__uint32)_mm_cvtsi128_si32(sum128);
 			}
 		}
-		else { /* have to pessimistically use 64 bits for accumulator */
+		else { /* still try to use 32-bit math */
+			unsigned r_sample_init;
+			__m256i bits256;
+			__m128i bits128;
+
 			for(partition = residual_sample = 0; partition < partitions; partition++) {
 				end += default_partition_samples;
-				sum256 = _mm256_setzero_si256();
+
 				sum256 = _mm256_setzero_si256();
+				bits256 = _mm256_setzero_si256();
+				r_sample_init = residual_sample;
 
-				for( ; (int)residual_sample < (int)end-3; residual_sample+=4) {
-					res128 = _mm_abs_epi32(_mm_loadu_si128((const __m128i*)(residual+residual_sample)));
-					res256 = _mm256_cvtepu32_epi64(res128);
-					sum256 = _mm256_add_epi64(sum256, res256);
+				for( ; (int)residual_sample < (int)end-7; residual_sample+=8) {
+					res256 = _mm256_abs_epi32(_mm256_loadu_si256((const __m256i*)(residual+residual_sample)));
+					sum256 = _mm256_add_epi32(sum256, res256);
+					bits256 = _mm256_or_si256(bits256, res256);
 				}
 
-				sum128 = _mm_add_epi64(_mm256_extracti128_si256(sum256, 1), _mm256_castsi256_si128(sum256));
+				sum128 = _mm_add_epi32(_mm256_extracti128_si256(sum256, 1), _mm256_castsi256_si128(sum256));
+				bits128 = _mm_or_si128(_mm256_extracti128_si256(bits256, 1), _mm256_castsi256_si128(bits256));
 
-				for( ; (int)residual_sample < (int)end-1; residual_sample+=2) {
-					res128 = _mm_loadl_epi64((const __m128i*)(residual+residual_sample));
-					res128 = _mm_abs_epi32(res128);
-					res128 = _mm_cvtepu32_epi64(res128);
-					sum128 = _mm_add_epi64(sum128, res128);
+				for( ; (int)residual_sample < (int)end-3; residual_sample+=4) {
+					res128 = _mm_abs_epi32(_mm_loadu_si128((const __m128i*)(residual+residual_sample)));
+					sum128 = _mm_add_epi32(sum128, res128);
+					bits128 = _mm_or_si128(bits128, res128);
 				}
 
 				for( ; residual_sample < end; residual_sample++) {
 					res128 = _mm_cvtsi32_si128(residual[residual_sample]);
 					res128 = _mm_abs_epi32(res128);
-					sum128 = _mm_add_epi64(sum128, res128);
+					sum128 = _mm_add_epi32(sum128, res128);
+					bits128 = _mm_or_si128(bits128, res128);
 				}
 
-				sum128 = _mm_add_epi64(sum128, _mm_srli_si128(sum128, 8));
-				_mm_storel_epi64((__m128i*)(abs_residual_partition_sums+partition), sum128);
+				bits128 = _mm_or_si128(bits128, _mm_srli_si128(bits128, 8));
+				bits128 = _mm_or_si128(bits128, _mm_srli_si128(bits128, 4));
+				if(FLAC__bitmath_ilog2((FLAC__uint32)_mm_cvtsi128_si32(bits128)|1) < threshold) { /* no overflow */
+					sum128 = _mm_hadd_epi32(sum128, sum128);
+					sum128 = _mm_hadd_epi32(sum128, sum128);
+					abs_residual_partition_sums[partition] = (FLAC__uint32)_mm_cvtsi128_si32(sum128);
+				}
+				else { /* have to pessimistically use 64 bits for accumulator */
+					sum256 = _mm256_setzero_si256();
+					residual_sample = r_sample_init; /* rewind */
+
+					for( ; (int)residual_sample < (int)end-3; residual_sample+=4) {
+						res128 = _mm_abs_epi32(_mm_loadu_si128((const __m128i*)(residual+residual_sample)));
+						res256 = _mm256_cvtepu32_epi64(res128);
+						sum256 = _mm256_add_epi64(sum256, res256);
+					}
+
+					sum128 = _mm_add_epi64(_mm256_extracti128_si256(sum256, 1), _mm256_castsi256_si128(sum256));
+
+					for( ; (int)residual_sample < (int)end-1; residual_sample+=2) {
+						res128 = _mm_loadl_epi64((const __m128i*)(residual+residual_sample));
+						res128 = _mm_abs_epi32(res128);
+						res128 = _mm_cvtepu32_epi64(res128);
+						sum128 = _mm_add_epi64(sum128, res128);
+					}
+
+					for( ; residual_sample < end; residual_sample++) {
+						res128 = _mm_cvtsi32_si128(residual[residual_sample]);
+						res128 = _mm_abs_epi32(res128);
+						sum128 = _mm_add_epi64(sum128, res128);
+					}
+
+					sum128 = _mm_add_epi64(sum128, _mm_srli_si128(sum128, 8));
+					_mm_storel_epi64((__m128i*)(abs_residual_partition_sums+partition), sum128);
+				}
 			}
 		}
 	}
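
The AVX2 code above has to fold the per-lane OR and the per-lane 32-bit sums down to a scalar before it can run the per-partition overflow test. A minimal sketch of the two horizontal reductions it uses, with illustrative helper names (hor_or_epi32 and hor_sum_epi32 are not part of the patch):

	#include <stdint.h>
	#include <immintrin.h>

	/* OR the four 32-bit lanes of v together (feeds the overflow test) */
	static uint32_t hor_or_epi32(__m128i v)
	{
		v = _mm_or_si128(v, _mm_srli_si128(v, 8)); /* fold lanes 2,3 onto lanes 0,1 */
		v = _mm_or_si128(v, _mm_srli_si128(v, 4)); /* fold lane 1 onto lane 0 */
		return (uint32_t)_mm_cvtsi128_si32(v);
	}

	/* add the four 32-bit lanes of v together (used when the 32-bit sum is kept) */
	static uint32_t hor_sum_epi32(__m128i v)
	{
		v = _mm_hadd_epi32(v, v); /* SSSE3: pairwise sums {v0+v1, v2+v3, v0+v1, v2+v3} */
		v = _mm_hadd_epi32(v, v); /* lane 0 now holds v0+v1+v2+v3 */
		return (uint32_t)_mm_cvtsi128_si32(v);
	}

The SSE2 version below cannot use _mm_hadd_epi32 (an SSSE3 instruction), so it folds its sum with _mm_srli_si128/_mm_add_epi32 instead.
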
diff --git a/src/libFLAC/stream_encoder_intrin_sse2.c b/src/libFLAC/stream_encoder_intrin_sse2.c
index ec5541c..6ba393c 100644
--- a/src/libFLAC/stream_encoder_intrin_sse2.c
+++ b/src/libFLAC/stream_encoder_intrin_sse2.c
@@ -55,15 +55,16 @@ void FLAC__precompute_partition_info_sums_intrin_sse2(const FLAC__int32 residual
 
 	/* first do max_partition_order */
 	{
+		const unsigned threshold = 32 - FLAC__bitmath_ilog2(default_partition_samples);
 		unsigned partition, residual_sample, end = (unsigned)(-(int)predictor_order);
 		unsigned e1, e3;
 		__m128i mm_res, mm_sum, mm_mask;
 
-		if(FLAC__bitmath_ilog2(default_partition_samples) + bps + FLAC__MAX_EXTRA_RESIDUAL_BPS < 32) {
+		if(bps + FLAC__MAX_EXTRA_RESIDUAL_BPS <= threshold) {
 			for(partition = residual_sample = 0; partition < partitions; partition++) {
 				end += default_partition_samples;
-				mm_sum = _mm_setzero_si128();
 
+				mm_sum = _mm_setzero_si128();
 				e1 = (residual_sample + 3) & ~3; e3 = end & ~3;
 				if(e1 > end)
 					e1 = end; /* try flac -l 1 -b 16 and you'll be here */
@@ -73,7 +74,7 @@ void FLAC__precompute_partition_info_sums_intrin_sse2(const FLAC__int32 residual
 					mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
 					mm_mask = _mm_srai_epi32(mm_res, 31);
 					mm_res = _mm_xor_si128(mm_res, mm_mask);
-					mm_res = _mm_sub_epi32(mm_res, mm_mask); /* abs(INT_MIN) is undefined, but if the residual is INT_MIN we have bigger problems */
+					mm_res = _mm_sub_epi32(mm_res, mm_mask);
 					mm_sum = _mm_add_epi32(mm_sum, mm_res);
 				}
 
@@ -98,29 +99,37 @@ void FLAC__precompute_partition_info_sums_intrin_sse2(const FLAC__int32 residual
 				abs_residual_partition_sums[partition] = (FLAC__uint32)_mm_cvtsi128_si32(mm_sum);
 			}
 		}
-		else { /* have to pessimistically use 64 bits for accumulator */
+		else { /* still try to use 32-bit math */
+			unsigned r_sample_init;
+			__m128i mm_bits;
+
 			for(partition = residual_sample = 0; partition < partitions; partition++) {
 				end += default_partition_samples;
-				mm_sum = _mm_setzero_si128();
 
-				e1 = (residual_sample + 1) & ~1; e3 = end & ~1;
-				FLAC__ASSERT(e1 <= end);
+				mm_sum = _mm_setzero_si128();
+				mm_bits = _mm_setzero_si128();
+				r_sample_init = residual_sample;
+				e1 = (residual_sample + 3) & ~3; e3 = end & ~3;
+				if(e1 > end)
+					e1 = end; /* try flac -l 1 -b 16 and you'll be here */
 
+				/* assumption: residual[] is properly aligned so (residual + e1) is properly aligned too and _mm_loadu_si128() is fast */
 				for( ; residual_sample < e1; residual_sample++) {
-					mm_res = _mm_cvtsi32_si128(residual[residual_sample]); /*  0   0   0   r0 */
+					mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
 					mm_mask = _mm_srai_epi32(mm_res, 31);
 					mm_res = _mm_xor_si128(mm_res, mm_mask);
-					mm_res = _mm_sub_epi32(mm_res, mm_mask); /*  0   0   0  |r0|  ==   00   |r0_64| */
-					mm_sum = _mm_add_epi64(mm_sum, mm_res);
+					mm_res = _mm_sub_epi32(mm_res, mm_mask);
+					mm_sum = _mm_add_epi32(mm_sum, mm_res);
+					mm_bits = _mm_or_si128(mm_bits, mm_res);
 				}
 
-				for( ; residual_sample < e3; residual_sample+=2) {
-					mm_res = _mm_loadl_epi64((const __m128i*)(residual+residual_sample)); /*  0   0   r1  r0 */
+				for( ; residual_sample < e3; residual_sample+=4) {
+					mm_res = _mm_loadu_si128((const __m128i*)(residual+residual_sample));
 					mm_mask = _mm_srai_epi32(mm_res, 31);
 					mm_res = _mm_xor_si128(mm_res, mm_mask);
-					mm_res = _mm_sub_epi32(mm_res, mm_mask); /*  0   0  |r1|   |r0| */
-					mm_res = _mm_shuffle_epi32(mm_res, _MM_SHUFFLE(3,1,2,0)); /* 0  |r1|  0  |r0|  ==  |r1_64|  |r0_64|  */
-					mm_sum = _mm_add_epi64(mm_sum, mm_res);
+					mm_res = _mm_sub_epi32(mm_res, mm_mask);
+					mm_sum = _mm_add_epi32(mm_sum, mm_res);
+					mm_bits = _mm_or_si128(mm_bits, mm_res);
 				}
 
 				for( ; residual_sample < end; residual_sample++) {
@@ -128,11 +137,51 @@ void FLAC__precompute_partition_info_sums_intrin_sse2(const FLAC__int32 residual
 					mm_mask = _mm_srai_epi32(mm_res, 31);
 					mm_res = _mm_xor_si128(mm_res, mm_mask);
 					mm_res = _mm_sub_epi32(mm_res, mm_mask);
-					mm_sum = _mm_add_epi64(mm_sum, mm_res);
+					mm_sum = _mm_add_epi32(mm_sum, mm_res);
+					mm_bits = _mm_or_si128(mm_bits, mm_res);
 				}
 
-				mm_sum = _mm_add_epi64(mm_sum, _mm_srli_si128(mm_sum, 8));
-				_mm_storel_epi64((__m128i*)(abs_residual_partition_sums+partition), mm_sum);
+				mm_bits = _mm_or_si128(mm_bits, _mm_srli_si128(mm_bits, 8));
+				mm_bits = _mm_or_si128(mm_bits, _mm_srli_si128(mm_bits, 4));
+				if(FLAC__bitmath_ilog2((FLAC__uint32)_mm_cvtsi128_si32(mm_bits)|1) < threshold) { /* no overflow */
+					mm_sum = _mm_add_epi32(mm_sum, _mm_srli_si128(mm_sum, 8));
+					mm_sum = _mm_add_epi32(mm_sum, _mm_srli_si128(mm_sum, 4));
+					abs_residual_partition_sums[partition] = (FLAC__uint32)_mm_cvtsi128_si32(mm_sum);
+				}
+				else { /* have to pessimistically use 64 bits for accumulator */
+					mm_sum = _mm_setzero_si128();
+					residual_sample = r_sample_init; /* rewind */
+					e1 = (residual_sample + 1) & ~1; e3 = end & ~1;
+					FLAC__ASSERT(e1 <= end);
+
+					for( ; residual_sample < e1; residual_sample++) {
+						mm_res = _mm_cvtsi32_si128(residual[residual_sample]); /*  0   0   0   r0 */
+						mm_mask = _mm_srai_epi32(mm_res, 31);
+						mm_res = _mm_xor_si128(mm_res, mm_mask);
+						mm_res = _mm_sub_epi32(mm_res, mm_mask); /*  0   0   0  |r0|  ==   00   |r0_64| */
+						mm_sum = _mm_add_epi64(mm_sum, mm_res);
+					}
+
+					for( ; residual_sample < e3; residual_sample+=2) {
+						mm_res = _mm_loadl_epi64((const __m128i*)(residual+residual_sample)); /*  0   0   r1  r0 */
+						mm_mask = _mm_srai_epi32(mm_res, 31);
+						mm_res = _mm_xor_si128(mm_res, mm_mask);
+						mm_res = _mm_sub_epi32(mm_res, mm_mask); /*  0   0  |r1|   |r0| */
+						mm_res = _mm_shuffle_epi32(mm_res, _MM_SHUFFLE(3,1,2,0)); /* 0  |r1|  0  |r0|  ==  |r1_64|  |r0_64|  */
+						mm_sum = _mm_add_epi64(mm_sum, mm_res);
+					}
+
+					for( ; residual_sample < end; residual_sample++) {
+						mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
+						mm_mask = _mm_srai_epi32(mm_res, 31);
+						mm_res = _mm_xor_si128(mm_res, mm_mask);
+						mm_res = _mm_sub_epi32(mm_res, mm_mask);
+						mm_sum = _mm_add_epi64(mm_sum, mm_res);
+					}
+
+					mm_sum = _mm_add_epi64(mm_sum, _mm_srli_si128(mm_sum, 8));
+					_mm_storel_epi64((__m128i*)(abs_residual_partition_sums+partition), mm_sum);
+				}
 			}
 		}
 	}
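
The SSE2 path keeps the shift/xor/subtract idiom for the per-lane absolute value because _mm_abs_epi32 only exists from SSSE3 on. The same idiom as a self-contained sketch (the helper name is illustrative):

	#include <emmintrin.h> /* SSE2 */

	/* |x| for each 32-bit lane: mask = x >> 31 (arithmetic shift, all ones for
	 * negative lanes); |x| = (x ^ mask) - mask. An INT_MIN lane comes back as
	 * INT_MIN, the same caveat the scalar code notes for abs(). */
	static __m128i abs_epi32_sse2(__m128i x)
	{
		const __m128i mask = _mm_srai_epi32(x, 31);
		return _mm_sub_epi32(_mm_xor_si128(x, mask), mask);
	}
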
diff --git a/src/libFLAC/stream_encoder_intrin_ssse3.c b/src/libFLAC/stream_encoder_intrin_ssse3.c
index 2dbd18c..8340303 100644
--- a/src/libFLAC/stream_encoder_intrin_ssse3.c
+++ b/src/libFLAC/stream_encoder_intrin_ssse3.c
@@ -55,15 +55,16 @@ void FLAC__precompute_partition_info_sums_intrin_ssse3(const FLAC__int32 residua
 
 	/* first do max_partition_order */
 	{
+		const unsigned threshold = 32 - FLAC__bitmath_ilog2(default_partition_samples);
 		unsigned partition, residual_sample, end = (unsigned)(-(int)predictor_order);
 		unsigned e1, e3;
 		__m128i mm_res, mm_sum;
 
-		if(FLAC__bitmath_ilog2(default_partition_samples) + bps + FLAC__MAX_EXTRA_RESIDUAL_BPS < 32) {
+		if(bps + FLAC__MAX_EXTRA_RESIDUAL_BPS <= threshold) {
 			for(partition = residual_sample = 0; partition < partitions; partition++) {
 				end += default_partition_samples;
-				mm_sum = _mm_setzero_si128();
 
+				mm_sum = _mm_setzero_si128();
 				e1 = (residual_sample + 3) & ~3; e3 = end & ~3;
 				if(e1 > end)
 					e1 = end; /* try flac -l 1 -b 16 and you'll be here */
@@ -71,7 +72,7 @@ void FLAC__precompute_partition_info_sums_intrin_ssse3(const FLAC__int32 residua
 				/* assumption: residual[] is properly aligned so (residual + e1) is properly aligned too and _mm_loadu_si128() is fast */
 				for( ; residual_sample < e1; residual_sample++) {
 					mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
-					mm_res = _mm_abs_epi32(mm_res); /* abs(INT_MIN) is undefined, but if the residual is INT_MIN we have bigger problems */
+					mm_res = _mm_abs_epi32(mm_res);
 					mm_sum = _mm_add_epi32(mm_sum, mm_res);
 				}
 
@@ -92,35 +93,77 @@ void FLAC__precompute_partition_info_sums_intrin_ssse3(const FLAC__int32 residua
 				abs_residual_partition_sums[partition] = (FLAC__uint32)_mm_cvtsi128_si32(mm_sum);
 			}
 		}
-		else { /* have to pessimistically use 64 bits for accumulator */
+		else { /* still try to use 32-bit math */
+			unsigned r_sample_init;
+			__m128i mm_bits;
+
 			for(partition = residual_sample = 0; partition < partitions; partition++) {
 				end += default_partition_samples;
-				mm_sum = _mm_setzero_si128();
 
-				e1 = (residual_sample + 1) & ~1; e3 = end & ~1;
-				FLAC__ASSERT(e1 <= end);
+				mm_sum = _mm_setzero_si128();
+				mm_bits = _mm_setzero_si128();
+				r_sample_init = residual_sample;
+				e1 = (residual_sample + 3) & ~3; e3 = end & ~3;
+				if(e1 > end)
+					e1 = end;
 
 				for( ; residual_sample < e1; residual_sample++) {
-					mm_res = _mm_cvtsi32_si128(residual[residual_sample]); /*  0   0   0   r0 */
-					mm_res = _mm_abs_epi32(mm_res); /*  0   0   0  |r0|  ==   00   |r0_64| */
-					mm_sum = _mm_add_epi64(mm_sum, mm_res);
+					mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
+					mm_res = _mm_abs_epi32(mm_res);
+					mm_sum = _mm_add_epi32(mm_sum, mm_res);
+					mm_bits = _mm_or_si128(mm_bits, mm_res);
 				}
 
-				for( ; residual_sample < e3; residual_sample+=2) {
-					mm_res = _mm_loadl_epi64((const __m128i*)(residual+residual_sample)); /*  0   0   r1  r0 */
-					mm_res = _mm_abs_epi32(mm_res); /*  0   0  |r1|   |r0| */
-					mm_res = _mm_shuffle_epi32(mm_res, _MM_SHUFFLE(3,1,2,0)); /* 0  |r1|  0  |r0|  ==  |r1_64|  |r0_64|  */
-					mm_sum = _mm_add_epi64(mm_sum, mm_res);
+				for( ; residual_sample < e3; residual_sample+=4) {
+					mm_res = _mm_loadu_si128((const __m128i*)(residual+residual_sample));
+					mm_res = _mm_abs_epi32(mm_res);
+					mm_sum = _mm_add_epi32(mm_sum, mm_res);
+					mm_bits = _mm_or_si128(mm_bits, mm_res);
 				}
 
 				for( ; residual_sample < end; residual_sample++) {
 					mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
 					mm_res = _mm_abs_epi32(mm_res);
-					mm_sum = _mm_add_epi64(mm_sum, mm_res);
+					mm_sum = _mm_add_epi32(mm_sum, mm_res);
+					mm_bits = _mm_or_si128(mm_bits, mm_res);
 				}
 
-				mm_sum = _mm_add_epi64(mm_sum, _mm_srli_si128(mm_sum, 8));
-				_mm_storel_epi64((__m128i*)(abs_residual_partition_sums+partition), mm_sum);
+				mm_bits = _mm_or_si128(mm_bits, _mm_srli_si128(mm_bits, 8));
+				mm_bits = _mm_or_si128(mm_bits, _mm_srli_si128(mm_bits, 4));
+				if(FLAC__bitmath_ilog2((FLAC__uint32)_mm_cvtsi128_si32(mm_bits)|1) < threshold) { /* no overflow */
+					mm_sum = _mm_hadd_epi32(mm_sum, mm_sum);
+					mm_sum = _mm_hadd_epi32(mm_sum, mm_sum);
+					abs_residual_partition_sums[partition] = (FLAC__uint32)_mm_cvtsi128_si32(mm_sum);
+				}
+				else { /* have to pessimistically use 64 bits for accumulator */
+					mm_sum = _mm_setzero_si128();
+
+					residual_sample = r_sample_init; /* rewind */
+					e1 = (residual_sample + 1) & ~1; e3 = end & ~1;
+					FLAC__ASSERT(e1 <= end);
+
+					for( ; residual_sample < e1; residual_sample++) {
+						mm_res = _mm_cvtsi32_si128(residual[residual_sample]); /*  0   0   0   r0 */
+						mm_res = _mm_abs_epi32(mm_res); /*  0   0   0  |r0|  ==   00   |r0_64| */
+						mm_sum = _mm_add_epi64(mm_sum, mm_res);
+					}
+
+					for( ; residual_sample < e3; residual_sample+=2) {
+						mm_res = _mm_loadl_epi64((const __m128i*)(residual+residual_sample)); /*  0   0   r1  r0 */
+						mm_res = _mm_abs_epi32(mm_res); /*  0   0  |r1|   |r0| */
+						mm_res = _mm_shuffle_epi32(mm_res, _MM_SHUFFLE(3,1,2,0)); /* 0  |r1|  0  |r0|  ==  |r1_64|  |r0_64|  */
+						mm_sum = _mm_add_epi64(mm_sum, mm_res);
+					}
+
+					for( ; residual_sample < end; residual_sample++) {
+						mm_res = _mm_cvtsi32_si128(residual[residual_sample]);
+						mm_res = _mm_abs_epi32(mm_res);
+						mm_sum = _mm_add_epi64(mm_sum, mm_res);
+					}
+
+					mm_sum = _mm_add_epi64(mm_sum, _mm_srli_si128(mm_sum, 8));
+					_mm_storel_epi64((__m128i*)(abs_residual_partition_sums+partition), mm_sum);
+				}
 			}
 		}
 	}
