Remove unnecessary memory alignment check when __SSE__ is used

Since the landing of https://pdfium.googlesource.com/pdfium/+/3522876d5291922ddc62bf1b70d02743b0850673, memory is guaranteed to be 16-byte aligned, so there is no need for this check.
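
As a side note (a generic sketch, not the actual pdfium allocator referenced above): one common way an allocator guarantees 16-byte alignment is to over-allocate, round the returned pointer up to the next 16-byte boundary, and stash the raw pointer just before the aligned block so it can be freed later.

    #include <stdint.h>
    #include <stdlib.h>

    /* Sketch only: over-allocate, round up to a 16-byte boundary, and keep
     * the raw pointer immediately before the aligned block for free(). */
    static void* sketch_aligned_alloc16(size_t size) {
        void* raw = malloc(size + 15 + sizeof(void*));
        if (!raw)
            return NULL;
        uintptr_t p = ((uintptr_t)raw + sizeof(void*) + 15) & ~(uintptr_t)15;
        ((void**)p)[-1] = raw;
        return (void*)p;
    }

    static void sketch_aligned_free16(void* p) {
        if (p)
            free(((void**)p)[-1]);
    }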

In addition, the removed code was causing a bug in M36: https://code.google.com/p/pdfium/issues/detail?id=27.
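
An observation from the removed hunk (not stated in the bug report above, so treat it as context only): the unaligned fallback branch transformed all n samples but never zeroed n, so the scalar loop after the #endif ran over the same samples a second time. Also for illustration only, if an alignment check were ever reintroduced, a portable form would inspect the address through uintptr_t rather than casting the pointers to OPJ_UINT32, which truncates them on 64-bit builds:

    #include <stdint.h>

    /* Illustration only, not part of this change: a portable 16-byte
     * alignment test. */
    static int sketch_is_aligned16(const void* p) {
        return ((uintptr_t)p & 15u) == 0;
    }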

BUG=None
R=thestig@chromium.org

Review URL: https://codereview.chromium.org/418563002
diff --git a/core/src/fxcodec/fx_libopenjpeg/libopenjpeg20/mct.c b/core/src/fxcodec/fx_libopenjpeg/libopenjpeg20/mct.c
index c4341ec..89f8222 100644
--- a/core/src/fxcodec/fx_libopenjpeg/libopenjpeg20/mct.c
+++ b/core/src/fxcodec/fx_libopenjpeg/libopenjpeg20/mct.c
@@ -150,58 +150,43 @@
 {
 	OPJ_UINT32 i;
 #ifdef __SSE__
-	// Mantis BUGID: 0056291. The address must be 16-byte aligned.
-	// TestFile: fuzz-signal_sigsegv_6e9e7f_5076_5265.pdf
-	if ((OPJ_UINT32)c0 % 16 == 0 && (OPJ_UINT32)c1 % 16 == 0 && (OPJ_UINT32)c2 % 16 == 0){
-		__m128 vrv, vgu, vgv, vbu;
-		vrv = _mm_set1_ps(1.402f);
-		vgu = _mm_set1_ps(0.34413f);
-		vgv = _mm_set1_ps(0.71414f);
-		vbu = _mm_set1_ps(1.772f);
-		for (i = 0; i < (n >> 3); ++i) {
-			__m128 vy, vu, vv;
-			__m128 vr, vg, vb;
+    __m128 vrv, vgu, vgv, vbu;
+    vrv = _mm_set1_ps(1.402f);
+    vgu = _mm_set1_ps(0.34413f);
+    vgv = _mm_set1_ps(0.71414f);
+    vbu = _mm_set1_ps(1.772f);
+    for (i = 0; i < (n >> 3); ++i) {
+        __m128 vy, vu, vv;
+        __m128 vr, vg, vb;
 
-			vy = _mm_load_ps(c0);
-			vu = _mm_load_ps(c1);
-			vv = _mm_load_ps(c2);
-			vr = _mm_add_ps(vy, _mm_mul_ps(vv, vrv));
-			vg = _mm_sub_ps(_mm_sub_ps(vy, _mm_mul_ps(vu, vgu)), _mm_mul_ps(vv, vgv));
-			vb = _mm_add_ps(vy, _mm_mul_ps(vu, vbu));
-			_mm_store_ps(c0, vr);
-			_mm_store_ps(c1, vg);
-			_mm_store_ps(c2, vb);
-			c0 += 4;
-			c1 += 4;
-			c2 += 4;
+        vy = _mm_load_ps(c0);
+        vu = _mm_load_ps(c1);
+        vv = _mm_load_ps(c2);
+        vr = _mm_add_ps(vy, _mm_mul_ps(vv, vrv));
+        vg = _mm_sub_ps(_mm_sub_ps(vy, _mm_mul_ps(vu, vgu)), _mm_mul_ps(vv, vgv));
+        vb = _mm_add_ps(vy, _mm_mul_ps(vu, vbu));
+        _mm_store_ps(c0, vr);
+        _mm_store_ps(c1, vg);
+        _mm_store_ps(c2, vb);
+        c0 += 4;
+        c1 += 4;
+        c2 += 4;
 
-			vy = _mm_load_ps(c0);
-			vu = _mm_load_ps(c1);
-			vv = _mm_load_ps(c2);
-			vr = _mm_add_ps(vy, _mm_mul_ps(vv, vrv));
-			vg = _mm_sub_ps(_mm_sub_ps(vy, _mm_mul_ps(vu, vgu)), _mm_mul_ps(vv, vgv));
-			vb = _mm_add_ps(vy, _mm_mul_ps(vu, vbu));
-			_mm_store_ps(c0, vr);
-			_mm_store_ps(c1, vg);
-			_mm_store_ps(c2, vb);
-			c0 += 4;
-			c1 += 4;
-			c2 += 4;
-		}
-		n &= 7;
-	} else {
-		for (i = 0; i < n; ++i) {
-			OPJ_FLOAT32 y = c0[i];
-			OPJ_FLOAT32 u = c1[i];
-			OPJ_FLOAT32 v = c2[i];
-			OPJ_FLOAT32 r = y + (v * 1.402f);
-			OPJ_FLOAT32 g = y - (u * 0.34413f) - (v * (0.71414f));
-			OPJ_FLOAT32 b = y + (u * 1.772f);
-			c0[i] = r;
-			c1[i] = g;
-			c2[i] = b;
-		}
-	}
+        vy = _mm_load_ps(c0);
+        vu = _mm_load_ps(c1);
+        vv = _mm_load_ps(c2);
+        vr = _mm_add_ps(vy, _mm_mul_ps(vv, vrv));
+        vg = _mm_sub_ps(_mm_sub_ps(vy, _mm_mul_ps(vu, vgu)), _mm_mul_ps(vv, vgv));
+        vb = _mm_add_ps(vy, _mm_mul_ps(vu, vbu));
+        _mm_store_ps(c0, vr);
+        _mm_store_ps(c1, vg);
+        _mm_store_ps(c2, vb);
+        c0 += 4;
+        c1 += 4;
+        c2 += 4;
+    }
+    n &= 7;
+
 #endif
 	for(i = 0; i < n; ++i) {
 		OPJ_FLOAT32 y = c0[i];