Add 16-bit fixed-point support for AVX2

This introduces an int16x16_m256i wrapper type (16 lanes of int16 in a
__m256i) and specializes the fixed-point primitives in fixedpoint_avx.h
for it, mirroring the existing int16x8_m128i SSE path. The SSE wrapper is
also made trivially constructible/destructible, with a to_int16x8_m128i()
helper replacing its explicit constructor, and test_fixedpoint.cc gains
coverage for the __m256i and int16x16_m256i paths.

Equivalent to the 16-bit SSE support added in:

https://github.com/talumbau/gemmlowp/commit/76de7f790a2d65b88a06a2fbe9cfb5e7a70aebb6#diff-a414deff691f120bc86c429de8e68319
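
For illustration only (not part of this patch): a minimal sketch of how the
new wrapper type is exercised through the generic fixed-point ops, assuming
a build in which GEMMLOWP_AVX2 is defined so that fixedpoint/fixedpoint.h
pulls in fixedpoint_avx.h and the gemmlowp checkout is on the include path.
int16x16_m256i, to_int16x16_m256i, Dup, SaturatingAdd and RoundingHalfSum
come from this patch; the surrounding test scaffolding is hypothetical.

    #include <cstdint>
    #include <cstdio>
    #include <immintrin.h>

    #include "fixedpoint/fixedpoint.h"

    int main() {
      using gemmlowp::int16x16_m256i;
      // Dup broadcasts one int16 value into all 16 lanes of the wrapper type.
      int16x16_m256i a = gemmlowp::Dup<int16x16_m256i>(12345);
      // to_int16x16_m256i wraps a raw __m256i, e.g. one loaded from memory.
      std::int16_t b_data[16];
      for (int i = 0; i < 16; ++i) {
        b_data[i] = static_cast<std::int16_t>(-100 * i);
      }
      int16x16_m256i b = gemmlowp::to_int16x16_m256i(
          _mm256_loadu_si256(reinterpret_cast<const __m256i*>(b_data)));
      // The new specializations act lane-wise, like their scalar counterparts.
      int16x16_m256i sum = gemmlowp::SaturatingAdd(a, b);
      int16x16_m256i half = gemmlowp::RoundingHalfSum(a, b);
      std::int16_t out_sum[16], out_half[16];
      _mm256_storeu_si256(reinterpret_cast<__m256i*>(out_sum), sum.v);
      _mm256_storeu_si256(reinterpret_cast<__m256i*>(out_half), half.v);
      for (int i = 0; i < 16; ++i) {
        std::printf("lane %2d: SaturatingAdd=%6d RoundingHalfSum=%6d\n", i,
                    out_sum[i], out_half[i]);
      }
      return 0;
    }
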
diff --git a/fixedpoint/fixedpoint_avx.h b/fixedpoint/fixedpoint_avx.h
index b6e8917..f3fe732 100644
--- a/fixedpoint/fixedpoint_avx.h
+++ b/fixedpoint/fixedpoint_avx.h
@@ -23,63 +23,133 @@
 
 namespace gemmlowp {
 
+struct int16x16_m256i {
+  __m256i v;
+};
+
+// Keep int16x16_m256i trivially constructible/destructible and provide an
+// easily optimized helper function for wrapping a __m256i.
+inline int16x16_m256i to_int16x16_m256i(__m256i w) {
+  int16x16_m256i r;
+  r.v = w;
+  return r;
+}
+
 template <>
 struct FixedPointRawTypeTraits<__m256i> {
   typedef std::int32_t ScalarRawType;
+  // TODO: A __m256i actually holds 8 int32 lanes, so this should either be
+  // changed to 8, or an int32x8_m256i wrapper should be added for that case.
   static const int kLanes = 4;
 };
 
 template <>
+struct FixedPointRawTypeTraits<int16x16_m256i> {
+  typedef std::int16_t ScalarRawType;
+  static const int kLanes = 16;
+};
+
+template <>
 inline __m256i BitAnd(__m256i a, __m256i b) {
   return _mm256_and_si256(a, b);
 }
 
 template <>
+inline int16x16_m256i BitAnd(int16x16_m256i a, int16x16_m256i b) {
+  return to_int16x16_m256i(_mm256_and_si256(a.v, b.v));
+}
+
+template <>
 inline __m256i BitOr(__m256i a, __m256i b) {
   return _mm256_or_si256(a, b);
 }
 
 template <>
+inline int16x16_m256i BitOr(int16x16_m256i a, int16x16_m256i b) {
+  return to_int16x16_m256i(_mm256_or_si256(a.v, b.v));
+}
+
+template <>
 inline __m256i BitXor(__m256i a, __m256i b) {
   return _mm256_xor_si256(a, b);
 }
 
 template <>
+inline int16x16_m256i BitXor(int16x16_m256i a, int16x16_m256i b) {
+  return to_int16x16_m256i(_mm256_xor_si256(a.v, b.v));
+}
+
+template <>
 inline __m256i BitNot(__m256i a) {
   return _mm256_andnot_si256(a, _mm256_set1_epi32(-1));
 }
 
 template <>
+inline int16x16_m256i BitNot(int16x16_m256i a) {
+  return to_int16x16_m256i(_mm256_andnot_si256(a.v, _mm256_set1_epi16(-1)));
+}
+
+template <>
 inline __m256i Add(__m256i a, __m256i b) {
   return _mm256_add_epi32(a, b);
 }
 
 template <>
+inline int16x16_m256i Add(int16x16_m256i a, int16x16_m256i b) {
+  return to_int16x16_m256i(_mm256_add_epi16(a.v, b.v));
+}
+
+template <>
 inline __m256i Mul(__m256i a, __m256i b) {
   return _mm256_mullo_epi32(a, b);
 }
 
 template <>
+inline int16x16_m256i Mul(int16x16_m256i a, int16x16_m256i b) {
+  return to_int16x16_m256i(_mm256_mullo_epi16(a.v, b.v));
+}
+
+template <>
 inline __m256i Sub(__m256i a, __m256i b) {
   return _mm256_sub_epi32(a, b);
 }
 
 template <>
+inline int16x16_m256i Sub(int16x16_m256i a, int16x16_m256i b) {
+  return to_int16x16_m256i(_mm256_sub_epi16(a.v, b.v));
+}
+
+template <>
 inline __m256i Neg(__m256i a) {
   return _mm256_sign_epi32(a, _mm256_set1_epi32(-1));
 }
 
 template <>
+inline int16x16_m256i Neg(int16x16_m256i a) {
+  return to_int16x16_m256i(_mm256_sign_epi16(a.v, _mm256_set1_epi16(-1)));
+}
+
+template <>
 inline __m256i ShiftLeft(__m256i a, int offset) {
   return _mm256_slli_epi32(a, offset);
 }
 
 template <>
+inline int16x16_m256i ShiftLeft(int16x16_m256i a, int offset) {
+  return to_int16x16_m256i(_mm256_slli_epi16(a.v, offset));
+}
+
+template <>
 inline __m256i ShiftRight(__m256i a, int offset) {
   return _mm256_srai_epi32(a, offset);
 }
 
 template <>
+inline int16x16_m256i ShiftRight(int16x16_m256i a, int offset) {
+  return to_int16x16_m256i(_mm256_srai_epi16(a.v, offset));
+}
+
+template <>
 inline __m256i SelectUsingMask(__m256i if_mask, __m256i then_val,
                                __m256i else_val) {
   return _mm256_castps_si256(_mm256_blendv_ps(_mm256_castsi256_ps(else_val),
@@ -88,45 +158,97 @@
 }
 
 template <>
+inline int16x16_m256i SelectUsingMask(int16x16_m256i if_mask,
+                                      int16x16_m256i then_val,
+                                      int16x16_m256i else_val) {
+  // Borrowed from Intel's arm_neon_sse.h header.
+  return to_int16x16_m256i(
+      _mm256_or_si256(_mm256_and_si256(if_mask.v, then_val.v),
+                      _mm256_andnot_si256(if_mask.v, else_val.v)));
+}
+
+template <>
 inline __m256i MaskIfEqual(__m256i a, __m256i b) {
   return _mm256_cmpeq_epi32(a, b);
 }
 
 template <>
+inline int16x16_m256i MaskIfEqual(int16x16_m256i a, int16x16_m256i b) {
+  return to_int16x16_m256i(_mm256_cmpeq_epi16(a.v, b.v));
+}
+
+template <>
 inline __m256i MaskIfNotEqual(__m256i a, __m256i b) {
   return BitNot(MaskIfEqual(a, b));
 }
 
 template <>
+inline int16x16_m256i MaskIfNotEqual(int16x16_m256i a, int16x16_m256i b) {
+  return BitNot(MaskIfEqual(a, b));
+}
+
+template <>
 inline __m256i MaskIfZero(__m256i a) {
   return MaskIfEqual(a, _mm256_set1_epi32(0));
 }
 
 template <>
+inline int16x16_m256i MaskIfZero(int16x16_m256i a) {
+  return MaskIfEqual(a, to_int16x16_m256i(_mm256_set1_epi16(0)));
+}
+
+template <>
 inline __m256i MaskIfNonZero(__m256i a) {
   return MaskIfNotEqual(a, _mm256_set1_epi32(0));
 }
 
 template <>
+inline int16x16_m256i MaskIfNonZero(int16x16_m256i a) {
+  return MaskIfNotEqual(a, to_int16x16_m256i(_mm256_set1_epi16(0)));
+}
+
+template <>
 inline __m256i MaskIfGreaterThan(__m256i a, __m256i b) {
   return _mm256_cmpgt_epi32(a, b);
 }
 
 template <>
+inline int16x16_m256i MaskIfGreaterThan(int16x16_m256i a, int16x16_m256i b) {
+  return to_int16x16_m256i(_mm256_cmpgt_epi16(a.v, b.v));
+}
+
+template <>
 inline __m256i MaskIfLessThan(__m256i a, __m256i b) {
   return _mm256_cmpgt_epi32(b, a);
 }
 
 template <>
+inline int16x16_m256i MaskIfLessThan(int16x16_m256i a, int16x16_m256i b) {
+  return to_int16x16_m256i(_mm256_cmpgt_epi16(b.v, a.v));
+}
+
+template <>
 inline __m256i MaskIfGreaterThanOrEqual(__m256i a, __m256i b) {
   return BitNot(MaskIfLessThan(a, b));
 }
 
 template <>
+inline int16x16_m256i MaskIfGreaterThanOrEqual(int16x16_m256i a,
+                                               int16x16_m256i b) {
+  return BitNot(MaskIfLessThan(a, b));
+}
+
+template <>
 inline __m256i MaskIfLessThanOrEqual(__m256i a, __m256i b) {
   return BitNot(MaskIfGreaterThan(a, b));
 }
 
+template <>
+inline int16x16_m256i MaskIfLessThanOrEqual(int16x16_m256i a,
+                                            int16x16_m256i b) {
+  return BitNot(MaskIfGreaterThan(a, b));
+}
+
 /* Assumptions:
    - All and Any are used on masks.
    - masks are all_ones for true lanes, all_zeroes otherwise.
@@ -139,11 +261,21 @@
 }
 
 template <>
+inline bool All(int16x16_m256i a) {
+  return _mm256_testc_si256(a.v, _mm256_set1_epi16(-1));  // all lanes set
+}
+
+template <>
 inline bool Any(__m256i a) {
   return BitNot(_mm256_testz_si256(a, a));
 }
 
 template <>
+inline bool Any(int16x16_m256i a) {
+  return !_mm256_testz_si256(a.v, a.v);  // some lane is non-zero
+}
+
+template <>
 inline __m256i RoundingHalfSum(__m256i a, __m256i b) {
   /* __m256i round_bit_mask, a_over_2, b_over_2, round_bit, sum; */
   /* We divide the inputs before the add to avoid the overflow and costly test
@@ -171,6 +303,17 @@
 }
 
 template <>
+inline int16x16_m256i RoundingHalfSum(int16x16_m256i a, int16x16_m256i b) {
+  // Borrowed from Intel's arm_neon_sse.h header.
+  __m256i constant_neg_32768 = _mm256_set1_epi16(-32768);
+  __m256i a_unsigned = _mm256_sub_epi16(a.v, constant_neg_32768);
+  __m256i b_unsigned = _mm256_sub_epi16(b.v, constant_neg_32768);
+  __m256i avg_unsigned = _mm256_avg_epu16(a_unsigned, b_unsigned);
+  __m256i avg = _mm256_add_epi16(avg_unsigned, constant_neg_32768);
+  return to_int16x16_m256i(avg);
+}
+
+template <>
 inline __m256i SaturatingRoundingDoublingHighMul(__m256i a, __m256i b) {
   __m256i min, saturation_mask, a0_a2, a1_a3, b0_b2, b1_b3;
   __m256i a0b0_a2b2, a1b1_a3b3, a0b0_a2b2_rounded, a1b1_a3b3_rounded;
@@ -209,10 +352,33 @@
 }
 
 template <>
+inline int16x16_m256i SaturatingRoundingDoublingHighMul(int16x16_m256i a,
+                                                        int16x16_m256i b) {
+  // Use _mm256_mulhrs_epi16 then saturate with a bit-operation,
+  // borrowed from Intel's arm_neon_sse.h header.
+  __m256i result_unsaturated = _mm256_mulhrs_epi16(a.v, b.v);
+  __m256i saturation_mask =
+      _mm256_cmpeq_epi16(result_unsaturated, _mm256_set1_epi16(0x8000));
+  __m256i result = _mm256_xor_si256(result_unsaturated, saturation_mask);
+  return to_int16x16_m256i(result);
+}
+
+template <>
 inline __m256i Dup<__m256i>(std::int32_t x) {
   return _mm256_set1_epi32(x);
 }
 
+template <>
+inline int16x16_m256i Dup<int16x16_m256i>(std::int16_t x) {
+  return to_int16x16_m256i(_mm256_set1_epi16(x));
+}
+
+// So far this is only needed for int16.
+template <>
+inline int16x16_m256i SaturatingAdd(int16x16_m256i a, int16x16_m256i b) {
+  return to_int16x16_m256i(_mm256_adds_epi16(a.v, b.v));
+}
+
 }  // end namespace gemmlowp
 
 #endif  // GEMMLOWP_INTERNAL_FIXEDPOINT_AVX_H_
diff --git a/fixedpoint/fixedpoint_sse.h b/fixedpoint/fixedpoint_sse.h
index a1fae32..fbaa26a 100644
--- a/fixedpoint/fixedpoint_sse.h
+++ b/fixedpoint/fixedpoint_sse.h
@@ -32,13 +32,17 @@
 // data type, int16x8_m128i, that wraps __m128i while being a separate
 // type.
 struct int16x8_m128i {
-  int16x8_m128i() {}
-  explicit int16x8_m128i(__m128i w) : v(w) {}
-  ~int16x8_m128i() {}
-
   __m128i v;
 };
 
+// Keep int16x8_m128i trivially constructible/destructible and provide an
+// easily optimized helper function for wrapping a __m128i.
+inline int16x8_m128i to_int16x8_m128i(__m128i w) {
+  int16x8_m128i r;
+  r.v = w;
+  return r;
+}
+
 template <>
 struct FixedPointRawTypeTraits<__m128i> {
   typedef std::int32_t ScalarRawType;
@@ -58,7 +62,7 @@
 
 template <>
 inline int16x8_m128i BitAnd(int16x8_m128i a, int16x8_m128i b) {
-  return int16x8_m128i(_mm_and_si128(a.v, b.v));
+  return to_int16x8_m128i(_mm_and_si128(a.v, b.v));
 }
 
 template <>
@@ -68,7 +72,7 @@
 
 template <>
 inline int16x8_m128i BitOr(int16x8_m128i a, int16x8_m128i b) {
-  return int16x8_m128i(_mm_or_si128(a.v, b.v));
+  return to_int16x8_m128i(_mm_or_si128(a.v, b.v));
 }
 
 template <>
@@ -78,7 +82,7 @@
 
 template <>
 inline int16x8_m128i BitXor(int16x8_m128i a, int16x8_m128i b) {
-  return int16x8_m128i(_mm_xor_si128(a.v, b.v));
+  return to_int16x8_m128i(_mm_xor_si128(a.v, b.v));
 }
 
 template <>
@@ -88,7 +92,7 @@
 
 template <>
 inline int16x8_m128i BitNot(int16x8_m128i a) {
-  return int16x8_m128i(_mm_andnot_si128(a.v, _mm_set1_epi16(-1)));
+  return to_int16x8_m128i(_mm_andnot_si128(a.v, _mm_set1_epi16(-1)));
 }
 
 template <>
@@ -98,7 +102,7 @@
 
 template <>
 inline int16x8_m128i Add(int16x8_m128i a, int16x8_m128i b) {
-  return int16x8_m128i(_mm_add_epi16(a.v, b.v));
+  return to_int16x8_m128i(_mm_add_epi16(a.v, b.v));
 }
 
 template <>
@@ -108,7 +112,7 @@
 
 template <>
 inline int16x8_m128i Mul(int16x8_m128i a, int16x8_m128i b) {
-  return int16x8_m128i(_mm_mullo_epi16(a.v, b.v));
+  return to_int16x8_m128i(_mm_mullo_epi16(a.v, b.v));
 }
 
 template <>
@@ -118,7 +122,7 @@
 
 template <>
 inline int16x8_m128i Sub(int16x8_m128i a, int16x8_m128i b) {
-  return int16x8_m128i(_mm_sub_epi16(a.v, b.v));
+  return to_int16x8_m128i(_mm_sub_epi16(a.v, b.v));
 }
 
 template <>
@@ -128,7 +132,7 @@
 
 template <>
 inline int16x8_m128i Neg(int16x8_m128i a) {
-  return int16x8_m128i(_mm_sign_epi16(a.v, _mm_set1_epi16(-1)));
+  return to_int16x8_m128i(_mm_sign_epi16(a.v, _mm_set1_epi16(-1)));
 }
 
 template <>
@@ -138,7 +142,7 @@
 
 template <>
 inline int16x8_m128i ShiftLeft(int16x8_m128i a, int offset) {
-  return int16x8_m128i(_mm_slli_epi16(a.v, offset));
+  return to_int16x8_m128i(_mm_slli_epi16(a.v, offset));
 }
 
 template <>
@@ -148,7 +152,7 @@
 
 template <>
 inline int16x8_m128i ShiftRight(int16x8_m128i a, int offset) {
-  return int16x8_m128i(_mm_srai_epi16(a.v, offset));
+  return to_int16x8_m128i(_mm_srai_epi16(a.v, offset));
 }
 
 template <>
@@ -164,7 +168,7 @@
                                      int16x8_m128i then_val,
                                      int16x8_m128i else_val) {
   // borrowed from Intel's arm_neon_sse.h header.
-  return int16x8_m128i(SelectUsingMask(if_mask.v, then_val.v, else_val.v));
+  return to_int16x8_m128i(SelectUsingMask(if_mask.v, then_val.v, else_val.v));
 }
 
 template <>
@@ -174,7 +178,7 @@
 
 template <>
 inline int16x8_m128i MaskIfEqual(int16x8_m128i a, int16x8_m128i b) {
-  return int16x8_m128i(_mm_cmpeq_epi16(a.v, b.v));
+  return to_int16x8_m128i(_mm_cmpeq_epi16(a.v, b.v));
 }
 
 template <>
@@ -194,7 +198,7 @@
 
 template <>
 inline int16x8_m128i MaskIfZero(int16x8_m128i a) {
-  return MaskIfEqual(a, int16x8_m128i(_mm_set1_epi16(0)));
+  return MaskIfEqual(a, to_int16x8_m128i(_mm_set1_epi16(0)));
 }
 
 template <>
@@ -204,7 +208,7 @@
 
 template <>
 inline int16x8_m128i MaskIfNonZero(int16x8_m128i a) {
-  return MaskIfNotEqual(a, int16x8_m128i(_mm_set1_epi16(0)));
+  return MaskIfNotEqual(a, to_int16x8_m128i(_mm_set1_epi16(0)));
 }
 
 template <>
@@ -214,7 +218,7 @@
 
 template <>
 inline int16x8_m128i MaskIfGreaterThan(int16x8_m128i a, int16x8_m128i b) {
-  return int16x8_m128i(_mm_cmpgt_epi16(a.v, b.v));
+  return to_int16x8_m128i(_mm_cmpgt_epi16(a.v, b.v));
 }
 
 template <>
@@ -224,7 +228,7 @@
 
 template <>
 inline int16x8_m128i MaskIfLessThan(int16x8_m128i a, int16x8_m128i b) {
-  return int16x8_m128i(_mm_cmplt_epi16(a.v, b.v));
+  return to_int16x8_m128i(_mm_cmplt_epi16(a.v, b.v));
 }
 
 template <>
@@ -310,7 +314,7 @@
   __m128i b_unsigned = _mm_sub_epi16(b.v, constant_neg_32768);
   __m128i avg_unsigned = _mm_avg_epu16(a_unsigned, b_unsigned);
   __m128i avg = _mm_add_epi16(avg_unsigned, constant_neg_32768);
-  return int16x8_m128i(avg);
+  return to_int16x8_m128i(avg);
 }
 
 template <>
@@ -360,7 +364,7 @@
   __m128i saturation_mask =
       _mm_cmpeq_epi16(result_unsaturated, _mm_set1_epi16(0x8000));
   __m128i result = _mm_xor_si128(result_unsaturated, saturation_mask);
-  return int16x8_m128i(result);
+  return to_int16x8_m128i(result);
 }
 
 template <>
@@ -370,13 +374,13 @@
 
 template <>
 inline int16x8_m128i Dup<int16x8_m128i>(std::int16_t x) {
-  return int16x8_m128i(_mm_set1_epi16(x));
+  return to_int16x8_m128i(_mm_set1_epi16(x));
 }
 
 // So far this is only needed for int16.
 template <>
 inline int16x8_m128i SaturatingAdd(int16x8_m128i a, int16x8_m128i b) {
-  return int16x8_m128i(_mm_adds_epi16(a.v, b.v));
+  return to_int16x8_m128i(_mm_adds_epi16(a.v, b.v));
 }
 
 }  // end namespace gemmlowp
diff --git a/test/test_fixedpoint.cc b/test/test_fixedpoint.cc
index a45ec17..44e6fae 100644
--- a/test/test_fixedpoint.cc
+++ b/test/test_fixedpoint.cc
@@ -17,14 +17,14 @@
 #define GEMMLOWP_ENABLE_FIXEDPOINT_CONSTANTS_CHECKS
 
 #include <algorithm>
+#include <cinttypes>
 #include <cmath>
 #include <cstdio>
-#include <cinttypes>
 #include <random>
 #include <vector>
-#include "test.h"
 
 #include "../fixedpoint/fixedpoint.h"
+#include "test.h"
 
 namespace gemmlowp {
 
@@ -67,7 +67,8 @@
 }
 template <>
 int16x8_m128i Load<int16x8_m128i>(const std::int16_t* src) {
-  return int16x8_m128i(_mm_loadu_si128(reinterpret_cast<const __m128i*>(src)));
+  return to_int16x8_m128i(
+      _mm_loadu_si128(reinterpret_cast<const __m128i*>(src)));
 }
 template <>
 void Store<int16x8_m128i>(std::int16_t* dst, int16x8_m128i v) {
@@ -93,6 +94,29 @@
 }
 #endif
 
+#ifdef GEMMLOWP_AVX2
+template <>
+__m256i Load<__m256i>(const std::int32_t* src) {
+  return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src));
+}
+
+template <>
+int16x16_m256i Load<int16x16_m256i>(const std::int16_t* src) {
+  return to_int16x16_m256i(
+      _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src)));
+}
+
+template <>
+void Store<__m256i>(std::int32_t* dst, __m256i v) {
+  _mm256_storeu_si256(reinterpret_cast<__m256i*>(dst), v);
+}
+
+template <>
+void Store<int16x16_m256i>(std::int16_t* dst, int16x16_m256i v) {
+  _mm256_storeu_si256(reinterpret_cast<__m256i*>(dst), v.v);
+}
+#endif
+
 template <typename tSimdType>
 class TestFixedPoint {
  public:
@@ -571,4 +595,9 @@
   gemmlowp::TestFixedPoint<v4i32>().RunTests("MSA v4i32");
   gemmlowp::TestFixedPoint<v8i16>().RunTests("MSA v8i16");
 #endif
+#ifdef GEMMLOWP_AVX2
+  gemmlowp::TestFixedPoint<__m256i>().RunTests("AVX2 __m256i");
+  gemmlowp::TestFixedPoint<gemmlowp::int16x16_m256i>().RunTests(
+      "AVX2 int16x16_m256i");
+#endif
 }
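
Note (not part of the patch): the +/-32768 bias in the new int16x16_m256i
RoundingHalfSum works because shifting both inputs into unsigned range lets
_mm256_avg_epu16 compute an exact rounding average (it keeps a 17-bit
intermediate sum), and shifting back leaves (a + b + 1) >> 1 evaluated on
the original signed values without overflow. A scalar model of the same
steps, with a hypothetical name, for checking the reasoning:

    #include <cassert>
    #include <cstdint>

    // Hypothetical scalar model of the biased-average trick used by the
    // int16x16_m256i RoundingHalfSum specialization.
    std::int16_t RoundingHalfSumModel(std::int16_t a, std::int16_t b) {
      // Subtracting -32768 mod 2^16 flips the sign bit, mapping the signed
      // range [-32768, 32767] onto the unsigned range [0, 65535].
      std::uint32_t a_biased = static_cast<std::uint16_t>(a) ^ 0x8000u;
      std::uint32_t b_biased = static_cast<std::uint16_t>(b) ^ 0x8000u;
      // What _mm256_avg_epu16 computes per lane: (x + y + 1) >> 1 with a
      // 17-bit intermediate, so the sum cannot overflow.
      std::uint32_t avg_biased = (a_biased + b_biased + 1) >> 1;
      // Adding -32768 back flips the sign bit again; the two biases cancel
      // and the result is (a + b + 1) >> 1 on the original signed values.
      return static_cast<std::int16_t>(avg_biased ^ 0x8000u);
    }

    int main() {
      assert(RoundingHalfSumModel(12345, -6789) == 2778);  // (5556 + 1) >> 1
      assert(RoundingHalfSumModel(-3, -2) == -2);          // (-5 + 1) >> 1
      return 0;
    }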