Diffstat (limited to 'src/include/glm/simd')
-rw-r--r--  src/include/glm/simd/common.h             |  240
-rw-r--r--  src/include/glm/simd/exponential.h        |   20
-rw-r--r--  src/include/glm/simd/geometric.h          |  124
-rw-r--r--  src/include/glm/simd/integer.h            |  115
-rw-r--r--  src/include/glm/simd/matrix.h             | 1028
-rw-r--r--  src/include/glm/simd/neon.h               |  155
-rw-r--r--  src/include/glm/simd/packing.h            |    8
-rw-r--r--  src/include/glm/simd/platform.h           |  398
-rw-r--r--  src/include/glm/simd/trigonometric.h      |    9
-rw-r--r--  src/include/glm/simd/vector_relational.h  |    8
10 files changed, 2105 insertions, 0 deletions
diff --git a/src/include/glm/simd/common.h b/src/include/glm/simd/common.h
new file mode 100644
index 0000000..9352dc6
--- /dev/null
+++ b/src/include/glm/simd/common.h
@@ -0,0 +1,240 @@
+/// @ref simd
+/// @file glm/simd/common.h
+
+#pragma once
+
+#include "platform.h"
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_add(glm_f32vec4 a, glm_f32vec4 b)
+{
+ return _mm_add_ps(a, b);
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_add(glm_f32vec4 a, glm_f32vec4 b)
+{
+ return _mm_add_ss(a, b);
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_sub(glm_f32vec4 a, glm_f32vec4 b)
+{
+ return _mm_sub_ps(a, b);
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_sub(glm_f32vec4 a, glm_f32vec4 b)
+{
+ return _mm_sub_ss(a, b);
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_mul(glm_f32vec4 a, glm_f32vec4 b)
+{
+ return _mm_mul_ps(a, b);
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_mul(glm_f32vec4 a, glm_f32vec4 b)
+{
+ return _mm_mul_ss(a, b);
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_div(glm_f32vec4 a, glm_f32vec4 b)
+{
+ return _mm_div_ps(a, b);
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_div(glm_f32vec4 a, glm_f32vec4 b)
+{
+ return _mm_div_ss(a, b);
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_div_lowp(glm_f32vec4 a, glm_f32vec4 b)
+{
+ return glm_vec4_mul(a, _mm_rcp_ps(b));
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_swizzle_xyzw(glm_f32vec4 a)
+{
+# if GLM_ARCH & GLM_ARCH_AVX2_BIT
+ return _mm_permute_ps(a, _MM_SHUFFLE(3, 2, 1, 0));
+# else
+ return _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 2, 1, 0));
+# endif
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_fma(glm_f32vec4 a, glm_f32vec4 b, glm_f32vec4 c)
+{
+# if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && !(GLM_COMPILER & GLM_COMPILER_CLANG)
+ return _mm_fmadd_ss(a, b, c);
+# else
+ return _mm_add_ss(_mm_mul_ss(a, b), c);
+# endif
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_fma(glm_f32vec4 a, glm_f32vec4 b, glm_f32vec4 c)
+{
+# if (GLM_ARCH & GLM_ARCH_AVX2_BIT) && !(GLM_COMPILER & GLM_COMPILER_CLANG)
+ return _mm_fmadd_ps(a, b, c);
+# else
+ return glm_vec4_add(glm_vec4_mul(a, b), c);
+# endif
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_abs(glm_f32vec4 x)
+{
+ return _mm_and_ps(x, _mm_castsi128_ps(_mm_set1_epi32(0x7FFFFFFF)));
+}
+
+GLM_FUNC_QUALIFIER glm_ivec4 glm_ivec4_abs(glm_ivec4 x)
+{
+# if GLM_ARCH & GLM_ARCH_SSSE3_BIT
+ return _mm_sign_epi32(x, x);
+# else
+ glm_ivec4 const sgn0 = _mm_srai_epi32(x, 31);
+ glm_ivec4 const inv0 = _mm_xor_si128(x, sgn0);
+ glm_ivec4 const sub0 = _mm_sub_epi32(inv0, sgn0);
+ return sub0;
+# endif
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_sign(glm_vec4 x)
+{
+ glm_vec4 const zro0 = _mm_setzero_ps();
+ glm_vec4 const cmp0 = _mm_cmplt_ps(x, zro0);
+ glm_vec4 const cmp1 = _mm_cmpgt_ps(x, zro0);
+ glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(-1.0f));
+ glm_vec4 const and1 = _mm_and_ps(cmp1, _mm_set1_ps(1.0f));
+ glm_vec4 const or0 = _mm_or_ps(and0, and1);
+ return or0;
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_round(glm_vec4 x)
+{
+# if GLM_ARCH & GLM_ARCH_SSE41_BIT
+ return _mm_round_ps(x, _MM_FROUND_TO_NEAREST_INT);
+# else
+ glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(int(0x80000000)));
+ glm_vec4 const and0 = _mm_and_ps(sgn0, x);
+ glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f));
+ glm_vec4 const add0 = glm_vec4_add(x, or0);
+ glm_vec4 const sub0 = glm_vec4_sub(add0, or0);
+ return sub0;
+# endif
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_floor(glm_vec4 x)
+{
+# if GLM_ARCH & GLM_ARCH_SSE41_BIT
+ return _mm_floor_ps(x);
+# else
+ glm_vec4 const rnd0 = glm_vec4_round(x);
+ glm_vec4 const cmp0 = _mm_cmplt_ps(x, rnd0);
+ glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f));
+ glm_vec4 const sub0 = glm_vec4_sub(rnd0, and0);
+ return sub0;
+# endif
+}
+
+/* trunc TODO
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_trunc(glm_vec4 x)
+{
+ return glm_vec4();
+}
+*/
+
+// roundEven: same magic-number trick as the glm_vec4_round fallback; under the default round-to-nearest mode, halfway cases round to even
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_roundEven(glm_vec4 x)
+{
+ glm_vec4 const sgn0 = _mm_castsi128_ps(_mm_set1_epi32(int(0x80000000)));
+ glm_vec4 const and0 = _mm_and_ps(sgn0, x);
+ glm_vec4 const or0 = _mm_or_ps(and0, _mm_set_ps1(8388608.0f));
+ glm_vec4 const add0 = glm_vec4_add(x, or0);
+ glm_vec4 const sub0 = glm_vec4_sub(add0, or0);
+ return sub0;
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_ceil(glm_vec4 x)
+{
+# if GLM_ARCH & GLM_ARCH_SSE41_BIT
+ return _mm_ceil_ps(x);
+# else
+ glm_vec4 const rnd0 = glm_vec4_round(x);
+ glm_vec4 const cmp0 = _mm_cmpgt_ps(x, rnd0);
+ glm_vec4 const and0 = _mm_and_ps(cmp0, _mm_set1_ps(1.0f));
+ glm_vec4 const add0 = glm_vec4_add(rnd0, and0);
+ return add0;
+# endif
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_fract(glm_vec4 x)
+{
+ glm_vec4 const flr0 = glm_vec4_floor(x);
+ glm_vec4 const sub0 = glm_vec4_sub(x, flr0);
+ return sub0;
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mod(glm_vec4 x, glm_vec4 y)
+{
+ glm_vec4 const div0 = glm_vec4_div(x, y);
+ glm_vec4 const flr0 = glm_vec4_floor(div0);
+ glm_vec4 const mul0 = glm_vec4_mul(y, flr0);
+ glm_vec4 const sub0 = glm_vec4_sub(x, mul0);
+ return sub0;
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_clamp(glm_vec4 v, glm_vec4 minVal, glm_vec4 maxVal)
+{
+ glm_vec4 const min0 = _mm_min_ps(v, maxVal);
+ glm_vec4 const max0 = _mm_max_ps(min0, minVal);
+ return max0;
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_mix(glm_vec4 v1, glm_vec4 v2, glm_vec4 a)
+{
+ glm_vec4 const sub0 = glm_vec4_sub(_mm_set1_ps(1.0f), a);
+ glm_vec4 const mul0 = glm_vec4_mul(v1, sub0);
+ glm_vec4 const mad0 = glm_vec4_fma(v2, a, mul0);
+ return mad0;
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_step(glm_vec4 edge, glm_vec4 x)
+{
+ glm_vec4 const cmp0 = _mm_cmple_ps(edge, x);
+ return _mm_and_ps(cmp0, _mm_set1_ps(1.0f)); // per component: 1.0f where edge <= x, 0.0f elsewhere
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_smoothstep(glm_vec4 edge0, glm_vec4 edge1, glm_vec4 x)
+{
+ glm_vec4 const sub0 = glm_vec4_sub(x, edge0);
+ glm_vec4 const sub1 = glm_vec4_sub(edge1, edge0);
+ glm_vec4 const div0 = glm_vec4_div(sub0, sub1);
+ glm_vec4 const clp0 = glm_vec4_clamp(div0, _mm_setzero_ps(), _mm_set1_ps(1.0f));
+ glm_vec4 const mul0 = glm_vec4_mul(_mm_set1_ps(2.0f), clp0);
+ glm_vec4 const sub2 = glm_vec4_sub(_mm_set1_ps(3.0f), mul0);
+ glm_vec4 const mul1 = glm_vec4_mul(clp0, clp0);
+ glm_vec4 const mul2 = glm_vec4_mul(mul1, sub2);
+ return mul2;
+}
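
Per lane, the sequence above is the standard Hermite smoothstep. A scalar reference for one component (illustrative):

    // Scalar reference for one lane of glm_vec4_smoothstep.
    static float smoothstep_ref(float edge0, float edge1, float x)
    {
        float t = (x - edge0) / (edge1 - edge0);
        if(t < 0.0f) t = 0.0f;
        if(t > 1.0f) t = 1.0f;
        return t * t * (3.0f - 2.0f * t);
    }
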
+
+// Agner Fog method
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_nan(glm_vec4 x)
+{
+ glm_ivec4 const t1 = _mm_castps_si128(x); // reinterpret as 32-bit integer
+ glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1)); // shift out sign bit
+ glm_ivec4 const t3 = _mm_set1_epi32(int(0xFF000000)); // exponent mask
+ glm_ivec4 const t4 = _mm_and_si128(t2, t3); // exponent
+ glm_ivec4 const t5 = _mm_andnot_si128(t3, t2); // fraction
+ glm_ivec4 const Equal = _mm_cmpeq_epi32(t3, t4); // exponent is all 1s
+ glm_ivec4 const FracZero = _mm_cmpeq_epi32(t5, _mm_setzero_si128()); // fraction is 0
+ glm_ivec4 const And = _mm_andnot_si128(FracZero, Equal); // exponent all 1s and fraction != 0
+ return _mm_castsi128_ps(And);
+}
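
The trick above (from Agner Fog's vector class library) shifts out the sign bit so that NaN reduces to "exponent bits all ones and fraction bits non-zero". A scalar sketch of the same bit test (illustrative; uses memcpy as the portable bit cast):

    #include <cstring>

    // Scalar sketch of the NaN bit test: drop the sign, then check
    // exponent == all ones and fraction != 0.
    static bool is_nan_bits(float x)
    {
        unsigned int u;
        std::memcpy(&u, &x, sizeof u);
        u <<= 1; // shift out the sign bit
        return (u & 0xFF000000u) == 0xFF000000u && (u & 0x00FFFFFFu) != 0u;
    }
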
+
+// Agner Fog method
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_inf(glm_vec4 x)
+{
+ glm_ivec4 const t1 = _mm_castps_si128(x); // reinterpret as 32-bit integer
+ glm_ivec4 const t2 = _mm_sll_epi32(t1, _mm_cvtsi32_si128(1)); // shift out sign bit
+ return _mm_castsi128_ps(_mm_cmpeq_epi32(t2, _mm_set1_epi32(int(0xFF000000)))); // exponent is all 1s, fraction is 0
+}
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/src/include/glm/simd/exponential.h b/src/include/glm/simd/exponential.h
new file mode 100644
index 0000000..006db1e
--- /dev/null
+++ b/src/include/glm/simd/exponential.h
@@ -0,0 +1,20 @@
+/// @ref simd
+/// @file glm/simd/exponential.h
+
+#pragma once
+
+#include "platform.h"
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec1_sqrt_lowp(glm_f32vec4 x)
+{
+ return _mm_mul_ss(_mm_rsqrt_ss(x), x);
+}
+
+GLM_FUNC_QUALIFIER glm_f32vec4 glm_vec4_sqrt_lowp(glm_f32vec4 x)
+{
+ return _mm_mul_ps(_mm_rsqrt_ps(x), x);
+}
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
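
_mm_rsqrt_ps (like _mm_rcp_ps) is only a ~12-bit approximation, which is why these helpers carry the _lowp suffix. Where more precision is needed, the usual refinement is one Newton-Raphson step on the reciprocal square root; a sketch (illustrative, not part of this patch):

    #include <xmmintrin.h>

    // One Newton-Raphson refinement of _mm_rsqrt_ps:
    // y' = y * (1.5 - 0.5 * x * y * y), roughly doubling the accurate bits.
    static __m128 rsqrt_nr(__m128 x)
    {
        __m128 const y  = _mm_rsqrt_ps(x);
        __m128 const hx = _mm_mul_ps(_mm_set1_ps(0.5f), x);
        __m128 const y2 = _mm_mul_ps(y, y);
        return _mm_mul_ps(y, _mm_sub_ps(_mm_set1_ps(1.5f), _mm_mul_ps(hx, y2)));
    }
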
diff --git a/src/include/glm/simd/geometric.h b/src/include/glm/simd/geometric.h
new file mode 100644
index 0000000..fde57e9
--- /dev/null
+++ b/src/include/glm/simd/geometric.h
@@ -0,0 +1,124 @@
+/// @ref simd
+/// @file glm/simd/geometric.h
+
+#pragma once
+
+#include "common.h"
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+GLM_FUNC_DECL glm_vec4 glm_vec4_dot(glm_vec4 v1, glm_vec4 v2);
+GLM_FUNC_DECL glm_vec4 glm_vec1_dot(glm_vec4 v1, glm_vec4 v2);
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_length(glm_vec4 x)
+{
+ glm_vec4 const dot0 = glm_vec4_dot(x, x);
+ glm_vec4 const sqt0 = _mm_sqrt_ps(dot0);
+ return sqt0;
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_distance(glm_vec4 p0, glm_vec4 p1)
+{
+ glm_vec4 const sub0 = _mm_sub_ps(p0, p1);
+ glm_vec4 const len0 = glm_vec4_length(sub0);
+ return len0;
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_dot(glm_vec4 v1, glm_vec4 v2)
+{
+# if GLM_ARCH & GLM_ARCH_AVX_BIT
+ return _mm_dp_ps(v1, v2, 0xff);
+# elif GLM_ARCH & GLM_ARCH_SSE3_BIT
+ glm_vec4 const mul0 = _mm_mul_ps(v1, v2);
+ glm_vec4 const hadd0 = _mm_hadd_ps(mul0, mul0);
+ glm_vec4 const hadd1 = _mm_hadd_ps(hadd0, hadd0);
+ return hadd1;
+# else
+ glm_vec4 const mul0 = _mm_mul_ps(v1, v2);
+ glm_vec4 const swp0 = _mm_shuffle_ps(mul0, mul0, _MM_SHUFFLE(2, 3, 0, 1));
+ glm_vec4 const add0 = _mm_add_ps(mul0, swp0);
+ glm_vec4 const swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3));
+ glm_vec4 const add1 = _mm_add_ps(add0, swp1);
+ return add1;
+# endif
+}
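
In the SSE2 fallback, swp0 swaps lanes pairwise so add0 holds (x+y, x+y, z+w, z+w); reversing and adding once more leaves the full horizontal sum in every lane. The scalar value being broadcast is simply:

    // Scalar reference: every lane of glm_vec4_dot's result holds this sum.
    static float dot4_ref(float const a[4], float const b[4])
    {
        return a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3];
    }
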
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec1_dot(glm_vec4 v1, glm_vec4 v2)
+{
+# if GLM_ARCH & GLM_ARCH_AVX_BIT
+ return _mm_dp_ps(v1, v2, 0xff);
+# elif GLM_ARCH & GLM_ARCH_SSE3_BIT
+ glm_vec4 const mul0 = _mm_mul_ps(v1, v2);
+ glm_vec4 const had0 = _mm_hadd_ps(mul0, mul0);
+ glm_vec4 const had1 = _mm_hadd_ps(had0, had0);
+ return had1;
+# else
+ glm_vec4 const mul0 = _mm_mul_ps(v1, v2);
+ glm_vec4 const mov0 = _mm_movehl_ps(mul0, mul0);
+ glm_vec4 const add0 = _mm_add_ps(mov0, mul0);
+ glm_vec4 const swp1 = _mm_shuffle_ps(add0, add0, 1);
+ glm_vec4 const add1 = _mm_add_ss(add0, swp1);
+ return add1;
+# endif
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_cross(glm_vec4 v1, glm_vec4 v2)
+{
+ glm_vec4 const swp0 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 0, 2, 1));
+ glm_vec4 const swp1 = _mm_shuffle_ps(v1, v1, _MM_SHUFFLE(3, 1, 0, 2));
+ glm_vec4 const swp2 = _mm_shuffle_ps(v2, v2, _MM_SHUFFLE(3, 0, 2, 1));
+ glm_vec4 const swp3 = _mm_shuffle_ps(v2, v2, _MM_SHUFFLE(3, 1, 0, 2));
+ glm_vec4 const mul0 = _mm_mul_ps(swp0, swp3);
+ glm_vec4 const mul1 = _mm_mul_ps(swp1, swp2);
+ glm_vec4 const sub0 = _mm_sub_ps(mul0, mul1);
+ return sub0;
+}
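
The four shuffles implement cross(v1, v2) = v1.yzx * v2.zxy - v1.zxy * v2.yzx; the w lane conveniently cancels to zero (v1.w * v2.w - v1.w * v2.w). Scalar reference for the xyz lanes (illustrative):

    // Scalar reference for the xyz lanes of glm_vec4_cross.
    static void cross3_ref(float const a[3], float const b[3], float out[3])
    {
        out[0] = a[1] * b[2] - a[2] * b[1];
        out[1] = a[2] * b[0] - a[0] * b[2];
        out[2] = a[0] * b[1] - a[1] * b[0];
    }
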
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_normalize(glm_vec4 v)
+{
+ glm_vec4 const dot0 = glm_vec4_dot(v, v);
+ glm_vec4 const isr0 = _mm_rsqrt_ps(dot0);
+ glm_vec4 const mul0 = _mm_mul_ps(v, isr0);
+ return mul0;
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_faceforward(glm_vec4 N, glm_vec4 I, glm_vec4 Nref)
+{
+ glm_vec4 const dot0 = glm_vec4_dot(Nref, I);
+ glm_vec4 const sgn0 = glm_vec4_sign(dot0);
+ glm_vec4 const mul0 = _mm_mul_ps(sgn0, _mm_set1_ps(-1.0f));
+ glm_vec4 const mul1 = _mm_mul_ps(N, mul0);
+ return mul1;
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_reflect(glm_vec4 I, glm_vec4 N)
+{
+ glm_vec4 const dot0 = glm_vec4_dot(N, I);
+ glm_vec4 const mul0 = _mm_mul_ps(N, dot0);
+ glm_vec4 const mul1 = _mm_mul_ps(mul0, _mm_set1_ps(2.0f));
+ glm_vec4 const sub0 = _mm_sub_ps(I, mul1);
+ return sub0;
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_vec4_refract(glm_vec4 I, glm_vec4 N, glm_vec4 eta)
+{
+ glm_vec4 const dot0 = glm_vec4_dot(N, I);
+ glm_vec4 const mul0 = _mm_mul_ps(eta, eta);
+ glm_vec4 const mul1 = _mm_mul_ps(dot0, dot0);
+ glm_vec4 const sub0 = _mm_sub_ps(_mm_set1_ps(1.0f), mul1);
+ glm_vec4 const mul2 = _mm_mul_ps(mul0, sub0);
+ glm_vec4 const k = _mm_sub_ps(_mm_set1_ps(1.0f), mul2); // k = 1 - eta^2 * (1 - dot(N, I)^2)
+
+ if(_mm_movemask_ps(_mm_cmplt_ss(k, _mm_setzero_ps())) != 0) // k < 0: total internal reflection
+  return _mm_set1_ps(0.0f);
+
+ glm_vec4 const sqt0 = _mm_sqrt_ps(k);
+ glm_vec4 const mad0 = glm_vec4_fma(eta, dot0, sqt0);
+ glm_vec4 const mul4 = _mm_mul_ps(mad0, N);
+ glm_vec4 const mul5 = _mm_mul_ps(eta, I);
+ glm_vec4 const sub2 = _mm_sub_ps(mul5, mul4);
+
+ return sub2;
+}
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/src/include/glm/simd/integer.h b/src/include/glm/simd/integer.h
new file mode 100644
index 0000000..8bb410e
--- /dev/null
+++ b/src/include/glm/simd/integer.h
@@ -0,0 +1,115 @@
+/// @ref simd
+/// @file glm/simd/integer.h
+
+#pragma once
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+GLM_FUNC_QUALIFIER glm_uvec4 glm_i128_interleave(glm_uvec4 x)
+{
+ glm_uvec4 const Mask4 = _mm_set1_epi32(0x0000FFFF);
+ glm_uvec4 const Mask3 = _mm_set1_epi32(0x00FF00FF);
+ glm_uvec4 const Mask2 = _mm_set1_epi32(0x0F0F0F0F);
+ glm_uvec4 const Mask1 = _mm_set1_epi32(0x33333333);
+ glm_uvec4 const Mask0 = _mm_set1_epi32(0x55555555);
+
+ glm_uvec4 Reg1;
+ glm_uvec4 Reg2;
+
+ // REG1 = x;
+ // REG2 = y;
+ //Reg1 = _mm_unpacklo_epi64(x, y);
+ Reg1 = x;
+
+ //REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
+ //REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
+ Reg2 = _mm_slli_si128(Reg1, 2);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask4);
+
+ //REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
+ //REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
+ Reg2 = _mm_slli_si128(Reg1, 1);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask3);
+
+ //REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+ //REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+ Reg2 = _mm_slli_epi32(Reg1, 4);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask2);
+
+ //REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333);
+ //REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333);
+ Reg2 = _mm_slli_epi32(Reg1, 2);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask1);
+
+ //REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555);
+ //REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555);
+ Reg2 = _mm_slli_epi32(Reg1, 1);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask0);
+
+ //return REG1 | (REG2 << 1);
+ Reg2 = _mm_slli_epi32(Reg1, 1);
+ Reg2 = _mm_srli_si128(Reg2, 8);
+ Reg1 = _mm_or_si128(Reg1, Reg2);
+
+ return Reg1;
+}
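
This is the Morton-code "part 1 by 1" bit spread performed on a whole SSE register at a time; the inline comments carry the scalar 64-bit version. For reference, the 32-bit scalar form of one spread pass (illustrative):

    // Spread the low 16 bits of x into the even bit positions, leaving
    // the odd positions free for a second interleaved value.
    static unsigned int part1by1(unsigned int x)
    {
        x &= 0x0000FFFFu;
        x = (x | (x << 8)) & 0x00FF00FFu;
        x = (x | (x << 4)) & 0x0F0F0F0Fu;
        x = (x | (x << 2)) & 0x33333333u;
        x = (x | (x << 1)) & 0x55555555u;
        return x;
    }
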
+
+GLM_FUNC_QUALIFIER glm_uvec4 glm_i128_interleave2(glm_uvec4 x, glm_uvec4 y)
+{
+ glm_uvec4 const Mask4 = _mm_set1_epi32(0x0000FFFF);
+ glm_uvec4 const Mask3 = _mm_set1_epi32(0x00FF00FF);
+ glm_uvec4 const Mask2 = _mm_set1_epi32(0x0F0F0F0F);
+ glm_uvec4 const Mask1 = _mm_set1_epi32(0x33333333);
+ glm_uvec4 const Mask0 = _mm_set1_epi32(0x55555555);
+
+ glm_uvec4 Reg1;
+ glm_uvec4 Reg2;
+
+ // REG1 = x;
+ // REG2 = y;
+ Reg1 = _mm_unpacklo_epi64(x, y);
+
+ //REG1 = ((REG1 << 16) | REG1) & glm::uint64(0x0000FFFF0000FFFF);
+ //REG2 = ((REG2 << 16) | REG2) & glm::uint64(0x0000FFFF0000FFFF);
+ Reg2 = _mm_slli_si128(Reg1, 2);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask4);
+
+ //REG1 = ((REG1 << 8) | REG1) & glm::uint64(0x00FF00FF00FF00FF);
+ //REG2 = ((REG2 << 8) | REG2) & glm::uint64(0x00FF00FF00FF00FF);
+ Reg2 = _mm_slli_si128(Reg1, 1);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask3);
+
+ //REG1 = ((REG1 << 4) | REG1) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+ //REG2 = ((REG2 << 4) | REG2) & glm::uint64(0x0F0F0F0F0F0F0F0F);
+ Reg2 = _mm_slli_epi32(Reg1, 4);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask2);
+
+ //REG1 = ((REG1 << 2) | REG1) & glm::uint64(0x3333333333333333);
+ //REG2 = ((REG2 << 2) | REG2) & glm::uint64(0x3333333333333333);
+ Reg2 = _mm_slli_epi32(Reg1, 2);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask1);
+
+ //REG1 = ((REG1 << 1) | REG1) & glm::uint64(0x5555555555555555);
+ //REG2 = ((REG2 << 1) | REG2) & glm::uint64(0x5555555555555555);
+ Reg2 = _mm_slli_epi32(Reg1, 1);
+ Reg1 = _mm_or_si128(Reg2, Reg1);
+ Reg1 = _mm_and_si128(Reg1, Mask0);
+
+ //return REG1 | (REG2 << 1);
+ Reg2 = _mm_slli_epi32(Reg1, 1);
+ Reg2 = _mm_srli_si128(Reg2, 8);
+ Reg1 = _mm_or_si128(Reg1, Reg2);
+
+ return Reg1;
+}
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/src/include/glm/simd/matrix.h b/src/include/glm/simd/matrix.h
new file mode 100644
index 0000000..9cc629b
--- /dev/null
+++ b/src/include/glm/simd/matrix.h
@@ -0,0 +1,1028 @@
+/// @ref simd
+/// @file glm/simd/matrix.h
+
+#pragma once
+
+#include "geometric.h"
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+GLM_FUNC_QUALIFIER void glm_mat4_matrixCompMult(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4])
+{
+ out[0] = _mm_mul_ps(in1[0], in2[0]);
+ out[1] = _mm_mul_ps(in1[1], in2[1]);
+ out[2] = _mm_mul_ps(in1[2], in2[2]);
+ out[3] = _mm_mul_ps(in1[3], in2[3]);
+}
+
+GLM_FUNC_QUALIFIER void glm_mat4_add(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4])
+{
+ out[0] = _mm_add_ps(in1[0], in2[0]);
+ out[1] = _mm_add_ps(in1[1], in2[1]);
+ out[2] = _mm_add_ps(in1[2], in2[2]);
+ out[3] = _mm_add_ps(in1[3], in2[3]);
+}
+
+GLM_FUNC_QUALIFIER void glm_mat4_sub(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4])
+{
+ out[0] = _mm_sub_ps(in1[0], in2[0]);
+ out[1] = _mm_sub_ps(in1[1], in2[1]);
+ out[2] = _mm_sub_ps(in1[2], in2[2]);
+ out[3] = _mm_sub_ps(in1[3], in2[3]);
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_mul_vec4(glm_vec4 const m[4], glm_vec4 v)
+{
+ __m128 v0 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 v1 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 v2 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 v3 = _mm_shuffle_ps(v, v, _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 m0 = _mm_mul_ps(m[0], v0);
+ __m128 m1 = _mm_mul_ps(m[1], v1);
+ __m128 m2 = _mm_mul_ps(m[2], v2);
+ __m128 m3 = _mm_mul_ps(m[3], v3);
+
+ __m128 a0 = _mm_add_ps(m0, m1);
+ __m128 a1 = _mm_add_ps(m2, m3);
+ __m128 a2 = _mm_add_ps(a0, a1);
+
+ return a2;
+}
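
Since GLM matrices are column-major, m * v is the linear combination m[0]*v.x + m[1]*v.y + m[2]*v.z + m[3]*v.w, which is exactly the four broadcasts and multiplies above. A small usage sketch (values illustrative):

    // Transform a point by a translation matrix built from columns.
    static glm_vec4 demo_transform(void)
    {
        glm_vec4 const m[4] = {
            _mm_setr_ps(1.0f, 0.0f, 0.0f, 0.0f),
            _mm_setr_ps(0.0f, 1.0f, 0.0f, 0.0f),
            _mm_setr_ps(0.0f, 0.0f, 1.0f, 0.0f),
            _mm_setr_ps(3.0f, 4.0f, 5.0f, 1.0f) // translation column
        };
        // Result is (4, 6, 8, 1): the point (1, 2, 3) translated by (3, 4, 5).
        return glm_mat4_mul_vec4(m, _mm_setr_ps(1.0f, 2.0f, 3.0f, 1.0f));
    }
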
+
+GLM_FUNC_QUALIFIER __m128 glm_vec4_mul_mat4(glm_vec4 v, glm_vec4 const m[4])
+{
+ __m128 i0 = m[0];
+ __m128 i1 = m[1];
+ __m128 i2 = m[2];
+ __m128 i3 = m[3];
+
+ __m128 m0 = _mm_mul_ps(v, i0);
+ __m128 m1 = _mm_mul_ps(v, i1);
+ __m128 m2 = _mm_mul_ps(v, i2);
+ __m128 m3 = _mm_mul_ps(v, i3);
+
+ __m128 u0 = _mm_unpacklo_ps(m0, m1);
+ __m128 u1 = _mm_unpackhi_ps(m0, m1);
+ __m128 a0 = _mm_add_ps(u0, u1);
+
+ __m128 u2 = _mm_unpacklo_ps(m2, m3);
+ __m128 u3 = _mm_unpackhi_ps(m2, m3);
+ __m128 a1 = _mm_add_ps(u2, u3);
+
+ __m128 f0 = _mm_movelh_ps(a0, a1);
+ __m128 f1 = _mm_movehl_ps(a1, a0);
+ __m128 f2 = _mm_add_ps(f0, f1);
+
+ return f2;
+}
+
+GLM_FUNC_QUALIFIER void glm_mat4_mul(glm_vec4 const in1[4], glm_vec4 const in2[4], glm_vec4 out[4])
+{
+ {
+ __m128 e0 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 e1 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 e2 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 e3 = _mm_shuffle_ps(in2[0], in2[0], _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 m0 = _mm_mul_ps(in1[0], e0);
+ __m128 m1 = _mm_mul_ps(in1[1], e1);
+ __m128 m2 = _mm_mul_ps(in1[2], e2);
+ __m128 m3 = _mm_mul_ps(in1[3], e3);
+
+ __m128 a0 = _mm_add_ps(m0, m1);
+ __m128 a1 = _mm_add_ps(m2, m3);
+ __m128 a2 = _mm_add_ps(a0, a1);
+
+ out[0] = a2;
+ }
+
+ {
+ __m128 e0 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 e1 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 e2 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 e3 = _mm_shuffle_ps(in2[1], in2[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 m0 = _mm_mul_ps(in1[0], e0);
+ __m128 m1 = _mm_mul_ps(in1[1], e1);
+ __m128 m2 = _mm_mul_ps(in1[2], e2);
+ __m128 m3 = _mm_mul_ps(in1[3], e3);
+
+ __m128 a0 = _mm_add_ps(m0, m1);
+ __m128 a1 = _mm_add_ps(m2, m3);
+ __m128 a2 = _mm_add_ps(a0, a1);
+
+ out[1] = a2;
+ }
+
+ {
+ __m128 e0 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 e1 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 e2 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 e3 = _mm_shuffle_ps(in2[2], in2[2], _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 m0 = _mm_mul_ps(in1[0], e0);
+ __m128 m1 = _mm_mul_ps(in1[1], e1);
+ __m128 m2 = _mm_mul_ps(in1[2], e2);
+ __m128 m3 = _mm_mul_ps(in1[3], e3);
+
+ __m128 a0 = _mm_add_ps(m0, m1);
+ __m128 a1 = _mm_add_ps(m2, m3);
+ __m128 a2 = _mm_add_ps(a0, a1);
+
+ out[2] = a2;
+ }
+
+ {
+ __m128 e0 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 e1 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 e2 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 e3 = _mm_shuffle_ps(in2[3], in2[3], _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 m0 = _mm_mul_ps(in1[0], e0);
+ __m128 m1 = _mm_mul_ps(in1[1], e1);
+ __m128 m2 = _mm_mul_ps(in1[2], e2);
+ __m128 m3 = _mm_mul_ps(in1[3], e3);
+
+ __m128 a0 = _mm_add_ps(m0, m1);
+ __m128 a1 = _mm_add_ps(m2, m3);
+ __m128 a2 = _mm_add_ps(a0, a1);
+
+ out[3] = a2;
+ }
+}
+
+GLM_FUNC_QUALIFIER void glm_mat4_transpose(glm_vec4 const in[4], glm_vec4 out[4])
+{
+ __m128 tmp0 = _mm_shuffle_ps(in[0], in[1], 0x44);
+ __m128 tmp2 = _mm_shuffle_ps(in[0], in[1], 0xEE);
+ __m128 tmp1 = _mm_shuffle_ps(in[2], in[3], 0x44);
+ __m128 tmp3 = _mm_shuffle_ps(in[2], in[3], 0xEE);
+
+ out[0] = _mm_shuffle_ps(tmp0, tmp1, 0x88);
+ out[1] = _mm_shuffle_ps(tmp0, tmp1, 0xDD);
+ out[2] = _mm_shuffle_ps(tmp2, tmp3, 0x88);
+ out[3] = _mm_shuffle_ps(tmp2, tmp3, 0xDD);
+}
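
The immediates decode as _MM_SHUFFLE(1,0,1,0) = 0x44, _MM_SHUFFLE(3,2,3,2) = 0xEE, _MM_SHUFFLE(2,0,2,0) = 0x88 and _MM_SHUFFLE(3,1,3,1) = 0xDD. The same transpose is often written with unpack/move intrinsics, as in SSE's _MM_TRANSPOSE4_PS macro; an equivalent formulation (illustrative, not part of this patch):

    // Unpack-based 4x4 transpose, equivalent to the shuffle version above.
    static void mat4_transpose_unpack(glm_vec4 const in[4], glm_vec4 out[4])
    {
        __m128 const t0 = _mm_unpacklo_ps(in[0], in[1]); // a0 b0 a1 b1
        __m128 const t1 = _mm_unpacklo_ps(in[2], in[3]); // c0 d0 c1 d1
        __m128 const t2 = _mm_unpackhi_ps(in[0], in[1]); // a2 b2 a3 b3
        __m128 const t3 = _mm_unpackhi_ps(in[2], in[3]); // c2 d2 c3 d3
        out[0] = _mm_movelh_ps(t0, t1); // a0 b0 c0 d0
        out[1] = _mm_movehl_ps(t1, t0); // a1 b1 c1 d1
        out[2] = _mm_movelh_ps(t2, t3); // a2 b2 c2 d2
        out[3] = _mm_movehl_ps(t3, t2); // a3 b3 c3 d3
    }
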
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant_highp(glm_vec4 const in[4])
+{
+ __m128 Fac0;
+ {
+ // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+ // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+ // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3];
+ // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac0 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 Fac1;
+ {
+ // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+ // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+ // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3];
+ // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac1 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+
+ __m128 Fac2;
+ {
+ // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+ // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+ // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2];
+ // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac2 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 Fac3;
+ {
+ // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+ // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+ // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3];
+ // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac3 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 Fac4;
+ {
+ // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+ // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+ // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2];
+ // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac4 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 Fac5;
+ {
+ // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+ // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+ // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1];
+ // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac5 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f);
+ __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f);
+
+ // m[1][0]
+ // m[0][0]
+ // m[0][0]
+ // m[0][0]
+ __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0));
+
+ // m[1][1]
+ // m[0][1]
+ // m[0][1]
+ // m[0][1]
+ __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0));
+
+ // m[1][2]
+ // m[0][2]
+ // m[0][2]
+ // m[0][2]
+ __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0));
+
+ // m[1][3]
+ // m[0][3]
+ // m[0][3]
+ // m[0][3]
+ __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3));
+ __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0));
+
+ // col0
+ // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]),
+ // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]),
+ // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]),
+ // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]),
+ __m128 Mul00 = _mm_mul_ps(Vec1, Fac0);
+ __m128 Mul01 = _mm_mul_ps(Vec2, Fac1);
+ __m128 Mul02 = _mm_mul_ps(Vec3, Fac2);
+ __m128 Sub00 = _mm_sub_ps(Mul00, Mul01);
+ __m128 Add00 = _mm_add_ps(Sub00, Mul02);
+ __m128 Inv0 = _mm_mul_ps(SignB, Add00);
+
+ // col1
+ // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]),
+ // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]),
+ // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]),
+ // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]),
+ __m128 Mul03 = _mm_mul_ps(Vec0, Fac0);
+ __m128 Mul04 = _mm_mul_ps(Vec2, Fac3);
+ __m128 Mul05 = _mm_mul_ps(Vec3, Fac4);
+ __m128 Sub01 = _mm_sub_ps(Mul03, Mul04);
+ __m128 Add01 = _mm_add_ps(Sub01, Mul05);
+ __m128 Inv1 = _mm_mul_ps(SignA, Add01);
+
+ // col2
+ // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]),
+ // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]),
+ // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]),
+ // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]),
+ __m128 Mul06 = _mm_mul_ps(Vec0, Fac1);
+ __m128 Mul07 = _mm_mul_ps(Vec1, Fac3);
+ __m128 Mul08 = _mm_mul_ps(Vec3, Fac5);
+ __m128 Sub02 = _mm_sub_ps(Mul06, Mul07);
+ __m128 Add02 = _mm_add_ps(Sub02, Mul08);
+ __m128 Inv2 = _mm_mul_ps(SignB, Add02);
+
+ // col3
+ // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]),
+ // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]),
+ // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]),
+ // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3]));
+ __m128 Mul09 = _mm_mul_ps(Vec0, Fac2);
+ __m128 Mul10 = _mm_mul_ps(Vec1, Fac4);
+ __m128 Mul11 = _mm_mul_ps(Vec2, Fac5);
+ __m128 Sub03 = _mm_sub_ps(Mul09, Mul10);
+ __m128 Add03 = _mm_add_ps(Sub03, Mul11);
+ __m128 Inv3 = _mm_mul_ps(SignA, Add03);
+
+ __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0));
+
+ // valType Determinant = m[0][0] * Inverse[0][0]
+ // + m[0][1] * Inverse[1][0]
+ // + m[0][2] * Inverse[2][0]
+ // + m[0][3] * Inverse[3][0];
+ __m128 Det0 = glm_vec4_dot(in[0], Row2);
+ return Det0;
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant_lowp(glm_vec4 const m[4])
+{
+ // _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(
+
+ //T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+ //T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+ //T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+ //T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+ //T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+ //T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+
+ // First 2 columns
+ __m128 Swp2A = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(0, 1, 1, 2)));
+ __m128 Swp3A = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(3, 2, 3, 3)));
+ __m128 MulA = _mm_mul_ps(Swp2A, Swp3A);
+
+ // Second 2 columns
+ __m128 Swp2B = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(3, 2, 3, 3)));
+ __m128 Swp3B = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(0, 1, 1, 2)));
+ __m128 MulB = _mm_mul_ps(Swp2B, Swp3B);
+
+ // Columns subtraction
+ __m128 SubE = _mm_sub_ps(MulA, MulB);
+
+ // Last 2 rows
+ __m128 Swp2C = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[2]), _MM_SHUFFLE(0, 0, 1, 2)));
+ __m128 Swp3C = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[3]), _MM_SHUFFLE(1, 2, 0, 0)));
+ __m128 MulC = _mm_mul_ps(Swp2C, Swp3C);
+ __m128 SubF = _mm_sub_ps(_mm_movehl_ps(MulC, MulC), MulC);
+
+ //vec<4, T, Q> DetCof(
+ // + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02),
+ // - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04),
+ // + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05),
+ // - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05));
+
+ __m128 SubFacA = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubE), _MM_SHUFFLE(2, 1, 0, 0)));
+ __m128 SwpFacA = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(0, 0, 0, 1)));
+ __m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA);
+
+ __m128 SubTmpB = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(0, 0, 3, 1));
+ __m128 SubFacB = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubTmpB), _MM_SHUFFLE(3, 1, 1, 0)));//SubF[0], SubE[3], SubE[3], SubE[1];
+ __m128 SwpFacB = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(1, 1, 2, 2)));
+ __m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB);
+
+ __m128 SubRes = _mm_sub_ps(MulFacA, MulFacB);
+
+ __m128 SubTmpC = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(1, 0, 2, 2));
+ __m128 SubFacC = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(SubTmpC), _MM_SHUFFLE(3, 3, 2, 0)));
+ __m128 SwpFacC = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(m[1]), _MM_SHUFFLE(2, 3, 3, 3)));
+ __m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC);
+
+ __m128 AddRes = _mm_add_ps(SubRes, MulFacC);
+ __m128 DetCof = _mm_mul_ps(AddRes, _mm_setr_ps( 1.0f,-1.0f, 1.0f,-1.0f));
+
+ //return m[0][0] * DetCof[0]
+ // + m[0][1] * DetCof[1]
+ // + m[0][2] * DetCof[2]
+ // + m[0][3] * DetCof[3];
+
+ return glm_vec4_dot(m[0], DetCof);
+}
+
+GLM_FUNC_QUALIFIER glm_vec4 glm_mat4_determinant(glm_vec4 const m[4])
+{
+ // _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(add)
+
+ //T SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+ //T SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+ //T SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+ //T SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+ //T SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+ //T SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+
+ // First 2 columns
+ __m128 Swp2A = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 1, 1, 2));
+ __m128 Swp3A = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(3, 2, 3, 3));
+ __m128 MulA = _mm_mul_ps(Swp2A, Swp3A);
+
+ // Second 2 columns
+ __m128 Swp2B = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(3, 2, 3, 3));
+ __m128 Swp3B = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(0, 1, 1, 2));
+ __m128 MulB = _mm_mul_ps(Swp2B, Swp3B);
+
+ // Columns subtraction
+ __m128 SubE = _mm_sub_ps(MulA, MulB);
+
+ // Last 2 rows
+ __m128 Swp2C = _mm_shuffle_ps(m[2], m[2], _MM_SHUFFLE(0, 0, 1, 2));
+ __m128 Swp3C = _mm_shuffle_ps(m[3], m[3], _MM_SHUFFLE(1, 2, 0, 0));
+ __m128 MulC = _mm_mul_ps(Swp2C, Swp3C);
+ __m128 SubF = _mm_sub_ps(_mm_movehl_ps(MulC, MulC), MulC);
+
+ //vec<4, T, Q> DetCof(
+ // + (m[1][1] * SubFactor00 - m[1][2] * SubFactor01 + m[1][3] * SubFactor02),
+ // - (m[1][0] * SubFactor00 - m[1][2] * SubFactor03 + m[1][3] * SubFactor04),
+ // + (m[1][0] * SubFactor01 - m[1][1] * SubFactor03 + m[1][3] * SubFactor05),
+ // - (m[1][0] * SubFactor02 - m[1][1] * SubFactor04 + m[1][2] * SubFactor05));
+
+ __m128 SubFacA = _mm_shuffle_ps(SubE, SubE, _MM_SHUFFLE(2, 1, 0, 0));
+ __m128 SwpFacA = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(0, 0, 0, 1));
+ __m128 MulFacA = _mm_mul_ps(SwpFacA, SubFacA);
+
+ __m128 SubTmpB = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(0, 0, 3, 1));
+ __m128 SubFacB = _mm_shuffle_ps(SubTmpB, SubTmpB, _MM_SHUFFLE(3, 1, 1, 0));//SubF[0], SubE[3], SubE[3], SubE[1];
+ __m128 SwpFacB = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(1, 1, 2, 2));
+ __m128 MulFacB = _mm_mul_ps(SwpFacB, SubFacB);
+
+ __m128 SubRes = _mm_sub_ps(MulFacA, MulFacB);
+
+ __m128 SubTmpC = _mm_shuffle_ps(SubE, SubF, _MM_SHUFFLE(1, 0, 2, 2));
+ __m128 SubFacC = _mm_shuffle_ps(SubTmpC, SubTmpC, _MM_SHUFFLE(3, 3, 2, 0));
+ __m128 SwpFacC = _mm_shuffle_ps(m[1], m[1], _MM_SHUFFLE(2, 3, 3, 3));
+ __m128 MulFacC = _mm_mul_ps(SwpFacC, SubFacC);
+
+ __m128 AddRes = _mm_add_ps(SubRes, MulFacC);
+ __m128 DetCof = _mm_mul_ps(AddRes, _mm_setr_ps( 1.0f,-1.0f, 1.0f,-1.0f));
+
+ //return m[0][0] * DetCof[0]
+ // + m[0][1] * DetCof[1]
+ // + m[0][2] * DetCof[2]
+ // + m[0][3] * DetCof[3];
+
+ return glm_vec4_dot(m[0], DetCof);
+}
+
+GLM_FUNC_QUALIFIER void glm_mat4_inverse(glm_vec4 const in[4], glm_vec4 out[4])
+{
+ __m128 Fac0;
+ {
+ // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+ // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+ // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3];
+ // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac0 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 Fac1;
+ {
+ // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+ // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+ // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3];
+ // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac1 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+
+ __m128 Fac2;
+ {
+ // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+ // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+ // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2];
+ // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac2 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 Fac3;
+ {
+ // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+ // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+ // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3];
+ // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac3 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 Fac4;
+ {
+ // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+ // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+ // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2];
+ // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac4 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 Fac5;
+ {
+ // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+ // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+ // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1];
+ // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac5 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f);
+ __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f);
+
+ // m[1][0]
+ // m[0][0]
+ // m[0][0]
+ // m[0][0]
+ __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0));
+
+ // m[1][1]
+ // m[0][1]
+ // m[0][1]
+ // m[0][1]
+ __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0));
+
+ // m[1][2]
+ // m[0][2]
+ // m[0][2]
+ // m[0][2]
+ __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0));
+
+ // m[1][3]
+ // m[0][3]
+ // m[0][3]
+ // m[0][3]
+ __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3));
+ __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0));
+
+ // col0
+ // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]),
+ // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]),
+ // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]),
+ // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]),
+ __m128 Mul00 = _mm_mul_ps(Vec1, Fac0);
+ __m128 Mul01 = _mm_mul_ps(Vec2, Fac1);
+ __m128 Mul02 = _mm_mul_ps(Vec3, Fac2);
+ __m128 Sub00 = _mm_sub_ps(Mul00, Mul01);
+ __m128 Add00 = _mm_add_ps(Sub00, Mul02);
+ __m128 Inv0 = _mm_mul_ps(SignB, Add00);
+
+ // col1
+ // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]),
+ // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]),
+ // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]),
+ // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]),
+ __m128 Mul03 = _mm_mul_ps(Vec0, Fac0);
+ __m128 Mul04 = _mm_mul_ps(Vec2, Fac3);
+ __m128 Mul05 = _mm_mul_ps(Vec3, Fac4);
+ __m128 Sub01 = _mm_sub_ps(Mul03, Mul04);
+ __m128 Add01 = _mm_add_ps(Sub01, Mul05);
+ __m128 Inv1 = _mm_mul_ps(SignA, Add01);
+
+ // col2
+ // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]),
+ // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]),
+ // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]),
+ // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]),
+ __m128 Mul06 = _mm_mul_ps(Vec0, Fac1);
+ __m128 Mul07 = _mm_mul_ps(Vec1, Fac3);
+ __m128 Mul08 = _mm_mul_ps(Vec3, Fac5);
+ __m128 Sub02 = _mm_sub_ps(Mul06, Mul07);
+ __m128 Add02 = _mm_add_ps(Sub02, Mul08);
+ __m128 Inv2 = _mm_mul_ps(SignB, Add02);
+
+ // col3
+ // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]),
+ // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]),
+ // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]),
+ // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3]));
+ __m128 Mul09 = _mm_mul_ps(Vec0, Fac2);
+ __m128 Mul10 = _mm_mul_ps(Vec1, Fac4);
+ __m128 Mul11 = _mm_mul_ps(Vec2, Fac5);
+ __m128 Sub03 = _mm_sub_ps(Mul09, Mul10);
+ __m128 Add03 = _mm_add_ps(Sub03, Mul11);
+ __m128 Inv3 = _mm_mul_ps(SignA, Add03);
+
+ __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0));
+
+ // valType Determinant = m[0][0] * Inverse[0][0]
+ // + m[0][1] * Inverse[1][0]
+ // + m[0][2] * Inverse[2][0]
+ // + m[0][3] * Inverse[3][0];
+ __m128 Det0 = glm_vec4_dot(in[0], Row2);
+ __m128 Rcp0 = _mm_div_ps(_mm_set1_ps(1.0f), Det0);
+ //__m128 Rcp0 = _mm_rcp_ps(Det0);
+
+ // Inverse /= Determinant;
+ out[0] = _mm_mul_ps(Inv0, Rcp0);
+ out[1] = _mm_mul_ps(Inv1, Rcp0);
+ out[2] = _mm_mul_ps(Inv2, Rcp0);
+ out[3] = _mm_mul_ps(Inv3, Rcp0);
+}
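
Rcp0 here uses an exact division by the determinant (compare glm_mat4_inverse_lowp below, which trades accuracy for speed with _mm_rcp_ps). A quick sanity-check sketch (illustrative):

    // Multiplying a matrix by its computed inverse should yield a matrix
    // whose columns are close to the identity columns, up to float rounding.
    static void check_inverse(glm_vec4 const m[4])
    {
        glm_vec4 inv[4];
        glm_vec4 prod[4];
        glm_mat4_inverse(m, inv);
        glm_mat4_mul(m, inv, prod); // prod should approximate the identity
        (void)prod;
    }
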
+
+GLM_FUNC_QUALIFIER void glm_mat4_inverse_lowp(glm_vec4 const in[4], glm_vec4 out[4])
+{
+ __m128 Fac0;
+ {
+ // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+ // valType SubFactor00 = m[2][2] * m[3][3] - m[3][2] * m[2][3];
+ // valType SubFactor06 = m[1][2] * m[3][3] - m[3][2] * m[1][3];
+ // valType SubFactor13 = m[1][2] * m[2][3] - m[2][2] * m[1][3];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac0 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 Fac1;
+ {
+ // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+ // valType SubFactor01 = m[2][1] * m[3][3] - m[3][1] * m[2][3];
+ // valType SubFactor07 = m[1][1] * m[3][3] - m[3][1] * m[1][3];
+ // valType SubFactor14 = m[1][1] * m[2][3] - m[2][1] * m[1][3];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac1 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+
+ __m128 Fac2;
+ {
+ // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+ // valType SubFactor02 = m[2][1] * m[3][2] - m[3][1] * m[2][2];
+ // valType SubFactor08 = m[1][1] * m[3][2] - m[3][1] * m[1][2];
+ // valType SubFactor15 = m[1][1] * m[2][2] - m[2][1] * m[1][2];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac2 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 Fac3;
+ {
+ // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+ // valType SubFactor03 = m[2][0] * m[3][3] - m[3][0] * m[2][3];
+ // valType SubFactor09 = m[1][0] * m[3][3] - m[3][0] * m[1][3];
+ // valType SubFactor16 = m[1][0] * m[2][3] - m[2][0] * m[1][3];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(3, 3, 3, 3));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(3, 3, 3, 3));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac3 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 Fac4;
+ {
+ // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+ // valType SubFactor04 = m[2][0] * m[3][2] - m[3][0] * m[2][2];
+ // valType SubFactor10 = m[1][0] * m[3][2] - m[3][0] * m[1][2];
+ // valType SubFactor17 = m[1][0] * m[2][2] - m[2][0] * m[1][2];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(2, 2, 2, 2));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac4 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 Fac5;
+ {
+ // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+ // valType SubFactor05 = m[2][0] * m[3][1] - m[3][0] * m[2][1];
+ // valType SubFactor12 = m[1][0] * m[3][1] - m[3][0] * m[1][1];
+ // valType SubFactor18 = m[1][0] * m[2][1] - m[2][0] * m[1][1];
+
+ __m128 Swp0a = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 Swp0b = _mm_shuffle_ps(in[3], in[2], _MM_SHUFFLE(0, 0, 0, 0));
+
+ __m128 Swp00 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Swp01 = _mm_shuffle_ps(Swp0a, Swp0a, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp02 = _mm_shuffle_ps(Swp0b, Swp0b, _MM_SHUFFLE(2, 0, 0, 0));
+ __m128 Swp03 = _mm_shuffle_ps(in[2], in[1], _MM_SHUFFLE(1, 1, 1, 1));
+
+ __m128 Mul00 = _mm_mul_ps(Swp00, Swp01);
+ __m128 Mul01 = _mm_mul_ps(Swp02, Swp03);
+ Fac5 = _mm_sub_ps(Mul00, Mul01);
+ }
+
+ __m128 SignA = _mm_set_ps( 1.0f,-1.0f, 1.0f,-1.0f);
+ __m128 SignB = _mm_set_ps(-1.0f, 1.0f,-1.0f, 1.0f);
+
+ // m[1][0]
+ // m[0][0]
+ // m[0][0]
+ // m[0][0]
+ __m128 Temp0 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Vec0 = _mm_shuffle_ps(Temp0, Temp0, _MM_SHUFFLE(2, 2, 2, 0));
+
+ // m[1][1]
+ // m[0][1]
+ // m[0][1]
+ // m[0][1]
+ __m128 Temp1 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 Vec1 = _mm_shuffle_ps(Temp1, Temp1, _MM_SHUFFLE(2, 2, 2, 0));
+
+ // m[1][2]
+ // m[0][2]
+ // m[0][2]
+ // m[0][2]
+ __m128 Temp2 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 Vec2 = _mm_shuffle_ps(Temp2, Temp2, _MM_SHUFFLE(2, 2, 2, 0));
+
+ // m[1][3]
+ // m[0][3]
+ // m[0][3]
+ // m[0][3]
+ __m128 Temp3 = _mm_shuffle_ps(in[1], in[0], _MM_SHUFFLE(3, 3, 3, 3));
+ __m128 Vec3 = _mm_shuffle_ps(Temp3, Temp3, _MM_SHUFFLE(2, 2, 2, 0));
+
+ // col0
+ // + (Vec1[0] * Fac0[0] - Vec2[0] * Fac1[0] + Vec3[0] * Fac2[0]),
+ // - (Vec1[1] * Fac0[1] - Vec2[1] * Fac1[1] + Vec3[1] * Fac2[1]),
+ // + (Vec1[2] * Fac0[2] - Vec2[2] * Fac1[2] + Vec3[2] * Fac2[2]),
+ // - (Vec1[3] * Fac0[3] - Vec2[3] * Fac1[3] + Vec3[3] * Fac2[3]),
+ __m128 Mul00 = _mm_mul_ps(Vec1, Fac0);
+ __m128 Mul01 = _mm_mul_ps(Vec2, Fac1);
+ __m128 Mul02 = _mm_mul_ps(Vec3, Fac2);
+ __m128 Sub00 = _mm_sub_ps(Mul00, Mul01);
+ __m128 Add00 = _mm_add_ps(Sub00, Mul02);
+ __m128 Inv0 = _mm_mul_ps(SignB, Add00);
+
+ // col1
+ // - (Vec0[0] * Fac0[0] - Vec2[0] * Fac3[0] + Vec3[0] * Fac4[0]),
+ // + (Vec0[0] * Fac0[1] - Vec2[1] * Fac3[1] + Vec3[1] * Fac4[1]),
+ // - (Vec0[0] * Fac0[2] - Vec2[2] * Fac3[2] + Vec3[2] * Fac4[2]),
+ // + (Vec0[0] * Fac0[3] - Vec2[3] * Fac3[3] + Vec3[3] * Fac4[3]),
+ __m128 Mul03 = _mm_mul_ps(Vec0, Fac0);
+ __m128 Mul04 = _mm_mul_ps(Vec2, Fac3);
+ __m128 Mul05 = _mm_mul_ps(Vec3, Fac4);
+ __m128 Sub01 = _mm_sub_ps(Mul03, Mul04);
+ __m128 Add01 = _mm_add_ps(Sub01, Mul05);
+ __m128 Inv1 = _mm_mul_ps(SignA, Add01);
+
+ // col2
+ // + (Vec0[0] * Fac1[0] - Vec1[0] * Fac3[0] + Vec3[0] * Fac5[0]),
+ // - (Vec0[0] * Fac1[1] - Vec1[1] * Fac3[1] + Vec3[1] * Fac5[1]),
+ // + (Vec0[0] * Fac1[2] - Vec1[2] * Fac3[2] + Vec3[2] * Fac5[2]),
+ // - (Vec0[0] * Fac1[3] - Vec1[3] * Fac3[3] + Vec3[3] * Fac5[3]),
+ __m128 Mul06 = _mm_mul_ps(Vec0, Fac1);
+ __m128 Mul07 = _mm_mul_ps(Vec1, Fac3);
+ __m128 Mul08 = _mm_mul_ps(Vec3, Fac5);
+ __m128 Sub02 = _mm_sub_ps(Mul06, Mul07);
+ __m128 Add02 = _mm_add_ps(Sub02, Mul08);
+ __m128 Inv2 = _mm_mul_ps(SignB, Add02);
+
+ // col3
+ // - (Vec1[0] * Fac2[0] - Vec1[0] * Fac4[0] + Vec2[0] * Fac5[0]),
+ // + (Vec1[0] * Fac2[1] - Vec1[1] * Fac4[1] + Vec2[1] * Fac5[1]),
+ // - (Vec1[0] * Fac2[2] - Vec1[2] * Fac4[2] + Vec2[2] * Fac5[2]),
+ // + (Vec1[0] * Fac2[3] - Vec1[3] * Fac4[3] + Vec2[3] * Fac5[3]));
+ __m128 Mul09 = _mm_mul_ps(Vec0, Fac2);
+ __m128 Mul10 = _mm_mul_ps(Vec1, Fac4);
+ __m128 Mul11 = _mm_mul_ps(Vec2, Fac5);
+ __m128 Sub03 = _mm_sub_ps(Mul09, Mul10);
+ __m128 Add03 = _mm_add_ps(Sub03, Mul11);
+ __m128 Inv3 = _mm_mul_ps(SignA, Add03);
+
+ __m128 Row0 = _mm_shuffle_ps(Inv0, Inv1, _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Row1 = _mm_shuffle_ps(Inv2, Inv3, _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Row2 = _mm_shuffle_ps(Row0, Row1, _MM_SHUFFLE(2, 0, 2, 0));
+
+ // valType Determinant = m[0][0] * Inverse[0][0]
+ // + m[0][1] * Inverse[1][0]
+ // + m[0][2] * Inverse[2][0]
+ // + m[0][3] * Inverse[3][0];
+ __m128 Det0 = glm_vec4_dot(in[0], Row2);
+ __m128 Rcp0 = _mm_rcp_ps(Det0);
+ //__m128 Rcp0 = _mm_div_ps(one, Det0);
+ // Inverse /= Determinant;
+ out[0] = _mm_mul_ps(Inv0, Rcp0);
+ out[1] = _mm_mul_ps(Inv1, Rcp0);
+ out[2] = _mm_mul_ps(Inv2, Rcp0);
+ out[3] = _mm_mul_ps(Inv3, Rcp0);
+}
+/*
+GLM_FUNC_QUALIFIER void glm_mat4_rotate(__m128 const in[4], float Angle, float const v[3], __m128 out[4])
+{
+ float a = glm::radians(Angle);
+ float c = cos(a);
+ float s = sin(a);
+
+ glm::vec4 AxisA(v[0], v[1], v[2], float(0));
+ __m128 AxisB = _mm_set_ps(AxisA.w, AxisA.z, AxisA.y, AxisA.x);
+ __m128 AxisC = detail::sse_nrm_ps(AxisB);
+
+ __m128 Cos0 = _mm_set_ss(c);
+ __m128 CosA = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 Sin0 = _mm_set_ss(s);
+ __m128 SinA = _mm_shuffle_ps(Sin0, Sin0, _MM_SHUFFLE(0, 0, 0, 0));
+
+ // vec<3, T, Q> temp = (valType(1) - c) * axis;
+ __m128 Temp0 = _mm_sub_ps(one, CosA);
+ __m128 Temp1 = _mm_mul_ps(Temp0, AxisC);
+
+ //Rotate[0][0] = c + temp[0] * axis[0];
+ //Rotate[0][1] = 0 + temp[0] * axis[1] + s * axis[2];
+ //Rotate[0][2] = 0 + temp[0] * axis[2] - s * axis[1];
+ __m128 Axis0 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(0, 0, 0, 0));
+ __m128 TmpA0 = _mm_mul_ps(Axis0, AxisC);
+ __m128 CosA0 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 1, 0));
+ __m128 TmpA1 = _mm_add_ps(CosA0, TmpA0);
+ __m128 SinA0 = SinA;//_mm_set_ps(0.0f, s, -s, 0.0f);
+ __m128 TmpA2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 1, 2, 3));
+ __m128 TmpA3 = _mm_mul_ps(SinA0, TmpA2);
+ __m128 TmpA4 = _mm_add_ps(TmpA1, TmpA3);
+
+ //Rotate[1][0] = 0 + temp[1] * axis[0] - s * axis[2];
+ //Rotate[1][1] = c + temp[1] * axis[1];
+ //Rotate[1][2] = 0 + temp[1] * axis[2] + s * axis[0];
+ __m128 Axis1 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(1, 1, 1, 1));
+ __m128 TmpB0 = _mm_mul_ps(Axis1, AxisC);
+ __m128 CosA1 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 1, 0, 1));
+ __m128 TmpB1 = _mm_add_ps(CosA1, TmpB0);
+ __m128 SinB0 = SinA;//_mm_set_ps(-s, 0.0f, s, 0.0f);
+ __m128 TmpB2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 0, 3, 2));
+ __m128 TmpB3 = _mm_mul_ps(SinA0, TmpB2);
+ __m128 TmpB4 = _mm_add_ps(TmpB1, TmpB3);
+
+ //Rotate[2][0] = 0 + temp[2] * axis[0] + s * axis[1];
+ //Rotate[2][1] = 0 + temp[2] * axis[1] - s * axis[0];
+ //Rotate[2][2] = c + temp[2] * axis[2];
+ __m128 Axis2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(2, 2, 2, 2));
+ __m128 TmpC0 = _mm_mul_ps(Axis2, AxisC);
+ __m128 CosA2 = _mm_shuffle_ps(Cos0, Cos0, _MM_SHUFFLE(1, 0, 1, 1));
+ __m128 TmpC1 = _mm_add_ps(CosA2, TmpC0);
+ __m128 SinC0 = SinA;//_mm_set_ps(s, -s, 0.0f, 0.0f);
+ __m128 TmpC2 = _mm_shuffle_ps(AxisC, AxisC, _MM_SHUFFLE(3, 3, 0, 1));
+ __m128 TmpC3 = _mm_mul_ps(SinA0, TmpC2);
+ __m128 TmpC4 = _mm_add_ps(TmpC1, TmpC3);
+
+ __m128 Result[4];
+ Result[0] = TmpA4;
+ Result[1] = TmpB4;
+ Result[2] = TmpC4;
+ Result[3] = _mm_set_ps(1, 0, 0, 0);
+
+ //mat<4, 4, valType> Result;
+ //Result[0] = m[0] * Rotate[0][0] + m[1] * Rotate[0][1] + m[2] * Rotate[0][2];
+ //Result[1] = m[0] * Rotate[1][0] + m[1] * Rotate[1][1] + m[2] * Rotate[1][2];
+ //Result[2] = m[0] * Rotate[2][0] + m[1] * Rotate[2][1] + m[2] * Rotate[2][2];
+ //Result[3] = m[3];
+ //return Result;
+ glm_mat4_mul(in, Result, out);
+}
+*/
+GLM_FUNC_QUALIFIER void glm_mat4_outerProduct(__m128 const& c, __m128 const& r, __m128 out[4])
+{
+ out[0] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(0, 0, 0, 0)));
+ out[1] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(1, 1, 1, 1)));
+ out[2] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(2, 2, 2, 2)));
+ out[3] = _mm_mul_ps(c, _mm_shuffle_ps(r, r, _MM_SHUFFLE(3, 3, 3, 3)));
+}
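+
+// Usage sketch for glm_mat4_outerProduct (illustrative values): out[j] is c
+// scaled by lane j of r, i.e. out[j][i] == c[i] * r[j]. For example:
+// __m128 c = _mm_set_ps(4.f, 3.f, 2.f, 1.f); // lanes (1, 2, 3, 4)
+// __m128 r = _mm_set_ps(0.f, 0.f, 0.f, 2.f); // lanes (2, 0, 0, 0)
+// __m128 m[4];
+// glm_mat4_outerProduct(c, r, m); // m[0] == (2, 4, 6, 8); m[1]..m[3] == zero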
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/src/include/glm/simd/neon.h b/src/include/glm/simd/neon.h new file mode 100644 index 0000000..b2c2e8d --- /dev/null +++ b/src/include/glm/simd/neon.h @@ -0,0 +1,155 @@ +/// @ref simd_neon
+/// @file glm/simd/neon.h
+
+#pragma once
+
+#if GLM_ARCH & GLM_ARCH_NEON_BIT
+#include <arm_neon.h>
+#include <cassert> // the lane helpers below assert on out-of-range lanes
+
+namespace glm {
+ namespace neon {
+ static float32x4_t dupq_lane(float32x4_t vsrc, int lane) {
+ switch(lane) {
+#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
+ case 0: return vdupq_laneq_f32(vsrc, 0);
+ case 1: return vdupq_laneq_f32(vsrc, 1);
+ case 2: return vdupq_laneq_f32(vsrc, 2);
+ case 3: return vdupq_laneq_f32(vsrc, 3);
+#else
+ case 0: return vdupq_n_f32(vgetq_lane_f32(vsrc, 0));
+ case 1: return vdupq_n_f32(vgetq_lane_f32(vsrc, 1));
+ case 2: return vdupq_n_f32(vgetq_lane_f32(vsrc, 2));
+ case 3: return vdupq_n_f32(vgetq_lane_f32(vsrc, 3));
+#endif
+ }
+ assert(!"Unreachable code executed!");
+ return vdupq_n_f32(0.0f);
+ }
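+ // Example: dupq_lane(v, 2) broadcasts lane 2 of v to all four lanes,
+ // i.e. (a, b, c, d) -> (c, c, c, c); dup_lane below is the 64-bit variant.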
+
+ static float32x2_t dup_lane(float32x4_t vsrc, int lane) {
+ switch(lane) {
+#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
+ case 0: return vdup_laneq_f32(vsrc, 0);
+ case 1: return vdup_laneq_f32(vsrc, 1);
+ case 2: return vdup_laneq_f32(vsrc, 2);
+ case 3: return vdup_laneq_f32(vsrc, 3);
+#else
+ case 0: return vdup_n_f32(vgetq_lane_f32(vsrc, 0));
+ case 1: return vdup_n_f32(vgetq_lane_f32(vsrc, 1));
+ case 2: return vdup_n_f32(vgetq_lane_f32(vsrc, 2));
+ case 3: return vdup_n_f32(vgetq_lane_f32(vsrc, 3));
+#endif
+ }
+ assert(!"Unreachable code executed!");
+ return vdup_n_f32(0.0f);
+ }
+
+ static float32x4_t copy_lane(float32x4_t vdst, int dlane, float32x4_t vsrc, int slane) {
+#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
+ switch(dlane) {
+ case 0:
+ switch(slane) {
+ case 0: return vcopyq_laneq_f32(vdst, 0, vsrc, 0);
+ case 1: return vcopyq_laneq_f32(vdst, 0, vsrc, 1);
+ case 2: return vcopyq_laneq_f32(vdst, 0, vsrc, 2);
+ case 3: return vcopyq_laneq_f32(vdst, 0, vsrc, 3);
+ }
+ assert(!"Unreachable code executed!");
+ break;
+ case 1:
+ switch(slane) {
+ case 0: return vcopyq_laneq_f32(vdst, 1, vsrc, 0);
+ case 1: return vcopyq_laneq_f32(vdst, 1, vsrc, 1);
+ case 2: return vcopyq_laneq_f32(vdst, 1, vsrc, 2);
+ case 3: return vcopyq_laneq_f32(vdst, 1, vsrc, 3);
+ }
+ assert(!"Unreachable code executed!");
+ break;
+ case 2:
+ switch(slane) {
+ case 0: return vcopyq_laneq_f32(vdst, 2, vsrc, 0);
+ case 1: return vcopyq_laneq_f32(vdst, 2, vsrc, 1);
+ case 2: return vcopyq_laneq_f32(vdst, 2, vsrc, 2);
+ case 3: return vcopyq_laneq_f32(vdst, 2, vsrc, 3);
+ }
+ assert(!"Unreachable code executed!");
+ break;
+ case 3:
+ switch(slane) {
+ case 0: return vcopyq_laneq_f32(vdst, 3, vsrc, 0);
+ case 1: return vcopyq_laneq_f32(vdst, 3, vsrc, 1);
+ case 2: return vcopyq_laneq_f32(vdst, 3, vsrc, 2);
+ case 3: return vcopyq_laneq_f32(vdst, 3, vsrc, 3);
+ }
+ assert(!"Unreachable code executed!");
+ break;
+ }
+#else
+
+ float l = 0.0f; // defined fallback for the (asserted) invalid-lane case
+ switch(slane) {
+ case 0: l = vgetq_lane_f32(vsrc, 0); break;
+ case 1: l = vgetq_lane_f32(vsrc, 1); break;
+ case 2: l = vgetq_lane_f32(vsrc, 2); break;
+ case 3: l = vgetq_lane_f32(vsrc, 3); break;
+ default:
+ assert(!"Unreachable code executed!");
+ }
+ switch(dlane) {
+ case 0: return vsetq_lane_f32(l, vdst, 0);
+ case 1: return vsetq_lane_f32(l, vdst, 1);
+ case 2: return vsetq_lane_f32(l, vdst, 2);
+ case 3: return vsetq_lane_f32(l, vdst, 3);
+ }
+#endif
+ assert(!"Unreachable code executed!");
+ return vdupq_n_f32(0.0f);
+ }
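+ // Example: copy_lane(d, 0, s, 3) returns d with lane 0 replaced by lane 3
+ // of s; all other lanes of d are preserved.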
+
+ static float32x4_t mul_lane(float32x4_t v, float32x4_t vlane, int lane) {
+#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
+ switch(lane) {
+ case 0: return vmulq_laneq_f32(v, vlane, 0);
+ case 1: return vmulq_laneq_f32(v, vlane, 1);
+ case 2: return vmulq_laneq_f32(v, vlane, 2);
+ case 3: return vmulq_laneq_f32(v, vlane, 3);
+ default:
+ assert(!"Unreachable code executed!");
+ }
+ return vdupq_n_f32(0.0f);
+#else
+ return vmulq_f32(v, dupq_lane(vlane, lane));
+#endif
+ }
+
+ static float32x4_t madd_lane(float32x4_t acc, float32x4_t v, float32x4_t vlane, int lane) {
+#if GLM_ARCH & GLM_ARCH_ARMV8_BIT
+#ifdef GLM_CONFIG_FORCE_FMA
+// dupq_lane (not dup_lane) is required here: the asm template reads %2 as a
+// full .4s register, so a 64-bit dup would leave the upper two lanes undefined.
+# define FMADD_LANE(acc, x, y, L) do { asm volatile ("fmla %0.4s, %1.4s, %2.4s" : "+w"(acc) : "w"(x), "w"(dupq_lane(y, L))); } while(0)
+#else
+# define FMADD_LANE(acc, x, y, L) do { acc = vmlaq_laneq_f32(acc, x, y, L); } while(0)
+#endif
+
+ switch(lane) {
+ case 0:
+ FMADD_LANE(acc, v, vlane, 0);
+ return acc;
+ case 1:
+ FMADD_LANE(acc, v, vlane, 1);
+ return acc;
+ case 2:
+ FMADD_LANE(acc, v, vlane, 2);
+ return acc;
+ case 3:
+ FMADD_LANE(acc, v, vlane, 3);
+ return acc;
+ default:
+ assert(!"Unreachable code executed!");
+ }
+ return vdupq_n_f32(0.0f);
+# undef FMADD_LANE
+#else
+ return vaddq_f32(acc, vmulq_f32(v, dupq_lane(vlane, lane)));
+#endif
+ }
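+
+ // Typical use (sketch): a column-major mat4 * vec4 product built from
+ // mul_lane/madd_lane, assuming m[0..3] hold the matrix columns:
+ // float32x4_t r = mul_lane(m[0], v, 0);
+ // r = madd_lane(r, m[1], v, 1);
+ // r = madd_lane(r, m[2], v, 2);
+ // r = madd_lane(r, m[3], v, 3);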
+ } //namespace neon
+} // namespace glm
+#endif // GLM_ARCH & GLM_ARCH_NEON_BIT
diff --git a/src/include/glm/simd/packing.h b/src/include/glm/simd/packing.h new file mode 100644 index 0000000..aca4361 --- /dev/null +++ b/src/include/glm/simd/packing.h @@ -0,0 +1,8 @@ +/// @ref simd
+/// @file glm/simd/packing.h
+
+#pragma once
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
diff --git a/src/include/glm/simd/platform.h b/src/include/glm/simd/platform.h new file mode 100644 index 0000000..0902154 --- /dev/null +++ b/src/include/glm/simd/platform.h @@ -0,0 +1,398 @@ +#pragma once
+
+///////////////////////////////////////////////////////////////////////////////////
+// Platform
+
+#define GLM_PLATFORM_UNKNOWN 0x00000000
+#define GLM_PLATFORM_WINDOWS 0x00010000
+#define GLM_PLATFORM_LINUX 0x00020000
+#define GLM_PLATFORM_APPLE 0x00040000
+//#define GLM_PLATFORM_IOS 0x00080000
+#define GLM_PLATFORM_ANDROID 0x00100000
+#define GLM_PLATFORM_CHROME_NACL 0x00200000
+#define GLM_PLATFORM_UNIX 0x00400000
+#define GLM_PLATFORM_QNXNTO 0x00800000
+#define GLM_PLATFORM_WINCE 0x01000000
+#define GLM_PLATFORM_CYGWIN 0x02000000
+
+#ifdef GLM_FORCE_PLATFORM_UNKNOWN
+# define GLM_PLATFORM GLM_PLATFORM_UNKNOWN
+#elif defined(__CYGWIN__)
+# define GLM_PLATFORM GLM_PLATFORM_CYGWIN
+#elif defined(__QNXNTO__)
+# define GLM_PLATFORM GLM_PLATFORM_QNXNTO
+#elif defined(__APPLE__)
+# define GLM_PLATFORM GLM_PLATFORM_APPLE
+#elif defined(WINCE)
+# define GLM_PLATFORM GLM_PLATFORM_WINCE
+#elif defined(_WIN32)
+# define GLM_PLATFORM GLM_PLATFORM_WINDOWS
+#elif defined(__native_client__)
+# define GLM_PLATFORM GLM_PLATFORM_CHROME_NACL
+#elif defined(__ANDROID__)
+# define GLM_PLATFORM GLM_PLATFORM_ANDROID
+#elif defined(__linux)
+# define GLM_PLATFORM GLM_PLATFORM_LINUX
+#elif defined(__unix)
+# define GLM_PLATFORM GLM_PLATFORM_UNIX
+#else
+# define GLM_PLATFORM GLM_PLATFORM_UNKNOWN
+#endif//GLM_PLATFORM
+
+///////////////////////////////////////////////////////////////////////////////////
+// Compiler
+
+#define GLM_COMPILER_UNKNOWN 0x00000000
+
+// Intel
+#define GLM_COMPILER_INTEL 0x00100000
+#define GLM_COMPILER_INTEL14 0x00100040
+#define GLM_COMPILER_INTEL15 0x00100050
+#define GLM_COMPILER_INTEL16 0x00100060
+#define GLM_COMPILER_INTEL17 0x00100070
+
+// Visual C++ defines
+#define GLM_COMPILER_VC 0x01000000
+#define GLM_COMPILER_VC12 0x01000001
+#define GLM_COMPILER_VC14 0x01000002
+#define GLM_COMPILER_VC15 0x01000003
+#define GLM_COMPILER_VC15_3 0x01000004
+#define GLM_COMPILER_VC15_5 0x01000005
+#define GLM_COMPILER_VC15_6 0x01000006
+#define GLM_COMPILER_VC15_7 0x01000007
+#define GLM_COMPILER_VC15_8 0x01000008
+#define GLM_COMPILER_VC15_9 0x01000009
+#define GLM_COMPILER_VC16 0x0100000A
+
+// GCC defines
+#define GLM_COMPILER_GCC 0x02000000
+#define GLM_COMPILER_GCC46 0x020000D0
+#define GLM_COMPILER_GCC47 0x020000E0
+#define GLM_COMPILER_GCC48 0x020000F0
+#define GLM_COMPILER_GCC49 0x02000100
+#define GLM_COMPILER_GCC5 0x02000200
+#define GLM_COMPILER_GCC6 0x02000300
+#define GLM_COMPILER_GCC7 0x02000400
+#define GLM_COMPILER_GCC8 0x02000500
+
+// CUDA
+#define GLM_COMPILER_CUDA 0x10000000
+#define GLM_COMPILER_CUDA75 0x10000001
+#define GLM_COMPILER_CUDA80 0x10000002
+#define GLM_COMPILER_CUDA90 0x10000004
+
+// SYCL
+#define GLM_COMPILER_SYCL 0x00300000
+
+// Clang
+#define GLM_COMPILER_CLANG 0x20000000
+#define GLM_COMPILER_CLANG34 0x20000050
+#define GLM_COMPILER_CLANG35 0x20000060
+#define GLM_COMPILER_CLANG36 0x20000070
+#define GLM_COMPILER_CLANG37 0x20000080
+#define GLM_COMPILER_CLANG38 0x20000090
+#define GLM_COMPILER_CLANG39 0x200000A0
+#define GLM_COMPILER_CLANG40 0x200000B0
+#define GLM_COMPILER_CLANG41 0x200000C0
+#define GLM_COMPILER_CLANG42 0x200000D0
+
+// Build model
+#define GLM_MODEL_32 0x00000010
+#define GLM_MODEL_64 0x00000020
+
+// Force generic C++ compiler
+#ifdef GLM_FORCE_COMPILER_UNKNOWN
+# define GLM_COMPILER GLM_COMPILER_UNKNOWN
+
+#elif defined(__INTEL_COMPILER)
+# if __INTEL_COMPILER >= 1700
+# define GLM_COMPILER GLM_COMPILER_INTEL17
+# elif __INTEL_COMPILER >= 1600
+# define GLM_COMPILER GLM_COMPILER_INTEL16
+# elif __INTEL_COMPILER >= 1500
+# define GLM_COMPILER GLM_COMPILER_INTEL15
+# elif __INTEL_COMPILER >= 1400
+# define GLM_COMPILER GLM_COMPILER_INTEL14
+# elif __INTEL_COMPILER < 1400
+# error "GLM requires ICC 2013 SP1 or newer"
+# endif
+
+// CUDA
+#elif defined(__CUDACC__)
+# if !defined(CUDA_VERSION) && !defined(GLM_FORCE_CUDA)
+# include <cuda.h> // make sure version is defined since nvcc does not define it itself!
+# endif
+# if CUDA_VERSION >= 9000
+# define GLM_COMPILER GLM_COMPILER_CUDA90
+# elif CUDA_VERSION >= 8000
+# define GLM_COMPILER GLM_COMPILER_CUDA80
+# elif CUDA_VERSION >= 7500
+# define GLM_COMPILER GLM_COMPILER_CUDA75
+# elif CUDA_VERSION >= 7000
+# define GLM_COMPILER GLM_COMPILER_CUDA
+# elif CUDA_VERSION < 7000
+# error "GLM requires CUDA 7.0 or higher"
+# endif
+
+// SYCL
+#elif defined(__SYCL_DEVICE_ONLY__)
+# define GLM_COMPILER GLM_COMPILER_SYCL
+
+// Clang
+#elif defined(__clang__)
+# if defined(__apple_build_version__)
+# if (__clang_major__ < 6)
+# error "GLM requires Clang 3.4 / Apple Clang 6.0 or higher"
+# elif __clang_major__ == 6 && __clang_minor__ == 0
+# define GLM_COMPILER GLM_COMPILER_CLANG35
+# elif __clang_major__ == 6 && __clang_minor__ >= 1
+# define GLM_COMPILER GLM_COMPILER_CLANG36
+# elif __clang_major__ >= 7
+# define GLM_COMPILER GLM_COMPILER_CLANG37
+# endif
+# else
+# if ((__clang_major__ == 3) && (__clang_minor__ < 4)) || (__clang_major__ < 3)
+# error "GLM requires Clang 3.4 or higher"
+# elif __clang_major__ == 3 && __clang_minor__ == 4
+# define GLM_COMPILER GLM_COMPILER_CLANG34
+# elif __clang_major__ == 3 && __clang_minor__ == 5
+# define GLM_COMPILER GLM_COMPILER_CLANG35
+# elif __clang_major__ == 3 && __clang_minor__ == 6
+# define GLM_COMPILER GLM_COMPILER_CLANG36
+# elif __clang_major__ == 3 && __clang_minor__ == 7
+# define GLM_COMPILER GLM_COMPILER_CLANG37
+# elif __clang_major__ == 3 && __clang_minor__ == 8
+# define GLM_COMPILER GLM_COMPILER_CLANG38
+# elif __clang_major__ == 3 && __clang_minor__ >= 9
+# define GLM_COMPILER GLM_COMPILER_CLANG39
+# elif __clang_major__ == 4 && __clang_minor__ == 0
+# define GLM_COMPILER GLM_COMPILER_CLANG40
+# elif __clang_major__ == 4 && __clang_minor__ == 1
+# define GLM_COMPILER GLM_COMPILER_CLANG41
+# elif __clang_major__ == 4 && __clang_minor__ >= 2
+# define GLM_COMPILER GLM_COMPILER_CLANG42
+# elif __clang_major__ >= 4
+# define GLM_COMPILER GLM_COMPILER_CLANG42
+# endif
+# endif
+
+// Visual C++
+#elif defined(_MSC_VER)
+# if _MSC_VER >= 1920
+# define GLM_COMPILER GLM_COMPILER_VC16
+# elif _MSC_VER >= 1916
+# define GLM_COMPILER GLM_COMPILER_VC15_9
+# elif _MSC_VER >= 1915
+# define GLM_COMPILER GLM_COMPILER_VC15_8
+# elif _MSC_VER >= 1914
+# define GLM_COMPILER GLM_COMPILER_VC15_7
+# elif _MSC_VER >= 1913
+# define GLM_COMPILER GLM_COMPILER_VC15_6
+# elif _MSC_VER >= 1912
+# define GLM_COMPILER GLM_COMPILER_VC15_5
+# elif _MSC_VER >= 1911
+# define GLM_COMPILER GLM_COMPILER_VC15_3
+# elif _MSC_VER >= 1910
+# define GLM_COMPILER GLM_COMPILER_VC15
+# elif _MSC_VER >= 1900
+# define GLM_COMPILER GLM_COMPILER_VC14
+# elif _MSC_VER >= 1800
+# define GLM_COMPILER GLM_COMPILER_VC12
+# elif _MSC_VER < 1800
+# error "GLM requires Visual C++ 12 - 2013 or higher"
+# endif//_MSC_VER
+
+// G++
+#elif defined(__GNUC__) || defined(__MINGW32__)
+# if __GNUC__ >= 8
+# define GLM_COMPILER GLM_COMPILER_GCC8
+# elif __GNUC__ >= 7
+# define GLM_COMPILER GLM_COMPILER_GCC7
+# elif __GNUC__ >= 6
+# define GLM_COMPILER GLM_COMPILER_GCC6
+# elif __GNUC__ >= 5
+# define GLM_COMPILER GLM_COMPILER_GCC5
+# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
+# define GLM_COMPILER GLM_COMPILER_GCC49
+# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 8
+# define GLM_COMPILER GLM_COMPILER_GCC48
+# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 7
+# define GLM_COMPILER GLM_COMPILER_GCC47
+# elif __GNUC__ == 4 && __GNUC_MINOR__ >= 6
+# define GLM_COMPILER GLM_COMPILER_GCC46
+# elif ((__GNUC__ == 4) && (__GNUC_MINOR__ < 6)) || (__GNUC__ < 4)
+# error "GLM requires GCC 4.6 or higher"
+# endif
+
+#else
+# define GLM_COMPILER GLM_COMPILER_UNKNOWN
+#endif
+
+#ifndef GLM_COMPILER
+# error "GLM_COMPILER undefined, your compiler may not be supported by GLM. Add #define GLM_COMPILER 0 to ignore this message."
+#endif//GLM_COMPILER
+
+///////////////////////////////////////////////////////////////////////////////////
+// Instruction sets
+
+// User defines: GLM_FORCE_PURE GLM_FORCE_INTRINSICS GLM_FORCE_SSE2 GLM_FORCE_SSE3 GLM_FORCE_SSSE3 GLM_FORCE_SSE41 GLM_FORCE_SSE42 GLM_FORCE_AVX GLM_FORCE_AVX2 GLM_FORCE_NEON
+
+#define GLM_ARCH_MIPS_BIT (0x10000000)
+#define GLM_ARCH_PPC_BIT (0x20000000)
+#define GLM_ARCH_ARM_BIT (0x40000000)
+#define GLM_ARCH_ARMV8_BIT (0x01000000)
+#define GLM_ARCH_X86_BIT (0x80000000)
+
+#define GLM_ARCH_SIMD_BIT (0x00001000)
+
+#define GLM_ARCH_NEON_BIT (0x00000001)
+#define GLM_ARCH_SSE_BIT (0x00000002)
+#define GLM_ARCH_SSE2_BIT (0x00000004)
+#define GLM_ARCH_SSE3_BIT (0x00000008)
+#define GLM_ARCH_SSSE3_BIT (0x00000010)
+#define GLM_ARCH_SSE41_BIT (0x00000020)
+#define GLM_ARCH_SSE42_BIT (0x00000040)
+#define GLM_ARCH_AVX_BIT (0x00000080)
+#define GLM_ARCH_AVX2_BIT (0x00000100)
+
+#define GLM_ARCH_UNKNOWN (0)
+#define GLM_ARCH_X86 (GLM_ARCH_X86_BIT)
+#define GLM_ARCH_SSE (GLM_ARCH_SSE_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_X86)
+#define GLM_ARCH_SSE2 (GLM_ARCH_SSE2_BIT | GLM_ARCH_SSE)
+#define GLM_ARCH_SSE3 (GLM_ARCH_SSE3_BIT | GLM_ARCH_SSE2)
+#define GLM_ARCH_SSSE3 (GLM_ARCH_SSSE3_BIT | GLM_ARCH_SSE3)
+#define GLM_ARCH_SSE41 (GLM_ARCH_SSE41_BIT | GLM_ARCH_SSSE3)
+#define GLM_ARCH_SSE42 (GLM_ARCH_SSE42_BIT | GLM_ARCH_SSE41)
+#define GLM_ARCH_AVX (GLM_ARCH_AVX_BIT | GLM_ARCH_SSE42)
+#define GLM_ARCH_AVX2 (GLM_ARCH_AVX2_BIT | GLM_ARCH_AVX)
+#define GLM_ARCH_ARM (GLM_ARCH_ARM_BIT)
+#define GLM_ARCH_ARMV8 (GLM_ARCH_NEON_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_ARM | GLM_ARCH_ARMV8_BIT)
+#define GLM_ARCH_NEON (GLM_ARCH_NEON_BIT | GLM_ARCH_SIMD_BIT | GLM_ARCH_ARM)
+#define GLM_ARCH_MIPS (GLM_ARCH_MIPS_BIT)
+#define GLM_ARCH_PPC (GLM_ARCH_PPC_BIT)
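+
+// Each x86 level above is a strict superset of the previous one, so a single
+// bit test means "this feature or newer": e.g. (GLM_ARCH & GLM_ARCH_SSE3_BIT)
+// is non-zero for GLM_ARCH_SSE3, SSSE3, SSE41, SSE42, AVX and AVX2.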
+
+#if defined(GLM_FORCE_ARCH_UNKNOWN) || defined(GLM_FORCE_PURE)
+# define GLM_ARCH GLM_ARCH_UNKNOWN
+#elif defined(GLM_FORCE_NEON)
+# if __ARM_ARCH >= 8
+# define GLM_ARCH (GLM_ARCH_ARMV8)
+# else
+# define GLM_ARCH (GLM_ARCH_NEON)
+# endif
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_AVX2)
+# define GLM_ARCH (GLM_ARCH_AVX2)
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_AVX)
+# define GLM_ARCH (GLM_ARCH_AVX)
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_SSE42)
+# define GLM_ARCH (GLM_ARCH_SSE42)
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_SSE41)
+# define GLM_ARCH (GLM_ARCH_SSE41)
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_SSSE3)
+# define GLM_ARCH (GLM_ARCH_SSSE3)
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_SSE3)
+# define GLM_ARCH (GLM_ARCH_SSE3)
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_SSE2)
+# define GLM_ARCH (GLM_ARCH_SSE2)
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_SSE)
+# define GLM_ARCH (GLM_ARCH_SSE)
+# define GLM_FORCE_INTRINSICS
+#elif defined(GLM_FORCE_INTRINSICS) && !defined(GLM_FORCE_XYZW_ONLY)
+# if defined(__AVX2__)
+# define GLM_ARCH (GLM_ARCH_AVX2)
+# elif defined(__AVX__)
+# define GLM_ARCH (GLM_ARCH_AVX)
+# elif defined(__SSE4_2__)
+# define GLM_ARCH (GLM_ARCH_SSE42)
+# elif defined(__SSE4_1__)
+# define GLM_ARCH (GLM_ARCH_SSE41)
+# elif defined(__SSSE3__)
+# define GLM_ARCH (GLM_ARCH_SSSE3)
+# elif defined(__SSE3__)
+# define GLM_ARCH (GLM_ARCH_SSE3)
+# elif defined(__SSE2__) || defined(__x86_64__) || defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2)
+# define GLM_ARCH (GLM_ARCH_SSE2)
+# elif defined(__i386__)
+# define GLM_ARCH (GLM_ARCH_X86)
+# elif defined(__ARM_ARCH) && (__ARM_ARCH >= 8)
+# define GLM_ARCH (GLM_ARCH_ARMV8)
+# elif defined(__ARM_NEON)
+# define GLM_ARCH (GLM_ARCH_ARM | GLM_ARCH_NEON)
+# elif defined(__arm__ ) || defined(_M_ARM)
+# define GLM_ARCH (GLM_ARCH_ARM)
+# elif defined(__mips__ )
+# define GLM_ARCH (GLM_ARCH_MIPS)
+# elif defined(__powerpc__ ) || defined(_M_PPC)
+# define GLM_ARCH (GLM_ARCH_PPC)
+# else
+# define GLM_ARCH (GLM_ARCH_UNKNOWN)
+# endif
+#else
+# if defined(__x86_64__) || defined(_M_X64) || defined(_M_IX86) || defined(__i386__)
+# define GLM_ARCH (GLM_ARCH_X86)
+# elif defined(__arm__) || defined(_M_ARM)
+# define GLM_ARCH (GLM_ARCH_ARM)
+# elif defined(__powerpc__) || defined(_M_PPC)
+# define GLM_ARCH (GLM_ARCH_PPC)
+# elif defined(__mips__)
+# define GLM_ARCH (GLM_ARCH_MIPS)
+# else
+# define GLM_ARCH (GLM_ARCH_UNKNOWN)
+# endif
+#endif
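+
+// For example, building with -DGLM_FORCE_SSE42 (or defining GLM_FORCE_SSE42
+// before including GLM) selects GLM_ARCH_SSE42 and implies GLM_FORCE_INTRINSICS,
+// while GLM_FORCE_PURE takes precedence over all of these and yields
+// GLM_ARCH_UNKNOWN.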
+
+#if GLM_ARCH & GLM_ARCH_AVX2_BIT
+# include <immintrin.h>
+#elif GLM_ARCH & GLM_ARCH_AVX_BIT
+# include <immintrin.h>
+#elif GLM_ARCH & GLM_ARCH_SSE42_BIT
+# if GLM_COMPILER & GLM_COMPILER_CLANG
+# include <popcntintrin.h>
+# endif
+# include <nmmintrin.h>
+#elif GLM_ARCH & GLM_ARCH_SSE41_BIT
+# include <smmintrin.h>
+#elif GLM_ARCH & GLM_ARCH_SSSE3_BIT
+# include <tmmintrin.h>
+#elif GLM_ARCH & GLM_ARCH_SSE3_BIT
+# include <pmmintrin.h>
+#elif GLM_ARCH & GLM_ARCH_SSE2_BIT
+# include <emmintrin.h>
+#elif GLM_ARCH & GLM_ARCH_NEON_BIT
+# include "neon.h"
+#endif//GLM_ARCH
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+ typedef __m128 glm_f32vec4;
+ typedef __m128i glm_i32vec4;
+ typedef __m128i glm_u32vec4;
+ typedef __m128d glm_f64vec2;
+ typedef __m128i glm_i64vec2;
+ typedef __m128i glm_u64vec2;
+
+ typedef glm_f32vec4 glm_vec4;
+ typedef glm_i32vec4 glm_ivec4;
+ typedef glm_u32vec4 glm_uvec4;
+ typedef glm_f64vec2 glm_dvec2;
+#endif
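+
+// Example (sketch): the aliases above let raw SSE registers flow through
+// GLM-style signatures:
+// glm_vec4 v = _mm_set_ps(3.0f, 2.0f, 1.0f, 0.0f); // lanes (0, 1, 2, 3)
+// glm_ivec4 i = _mm_set1_epi32(42); // four lanes of 42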
+
+#if GLM_ARCH & GLM_ARCH_AVX_BIT
+ typedef __m256d glm_f64vec4;
+ typedef glm_f64vec4 glm_dvec4;
+#endif
+
+#if GLM_ARCH & GLM_ARCH_AVX2_BIT
+ typedef __m256i glm_i64vec4;
+ typedef __m256i glm_u64vec4;
+#endif
+
+#if GLM_ARCH & GLM_ARCH_NEON_BIT
+ typedef float32x4_t glm_f32vec4;
+ typedef int32x4_t glm_i32vec4;
+ typedef uint32x4_t glm_u32vec4;
+#endif
diff --git a/src/include/glm/simd/trigonometric.h b/src/include/glm/simd/trigonometric.h new file mode 100644 index 0000000..c1c9f9f --- /dev/null +++ b/src/include/glm/simd/trigonometric.h @@ -0,0 +1,9 @@ +/// @ref simd
+/// @file glm/simd/trigonometric.h
+
+#pragma once
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT
+
diff --git a/src/include/glm/simd/vector_relational.h b/src/include/glm/simd/vector_relational.h new file mode 100644 index 0000000..cb903f4 --- /dev/null +++ b/src/include/glm/simd/vector_relational.h @@ -0,0 +1,8 @@ +/// @ref simd
+/// @file glm/simd/vector_relational.h
+
+#pragma once
+
+#if GLM_ARCH & GLM_ARCH_SSE2_BIT
+
+#endif//GLM_ARCH & GLM_ARCH_SSE2_BIT