return a + t*(b-a);
}
/* Linear interpolation between a and b by factor t (t=0 -> a, t=1 -> b). */
static inline double vg_lerp( double a, double b, double t )
{
   double delta = b - a;
   return a + t*delta;
}
+
+/* correctly lerp around circular period -pi -> pi */
+static float vg_alerpf( float a, float b, float t )
+{
+ float d = fmodf( b-a, VG_TAUf ),
+ s = fmodf( 2.0f*d, VG_TAUf ) - d;
+ return a + s*t;
+}
+
/* Component-wise linear interpolation of 3-vectors: d = a + t*(b-a).
 * NOTE(review): the original body only wrote d[0] and contained a stray
 * v3_negate( b[3], b[3] ) — b[3] is out of bounds for a v3f, which looks
 * like diff/merge damage. Restored the full three-component lerp. */
static inline void v3_lerp( v3f a, v3f b, float t, v3f d )
{
   d[0] = a[0] + t*(b[0]-a[0]);
   d[1] = a[1] + t*(b[1]-a[1]);
   d[2] = a[2] + t*(b[2]-a[2]);
}
/* Full inverse of an affine 4x3 transform (rows 0-2: linear part,
 * row 3: translation; the implicit 4th column is (0,0,0,1)).
 * Computed as the adjugate of the corresponding 4x4 matrix, scaled by
 * the reciprocal determinant of the 3x3 linear part.
 * NOTE: no guard against a singular matrix — det == 0 produces inf/nan. */
static void m4x3_invert_full( m4x3f src, m4x3f dst )
{
   float t2, t4, t5,
         det,
         a = src[0][0], b = src[0][1], c = src[0][2],
         e = src[1][0], f = src[1][1], g = src[1][2],
         i = src[2][0], j = src[2][1], k = src[2][2],
         m = src[3][0], n = src[3][1], o = src[3][2];

   /* 2x2 sub-determinants shared by the first two output columns */
   t2 = j*o - n*k;
   t4 = i*o - m*k;
   t5 = i*n - m*j;

   dst[0][0] = f*k - g*j;
   dst[1][0] =-(e*k - g*i);
   dst[2][0] = e*j - f*i;
   dst[3][0] =-(e*t2 - f*t4 + g*t5);

   dst[0][1] =-(b*k - c*j);
   dst[1][1] = a*k - c*i;
   dst[2][1] =-(a*j - b*i);
   dst[3][1] = a*t2 - b*t4 + c*t5;

   /* temporaries re-used for the cofactors of the last output column */
   t2 = f*o - n*g;
   t4 = e*o - m*g;
   t5 = e*n - m*f;

   dst[0][2] = b*g - c*f ;
   dst[1][2] =-(a*g - c*e );
   dst[2][2] = a*f - b*e ;
   dst[3][2] =-(a*t2 - b*t4 + c * t5);

   /* determinant expanded along the first column of cofactors already
      stored in dst (i.e. det of the 3x3 linear part), then scale all rows */
   det = 1.0f / (a * dst[0][0] + b * dst[1][0] + c * dst[2][0]);
   v3_muls( dst[0], det, dst[0] );
   v3_muls( dst[1], det, dst[1] );
   v3_muls( dst[2], det, dst[2] );
   v3_muls( dst[3], det, dst[3] );
}
+
static inline void m4x3_copy( m4x3f a, m4x3f b )
{
v3_copy( a[0], b[0] );
/* Normalized quaternion lerp. When a and b lie in opposing hemispheres
 * (negative dot product), b is negated first so the interpolation takes
 * the shortest path; the result is renormalized. */
static inline void q_nlerp( v4f a, v4f b, float t, v4f d )
{
   if( v4_dot( a, b ) >= 0.0f )
   {
      v4_lerp( a, b, t, d );
   }
   else
   {
      /* flip b (into d) before lerping to stay on the short arc */
      v4_muls( b, -1.0f, d );
      v4_lerp( a, d, t, d );
   }
   q_normalize( d );
}