#ifndef INCLUDED_volk_16i_x5_add_quad_16i_x4_a_H
#define INCLUDED_volk_16i_x5_add_quad_16i_x4_a_H
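
/*
 * Descriptive note (added for clarity): both kernels below add the common
 * input vector src0 to each of src1..src4 element-wise and write the four
 * sums to target0..target3:
 *
 *   target0[i] = src0[i] + src1[i]
 *   target1[i] = src0[i] + src2[i]
 *   target2[i] = src0[i] + src3[i]
 *   target3[i] = src0[i] + src4[i]
 *
 * num_bytes is the length of each buffer in bytes, so num_bytes >> 1 int16
 * elements are processed.
 */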

#ifdef LV_HAVE_SSE2

#include <emmintrin.h>

static inline void volk_16i_x5_add_quad_16i_x4_a_sse2(short* target0,
                                                       short* target1,
                                                       short* target2,
                                                       short* target3,
                                                       short* src0,
                                                       short* src1,
                                                       short* src2,
                                                       short* src3,
                                                       short* src4,
                                                       unsigned int num_bytes)
{
    __m128i xmm0, xmm1, xmm2, xmm3, xmm4;

    __m128i *p_target0, *p_target1, *p_target2, *p_target3;
    __m128i *p_src0, *p_src1, *p_src2, *p_src3, *p_src4;

    p_target0 = (__m128i*)target0;
    p_target1 = (__m128i*)target1;
    p_target2 = (__m128i*)target2;
    p_target3 = (__m128i*)target3;

    p_src0 = (__m128i*)src0;
    p_src1 = (__m128i*)src1;
    p_src2 = (__m128i*)src2;
    p_src3 = (__m128i*)src3;
    p_src4 = (__m128i*)src4;
    int i = 0;
    int bound = (num_bytes >> 4);
    int leftovers = (num_bytes >> 1) & 7;
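
    /*
     * bound full 16-byte (8 x int16) blocks are handled by the vector loop
     * below; the remaining "leftovers" int16 elements are finished off by
     * the scalar tail loop.
     */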

    for (; i < bound; ++i) {
        xmm0 = _mm_load_si128(p_src0);
        xmm1 = _mm_load_si128(p_src1);
        xmm2 = _mm_load_si128(p_src2);
        xmm3 = _mm_load_si128(p_src3);
        xmm4 = _mm_load_si128(p_src4);

        xmm1 = _mm_add_epi16(xmm0, xmm1);
        xmm2 = _mm_add_epi16(xmm0, xmm2);
        xmm3 = _mm_add_epi16(xmm0, xmm3);
        xmm4 = _mm_add_epi16(xmm0, xmm4);

        _mm_store_si128(p_target0, xmm1);
        _mm_store_si128(p_target1, xmm2);
        _mm_store_si128(p_target2, xmm3);
        _mm_store_si128(p_target3, xmm4);

        /* Advance all pointers to the next block of 8 int16 elements. */
        p_src0++; p_src1++; p_src2++; p_src3++; p_src4++;
        p_target0++; p_target1++; p_target2++; p_target3++;
    }

    /* Scalar tail loop for the leftover elements. */
    for (i = bound * 8; i < (bound * 8) + leftovers; ++i) {
        target0[i] = src0[i] + src1[i];
        target1[i] = src0[i] + src2[i];
        target2[i] = src0[i] + src3[i];
        target3[i] = src0[i] + src4[i];
    }
}
#endif /*LV_HAVE_SSE2*/
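
/*
 * Usage sketch (illustrative, not part of the original kernel): buffer sizes
 * and the GCC alignment attribute below are assumptions for the example.
 * The aligned ("_a") SSE2 path loads with _mm_load_si128, so all buffers
 * must be 16-byte aligned, and num_bytes counts bytes, not elements.
 *
 *   short __attribute__((aligned(16))) s0[16], s1[16], s2[16], s3[16], s4[16];
 *   short __attribute__((aligned(16))) t0[16], t1[16], t2[16], t3[16];
 *   // ... fill s0..s4 with data ...
 *   volk_16i_x5_add_quad_16i_x4_a_sse2(t0, t1, t2, t3,
 *                                      s0, s1, s2, s3, s4,
 *                                      16 * sizeof(short));
 *   // Afterwards t0[i] == s0[i] + s1[i], t1[i] == s0[i] + s2[i], and so on.
 */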

#ifdef LV_HAVE_GENERIC

static inline void volk_16i_x5_add_quad_16i_x4_a_generic(short* target0,
                                                          short* target1,
                                                          short* target2,
                                                          short* target3,
                                                          short* src0,
                                                          short* src1,
                                                          short* src2,
                                                          short* src3,
                                                          short* src4,
                                                          unsigned int num_bytes)
{
    int i = 0;
    int bound = num_bytes >> 1;  /* number of int16 elements per buffer */

    for (i = 0; i < bound; ++i) {
        target0[i] = src0[i] + src1[i];
        target1[i] = src0[i] + src2[i];
        target2[i] = src0[i] + src3[i];
        target3[i] = src0[i] + src4[i];