42#ifndef INCLUDED_volk_32fc_32f_multiply_32fc_a_H
43#define INCLUDED_volk_32fc_32f_multiply_32fc_a_H
54 unsigned int num_points)
56 unsigned int number = 0;
57 const unsigned int eighthPoints = num_points / 8;
61 const float* bPtr = bVector;
63 __m256 aVal1, aVal2, bVal, bVal1, bVal2, cVal1, cVal2;
65 __m256i permute_mask = _mm256_set_epi32(3, 3, 2, 2, 1, 1, 0, 0);
67 for (; number < eighthPoints; number++) {
69 aVal1 = _mm256_load_ps((
float*)aPtr);
72 aVal2 = _mm256_load_ps((
float*)aPtr);
75 bVal = _mm256_load_ps(bPtr);
78 bVal1 = _mm256_permute2f128_ps(bVal, bVal, 0x00);
79 bVal2 = _mm256_permute2f128_ps(bVal, bVal, 0x11);
81 bVal1 = _mm256_permutevar_ps(bVal1, permute_mask);
82 bVal2 = _mm256_permutevar_ps(bVal2, permute_mask);
84 cVal1 = _mm256_mul_ps(aVal1, bVal1);
85 cVal2 = _mm256_mul_ps(aVal2, bVal2);
87 _mm256_store_ps((
float*)cPtr,
91 _mm256_store_ps((
float*)cPtr,
96 number = eighthPoints * 8;
97 for (; number < num_points; ++number) {
98 *cPtr++ = (*aPtr++) * (*bPtr++);
105#include <xmmintrin.h>
109 const float* bVector,
110 unsigned int num_points)
112 unsigned int number = 0;
113 const unsigned int quarterPoints = num_points / 4;
117 const float* bPtr = bVector;
119 __m128 aVal1, aVal2, bVal, bVal1, bVal2, cVal;
120 for (; number < quarterPoints; number++) {
122 aVal1 = _mm_load_ps((
const float*)aPtr);
125 aVal2 = _mm_load_ps((
const float*)aPtr);
128 bVal = _mm_load_ps(bPtr);
131 bVal1 = _mm_shuffle_ps(bVal, bVal, _MM_SHUFFLE(1, 1, 0, 0));
132 bVal2 = _mm_shuffle_ps(bVal, bVal, _MM_SHUFFLE(3, 3, 2, 2));
134 cVal = _mm_mul_ps(aVal1, bVal1);
136 _mm_store_ps((
float*)cPtr, cVal);
139 cVal = _mm_mul_ps(aVal2, bVal2);
141 _mm_store_ps((
float*)cPtr, cVal);
146 number = quarterPoints * 4;
147 for (; number < num_points; number++) {
148 *cPtr++ = (*aPtr++) * (*bPtr);
155#ifdef LV_HAVE_GENERIC
159 const float* bVector,
160 unsigned int num_points)
164 const float* bPtr = bVector;
165 unsigned int number = 0;
167 for (number = 0; number < num_points; number++) {
168 *cPtr++ = (*aPtr++) * (*bPtr++);
/*
 * NEON kernel: element-wise product of a complex-float vector and a real
 * float vector: cVector[i] = aVector[i] * bVector[i].
 *
 * vld2q_f32 de-interleaves 4 complex values into separate re/im
 * registers (val[0] = reals, val[1] = imags); both are scaled by the
 * same 4 taps and re-interleaved on store with vst2q_f32.
 *
 * NOTE(review): reconstructed from a garbled extract — the aPtr/bPtr/cPtr
 * advances were missing and restored to the canonical VOLK form; verify
 * against upstream.
 */
static inline void volk_32fc_32f_multiply_32fc_neon(lv_32fc_t* cVector,
                                                    const lv_32fc_t* aVector,
                                                    const float* bVector,
                                                    unsigned int num_points)
{
    lv_32fc_t* cPtr = cVector;
    const lv_32fc_t* aPtr = aVector;
    const float* bPtr = bVector;
    unsigned int number = 0;
    unsigned int quarter_points = num_points / 4; /* 4 complex per iteration */

    float32x4x2_t inputVector, outputVector;
    float32x4_t tapsVector;
    for (number = 0; number < quarter_points; number++) {
        inputVector = vld2q_f32((float*)aPtr); /* split re/im */
        tapsVector = vld1q_f32(bPtr);          /* 4 real taps */

        outputVector.val[0] = vmulq_f32(inputVector.val[0], tapsVector); /* reals */
        outputVector.val[1] = vmulq_f32(inputVector.val[1], tapsVector); /* imags */

        vst2q_f32((float*)cPtr, outputVector); /* re-interleave */
        aPtr += 4;
        bPtr += 4;
        cPtr += 4;
    }

    /* scalar tail: remaining num_points % 4 elements */
    for (number = quarter_points * 4; number < num_points; number++) {
        *cPtr++ = (*aPtr++) * (*bPtr++);
    }
}
/*
 * NEON (ARMv8-tuned) kernel: cVector[i] = aVector[i] * bVector[i].
 *
 * Processes 8 complex values per main-loop iteration with two
 * de-interleaving vld2q_f32 loads, then one 4-wide step, then a scalar
 * tail.  Same math as the plain NEON kernel, more unrolling.
 *
 * NOTE(review): reconstructed from a garbled extract — the loop headers,
 * the 4-wide branch's `c0` declaration, pointer advances and braces were
 * missing and restored to the canonical VOLK form; verify against
 * upstream.
 */
static inline void volk_32fc_32f_multiply_32fc_neonv8(lv_32fc_t* cVector,
                                                      const lv_32fc_t* aVector,
                                                      const float* bVector,
                                                      unsigned int num_points)
{
    unsigned int n = num_points;
    lv_32fc_t* c = cVector;
    const lv_32fc_t* a = aVector;
    const float* b = bVector;

    /* main loop: 8 complex values per iteration */
    while (n >= 8) {
        float32x4x2_t a0 = vld2q_f32((const float*)a);       /* re/im, elems 0-3 */
        float32x4x2_t a1 = vld2q_f32((const float*)(a + 4)); /* re/im, elems 4-7 */
        float32x4_t b0 = vld1q_f32(b);
        float32x4_t b1 = vld1q_f32(b + 4);

        float32x4x2_t c0, c1;
        c0.val[0] = vmulq_f32(a0.val[0], b0); /* scale reals */
        c0.val[1] = vmulq_f32(a0.val[1], b0); /* scale imags */
        c1.val[0] = vmulq_f32(a1.val[0], b1);
        c1.val[1] = vmulq_f32(a1.val[1], b1);

        vst2q_f32((float*)c, c0); /* re-interleave on store */
        vst2q_f32((float*)(c + 4), c1);

        a += 8;
        b += 8;
        c += 8;
        n -= 8;
    }

    /* one 4-wide step if at least 4 elements remain */
    if (n >= 4) {
        float32x4x2_t a0 = vld2q_f32((const float*)a);
        float32x4_t b0 = vld1q_f32(b);
        float32x4x2_t c0;
        c0.val[0] = vmulq_f32(a0.val[0], b0);
        c0.val[1] = vmulq_f32(a0.val[1], b0);
        vst2q_f32((float*)c, c0);
        a += 4;
        b += 4;
        c += 4;
        n -= 4;
    }

    /* scalar tail */
    while (n > 0) {
        *c++ = (*a++) * (*b++);
        n--;
    }
}
274extern void volk_32fc_32f_multiply_32fc_a_orc_impl(
lv_32fc_t* cVector,
276 const float* bVector,
279static inline void volk_32fc_32f_multiply_32fc_u_orc(
lv_32fc_t* cVector,
281 const float* bVector,
282 unsigned int num_points)
284 volk_32fc_32f_multiply_32fc_a_orc_impl(cVector, aVector, bVector, num_points);
#include <riscv_vector.h>

/*
 * RISC-V Vector kernel: cVector[i] = aVector[i] * bVector[i].
 *
 * The interleaved complex data is loaded as 2*vl raw floats (LMUL=8).
 * Each 32-bit real tap t is duplicated into both halves of a 64-bit
 * element via a widening trick: vwaddu(t,t) + t*0xFFFFFFFF
 * = 2t + t*(2^32-1) = t*2^32 + t, i.e. the 64-bit value (t,t).
 * Reinterpreted back to f32 lanes this lines up one copy of the tap with
 * the real part and one with the imaginary part, so a single vfmul does
 * the complex*real scaling.
 *
 * NOTE(review): reconstructed from a truncated extract — the closing of
 * the strip-mining loop and function was not visible; verify against
 * upstream.
 */
static inline void volk_32fc_32f_multiply_32fc_rvv(lv_32fc_t* cVector,
                                                   const lv_32fc_t* aVector,
                                                   const float* bVector,
                                                   unsigned int num_points)
{
    size_t n = num_points;
    /* strip-mine: vl complex elements per pass */
    for (size_t vl; n > 0; n -= vl, cVector += vl, aVector += vl, bVector += vl) {
        vl = __riscv_vsetvl_e32m4(n);
        /* vl complex = 2*vl floats, loaded interleaved (re,im,re,im,...) */
        vfloat32m8_t vc = __riscv_vle32_v_f32m8((const float*)aVector, vl * 2);
        vuint32m4_t v = __riscv_vle32_v_u32m4((const uint32_t*)bVector, vl);
        /* duplicate each tap into a (t,t) 64-bit pair, reinterpret as f32 */
        vfloat32m8_t vf = __riscv_vreinterpret_f32m8(__riscv_vreinterpret_u32m8(
            __riscv_vwmaccu(__riscv_vwaddu_vv(v, v, vl), 0xFFFFFFFF, v, vl)));
        __riscv_vse32((float*)cVector, __riscv_vfmul(vc, vf, vl * 2), vl * 2);
    }
}