Vector Optimized Library of Kernels 3.3.0
Architecture-tuned implementations of math kernels
Loading...
Searching...
No Matches
volk_32f_x2_divide_32f.h
Go to the documentation of this file.
1/* -*- c++ -*- */
2/*
3 * Copyright 2012, 2014 Free Software Foundation, Inc.
4 *
5 * This file is part of VOLK
6 *
7 * SPDX-License-Identifier: LGPL-3.0-or-later
8 */
9
57
58#ifndef INCLUDED_volk_32f_x2_divide_32f_a_H
59#define INCLUDED_volk_32f_x2_divide_32f_a_H
60
61#include <inttypes.h>
62#include <stdio.h>
63
64#ifdef LV_HAVE_AVX512F
65#include <immintrin.h>
66
/* Element-wise divide (c[i] = a[i] / b[i]) using AVX-512; all pointers
 * must be 64-byte aligned (aligned load/store variant). */
static inline void volk_32f_x2_divide_32f_a_avx512f(float* cVector,
                                                    const float* aVector,
                                                    const float* bVector,
                                                    unsigned int num_points)
{
    const float* aPtr = aVector;
    const float* bPtr = bVector;
    float* cPtr = cVector;

    /* Vector body: 16 floats per iteration. */
    const unsigned int vecIters = num_points / 16;
    for (unsigned int i = 0; i < vecIters; i++) {
        const __m512 numerator = _mm512_load_ps(aPtr);
        const __m512 denominator = _mm512_load_ps(bPtr);
        _mm512_store_ps(cPtr, _mm512_div_ps(numerator, denominator));
        aPtr += 16;
        bPtr += 16;
        cPtr += 16;
    }

    /* Scalar tail: remaining num_points % 16 elements. */
    for (unsigned int i = vecIters * 16; i < num_points; i++) {
        *cPtr++ = (*aPtr++) / (*bPtr++);
    }
}
98#endif /* LV_HAVE_AVX512F */
99
100
101#ifdef LV_HAVE_AVX
102#include <immintrin.h>
103
/* Element-wise divide (c[i] = a[i] / b[i]) using AVX; all pointers must
 * be 32-byte aligned (aligned load/store variant). */
static inline void volk_32f_x2_divide_32f_a_avx(float* cVector,
                                                const float* aVector,
                                                const float* bVector,
                                                unsigned int num_points)
{
    const float* aPtr = aVector;
    const float* bPtr = bVector;
    float* cPtr = cVector;

    /* Vector body: 8 floats per iteration. */
    const unsigned int vecIters = num_points / 8;
    for (unsigned int i = 0; i < vecIters; i++) {
        const __m256 numerator = _mm256_load_ps(aPtr);
        const __m256 denominator = _mm256_load_ps(bPtr);
        _mm256_store_ps(cPtr, _mm256_div_ps(numerator, denominator));
        aPtr += 8;
        bPtr += 8;
        cPtr += 8;
    }

    /* Scalar tail: remaining num_points % 8 elements. */
    for (unsigned int i = vecIters * 8; i < num_points; i++) {
        *cPtr++ = (*aPtr++) / (*bPtr++);
    }
}
135#endif /* LV_HAVE_AVX */
136
137
138#ifdef LV_HAVE_SSE
139#include <xmmintrin.h>
140
/* Element-wise divide (c[i] = a[i] / b[i]) using SSE; all pointers must
 * be 16-byte aligned (aligned load/store variant). */
static inline void volk_32f_x2_divide_32f_a_sse(float* cVector,
                                                const float* aVector,
                                                const float* bVector,
                                                unsigned int num_points)
{
    const float* aPtr = aVector;
    const float* bPtr = bVector;
    float* cPtr = cVector;

    /* Vector body: 4 floats per iteration. */
    const unsigned int vecIters = num_points / 4;
    for (unsigned int i = 0; i < vecIters; i++) {
        const __m128 numerator = _mm_load_ps(aPtr);
        const __m128 denominator = _mm_load_ps(bPtr);
        _mm_store_ps(cPtr, _mm_div_ps(numerator, denominator));
        aPtr += 4;
        bPtr += 4;
        cPtr += 4;
    }

    /* Scalar tail: remaining num_points % 4 elements. */
    for (unsigned int i = vecIters * 4; i < num_points; i++) {
        *cPtr++ = (*aPtr++) / (*bPtr++);
    }
}
172#endif /* LV_HAVE_SSE */
173
174
175#ifdef LV_HAVE_NEON
176#include <arm_neon.h>
177
/* Element-wise divide (c[i] = a[i] / b[i]) for AArch32 NEON.  NEON (pre-v8)
 * has no float divide instruction, so 1/b is seeded with vrecpeq_f32 and
 * refined with two Newton-Raphson steps (vrecpsq_f32), then multiplied by a. */
static inline void volk_32f_x2_divide_32f_neon(float* cVector,
                                               const float* aVector,
                                               const float* bVector,
                                               unsigned int num_points)
{
    float* cPtr = cVector;
    const float* aPtr = aVector;
    const float* bPtr = bVector;

    float32x4x4_t aVal, bVal, bInv, cVal;

    /* Each iteration processes 16 floats (four NEON quads); the counter was
     * previously misnamed "eighthPoints". */
    const unsigned int sixteenthPoints = num_points / 16;
    unsigned int number = 0;
    for (; number < sixteenthPoints; number++) {
        aVal = vld4q_f32(aPtr);
        aPtr += 16;
        bVal = vld4q_f32(bPtr);
        bPtr += 16;

        __VOLK_PREFETCH(aPtr + 16);
        __VOLK_PREFETCH(bPtr + 16);

        /* Lane 0: reciprocal estimate + two refinement steps, then multiply. */
        bInv.val[0] = vrecpeq_f32(bVal.val[0]);
        bInv.val[0] = vmulq_f32(bInv.val[0], vrecpsq_f32(bInv.val[0], bVal.val[0]));
        bInv.val[0] = vmulq_f32(bInv.val[0], vrecpsq_f32(bInv.val[0], bVal.val[0]));
        cVal.val[0] = vmulq_f32(aVal.val[0], bInv.val[0]);

        /* Lane 1. */
        bInv.val[1] = vrecpeq_f32(bVal.val[1]);
        bInv.val[1] = vmulq_f32(bInv.val[1], vrecpsq_f32(bInv.val[1], bVal.val[1]));
        bInv.val[1] = vmulq_f32(bInv.val[1], vrecpsq_f32(bInv.val[1], bVal.val[1]));
        cVal.val[1] = vmulq_f32(aVal.val[1], bInv.val[1]);

        /* Lane 2. */
        bInv.val[2] = vrecpeq_f32(bVal.val[2]);
        bInv.val[2] = vmulq_f32(bInv.val[2], vrecpsq_f32(bInv.val[2], bVal.val[2]));
        bInv.val[2] = vmulq_f32(bInv.val[2], vrecpsq_f32(bInv.val[2], bVal.val[2]));
        cVal.val[2] = vmulq_f32(aVal.val[2], bInv.val[2]);

        /* Lane 3. */
        bInv.val[3] = vrecpeq_f32(bVal.val[3]);
        bInv.val[3] = vmulq_f32(bInv.val[3], vrecpsq_f32(bInv.val[3], bVal.val[3]));
        bInv.val[3] = vmulq_f32(bInv.val[3], vrecpsq_f32(bInv.val[3], bVal.val[3]));
        cVal.val[3] = vmulq_f32(aVal.val[3], bInv.val[3]);

        vst4q_f32(cPtr, cVal);
        cPtr += 16;
    }

    /* Scalar tail: exact divide for the remaining num_points % 16 elements. */
    for (number = sixteenthPoints * 16; number < num_points; number++) {
        *cPtr++ = (*aPtr++) / (*bPtr++);
    }
}
228
229#endif /* LV_HAVE_NEON */
230
231#ifdef LV_HAVE_NEONV8
232#include <arm_neon.h>
233
/* Element-wise divide (c[i] = a[i] / b[i]) for ARMv8, which provides a
 * native float divide instruction (vdivq_f32). */
static inline void volk_32f_x2_divide_32f_neonv8(float* cVector,
                                                 const float* aVector,
                                                 const float* bVector,
                                                 unsigned int num_points)
{
    const float* aPtr = aVector;
    const float* bPtr = bVector;
    float* cPtr = cVector;

    /* Two quads (8 floats) per iteration. */
    const unsigned int vecIters = num_points / 8;
    for (unsigned int i = 0; i < vecIters; i++) {
        const float32x4_t numLo = vld1q_f32(aPtr);
        const float32x4_t numHi = vld1q_f32(aPtr + 4);
        const float32x4_t denLo = vld1q_f32(bPtr);
        const float32x4_t denHi = vld1q_f32(bPtr + 4);
        __VOLK_PREFETCH(aPtr + 16);
        __VOLK_PREFETCH(bPtr + 16);

        vst1q_f32(cPtr, vdivq_f32(numLo, denLo));
        vst1q_f32(cPtr + 4, vdivq_f32(numHi, denHi));

        aPtr += 8;
        bPtr += 8;
        cPtr += 8;
    }

    /* Scalar tail: remaining num_points % 8 elements. */
    for (unsigned int i = vecIters * 8; i < num_points; i++) {
        *cPtr++ = (*aPtr++) / (*bPtr++);
    }
}
266#endif /* LV_HAVE_NEONV8 */
267
268
269#ifdef LV_HAVE_GENERIC
270
/* Portable reference implementation: c[i] = a[i] / b[i] for every element. */
static inline void volk_32f_x2_divide_32f_generic(float* cVector,
                                                  const float* aVector,
                                                  const float* bVector,
                                                  unsigned int num_points)
{
    for (unsigned int i = 0; i < num_points; i++) {
        cVector[i] = aVector[i] / bVector[i];
    }
}
285#endif /* LV_HAVE_GENERIC */
286
287
288#ifdef LV_HAVE_ORC
289
/* ORC back-end kernel, generated from the .orc source and linked in by the
 * build system; not defined in this header. */
extern void volk_32f_x2_divide_32f_a_orc_impl(float* cVector,
                                              const float* aVector,
                                              const float* bVector,
                                              int num_points);

/* Unaligned entry point that forwards directly to the ORC kernel.
 * NOTE(review): num_points is implicitly narrowed from unsigned int to int
 * at this call -- presumably always < INT_MAX; confirm against callers. */
static inline void volk_32f_x2_divide_32f_u_orc(float* cVector,
                                                const float* aVector,
                                                const float* bVector,
                                                unsigned int num_points)
{
    volk_32f_x2_divide_32f_a_orc_impl(cVector, aVector, bVector, num_points);
}
302#endif /* LV_HAVE_ORC */
303
304
305#endif /* INCLUDED_volk_32f_x2_divide_32f_a_H */
306
307
308#ifndef INCLUDED_volk_32f_x2_divide_32f_u_H
309#define INCLUDED_volk_32f_x2_divide_32f_u_H
310
311#include <inttypes.h>
312#include <stdio.h>
313
314#ifdef LV_HAVE_AVX512F
315#include <immintrin.h>
316
/* Element-wise divide (c[i] = a[i] / b[i]) using AVX-512 with unaligned
 * loads/stores; no alignment requirement on the pointers. */
static inline void volk_32f_x2_divide_32f_u_avx512f(float* cVector,
                                                    const float* aVector,
                                                    const float* bVector,
                                                    unsigned int num_points)
{
    const float* aPtr = aVector;
    const float* bPtr = bVector;
    float* cPtr = cVector;

    /* Vector body: 16 floats per iteration. */
    const unsigned int vecIters = num_points / 16;
    for (unsigned int i = 0; i < vecIters; i++) {
        const __m512 numerator = _mm512_loadu_ps(aPtr);
        const __m512 denominator = _mm512_loadu_ps(bPtr);
        _mm512_storeu_ps(cPtr, _mm512_div_ps(numerator, denominator));
        aPtr += 16;
        bPtr += 16;
        cPtr += 16;
    }

    /* Scalar tail: remaining num_points % 16 elements. */
    for (unsigned int i = vecIters * 16; i < num_points; i++) {
        *cPtr++ = (*aPtr++) / (*bPtr++);
    }
}
348#endif /* LV_HAVE_AVX512F */
349
350
351#ifdef LV_HAVE_AVX
352#include <immintrin.h>
353
/* Element-wise divide (c[i] = a[i] / b[i]) using AVX with unaligned
 * loads/stores; no alignment requirement on the pointers. */
static inline void volk_32f_x2_divide_32f_u_avx(float* cVector,
                                                const float* aVector,
                                                const float* bVector,
                                                unsigned int num_points)
{
    const float* aPtr = aVector;
    const float* bPtr = bVector;
    float* cPtr = cVector;

    /* Vector body: 8 floats per iteration. */
    const unsigned int vecIters = num_points / 8;
    for (unsigned int i = 0; i < vecIters; i++) {
        const __m256 numerator = _mm256_loadu_ps(aPtr);
        const __m256 denominator = _mm256_loadu_ps(bPtr);
        _mm256_storeu_ps(cPtr, _mm256_div_ps(numerator, denominator));
        aPtr += 8;
        bPtr += 8;
        cPtr += 8;
    }

    /* Scalar tail: remaining num_points % 8 elements. */
    for (unsigned int i = vecIters * 8; i < num_points; i++) {
        *cPtr++ = (*aPtr++) / (*bPtr++);
    }
}
385#endif /* LV_HAVE_AVX */
386
387#ifdef LV_HAVE_RVV
388#include <riscv_vector.h>
389
/* Element-wise divide (c[i] = a[i] / b[i]) using the RISC-V vector
 * extension; vsetvl picks the strip length each pass, so no scalar tail
 * loop is needed. */
static inline void volk_32f_x2_divide_32f_rvv(float* cVector,
                                              const float* aVector,
                                              const float* bVector,
                                              unsigned int num_points)
{
    size_t remaining = num_points;
    while (remaining > 0) {
        const size_t vl = __riscv_vsetvl_e32m8(remaining);
        const vfloat32m8_t numerator = __riscv_vle32_v_f32m8(aVector, vl);
        const vfloat32m8_t denominator = __riscv_vle32_v_f32m8(bVector, vl);
        __riscv_vse32(cVector, __riscv_vfdiv(numerator, denominator, vl), vl);
        aVector += vl;
        bVector += vl;
        cVector += vl;
        remaining -= vl;
    }
}
403#endif /*LV_HAVE_RVV*/
404
405#endif /* INCLUDED_volk_32f_x2_divide_32f_u_H */