Vector Optimized Library of Kernels 3.3.0
Architecture-tuned implementations of math kernels
Loading...
Searching...
No Matches
volk_32f_s32f_normalize.h
Go to the documentation of this file.
1/* -*- c++ -*- */
2/*
3 * Copyright 2012, 2014 Free Software Foundation, Inc.
4 *
5 * This file is part of VOLK
6 *
7 * SPDX-License-Identifier: LGPL-3.0-or-later
8 */
9
56
57#ifndef INCLUDED_volk_32f_s32f_normalize_a_H
58#define INCLUDED_volk_32f_s32f_normalize_a_H
59
60#include <inttypes.h>
61#include <stdio.h>
62
63#ifdef LV_HAVE_AVX
64#include <immintrin.h>
65
static inline void volk_32f_s32f_normalize_a_avx(float* vecBuffer,
                                                 const float scalar,
                                                 unsigned int num_points)
{
    /* Divide every element of vecBuffer by scalar, in place (aligned AVX path).
     *
     * vecBuffer  - 32-byte-aligned buffer of floats, modified in place
     * scalar     - divisor applied to every element
     * num_points - number of floats in vecBuffer
     */
    unsigned int number = 0;
    float* inputPtr = vecBuffer;

    /* Multiply by the reciprocal instead of dividing per element. Use a
     * single-precision literal so the reciprocal is computed in float,
     * consistent with the NEON kernels in this file. */
    const float invScalar = 1.0f / scalar;
    const __m256 vecScalar = _mm256_set1_ps(invScalar);

    const uint64_t eighthPoints = num_points / 8;
    for (; number < eighthPoints; number++) {
        __m256 input1 = _mm256_load_ps(inputPtr);
        input1 = _mm256_mul_ps(input1, vecScalar);
        _mm256_store_ps(inputPtr, input1);
        inputPtr += 8;
    }

    /* Scalar tail for the remaining (num_points % 8) elements. */
    number = eighthPoints * 8;
    for (; number < num_points; number++) {
        *inputPtr *= invScalar;
        inputPtr++;
    }
}
96#endif /* LV_HAVE_AVX */
97
98#ifdef LV_HAVE_SSE
99#include <xmmintrin.h>
100
static inline void volk_32f_s32f_normalize_a_sse(float* vecBuffer,
                                                 const float scalar,
                                                 unsigned int num_points)
{
    /* Divide every element of vecBuffer by scalar, in place (aligned SSE path).
     *
     * vecBuffer  - 16-byte-aligned buffer of floats, modified in place
     * scalar     - divisor applied to every element
     * num_points - number of floats in vecBuffer
     */
    unsigned int number = 0;
    float* inputPtr = vecBuffer;

    /* Multiply by the reciprocal instead of dividing per element. Use a
     * single-precision literal so the reciprocal is computed in float,
     * consistent with the NEON kernels in this file. */
    const float invScalar = 1.0f / scalar;
    const __m128 vecScalar = _mm_set_ps1(invScalar);

    const uint64_t quarterPoints = num_points / 4;
    for (; number < quarterPoints; number++) {
        __m128 input1 = _mm_load_ps(inputPtr);
        input1 = _mm_mul_ps(input1, vecScalar);
        _mm_store_ps(inputPtr, input1);
        inputPtr += 4;
    }

    /* Scalar tail for the remaining (num_points % 4) elements. */
    number = quarterPoints * 4;
    for (; number < num_points; number++) {
        *inputPtr *= invScalar;
        inputPtr++;
    }
}
131#endif /* LV_HAVE_SSE */
132
133#ifdef LV_HAVE_GENERIC
134
static inline void volk_32f_s32f_normalize_generic(float* vecBuffer,
                                                   const float scalar,
                                                   unsigned int num_points)
{
    /* Divide every element of vecBuffer by scalar, in place (portable path).
     *
     * vecBuffer  - buffer of floats, modified in place
     * scalar     - divisor applied to every element
     * num_points - number of floats in vecBuffer
     */
    unsigned int number = 0;
    float* inputPtr = vecBuffer;
    /* Single-precision reciprocal, consistent with the SIMD kernels. */
    const float invScalar = 1.0f / scalar;
    for (number = 0; number < num_points; number++) {
        *inputPtr *= invScalar;
        inputPtr++;
    }
}
147#endif /* LV_HAVE_GENERIC */
148
149#ifdef LV_HAVE_ORC
150
151extern void volk_32f_s32f_normalize_a_orc_impl(float* dst,
152 float* src,
153 const float scalar,
154 int num_points);
155static inline void volk_32f_s32f_normalize_u_orc(float* vecBuffer,
156 const float scalar,
157 unsigned int num_points)
158{
159 float invscalar = 1.0 / scalar;
160 volk_32f_s32f_normalize_a_orc_impl(vecBuffer, vecBuffer, invscalar, num_points);
161}
162#endif /* LV_HAVE_GENERIC */
163
164#endif /* INCLUDED_volk_32f_s32f_normalize_a_H */
165
166#ifndef INCLUDED_volk_32f_s32f_normalize_u_H
167#define INCLUDED_volk_32f_s32f_normalize_u_H
168
169#include <inttypes.h>
170#include <stdio.h>
171#ifdef LV_HAVE_AVX
172#include <immintrin.h>
173
static inline void volk_32f_s32f_normalize_u_avx(float* vecBuffer,
                                                 const float scalar,
                                                 unsigned int num_points)
{
    /* Divide every element of vecBuffer by scalar, in place (unaligned AVX
     * path — no alignment requirement on vecBuffer).
     *
     * vecBuffer  - buffer of floats, modified in place
     * scalar     - divisor applied to every element
     * num_points - number of floats in vecBuffer
     */
    unsigned int number = 0;
    float* inputPtr = vecBuffer;

    /* Multiply by the reciprocal instead of dividing per element. Use a
     * single-precision literal so the reciprocal is computed in float,
     * consistent with the NEON kernels in this file. */
    const float invScalar = 1.0f / scalar;
    const __m256 vecScalar = _mm256_set1_ps(invScalar);

    const uint64_t eighthPoints = num_points / 8;
    for (; number < eighthPoints; number++) {
        __m256 input1 = _mm256_loadu_ps(inputPtr);
        input1 = _mm256_mul_ps(input1, vecScalar);
        _mm256_storeu_ps(inputPtr, input1);
        inputPtr += 8;
    }

    /* Scalar tail for the remaining (num_points % 8) elements. */
    number = eighthPoints * 8;
    for (; number < num_points; number++) {
        *inputPtr *= invScalar;
        inputPtr++;
    }
}
204#endif /* LV_HAVE_AVX */
205
206#ifdef LV_HAVE_NEON
207#include <arm_neon.h>
208
static inline void volk_32f_s32f_normalize_neon(float* vecBuffer,
                                                const float scalar,
                                                unsigned int num_points)
{
    /* Divide every element of vecBuffer by scalar, in place (NEON path).
     * Multiplies by the reciprocal, four floats per vector iteration,
     * then finishes any remainder with scalar code. */
    const float invScalar = 1.0f / scalar;
    const float32x4_t vInv = vdupq_n_f32(invScalar);
    const unsigned int quarterPoints = num_points / 4;
    float* ptr = vecBuffer;
    unsigned int n;

    /* Vector body: 4 floats per pass. */
    for (n = 0; n < quarterPoints; n++, ptr += 4) {
        vst1q_f32(ptr, vmulq_f32(vld1q_f32(ptr), vInv));
    }

    /* Scalar tail for the remaining (num_points % 4) elements. */
    for (n = quarterPoints * 4; n < num_points; n++) {
        *ptr++ *= invScalar;
    }
}
231#endif /* LV_HAVE_NEON */
232
233#ifdef LV_HAVE_NEONV8
234#include <arm_neon.h>
235
static inline void volk_32f_s32f_normalize_neonv8(float* vecBuffer,
                                                  const float scalar,
                                                  unsigned int num_points)
{
    /* Divide every element of vecBuffer by scalar, in place (AArch64 NEON
     * path). Processes two 4-float vectors per iteration with a prefetch
     * of the next pair, then finishes any remainder with scalar code. */
    const float invScalar = 1.0f / scalar;
    const float32x4_t vInv = vdupq_n_f32(invScalar);
    const unsigned int eighthPoints = num_points / 8;
    float* ptr = vecBuffer;
    unsigned int n;

    for (n = 0; n < eighthPoints; n++) {
        float32x4_t lo = vld1q_f32(ptr);
        float32x4_t hi = vld1q_f32(ptr + 4);
        __VOLK_PREFETCH(ptr + 8);

        vst1q_f32(ptr, vmulq_f32(lo, vInv));
        vst1q_f32(ptr + 4, vmulq_f32(hi, vInv));
        ptr += 8;
    }

    /* Scalar tail for the remaining (num_points % 8) elements. */
    for (n = eighthPoints * 8; n < num_points; n++) {
        *ptr++ *= invScalar;
    }
}
264#endif /* LV_HAVE_NEONV8 */
265
266#ifdef LV_HAVE_RVV
267#include <riscv_vector.h>
268
static inline void
volk_32f_s32f_normalize_rvv(float* vecBuffer, const float scalar, unsigned int num_points)
{
    /* Divide every element of vecBuffer by scalar, in place (RISC-V Vector
     * path, strip-mined at LMUL=8). */

    /* Hoist the reciprocal out of the loop — the original recomputed
     * 1.0f / scalar on every strip-mine iteration; this also matches the
     * invScalar pattern used by every other kernel in this file. */
    const float invScalar = 1.0f / scalar;
    size_t n = num_points;
    for (size_t vl; n > 0; n -= vl, vecBuffer += vl) {
        vl = __riscv_vsetvl_e32m8(n);
        vfloat32m8_t v = __riscv_vle32_v_f32m8(vecBuffer, vl);
        __riscv_vse32(vecBuffer, __riscv_vfmul(v, invScalar, vl), vl);
    }
}
279#endif /*LV_HAVE_RVV*/
280
281#endif /* INCLUDED_volk_32f_s32f_normalize_u_H */