Vector Optimized Library of Kernels (VOLK) 2.5.1
Architecture-tuned implementations of math kernels

File: volk_16i_permute_and_scalar_add.h (generated documentation page)
1/* -*- c++ -*- */
2/*
3 * Copyright 2012, 2014 Free Software Foundation, Inc.
4 *
5 * This file is part of GNU Radio
6 *
7 * GNU Radio is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 3, or (at your option)
10 * any later version.
11 *
12 * GNU Radio is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with GNU Radio; see the file COPYING. If not, write to
19 * the Free Software Foundation, Inc., 51 Franklin Street,
20 * Boston, MA 02110-1301, USA.
21 */
22
59#ifndef INCLUDED_volk_16i_permute_and_scalar_add_a_H
60#define INCLUDED_volk_16i_permute_and_scalar_add_a_H
61
62#include <inttypes.h>
63#include <stdio.h>
64
65#ifdef LV_HAVE_SSE2
66
67#include <emmintrin.h>
68#include <xmmintrin.h>
69
/*!
 * SSE2 kernel.  For each sample i it computes:
 *
 *   target[i] = src0[permute_indexes[i]]
 *             + (cntl0[i] & scalars[0]) + (cntl1[i] & scalars[1])
 *             + (cntl2[i] & scalars[2]) + (cntl3[i] & scalars[3])
 *
 * i.e. a permuted gather from src0 plus four scalars, each gated by a
 * per-sample control mask (control words are meant to be all-ones to pass
 * the scalar through, all-zeros to block it).
 *
 * NOTE(review): this is the "_a" (aligned) variant — target, cntl0..cntl3
 * and scalars are accessed with aligned _mm_load_si128/_mm_store_si128, so
 * those buffers must be 16-byte aligned.  src0 and permute_indexes are only
 * read with scalar loads and need no special alignment.  scalars must hold
 * at least 4 entries; permute_indexes values must be valid indices into
 * src0 — TODO confirm against callers.
 */
static inline void volk_16i_permute_and_scalar_add_a_sse2(short* target,
                                                          short* src0,
                                                          short* permute_indexes,
                                                          short* cntl0,
                                                          short* cntl1,
                                                          short* cntl2,
                                                          short* cntl3,
                                                          short* scalars,
                                                          unsigned int num_points)
{

    /* Total payload in bytes (2 bytes per 16-bit sample). */
    const unsigned int num_bytes = num_points * 2;

    __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;

    __m128i *p_target, *p_cntl0, *p_cntl1, *p_cntl2, *p_cntl3, *p_scalars;

    short* p_permute_indexes = permute_indexes;

    p_target = (__m128i*)target;
    p_cntl0 = (__m128i*)cntl0;
    p_cntl1 = (__m128i*)cntl1;
    p_cntl2 = (__m128i*)cntl2;
    p_cntl3 = (__m128i*)cntl3;
    p_scalars = (__m128i*)scalars;

    int i = 0;

    /* bound  = number of full 8-sample (16-byte) SIMD iterations.
       leftovers = trailing samples (num_points % 8) handled scalar below. */
    int bound = (num_bytes >> 4);
    int leftovers = (num_bytes >> 1) & 7;

    /* Broadcast scalars[0..3] across all 8 lanes of xmm1..xmm4:
       shufflelo replicates the chosen 16-bit word through the low 64 bits,
       then shuffle_epi32(…, 0x00) replicates that low dword vector-wide. */
    xmm0 = _mm_load_si128(p_scalars);

    xmm1 = _mm_shufflelo_epi16(xmm0, 0);
    xmm2 = _mm_shufflelo_epi16(xmm0, 0x55);
    xmm3 = _mm_shufflelo_epi16(xmm0, 0xaa);
    xmm4 = _mm_shufflelo_epi16(xmm0, 0xff);

    xmm1 = _mm_shuffle_epi32(xmm1, 0x00);
    xmm2 = _mm_shuffle_epi32(xmm2, 0x00);
    xmm3 = _mm_shuffle_epi32(xmm3, 0x00);
    xmm4 = _mm_shuffle_epi32(xmm4, 0x00);


    for (; i < bound; ++i) {
        /* Gather 8 permuted samples.  Each sample is inserted into a
           DISTINCT lane of one of four zeroed registers; since every other
           lane is zero, the epi16 adds below merge them losslessly into a
           single vector (spreading inserts over 4 registers breaks the
           dependency chain between the insert ops). */
        xmm0 = _mm_setzero_si128();
        xmm5 = _mm_setzero_si128();
        xmm6 = _mm_setzero_si128();
        xmm7 = _mm_setzero_si128();

        xmm0 = _mm_insert_epi16(xmm0, src0[p_permute_indexes[0]], 0);
        xmm5 = _mm_insert_epi16(xmm5, src0[p_permute_indexes[1]], 1);
        xmm6 = _mm_insert_epi16(xmm6, src0[p_permute_indexes[2]], 2);
        xmm7 = _mm_insert_epi16(xmm7, src0[p_permute_indexes[3]], 3);
        xmm0 = _mm_insert_epi16(xmm0, src0[p_permute_indexes[4]], 4);
        xmm5 = _mm_insert_epi16(xmm5, src0[p_permute_indexes[5]], 5);
        xmm6 = _mm_insert_epi16(xmm6, src0[p_permute_indexes[6]], 6);
        xmm7 = _mm_insert_epi16(xmm7, src0[p_permute_indexes[7]], 7);

        /* Merge the four partially-filled registers: xmm0 now holds the
           8 gathered samples. */
        xmm0 = _mm_add_epi16(xmm0, xmm5);
        xmm6 = _mm_add_epi16(xmm6, xmm7);

        p_permute_indexes += 8;

        xmm0 = _mm_add_epi16(xmm0, xmm6);

        /* Mask each broadcast scalar with its control vector and
           accumulate.  Loads/adds/pointer bumps are interleaved by hand
           for instruction-level parallelism. */
        xmm5 = _mm_load_si128(p_cntl0);
        xmm6 = _mm_load_si128(p_cntl1);
        xmm7 = _mm_load_si128(p_cntl2);

        xmm5 = _mm_and_si128(xmm5, xmm1);
        xmm6 = _mm_and_si128(xmm6, xmm2);
        xmm7 = _mm_and_si128(xmm7, xmm3);

        xmm0 = _mm_add_epi16(xmm0, xmm5);

        xmm5 = _mm_load_si128(p_cntl3);

        xmm6 = _mm_add_epi16(xmm6, xmm7);

        p_cntl0 += 1;

        xmm5 = _mm_and_si128(xmm5, xmm4);

        xmm0 = _mm_add_epi16(xmm0, xmm6);

        p_cntl1 += 1;
        p_cntl2 += 1;

        xmm0 = _mm_add_epi16(xmm0, xmm5);

        p_cntl3 += 1;

        _mm_store_si128(p_target, xmm0);

        p_target += 1;
    }

    /* Scalar tail: the remaining num_points % 8 samples. */
    for (i = bound * 8; i < (bound * 8) + leftovers; ++i) {
        target[i] = src0[permute_indexes[i]] + (cntl0[i] & scalars[0]) +
                    (cntl1[i] & scalars[1]) + (cntl2[i] & scalars[2]) +
                    (cntl3[i] & scalars[3]);
    }
}
174#endif /*LV_HAVE_SSE2*/
175
176
177#ifdef LV_HAVE_GENERIC
/*!
 * Portable reference implementation.  For each sample k it computes:
 *
 *   target[k] = src0[permute_indexes[k]]
 *             + (cntl0[k] & scalars[0]) + (cntl1[k] & scalars[1])
 *             + (cntl2[k] & scalars[2]) + (cntl3[k] & scalars[3])
 *
 * i.e. a permuted gather from src0 plus four scalars, each gated by a
 * per-sample control mask.  scalars must hold at least 4 entries.
 */
static inline void volk_16i_permute_and_scalar_add_generic(short* target,
                                                           short* src0,
                                                           short* permute_indexes,
                                                           short* cntl0,
                                                           short* cntl1,
                                                           short* cntl2,
                                                           short* cntl3,
                                                           short* scalars,
                                                           unsigned int num_points)
{
    /* Mirror the SIMD variant's byte-based bookkeeping: 2 bytes per
       16-bit sample, then back to a sample count. */
    const unsigned int num_bytes = num_points * 2;
    const int sample_count = num_bytes >> 1;

    for (int k = 0; k < sample_count; ++k) {
        /* Gather the permuted sample, then fold in each masked scalar. */
        int acc = src0[permute_indexes[k]];
        acc += cntl0[k] & scalars[0];
        acc += cntl1[k] & scalars[1];
        acc += cntl2[k] & scalars[2];
        acc += cntl3[k] & scalars[3];
        target[k] = acc;
    }
}
200
201#endif /*LV_HAVE_GENERIC*/
202
203#endif /*INCLUDED_volk_16i_permute_and_scalar_add_a_H*/
Generated cross-reference index (Doxygen):
- volk_16i_permute_and_scalar_add_a_sse2(short *target, short *src0, short *permute_indexes, short *cntl0, short *cntl1, short *cntl2, short *cntl3, short *scalars, unsigned int num_points) — defined in volk_16i_permute_and_scalar_add.h, line 70
- volk_16i_permute_and_scalar_add_generic(short *target, short *src0, short *permute_indexes, short *cntl0, short *cntl1, short *cntl2, short *cntl3, short *scalars, unsigned int num_points) — defined in volk_16i_permute_and_scalar_add.h, line 178
- i — defined in volk_config_fixed.tmpl.h, line 25