VCV Rack API v2
sse_mathfun.h
/* Modified version of http://gruntthepeon.free.fr/ssemath/ for VCV Rack.

The following changes were made:
- Remove typedefs for __m128 to avoid type pollution, and because they're not that ugly.
- Make all functions inline since this is a header file.
- Remove non-SSE2 code, since Rack assumes SSE2 CPUs.
- Move `const static` variables to function variables for clarity. See https://stackoverflow.com/a/52139901/272642 for an explanation of why performance is not worse.
- Change header file to <pmmintrin.h>, which pulls in the SSE2 intrinsics we use.
- Prefix functions with `sse_mathfun_`.
- Add floor, ceil, fmod.

This derived source file is released under the zlib license.
*/

/* SIMD (SSE1+MMX or SSE2) implementation of sin, cos, exp and log

   Inspired by Intel Approximate Math library, and based on the
   corresponding algorithms of the cephes math library

   The default is to use the SSE1 version. If you define USE_SSE2,
   the SSE2 intrinsics will be used in place of the MMX intrinsics. Do
   not expect any significant performance improvement with SSE2.
*/

/* Copyright (C) 2007 Julien Pommier

   This software is provided 'as-is', without any express or implied
   warranty. In no event will the authors be held liable for any damages
   arising from the use of this software.

   Permission is granted to anyone to use this software for any purpose,
   including commercial applications, and to alter it and redistribute it
   freely, subject to the following restrictions:

   1. The origin of this software must not be misrepresented; you must not
      claim that you wrote the original software. If you use this software
      in a product, an acknowledgment in the product documentation would be
      appreciated but is not required.
   2. Altered source versions must be plainly marked as such, and must not be
      misrepresented as being the original software.
   3. This notice may not be removed or altered from any source distribution.

   (this is the zlib license)
*/
#pragma once
#include "common.hpp"


/** Generate 1.f without accessing memory. */
inline __m128 sse_mathfun_one_ps() {
	__m128i zeros = _mm_setzero_si128();
	__m128i ones = _mm_cmpeq_epi32(zeros, zeros);
	__m128i a = _mm_slli_epi32(_mm_srli_epi32(ones, 25), 23);
	return _mm_castsi128_ps(a);
}
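
/* How the constant is built: comparing a register with itself yields all-ones
   lanes (0xFFFFFFFF); >> 25 leaves 0x7F; << 23 places that value in the
   IEEE-754 exponent field, giving 0x3F800000, the bit pattern of 1.0f.
   A minimal scalar sketch of the same trick (needs <cstdint> and <cstring>):

       uint32_t bits = (0xFFFFFFFFu >> 25) << 23;  // 0x3F800000
       float one;
       std::memcpy(&one, &bits, sizeof(one));      // one == 1.0f
*/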


inline __m128 sse_mathfun_log_ps(__m128 x) {
	__m128i emm0;
	__m128 one = _mm_set_ps1(1.0);

	__m128 invalid_mask = _mm_cmple_ps(x, _mm_setzero_ps());

	/* the smallest non-denormalized float number */
	x = _mm_max_ps(x, _mm_castsi128_ps(_mm_set1_epi32(0x00800000))); /* cut off denormalized stuff */

	emm0 = _mm_srli_epi32(_mm_castps_si128(x), 23);
	/* keep only the fractional part */
	x = _mm_and_ps(x, _mm_castsi128_ps(_mm_set1_epi32(~0x7f800000)));
	x = _mm_or_ps(x, _mm_set_ps1(0.5));

	emm0 = _mm_sub_epi32(emm0, _mm_set1_epi32(0x7f));
	__m128 e = _mm_cvtepi32_ps(emm0);

	e = _mm_add_ps(e, one);

	/* part2:
	   if( x < SQRTHF ) {
	     e -= 1;
	     x = x + x - 1.0;
	   } else { x = x - 1.0; }
	*/
	__m128 mask = _mm_cmplt_ps(x, _mm_set_ps1(0.707106781186547524));
	__m128 tmp = _mm_and_ps(x, mask);
	x = _mm_sub_ps(x, one);
	e = _mm_sub_ps(e, _mm_and_ps(one, mask));
	x = _mm_add_ps(x, tmp);


	__m128 z = _mm_mul_ps(x, x);

	__m128 y = _mm_set_ps1(7.0376836292E-2);
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, _mm_set_ps1(-1.1514610310E-1));
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, _mm_set_ps1(1.1676998740E-1));
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, _mm_set_ps1(-1.2420140846E-1));
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, _mm_set_ps1(1.4249322787E-1));
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, _mm_set_ps1(-1.6668057665E-1));
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, _mm_set_ps1(2.0000714765E-1));
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, _mm_set_ps1(-2.4999993993E-1));
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, _mm_set_ps1(3.3333331174E-1));
	y = _mm_mul_ps(y, x);

	y = _mm_mul_ps(y, z);


	tmp = _mm_mul_ps(e, _mm_set_ps1(-2.12194440e-4));
	y = _mm_add_ps(y, tmp);


	tmp = _mm_mul_ps(z, _mm_set_ps1(0.5));
	y = _mm_sub_ps(y, tmp);

	tmp = _mm_mul_ps(e, _mm_set_ps1(0.693359375));
	x = _mm_add_ps(x, y);
	x = _mm_add_ps(x, tmp);
	x = _mm_or_ps(x, invalid_mask); // negative arg will be NAN
	return x;
}
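
/* Usage sketch: element-wise natural log of four floats. Lane results are
   illustrative approximations.

       __m128 v = _mm_setr_ps(1.f, 2.f, 10.f, 100.f);
       __m128 r = sse_mathfun_log_ps(v);  // ~{0.f, 0.6931f, 2.3026f, 4.6052f}

   Arguments <= 0 come back as NaN (via invalid_mask above), and denormals are
   clamped up to the smallest normalized float before the computation. */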


inline __m128 sse_mathfun_exp_ps(__m128 x) {
	__m128 tmp = _mm_setzero_ps(), fx;
	__m128i emm0;
	__m128 one = _mm_set_ps1(1.0);

	x = _mm_min_ps(x, _mm_set_ps1(88.3762626647949f));
	x = _mm_max_ps(x, _mm_set_ps1(-88.3762626647949f));

	/* express exp(x) as exp(g + n*log(2)) */
	fx = _mm_mul_ps(x, _mm_set_ps1(1.44269504088896341));
	fx = _mm_add_ps(fx, _mm_set_ps1(0.5));

	/* how to perform a floorf with SSE: just below */
	emm0 = _mm_cvttps_epi32(fx);
	tmp = _mm_cvtepi32_ps(emm0);
	/* if greater, subtract 1 */
	__m128 mask = _mm_cmpgt_ps(tmp, fx);
	mask = _mm_and_ps(mask, one);
	fx = _mm_sub_ps(tmp, mask);

	tmp = _mm_mul_ps(fx, _mm_set_ps1(0.693359375));
	__m128 z = _mm_mul_ps(fx, _mm_set_ps1(-2.12194440e-4));
	x = _mm_sub_ps(x, tmp);
	x = _mm_sub_ps(x, z);

	z = _mm_mul_ps(x, x);

	__m128 y = _mm_set_ps1(1.9875691500E-4);
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, _mm_set_ps1(1.3981999507E-3));
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, _mm_set_ps1(8.3334519073E-3));
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, _mm_set_ps1(4.1665795894E-2));
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, _mm_set_ps1(1.6666665459E-1));
	y = _mm_mul_ps(y, x);
	y = _mm_add_ps(y, _mm_set_ps1(5.0000001201E-1));
	y = _mm_mul_ps(y, z);
	y = _mm_add_ps(y, x);
	y = _mm_add_ps(y, one);

	/* build 2^n */
	emm0 = _mm_cvttps_epi32(fx);
	emm0 = _mm_add_epi32(emm0, _mm_set1_epi32(0x7f));
	emm0 = _mm_slli_epi32(emm0, 23);
	__m128 pow2n = _mm_castsi128_ps(emm0);
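	/* (n + 127) << 23 writes the biased exponent straight into the IEEE-754
	   exponent field; reinterpreted as float, this is exactly 2^n. */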
	y = _mm_mul_ps(y, pow2n);
	return y;
}
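
/* Usage sketch: element-wise e^x. Inputs are clamped to about +/-88.38 so the
   result stays within single-precision range; lane results are illustrative
   approximations.

       __m128 v = _mm_setr_ps(0.f, 1.f, -1.f, 5.f);
       __m128 r = sse_mathfun_exp_ps(v);  // ~{1.f, 2.7183f, 0.3679f, 148.41f}
*/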


/* evaluation of 4 sines at once, using only SSE1+MMX intrinsics so
   it also runs on old Athlon XPs and your grandmother's Pentium III.

   The code is an exact rewriting of the cephes sinf function.
   Precision is excellent as long as x < 8192 (I did not bother to
   take into account the special handling they have for greater values
   -- it does not return garbage for arguments over 8192, though, but
   the extra precision is missing).

   Note that it is such that sinf((float)M_PI) = 8.74e-8, which is the
   surprising but correct result.

   Performance is also surprisingly good, 1.33 times faster than the
   macOS vsinf SSE2 function, and 1.5 times faster than the
   __vrs4_sinf of AMD's ACML (which is only available in 64-bit). Not
   too bad for an SSE1 function (with no special tuning)!
   However, the latter libraries probably have much better handling of NaN,
   Inf, denormalized and other special arguments.

   On my Core Duo, the execution of this function takes approximately 95 cycles.

   From what I have observed in experiments with the Intel AMath lib, switching to an
   SSE2 version would improve performance by only 10%.

   Since it is based on SSE intrinsics, it has to be compiled at -O2 to
   deliver full speed.
*/
inline __m128 sse_mathfun_sin_ps(__m128 x) { // any x
	__m128 xmm1, xmm2 = _mm_setzero_ps(), xmm3, sign_bit, y;

	__m128i emm0, emm2;
	sign_bit = x;
	/* take the absolute value */
	const __m128 inv_sign_mask = _mm_castsi128_ps(_mm_set1_epi32(~0x80000000));
	x = _mm_and_ps(x, inv_sign_mask);
	/* extract the sign bit (upper one) */
	const __m128 sign_mask = _mm_castsi128_ps(_mm_set1_epi32(0x80000000));
	sign_bit = _mm_and_ps(sign_bit, sign_mask);

	/* scale by 4/Pi */
	const __m128 cephes_FOPI = _mm_set_ps1(1.27323954473516); // 4 / M_PI
	y = _mm_mul_ps(x, cephes_FOPI);

	/* store the integer part of y in emm2 */
	emm2 = _mm_cvttps_epi32(y);
	/* j=(j+1) & (~1) (see the cephes sources) */
	emm2 = _mm_add_epi32(emm2, _mm_set1_epi32(1));
	emm2 = _mm_and_si128(emm2, _mm_set1_epi32(~1));
	y = _mm_cvtepi32_ps(emm2);

	/* get the swap sign flag */
	emm0 = _mm_and_si128(emm2, _mm_set1_epi32(4));
	emm0 = _mm_slli_epi32(emm0, 29);
	/* get the polynomial selection mask:
	   there is one polynomial for 0 <= x <= Pi/4
	   and another one for Pi/4 < x <= Pi/2.

	   Both branches will be computed.
	*/
	emm2 = _mm_and_si128(emm2, _mm_set1_epi32(2));
	emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());

	__m128 swap_sign_bit = _mm_castsi128_ps(emm0);
	__m128 poly_mask = _mm_castsi128_ps(emm2);
	sign_bit = _mm_xor_ps(sign_bit, swap_sign_bit);

	/* The magic pass: "Extended precision modular arithmetic"
	   x = ((x - y * DP1) - y * DP2) - y * DP3; */
	xmm1 = _mm_set_ps1(-0.78515625);
	xmm2 = _mm_set_ps1(-2.4187564849853515625e-4);
	xmm3 = _mm_set_ps1(-3.77489497744594108e-8);
	xmm1 = _mm_mul_ps(y, xmm1);
	xmm2 = _mm_mul_ps(y, xmm2);
	xmm3 = _mm_mul_ps(y, xmm3);
	x = _mm_add_ps(x, xmm1);
	x = _mm_add_ps(x, xmm2);
	x = _mm_add_ps(x, xmm3);
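	/* DP1 + DP2 + DP3 sums to pi/4 to double precision; splitting the constant
	   this way subtracts y*pi/4 from x without catastrophic rounding
	   (Cody-Waite style argument reduction). */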

	/* Evaluate the first polynomial (0 <= x <= Pi/4) */
	y = _mm_set_ps1(2.443315711809948E-005);
	__m128 z = _mm_mul_ps(x, x);

	y = _mm_mul_ps(y, z);
	y = _mm_add_ps(y, _mm_set_ps1(-1.388731625493765E-003));
	y = _mm_mul_ps(y, z);
	y = _mm_add_ps(y, _mm_set_ps1(4.166664568298827E-002));
	y = _mm_mul_ps(y, z);
	y = _mm_mul_ps(y, z);
	__m128 tmp = _mm_mul_ps(z, _mm_set_ps1(0.5));
	y = _mm_sub_ps(y, tmp);
	y = _mm_add_ps(y, _mm_set_ps1(1.0));

	/* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */

	__m128 y2 = _mm_set_ps1(-1.9515295891E-4);
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_add_ps(y2, _mm_set_ps1(8.3321608736E-3));
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_add_ps(y2, _mm_set_ps1(-1.6666654611E-1));
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_mul_ps(y2, x);
	y2 = _mm_add_ps(y2, x);

	/* select the correct result from the two polynomials */
	xmm3 = poly_mask;
	y2 = _mm_and_ps(xmm3, y2);
	y = _mm_andnot_ps(xmm3, y);
	y = _mm_add_ps(y, y2);
	/* update the sign */
	y = _mm_xor_ps(y, sign_bit);
	return y;
}
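
/* Usage sketch: four sines in one call. Lane results are illustrative
   approximations; the last lane matches the sinf((float)M_PI) note above.

       __m128 v = _mm_setr_ps(0.f, 0.5f, 1.5707964f, 3.1415927f);
       __m128 r = sse_mathfun_sin_ps(v);  // ~{0.f, 0.4794f, 1.f, -8.74e-8f}
*/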


/* almost the same as sin_ps */
inline __m128 sse_mathfun_cos_ps(__m128 x) { // any x
	__m128 xmm1, xmm2 = _mm_setzero_ps(), xmm3, y;
	__m128i emm0, emm2;
	/* take the absolute value */
	const __m128 inv_sign_mask = _mm_castsi128_ps(_mm_set1_epi32(~0x80000000));
	x = _mm_and_ps(x, inv_sign_mask);

	/* scale by 4/Pi */
	const __m128 cephes_FOPI = _mm_set_ps1(1.27323954473516); // 4 / M_PI
	y = _mm_mul_ps(x, cephes_FOPI);

	/* store the integer part of y in emm2 */
	emm2 = _mm_cvttps_epi32(y);
	/* j=(j+1) & (~1) (see the cephes sources) */
	emm2 = _mm_add_epi32(emm2, _mm_set1_epi32(1));
	emm2 = _mm_and_si128(emm2, _mm_set1_epi32(~1));
	y = _mm_cvtepi32_ps(emm2);

	emm2 = _mm_sub_epi32(emm2, _mm_set1_epi32(2));

	/* get the swap sign flag */
	emm0 = _mm_andnot_si128(emm2, _mm_set1_epi32(4));
	emm0 = _mm_slli_epi32(emm0, 29);
	/* get the polynomial selection mask */
	emm2 = _mm_and_si128(emm2, _mm_set1_epi32(2));
	emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());

	__m128 sign_bit = _mm_castsi128_ps(emm0);
	__m128 poly_mask = _mm_castsi128_ps(emm2);
	/* The magic pass: "Extended precision modular arithmetic"
	   x = ((x - y * DP1) - y * DP2) - y * DP3; */
	xmm1 = _mm_set_ps1(-0.78515625);
	xmm2 = _mm_set_ps1(-2.4187564849853515625e-4);
	xmm3 = _mm_set_ps1(-3.77489497744594108e-8);
	xmm1 = _mm_mul_ps(y, xmm1);
	xmm2 = _mm_mul_ps(y, xmm2);
	xmm3 = _mm_mul_ps(y, xmm3);
	x = _mm_add_ps(x, xmm1);
	x = _mm_add_ps(x, xmm2);
	x = _mm_add_ps(x, xmm3);

	/* Evaluate the first polynomial (0 <= x <= Pi/4) */
	y = _mm_set_ps1(2.443315711809948E-005);
	__m128 z = _mm_mul_ps(x, x);

	y = _mm_mul_ps(y, z);
	y = _mm_add_ps(y, _mm_set_ps1(-1.388731625493765E-003));
	y = _mm_mul_ps(y, z);
	y = _mm_add_ps(y, _mm_set_ps1(4.166664568298827E-002));
	y = _mm_mul_ps(y, z);
	y = _mm_mul_ps(y, z);
	__m128 tmp = _mm_mul_ps(z, _mm_set_ps1(0.5));
	y = _mm_sub_ps(y, tmp);
	y = _mm_add_ps(y, _mm_set_ps1(1.0));

	/* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */

	__m128 y2 = _mm_set_ps1(-1.9515295891E-4);
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_add_ps(y2, _mm_set_ps1(8.3321608736E-3));
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_add_ps(y2, _mm_set_ps1(-1.6666654611E-1));
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_mul_ps(y2, x);
	y2 = _mm_add_ps(y2, x);

	/* select the correct result from the two polynomials */
	xmm3 = poly_mask;
	y2 = _mm_and_ps(xmm3, y2);
	y = _mm_andnot_ps(xmm3, y);
	y = _mm_add_ps(y, y2);
	/* update the sign */
	y = _mm_xor_ps(y, sign_bit);

	return y;
}


/* since sin_ps and cos_ps are almost identical, sincos_ps could replace both of them.
   It is almost as fast, and gives you a free cosine with your sine */
inline void sse_mathfun_sincos_ps(__m128 x, __m128* s, __m128* c) {
	__m128 xmm1, xmm2, xmm3 = _mm_setzero_ps(), sign_bit_sin, y;
	__m128i emm0, emm2, emm4;
	sign_bit_sin = x;
	/* take the absolute value */
	const __m128 inv_sign_mask = _mm_castsi128_ps(_mm_set1_epi32(~0x80000000));
	x = _mm_and_ps(x, inv_sign_mask);
	/* extract the sign bit (upper one) */
	const __m128 sign_mask = _mm_castsi128_ps(_mm_set1_epi32(0x80000000));
	sign_bit_sin = _mm_and_ps(sign_bit_sin, sign_mask);

	/* scale by 4/Pi */
	const __m128 cephes_FOPI = _mm_set_ps1(1.27323954473516); // 4 / M_PI
	y = _mm_mul_ps(x, cephes_FOPI);

	/* store the integer part of y in emm2 */
	emm2 = _mm_cvttps_epi32(y);

	/* j=(j+1) & (~1) (see the cephes sources) */
	emm2 = _mm_add_epi32(emm2, _mm_set1_epi32(1));
	emm2 = _mm_and_si128(emm2, _mm_set1_epi32(~1));
	y = _mm_cvtepi32_ps(emm2);

	emm4 = emm2;

	/* get the swap sign flag for the sine */
	emm0 = _mm_and_si128(emm2, _mm_set1_epi32(4));
	emm0 = _mm_slli_epi32(emm0, 29);
	__m128 swap_sign_bit_sin = _mm_castsi128_ps(emm0);

	/* get the polynomial selection mask for the sine */
	emm2 = _mm_and_si128(emm2, _mm_set1_epi32(2));
	emm2 = _mm_cmpeq_epi32(emm2, _mm_setzero_si128());
	__m128 poly_mask = _mm_castsi128_ps(emm2);

	/* The magic pass: "Extended precision modular arithmetic"
	   x = ((x - y * DP1) - y * DP2) - y * DP3; */
	xmm1 = _mm_set_ps1(-0.78515625);
	xmm2 = _mm_set_ps1(-2.4187564849853515625e-4);
	xmm3 = _mm_set_ps1(-3.77489497744594108e-8);
	xmm1 = _mm_mul_ps(y, xmm1);
	xmm2 = _mm_mul_ps(y, xmm2);
	xmm3 = _mm_mul_ps(y, xmm3);
	x = _mm_add_ps(x, xmm1);
	x = _mm_add_ps(x, xmm2);
	x = _mm_add_ps(x, xmm3);

	emm4 = _mm_sub_epi32(emm4, _mm_set1_epi32(2));
	emm4 = _mm_andnot_si128(emm4, _mm_set1_epi32(4));
	emm4 = _mm_slli_epi32(emm4, 29);
	__m128 sign_bit_cos = _mm_castsi128_ps(emm4);

	sign_bit_sin = _mm_xor_ps(sign_bit_sin, swap_sign_bit_sin);


	/* Evaluate the first polynomial (0 <= x <= Pi/4) */
	__m128 z = _mm_mul_ps(x, x);
	y = _mm_set_ps1(2.443315711809948E-005);

	y = _mm_mul_ps(y, z);
	y = _mm_add_ps(y, _mm_set_ps1(-1.388731625493765E-003));
	y = _mm_mul_ps(y, z);
	y = _mm_add_ps(y, _mm_set_ps1(4.166664568298827E-002));
	y = _mm_mul_ps(y, z);
	y = _mm_mul_ps(y, z);
	__m128 tmp = _mm_mul_ps(z, _mm_set_ps1(0.5));
	y = _mm_sub_ps(y, tmp);
	y = _mm_add_ps(y, _mm_set_ps1(1.0));

	/* Evaluate the second polynomial (Pi/4 <= x <= Pi/2) */

	__m128 y2 = _mm_set_ps1(-1.9515295891E-4);
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_add_ps(y2, _mm_set_ps1(8.3321608736E-3));
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_add_ps(y2, _mm_set_ps1(-1.6666654611E-1));
	y2 = _mm_mul_ps(y2, z);
	y2 = _mm_mul_ps(y2, x);
	y2 = _mm_add_ps(y2, x);

	/* select the correct result from the two polynomials */
	xmm3 = poly_mask;
	__m128 ysin2 = _mm_and_ps(xmm3, y2);
	__m128 ysin1 = _mm_andnot_ps(xmm3, y);
	y2 = _mm_sub_ps(y2, ysin2);
	y = _mm_sub_ps(y, ysin1);

	xmm1 = _mm_add_ps(ysin1, ysin2);
	xmm2 = _mm_add_ps(y, y2);

	/* update the sign */
	*s = _mm_xor_ps(xmm1, sign_bit_sin);
	*c = _mm_xor_ps(xmm2, sign_bit_cos);
}
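
/* Usage sketch: one call yields both results, sharing the argument reduction.
   Lane results are illustrative approximations.

       __m128 s, c;
       sse_mathfun_sincos_ps(_mm_set_ps1(0.5f), &s, &c);
       // s ~ 0.4794f in all lanes, c ~ 0.8776f in all lanes
*/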