Qt internal/contributor documentation for the Qt SDK. Note: these are NOT the official API docs; those can be found at https://doc.qt.io/.
qdrawhelper_sse4.cpp
// Copyright (C) 2016 The Qt Company Ltd.
// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only

#include <private/qdrawhelper_p.h>
#include <private/qdrawingprimitive_sse2_p.h>
#include <private/qpaintengine_raster_p.h>
#include <private/qpixellayout_p.h>

#if defined(QT_COMPILER_SUPPORTS_SSE4_1)

QT_BEGIN_NAMESPACE

#ifndef __haswell__
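// Premultiplies four ARGB32 (or RGBA8888 when RGBA is true) pixels per iteration.
// _mm_testz_si128 spots vectors whose four alpha bytes are all zero (stored as
// transparent black) and _mm_testc_si128 spots fully opaque vectors (copied through),
// so only mixed-alpha pixels pay for the multiply. Each 8-bit channel is multiplied by
// its alpha in a 16-bit lane and divided by 255 with the usual rounding approximation
// (x + (x >> 8) + 0x80) >> 8, then _mm_blend_epi16 with mask 0x88 puts the original
// alpha back into the alpha lanes.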
template<bool RGBA>
static void convertARGBToARGB32PM_sse4(uint *buffer, const uint *src, int count)
{
    int i = 0;
    const __m128i alphaMask = _mm_set1_epi32(0xff000000);
    const __m128i rgbaMask = _mm_setr_epi8(2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8, 11, 14, 13, 12, 15);
    const __m128i shuffleMask = _mm_setr_epi8(6, 7, 6, 7, 6, 7, 6, 7, 14, 15, 14, 15, 14, 15, 14, 15);
    const __m128i half = _mm_set1_epi16(0x0080);
    const __m128i zero = _mm_setzero_si128();

    for (; i < count - 3; i += 4) {
        __m128i srcVector = _mm_loadu_si128((const __m128i *)&src[i]);
        if (!_mm_testz_si128(srcVector, alphaMask)) {
            if (!_mm_testc_si128(srcVector, alphaMask)) {
                if (RGBA)
                    srcVector = _mm_shuffle_epi8(srcVector, rgbaMask);
                __m128i src1 = _mm_unpacklo_epi8(srcVector, zero);
                __m128i src2 = _mm_unpackhi_epi8(srcVector, zero);
                __m128i alpha1 = _mm_shuffle_epi8(src1, shuffleMask);
                __m128i alpha2 = _mm_shuffle_epi8(src2, shuffleMask);
                src1 = _mm_mullo_epi16(src1, alpha1);
                src2 = _mm_mullo_epi16(src2, alpha2);
                src1 = _mm_add_epi16(src1, _mm_srli_epi16(src1, 8));
                src2 = _mm_add_epi16(src2, _mm_srli_epi16(src2, 8));
                src1 = _mm_add_epi16(src1, half);
                src2 = _mm_add_epi16(src2, half);
                src1 = _mm_srli_epi16(src1, 8);
                src2 = _mm_srli_epi16(src2, 8);
                src1 = _mm_blend_epi16(src1, alpha1, 0x88);
                src2 = _mm_blend_epi16(src2, alpha2, 0x88);
                srcVector = _mm_packus_epi16(src1, src2);
                _mm_storeu_si128((__m128i *)&buffer[i], srcVector);
            } else {
                if (RGBA)
                    _mm_storeu_si128((__m128i *)&buffer[i], _mm_shuffle_epi8(srcVector, rgbaMask));
                else if (buffer != src)
                    _mm_storeu_si128((__m128i *)&buffer[i], srcVector);
            }
        } else {
            _mm_storeu_si128((__m128i *)&buffer[i], zero);
        }
    }

    SIMD_EPILOGUE(i, count, 3) {
        uint v = qPremultiply(src[i]);
        buffer[i] = RGBA ? RGBA2ARGB(v) : v;
    }
}

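// Converts four ARGB32 (or RGBA8888) pixels to premultiplied RGBA64. Unpacking each
// byte with itself widens 8-bit channels to 16 bits as v * 257 (so 0xff becomes 0xffff).
// For non-opaque pixels _mm_mulhi_epu16 keeps the high word of channel * alpha, which
// lands in 0..0xfffe; adding its own top bit maps 0xfffe back to 0xffff before the exact
// 16-bit alpha is blended back in.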
template<bool RGBA>
static void convertARGBToRGBA64PM_sse4(QRgba64 *buffer, const uint *src, int count)
{
    int i = 0;
    const __m128i alphaMask = _mm_set1_epi32(0xff000000);
    const __m128i rgbaMask = _mm_setr_epi8(2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8, 11, 14, 13, 12, 15);
    const __m128i shuffleMask = _mm_setr_epi8(6, 7, 6, 7, 6, 7, 6, 7, 14, 15, 14, 15, 14, 15, 14, 15);
    const __m128i zero = _mm_setzero_si128();

    for (; i < count - 3; i += 4) {
        __m128i srcVector = _mm_loadu_si128((const __m128i *)&src[i]);
        if (!_mm_testz_si128(srcVector, alphaMask)) {
            bool cf = _mm_testc_si128(srcVector, alphaMask);

            if (!RGBA)
                srcVector = _mm_shuffle_epi8(srcVector, rgbaMask);
            const __m128i src1 = _mm_unpacklo_epi8(srcVector, srcVector);
            const __m128i src2 = _mm_unpackhi_epi8(srcVector, srcVector);
            if (!cf) {
                __m128i alpha1 = _mm_shuffle_epi8(src1, shuffleMask);
                __m128i alpha2 = _mm_shuffle_epi8(src2, shuffleMask);
                __m128i dst1 = _mm_mulhi_epu16(src1, alpha1);
                __m128i dst2 = _mm_mulhi_epu16(src2, alpha2);
                // Map 0->0xfffe to 0->0xffff
                dst1 = _mm_add_epi16(dst1, _mm_srli_epi16(dst1, 15));
                dst2 = _mm_add_epi16(dst2, _mm_srli_epi16(dst2, 15));
                // correct alpha value:
                dst1 = _mm_blend_epi16(dst1, src1, 0x88);
                dst2 = _mm_blend_epi16(dst2, src2, 0x88);
                _mm_storeu_si128((__m128i *)&buffer[i], dst1);
                _mm_storeu_si128((__m128i *)&buffer[i + 2], dst2);
            } else {
                _mm_storeu_si128((__m128i *)&buffer[i], src1);
                _mm_storeu_si128((__m128i *)&buffer[i + 2], src2);
            }
        } else {
            _mm_storeu_si128((__m128i *)&buffer[i], zero);
            _mm_storeu_si128((__m128i *)&buffer[i + 2], zero);
        }
    }

    SIMD_EPILOGUE(i, count, 3) {
        const uint s = RGBA ? RGBA2ARGB(src[i]) : src[i];
        buffer[i] = QRgba64::fromArgb32(s).premultiplied();
    }
}
#endif // __haswell__

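// Computes mul / a for each float lane. _mm_rcp_ps only gives about 12 bits of
// precision, so one Newton-Raphson step x' = x * (2 - a * x), written below as
// (x + x) - x * (x * a), roughly doubles that before scaling by mul. For a == 0 the
// estimate is infinite and the refinement yields inf * 0 = NaN, which is presumably why
// the callers below fall back to scalar code when the invalid-operation exception is
// unmasked and clear the affected lanes afterwards (the "alpha == 0" fixups).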
static inline __m128 Q_DECL_VECTORCALL reciprocal_mul_ps(__m128 a, float mul)
{
    __m128 ia = _mm_rcp_ps(a); // Approximate 1/a
    // Improve precision of ia using Newton-Raphson
    ia = _mm_sub_ps(_mm_add_ps(ia, ia), _mm_mul_ps(ia, _mm_mul_ps(ia, a)));
    ia = _mm_mul_ps(ia, _mm_set1_ps(mul));
    return ia;
}

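// Unpremultiplies back to ARGB32/RGBA8888, optionally forcing an opaque alpha (RGBx).
// The scalar path is taken when invalid-operation FP exceptions are unmasked, since the
// reciprocal trick would otherwise raise one for pixels with alpha == 0. In the vector
// path each channel is widened to 32 bits, multiplied as a float by 255/alpha from
// reciprocal_mul_ps, packed back down, and lanes whose alpha was zero are cleared before
// the original (or forced) alpha bytes are blended back in.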
template<bool RGBA, bool RGBx>
static inline void convertARGBFromARGB32PM_sse4(uint *buffer, const uint *src, int count)
{
    int i = 0;
    if ((_MM_GET_EXCEPTION_MASK() & _MM_MASK_INVALID) == 0) {
        for (; i < count; ++i) {
            uint v = qUnpremultiply(src[i]);
            if (RGBx)
                v = 0xff000000 | v;
            if (RGBA)
                v = ARGB2RGBA(v);
            buffer[i] = v;
        }
        return;
    }
    const __m128i alphaMask = _mm_set1_epi32(0xff000000);
    const __m128i rgbaMask = _mm_setr_epi8(2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8, 11, 14, 13, 12, 15);
    const __m128i zero = _mm_setzero_si128();

    for (; i < count - 3; i += 4) {
        __m128i srcVector = _mm_loadu_si128((const __m128i *)&src[i]);
        if (!_mm_testz_si128(srcVector, alphaMask)) {
            if (!_mm_testc_si128(srcVector, alphaMask)) {
                __m128i srcVectorAlpha = _mm_srli_epi32(srcVector, 24);
                if (RGBA)
                    srcVector = _mm_shuffle_epi8(srcVector, rgbaMask);
                const __m128 a = _mm_cvtepi32_ps(srcVectorAlpha);
                const __m128 ia = reciprocal_mul_ps(a, 255.0f);
                __m128i src1 = _mm_unpacklo_epi8(srcVector, zero);
                __m128i src3 = _mm_unpackhi_epi8(srcVector, zero);
                __m128i src2 = _mm_unpackhi_epi16(src1, zero);
                __m128i src4 = _mm_unpackhi_epi16(src3, zero);
                src1 = _mm_unpacklo_epi16(src1, zero);
                src3 = _mm_unpacklo_epi16(src3, zero);
                __m128 ia1 = _mm_shuffle_ps(ia, ia, _MM_SHUFFLE(0, 0, 0, 0));
                __m128 ia2 = _mm_shuffle_ps(ia, ia, _MM_SHUFFLE(1, 1, 1, 1));
                __m128 ia3 = _mm_shuffle_ps(ia, ia, _MM_SHUFFLE(2, 2, 2, 2));
                __m128 ia4 = _mm_shuffle_ps(ia, ia, _MM_SHUFFLE(3, 3, 3, 3));
                src1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(src1), ia1));
                src2 = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(src2), ia2));
                src3 = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(src3), ia3));
                src4 = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(src4), ia4));
                src1 = _mm_packus_epi32(src1, src2);
                src3 = _mm_packus_epi32(src3, src4);
                src1 = _mm_packus_epi16(src1, src3);
                // Handle potential alpha == 0 values:
                __m128i srcVectorAlphaMask = _mm_cmpeq_epi32(srcVectorAlpha, zero);
                src1 = _mm_andnot_si128(srcVectorAlphaMask, src1);
                // Fixup alpha values:
                if (RGBx)
                    srcVector = _mm_or_si128(src1, alphaMask);
                else
                    srcVector = _mm_blendv_epi8(src1, srcVector, alphaMask);
                _mm_storeu_si128((__m128i *)&buffer[i], srcVector);
            } else {
                if (RGBA)
                    _mm_storeu_si128((__m128i *)&buffer[i], _mm_shuffle_epi8(srcVector, rgbaMask));
                else if (buffer != src)
                    _mm_storeu_si128((__m128i *)&buffer[i], srcVector);
            }
        } else {
            if (RGBx)
                _mm_storeu_si128((__m128i *)&buffer[i], alphaMask);
            else
                _mm_storeu_si128((__m128i *)&buffer[i], zero);
        }
    }

    SIMD_EPILOGUE(i, count, 3) {
        uint v = qUnpremultiply_sse4(src[i]);
        if (RGBx)
            v = 0xff000000 | v;
        if (RGBA)
            v = ARGB2RGBA(v);
        buffer[i] = v;
    }
}

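// Converts premultiplied RGBA64 to unpremultiplied ARGB32 or RGBA8888, two pixels per
// 128-bit load. The 16-bit alpha is reduced to 8 bits with rounding via
// (a + 128 - ((a + 128) >> 8)) >> 8, i.e. division by 257; the all-opaque fast path
// applies the same reduction to every channel, while mixed-alpha pixels go through the
// float reciprocal divide instead.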
template<bool RGBA>
static inline void convertARGBFromRGBA64PM_sse4(uint *buffer, const QRgba64 *src, int count)
{
    int i = 0;
    if ((_MM_GET_EXCEPTION_MASK() & _MM_MASK_INVALID) == 0) {
        for (; i < count; ++i) {
            const QRgba64 v = src[i].unpremultiplied();
            buffer[i] = RGBA ? toRgba8888(v) : toArgb32(v);
        }
        return;
    }
    const __m128i alphaMask = _mm_set1_epi64x(qint64(Q_UINT64_C(0xffff) << 48));
    const __m128i alphaMask32 = _mm_set1_epi32(0xff000000);
    const __m128i rgbaMask = _mm_setr_epi8(2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8, 11, 14, 13, 12, 15);
    const __m128i zero = _mm_setzero_si128();

    for (; i < count - 3; i += 4) {
        __m128i srcVector1 = _mm_loadu_si128((const __m128i *)&src[i]);
        __m128i srcVector2 = _mm_loadu_si128((const __m128i *)&src[i + 2]);
        bool transparent1 = _mm_testz_si128(srcVector1, alphaMask);
        bool opaque1 = _mm_testc_si128(srcVector1, alphaMask);
        bool transparent2 = _mm_testz_si128(srcVector2, alphaMask);
        bool opaque2 = _mm_testc_si128(srcVector2, alphaMask);

        if (!(transparent1 && transparent2)) {
            if (!(opaque1 && opaque2)) {
                __m128i srcVector1Alpha = _mm_srli_epi64(srcVector1, 48);
                __m128i srcVector2Alpha = _mm_srli_epi64(srcVector2, 48);
                __m128i srcVectorAlpha = _mm_packus_epi32(srcVector1Alpha, srcVector2Alpha);
                const __m128 a = _mm_cvtepi32_ps(srcVectorAlpha);
                // Convert srcVectorAlpha to final 8-bit alpha channel
                srcVectorAlpha = _mm_add_epi32(srcVectorAlpha, _mm_set1_epi32(128));
                srcVectorAlpha = _mm_sub_epi32(srcVectorAlpha, _mm_srli_epi32(srcVectorAlpha, 8));
                srcVectorAlpha = _mm_srli_epi32(srcVectorAlpha, 8);
                srcVectorAlpha = _mm_slli_epi32(srcVectorAlpha, 24);
                const __m128 ia = reciprocal_mul_ps(a, 255.0f);
                __m128i src1 = _mm_unpacklo_epi16(srcVector1, zero);
                __m128i src2 = _mm_unpackhi_epi16(srcVector1, zero);
                __m128i src3 = _mm_unpacklo_epi16(srcVector2, zero);
                __m128i src4 = _mm_unpackhi_epi16(srcVector2, zero);
                __m128 ia1 = _mm_shuffle_ps(ia, ia, _MM_SHUFFLE(0, 0, 0, 0));
                __m128 ia2 = _mm_shuffle_ps(ia, ia, _MM_SHUFFLE(1, 1, 1, 1));
                __m128 ia3 = _mm_shuffle_ps(ia, ia, _MM_SHUFFLE(2, 2, 2, 2));
                __m128 ia4 = _mm_shuffle_ps(ia, ia, _MM_SHUFFLE(3, 3, 3, 3));
                src1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(src1), ia1));
                src2 = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(src2), ia2));
                src3 = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(src3), ia3));
                src4 = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(src4), ia4));
                src1 = _mm_packus_epi32(src1, src2);
                src3 = _mm_packus_epi32(src3, src4);
                // Handle potential alpha == 0 values:
                __m128i srcVector1AlphaMask = _mm_cmpeq_epi64(srcVector1Alpha, zero);
                __m128i srcVector2AlphaMask = _mm_cmpeq_epi64(srcVector2Alpha, zero);
                src1 = _mm_andnot_si128(srcVector1AlphaMask, src1);
                src3 = _mm_andnot_si128(srcVector2AlphaMask, src3);
                src1 = _mm_packus_epi16(src1, src3);
                // Fixup alpha values:
                src1 = _mm_blendv_epi8(src1, srcVectorAlpha, alphaMask32);
                // Fix RGB order
                if (!RGBA)
                    src1 = _mm_shuffle_epi8(src1, rgbaMask);
                _mm_storeu_si128((__m128i *)&buffer[i], src1);
            } else {
                __m128i src1 = _mm_unpacklo_epi16(srcVector1, zero);
                __m128i src2 = _mm_unpackhi_epi16(srcVector1, zero);
                __m128i src3 = _mm_unpacklo_epi16(srcVector2, zero);
                __m128i src4 = _mm_unpackhi_epi16(srcVector2, zero);
                src1 = _mm_add_epi32(src1, _mm_set1_epi32(128));
                src2 = _mm_add_epi32(src2, _mm_set1_epi32(128));
                src3 = _mm_add_epi32(src3, _mm_set1_epi32(128));
                src4 = _mm_add_epi32(src4, _mm_set1_epi32(128));
                src1 = _mm_sub_epi32(src1, _mm_srli_epi32(src1, 8));
                src2 = _mm_sub_epi32(src2, _mm_srli_epi32(src2, 8));
                src3 = _mm_sub_epi32(src3, _mm_srli_epi32(src3, 8));
                src4 = _mm_sub_epi32(src4, _mm_srli_epi32(src4, 8));
                src1 = _mm_srli_epi32(src1, 8);
                src2 = _mm_srli_epi32(src2, 8);
                src3 = _mm_srli_epi32(src3, 8);
                src4 = _mm_srli_epi32(src4, 8);
                src1 = _mm_packus_epi32(src1, src2);
                src3 = _mm_packus_epi32(src3, src4);
                src1 = _mm_packus_epi16(src1, src3);
                if (!RGBA)
                    src1 = _mm_shuffle_epi8(src1, rgbaMask);
                _mm_storeu_si128((__m128i *)&buffer[i], src1);
            }
        } else {
            _mm_storeu_si128((__m128i *)&buffer[i], zero);
        }
    }

    SIMD_EPILOGUE(i, count, 3) {
        buffer[i] = qConvertRgba64ToRgb32_sse4<RGBA ? PixelOrderRGB : PixelOrderBGR>(src[i]);
    }
}

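// Unpremultiplies RGBA64 at full 16-bit precision by scaling each channel with
// 65535/alpha from reciprocal_mul_ps. With mask set the alpha lanes are forced to
// 0xffff afterwards (used by the RGBx64 store below); otherwise the original alpha is
// blended back.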
template<bool mask>
static inline void convertRGBA64FromRGBA64PM_sse4(QRgba64 *buffer, const QRgba64 *src, int count)
{
    int i = 0;
    if ((_MM_GET_EXCEPTION_MASK() & _MM_MASK_INVALID) == 0) {
        for (; i < count; ++i) {
            QRgba64 v = src[i].unpremultiplied();
            if (mask)
                v.setAlpha(65535);
            buffer[i] = v;
        }
        return;
    }
    const __m128i alphaMask = _mm_set1_epi64x(qint64(Q_UINT64_C(0xffff) << 48));
    const __m128i zero = _mm_setzero_si128();

    for (; i < count - 3; i += 4) {
        __m128i srcVector1 = _mm_loadu_si128((const __m128i *)&src[i + 0]);
        __m128i srcVector2 = _mm_loadu_si128((const __m128i *)&src[i + 2]);
        bool transparent1 = _mm_testz_si128(srcVector1, alphaMask);
        bool opaque1 = _mm_testc_si128(srcVector1, alphaMask);
        bool transparent2 = _mm_testz_si128(srcVector2, alphaMask);
        bool opaque2 = _mm_testc_si128(srcVector2, alphaMask);

        if (!(transparent1 && transparent2)) {
            if (!(opaque1 && opaque2)) {
                __m128i srcVector1Alpha = _mm_srli_epi64(srcVector1, 48);
                __m128i srcVector2Alpha = _mm_srli_epi64(srcVector2, 48);
                __m128i srcVectorAlpha = _mm_packus_epi32(srcVector1Alpha, srcVector2Alpha);
                const __m128 a = _mm_cvtepi32_ps(srcVectorAlpha);
                const __m128 ia = reciprocal_mul_ps(a, 65535.0f);
                __m128i src1 = _mm_unpacklo_epi16(srcVector1, zero);
                __m128i src2 = _mm_unpackhi_epi16(srcVector1, zero);
                __m128i src3 = _mm_unpacklo_epi16(srcVector2, zero);
                __m128i src4 = _mm_unpackhi_epi16(srcVector2, zero);
                __m128 ia1 = _mm_shuffle_ps(ia, ia, _MM_SHUFFLE(0, 0, 0, 0));
                __m128 ia2 = _mm_shuffle_ps(ia, ia, _MM_SHUFFLE(1, 1, 1, 1));
                __m128 ia3 = _mm_shuffle_ps(ia, ia, _MM_SHUFFLE(2, 2, 2, 2));
                __m128 ia4 = _mm_shuffle_ps(ia, ia, _MM_SHUFFLE(3, 3, 3, 3));
                src1 = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(src1), ia1));
                src2 = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(src2), ia2));
                src3 = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(src3), ia3));
                src4 = _mm_cvtps_epi32(_mm_mul_ps(_mm_cvtepi32_ps(src4), ia4));
                src1 = _mm_packus_epi32(src1, src2);
                src3 = _mm_packus_epi32(src3, src4);
                // Handle potential alpha == 0 values:
                __m128i srcVector1AlphaMask = _mm_cmpeq_epi64(srcVector1Alpha, zero);
                __m128i srcVector2AlphaMask = _mm_cmpeq_epi64(srcVector2Alpha, zero);
                src1 = _mm_andnot_si128(srcVector1AlphaMask, src1);
                src3 = _mm_andnot_si128(srcVector2AlphaMask, src3);
                // Fixup alpha values:
                if (mask) {
                    src1 = _mm_or_si128(src1, alphaMask);
                    src3 = _mm_or_si128(src3, alphaMask);
                } else {
                    src1 = _mm_blendv_epi8(src1, srcVector1, alphaMask);
                    src3 = _mm_blendv_epi8(src3, srcVector2, alphaMask);
                }
                _mm_storeu_si128((__m128i *)&buffer[i + 0], src1);
                _mm_storeu_si128((__m128i *)&buffer[i + 2], src3);
            } else {
                if (mask) {
                    srcVector1 = _mm_or_si128(srcVector1, alphaMask);
                    srcVector2 = _mm_or_si128(srcVector2, alphaMask);
                }
                if (mask || src != buffer) {
                    _mm_storeu_si128((__m128i *)&buffer[i + 0], srcVector1);
                    _mm_storeu_si128((__m128i *)&buffer[i + 2], srcVector2);
                }
            }
        } else {
            _mm_storeu_si128((__m128i *)&buffer[i + 0], zero);
            _mm_storeu_si128((__m128i *)&buffer[i + 2], zero);
        }
    }

    SIMD_EPILOGUE(i, count, 3) {
        QRgba64 v = src[i].unpremultiplied();
        if (mask)
            v.setAlpha(65535);
        buffer[i] = v;
    }
}

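// Public entry points wrapping the templates above, using the signatures expected by
// the fetch/convert function-pointer tables (qpixellayout_p.h is included above). The
// __haswell__ guard presumably drops the variants that an AVX2 build provides
// elsewhere.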
#ifndef __haswell__
void QT_FASTCALL convertARGB32ToARGB32PM_sse4(uint *buffer, int count, const QList<QRgb> *)
{
    convertARGBToARGB32PM_sse4<false>(buffer, buffer, count);
}

void QT_FASTCALL convertRGBA8888ToARGB32PM_sse4(uint *buffer, int count, const QList<QRgb> *)
{
    convertARGBToARGB32PM_sse4<true>(buffer, buffer, count);
}

const QRgba64 * QT_FASTCALL convertARGB32ToRGBA64PM_sse4(QRgba64 *buffer, const uint *src, int count,
                                                          const QList<QRgb> *, QDitherInfo *)
{
    convertARGBToRGBA64PM_sse4<false>(buffer, src, count);
    return buffer;
}

const QRgba64 * QT_FASTCALL convertRGBA8888ToRGBA64PM_sse4(QRgba64 *buffer, const uint *src, int count,
                                                            const QList<QRgb> *, QDitherInfo *)
{
    convertARGBToRGBA64PM_sse4<true>(buffer, src, count);
    return buffer;
}

const uint *QT_FASTCALL fetchARGB32ToARGB32PM_sse4(uint *buffer, const uchar *src, int index, int count,
                                                   const QList<QRgb> *, QDitherInfo *)
{
    convertARGBToARGB32PM_sse4<false>(buffer, reinterpret_cast<const uint *>(src) + index, count);
    return buffer;
}

const uint *QT_FASTCALL fetchRGBA8888ToARGB32PM_sse4(uint *buffer, const uchar *src, int index, int count,
                                                     const QList<QRgb> *, QDitherInfo *)
{
    convertARGBToARGB32PM_sse4<true>(buffer, reinterpret_cast<const uint *>(src) + index, count);
    return buffer;
}

const QRgba64 *QT_FASTCALL fetchARGB32ToRGBA64PM_sse4(QRgba64 *buffer, const uchar *src, int index, int count,
                                                      const QList<QRgb> *, QDitherInfo *)
{
    convertARGBToRGBA64PM_sse4<false>(buffer, reinterpret_cast<const uint *>(src) + index, count);
    return buffer;
}

const QRgba64 *QT_FASTCALL fetchRGBA8888ToRGBA64PM_sse4(QRgba64 *buffer, const uchar *src, int index, int count,
                                                        const QList<QRgb> *, QDitherInfo *)
{
    convertARGBToRGBA64PM_sse4<true>(buffer, reinterpret_cast<const uint *>(src) + index, count);
    return buffer;
}
#endif // __haswell__

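// Store helpers from the internal ARGB32PM buffer to the 32-bit destination formats;
// the two template arguments select RGBA byte order and whether alpha is forced opaque.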
void QT_FASTCALL storeRGB32FromARGB32PM_sse4(uchar *dest, const uint *src, int index, int count,
                                             const QList<QRgb> *, QDitherInfo *)
{
    uint *d = reinterpret_cast<uint *>(dest) + index;
    convertARGBFromARGB32PM_sse4<false,true>(d, src, count);
}

void QT_FASTCALL storeARGB32FromARGB32PM_sse4(uchar *dest, const uint *src, int index, int count,
                                              const QList<QRgb> *, QDitherInfo *)
{
    uint *d = reinterpret_cast<uint *>(dest) + index;
    convertARGBFromARGB32PM_sse4<false,false>(d, src, count);
}

void QT_FASTCALL storeRGBA8888FromARGB32PM_sse4(uchar *dest, const uint *src, int index, int count,
                                                const QList<QRgb> *, QDitherInfo *)
{
    uint *d = reinterpret_cast<uint *>(dest) + index;
    convertARGBFromARGB32PM_sse4<true,false>(d, src, count);
}

void QT_FASTCALL storeRGBXFromARGB32PM_sse4(uchar *dest, const uint *src, int index, int count,
                                            const QList<QRgb> *, QDitherInfo *)
{
    uint *d = reinterpret_cast<uint *>(dest) + index;
    convertARGBFromARGB32PM_sse4<true,true>(d, src, count);
}

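// A2RGB30 packs 10 bits per colour channel plus 2 bits of alpha; the per-pixel
// conversion is done by qConvertArgb32ToA2rgb30_sse4 and is instantiated below for both
// channel orders.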
template<QtPixelOrder PixelOrder>
void QT_FASTCALL storeA2RGB30PMFromARGB32PM_sse4(uchar *dest, const uint *src, int index, int count,
                                                 const QList<QRgb> *, QDitherInfo *)
{
    uint *d = reinterpret_cast<uint *>(dest) + index;
    for (int i = 0; i < count; ++i)
        d[i] = qConvertArgb32ToA2rgb30_sse4<PixelOrder>(src[i]);
}

template
void QT_FASTCALL storeA2RGB30PMFromARGB32PM_sse4<PixelOrderBGR>(uchar *dest, const uint *src, int index, int count,
                                                                const QList<QRgb> *, QDitherInfo *);
template
void QT_FASTCALL storeA2RGB30PMFromARGB32PM_sse4<PixelOrderRGB>(uchar *dest, const uint *src, int index, int count,
                                                                const QList<QRgb> *, QDitherInfo *);

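// With the 64-bit raster backend enabled, these write a scanline of premultiplied
// RGBA64 back into a 32-bit destination image using the unpremultiplying conversion
// above.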
#if QT_CONFIG(raster_64bit)
void QT_FASTCALL destStore64ARGB32_sse4(QRasterBuffer *rasterBuffer, int x, int y, const QRgba64 *buffer, int length)
{
    uint *dest = (uint*)rasterBuffer->scanLine(y) + x;
    convertARGBFromRGBA64PM_sse4<false>(dest, buffer, length);
}

void QT_FASTCALL destStore64RGBA8888_sse4(QRasterBuffer *rasterBuffer, int x, int y, const QRgba64 *buffer, int length)
{
    uint *dest = (uint*)rasterBuffer->scanLine(y) + x;
    convertARGBFromRGBA64PM_sse4<true>(dest, buffer, length);
}
#endif

void QT_FASTCALL storeARGB32FromRGBA64PM_sse4(uchar *dest, const QRgba64 *src, int index, int count,
                                              const QList<QRgb> *, QDitherInfo *)
{
    uint *d = (uint*)dest + index;
    convertARGBFromRGBA64PM_sse4<false>(d, src, count);
}

void QT_FASTCALL storeRGBA8888FromRGBA64PM_sse4(uchar *dest, const QRgba64 *src, int index, int count,
                                                const QList<QRgb> *, QDitherInfo *)
{
    uint *d = (uint*)dest + index;
    convertARGBFromRGBA64PM_sse4<true>(d, src, count);
}

void QT_FASTCALL storeRGBA64FromRGBA64PM_sse4(uchar *dest, const QRgba64 *src, int index, int count,
                                              const QList<QRgb> *, QDitherInfo *)
{
    QRgba64 *d = (QRgba64 *)dest + index;
    convertRGBA64FromRGBA64PM_sse4<false>(d, src, count);
}

void QT_FASTCALL storeRGBx64FromRGBA64PM_sse4(uchar *dest, const QRgba64 *src, int index, int count,
                                              const QList<QRgb> *, QDitherInfo *)
{
    QRgba64 *d = (QRgba64 *)dest + index;
    convertRGBA64FromRGBA64PM_sse4<true>(d, src, count);
}

#if QT_CONFIG(raster_fp)
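// Variants for the floating-point raster backend. Fetching premultiplies each
// QRgbaFloat32 pixel: the colour lanes are scaled by alpha and _mm_insert_ps with
// immediate 0x30 writes the untouched alpha back into lane 3.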
const QRgbaFloat32 *QT_FASTCALL fetchRGBA32FToRGBA32F_sse4(QRgbaFloat32 *buffer, const uchar *src, int index, int count,
                                                           const QList<QRgb> *, QDitherInfo *)
{
    const QRgbaFloat32 *s = reinterpret_cast<const QRgbaFloat32 *>(src) + index;
    for (int i = 0; i < count; ++i) {
        __m128 vsf = _mm_load_ps(reinterpret_cast<const float *>(s + i));
        __m128 vsa = _mm_shuffle_ps(vsf, vsf, _MM_SHUFFLE(3, 3, 3, 3));
        vsf = _mm_mul_ps(vsf, vsa);
        vsf = _mm_insert_ps(vsf, vsa, 0x30);
        _mm_store_ps(reinterpret_cast<float *>(buffer + i), vsf);
    }
    return buffer;
}

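// Storing unpremultiplies again, special-casing alpha == 1.0f (copied through) and
// alpha == 0.0f (cleared; the RGBX variant clears to opaque black). The general case
// reuses the rcpps + Newton-Raphson reciprocal: the RGBX store forces alpha to 1.0f
// after the multiply, while the RGBA store sets the reciprocal's alpha lane to 1.0f
// beforehand so the stored alpha is preserved.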
void QT_FASTCALL storeRGBX32FFromRGBA32F_sse4(uchar *dest, const QRgbaFloat32 *src, int index, int count,
                                              const QList<QRgb> *, QDitherInfo *)
{
    QRgbaFloat32 *d = reinterpret_cast<QRgbaFloat32 *>(dest) + index;
    const __m128 zero = _mm_set_ps(1.0f, 0.0f, 0.0f, 0.0f);
    for (int i = 0; i < count; ++i) {
        __m128 vsf = _mm_load_ps(reinterpret_cast<const float *>(src + i));
        const __m128 vsa = _mm_shuffle_ps(vsf, vsf, _MM_SHUFFLE(3, 3, 3, 3));
        const float a = _mm_cvtss_f32(vsa);
        if (a == 1.0f)
        { }
        else if (a == 0.0f)
            vsf = zero;
        else {
            __m128 vsr = _mm_rcp_ps(vsa);
            vsr = _mm_sub_ps(_mm_add_ps(vsr, vsr), _mm_mul_ps(vsr, _mm_mul_ps(vsr, vsa)));
            vsf = _mm_mul_ps(vsf, vsr);
            vsf = _mm_insert_ps(vsf, _mm_set_ss(1.0f), 0x30);
        }
        _mm_store_ps(reinterpret_cast<float *>(d + i), vsf);
    }
}

void QT_FASTCALL storeRGBA32FFromRGBA32F_sse4(uchar *dest, const QRgbaFloat32 *src, int index, int count,
                                              const QList<QRgb> *, QDitherInfo *)
{
    QRgbaFloat32 *d = reinterpret_cast<QRgbaFloat32 *>(dest) + index;
    const __m128 zero = _mm_set1_ps(0.0f);
    for (int i = 0; i < count; ++i) {
        __m128 vsf = _mm_load_ps(reinterpret_cast<const float *>(src + i));
        const __m128 vsa = _mm_shuffle_ps(vsf, vsf, _MM_SHUFFLE(3, 3, 3, 3));
        const float a = _mm_cvtss_f32(vsa);
        if (a == 1.0f)
        { }
        else if (a == 0.0f)
            vsf = zero;
        else {
            __m128 vsr = _mm_rcp_ps(vsa);
            vsr = _mm_sub_ps(_mm_add_ps(vsr, vsr), _mm_mul_ps(vsr, _mm_mul_ps(vsr, vsa)));
            vsr = _mm_insert_ps(vsr, _mm_set_ss(1.0f), 0x30);
            vsf = _mm_mul_ps(vsf, vsr);
        }
        _mm_store_ps(reinterpret_cast<float *>(d + i), vsf);
    }
}
#endif

QT_END_NAMESPACE

#endif