Qt
Internal/contributor documentation for the Qt SDK. <b>Note:</b> These are NOT the official API docs; the official API documentation can be found <a href='https://doc.qt.io/'>here</a>.
Loading...
Searching...
No Matches
qdrawhelper_neon.cpp
Go to the documentation of this file.
1// Copyright (C) 2016 The Qt Company Ltd.
2// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only
3
4#include <private/qdrawhelper_neon_p.h>
5#include <private/qblendfunctions_p.h>
6#include <private/qmath_p.h>
7#include <private/qpixellayout_p.h>
8
9#ifdef __ARM_NEON__
10
11#include <private/qpaintengine_raster_p.h>
12
14
16{
17 const int epilogueSize = count % 16;
18#if defined(Q_CC_GHS) || defined(Q_CC_MSVC)
19 // inline assembler free version:
20 if (count >= 16) {
21 quint32 *const neonEnd = dest + count - epilogueSize;
22 const uint32x4_t valueVector1 = vdupq_n_u32(value);
23 const uint32x4x4_t valueVector4 = { valueVector1, valueVector1, valueVector1, valueVector1 };
24 do {
25 vst4q_u32(dest, valueVector4);
26 dest += 16;
27 } while (dest != neonEnd);
28 }
29#elif !defined(Q_PROCESSOR_ARM_64)
30 if (count >= 16) {
31 quint32 *const neonEnd = dest + count - epilogueSize;
32 register uint32x4_t valueVector1 asm ("q0") = vdupq_n_u32(value);
33 register uint32x4_t valueVector2 asm ("q1") = valueVector1;
34 while (dest != neonEnd) {
35 asm volatile (
36 "vst2.32 { d0, d1, d2, d3 }, [%[DST]] !\n\t"
37 "vst2.32 { d0, d1, d2, d3 }, [%[DST]] !\n\t"
38 : [DST]"+r" (dest)
39 : [VALUE1]"w"(valueVector1), [VALUE2]"w"(valueVector2)
40 : "memory"
41 );
42 }
43 }
44#else
45 if (count >= 16) {
46 quint32 *const neonEnd = dest + count - epilogueSize;
47 register uint32x4_t valueVector1 asm ("v0") = vdupq_n_u32(value);
48 register uint32x4_t valueVector2 asm ("v1") = valueVector1;
49 while (dest != neonEnd) {
50 asm volatile (
51 "st2 { v0.4s, v1.4s }, [%[DST]], #32 \n\t"
52 "st2 { v0.4s, v1.4s }, [%[DST]], #32 \n\t"
53 : [DST]"+r" (dest)
54 : [VALUE1]"w"(valueVector1), [VALUE2]"w"(valueVector2)
55 : "memory"
56 );
57 }
58 }
59#endif
60
61 switch (epilogueSize)
62 {
63 case 15: *dest++ = value; Q_FALLTHROUGH();
64 case 14: *dest++ = value; Q_FALLTHROUGH();
65 case 13: *dest++ = value; Q_FALLTHROUGH();
66 case 12: *dest++ = value; Q_FALLTHROUGH();
67 case 11: *dest++ = value; Q_FALLTHROUGH();
68 case 10: *dest++ = value; Q_FALLTHROUGH();
69 case 9: *dest++ = value; Q_FALLTHROUGH();
70 case 8: *dest++ = value; Q_FALLTHROUGH();
71 case 7: *dest++ = value; Q_FALLTHROUGH();
72 case 6: *dest++ = value; Q_FALLTHROUGH();
73 case 5: *dest++ = value; Q_FALLTHROUGH();
74 case 4: *dest++ = value; Q_FALLTHROUGH();
75 case 3: *dest++ = value; Q_FALLTHROUGH();
76 case 2: *dest++ = value; Q_FALLTHROUGH();
77 case 1: *dest++ = value;
78 }
79}
80
81static inline uint16x8_t qvdiv_255_u16(uint16x8_t x, uint16x8_t half)
82{
83 // result = (x + (x >> 8) + 0x80) >> 8
84
85 const uint16x8_t temp = vshrq_n_u16(x, 8); // x >> 8
86 const uint16x8_t sum_part = vaddq_u16(x, half); // x + 0x80
87 const uint16x8_t sum = vaddq_u16(temp, sum_part);
88
89 return vshrq_n_u16(sum, 8);
90}
91
92static inline uint16x8_t qvbyte_mul_u16(uint16x8_t x, uint16x8_t alpha, uint16x8_t half)
93{
94 // t = qRound(x * alpha / 255.0)
95
96 const uint16x8_t t = vmulq_u16(x, alpha); // t
97 return qvdiv_255_u16(t, half);
98}
99
100static inline uint16x8_t qvinterpolate_pixel_255(uint16x8_t x, uint16x8_t a, uint16x8_t y, uint16x8_t b, uint16x8_t half)
101{
102 // t = x * a + y * b
103
104 const uint16x8_t ta = vmulq_u16(x, a);
105 const uint16x8_t tb = vmulq_u16(y, b);
106
107 return qvdiv_255_u16(vaddq_u16(ta, tb), half);
108}
109
110static inline uint16x8_t qvsource_over_u16(uint16x8_t src16, uint16x8_t dst16, uint16x8_t half, uint16x8_t full)
111{
112 const uint16x4_t alpha16_high = vdup_lane_u16(vget_high_u16(src16), 3);
113 const uint16x4_t alpha16_low = vdup_lane_u16(vget_low_u16(src16), 3);
114
115 const uint16x8_t alpha16 = vsubq_u16(full, vcombine_u16(alpha16_low, alpha16_high));
116
117 return vaddq_u16(src16, qvbyte_mul_u16(dst16, alpha16, half));
118}
119
120#if defined(ENABLE_PIXMAN_DRAWHELPERS)
121extern "C" void
122pixman_composite_over_8888_0565_asm_neon (int32_t w,
123 int32_t h,
124 uint16_t *dst,
125 int32_t dst_stride,
126 uint32_t *src,
127 int32_t src_stride);
128
129extern "C" void
130pixman_composite_over_8888_8888_asm_neon (int32_t w,
131 int32_t h,
132 uint32_t *dst,
133 int32_t dst_stride,
134 uint32_t *src,
135 int32_t src_stride);
136
137extern "C" void
138pixman_composite_src_0565_8888_asm_neon (int32_t w,
139 int32_t h,
140 uint32_t *dst,
141 int32_t dst_stride,
142 uint16_t *src,
143 int32_t src_stride);
144
145extern "C" void
146pixman_composite_over_n_8_0565_asm_neon (int32_t w,
147 int32_t h,
148 uint16_t *dst,
149 int32_t dst_stride,
150 uint32_t src,
151 int32_t unused,
152 uint8_t *mask,
153 int32_t mask_stride);
154
155extern "C" void
156pixman_composite_scanline_over_asm_neon (int32_t w,
157 const uint32_t *dst,
158 const uint32_t *src);
159
160extern "C" void
161pixman_composite_src_0565_0565_asm_neon (int32_t w,
162 int32_t h,
163 uint16_t *dst,
164 int32_t dst_stride,
165 uint16_t *src,
166 int32_t src_stride);
167// qblendfunctions.cpp
168void qt_blend_argb32_on_rgb16_const_alpha(uchar *destPixels, int dbpl,
169 const uchar *srcPixels, int sbpl,
170 int w, int h,
171 int const_alpha);
172
173void qt_blend_rgb16_on_argb32_neon(uchar *destPixels, int dbpl,
174 const uchar *srcPixels, int sbpl,
175 int w, int h,
176 int const_alpha)
177{
178 dbpl /= 4;
179 sbpl /= 2;
180
181 quint32 *dst = (quint32 *) destPixels;
182 quint16 *src = (quint16 *) srcPixels;
183
184 if (const_alpha != 256) {
185 quint8 a = (255 * const_alpha) >> 8;
186 quint8 ia = 255 - a;
187
188 while (--h >= 0) {
189 for (int x=0; x<w; ++x)
191 dst += dbpl;
192 src += sbpl;
193 }
194 return;
195 }
196
197 pixman_composite_src_0565_8888_asm_neon(w, h, dst, dbpl, src, sbpl);
198}
199
200// qblendfunctions.cpp
201void qt_blend_rgb16_on_rgb16(uchar *dst, int dbpl,
202 const uchar *src, int sbpl,
203 int w, int h,
204 int const_alpha);
205
206
207template <int N>
208static inline void scanLineBlit16(quint16 *dst, quint16 *src, int dstride)
209{
210 if (N >= 2) {
211 ((quint32 *)dst)[0] = ((quint32 *)src)[0];
212 __builtin_prefetch(dst + dstride, 1, 0);
213 }
214 for (int i = 1; i < N/2; ++i)
215 ((quint32 *)dst)[i] = ((quint32 *)src)[i];
216 if (N & 1)
217 dst[N-1] = src[N-1];
218}
219
220template <int Width>
221static inline void blockBlit16(quint16 *dst, quint16 *src, int dstride, int sstride, int h)
222{
223 union {
226 } u;
227
228 u.pointer = dst;
229
230 if (u.address & 2) {
231 while (--h >= 0) {
232 // align dst
233 dst[0] = src[0];
234 if (Width > 1)
235 scanLineBlit16<Width-1>(dst + 1, src + 1, dstride);
236 dst += dstride;
237 src += sstride;
238 }
239 } else {
240 while (--h >= 0) {
241 scanLineBlit16<Width>(dst, src, dstride);
242
243 dst += dstride;
244 src += sstride;
245 }
246 }
247}
248
249void qt_blend_rgb16_on_rgb16_neon(uchar *destPixels, int dbpl,
250 const uchar *srcPixels, int sbpl,
251 int w, int h,
252 int const_alpha)
253{
254 // testing show that the default memcpy is faster for widths 150 and up
255 if (const_alpha != 256 || w >= 150) {
256 qt_blend_rgb16_on_rgb16(destPixels, dbpl, srcPixels, sbpl, w, h, const_alpha);
257 return;
258 }
259
260 int dstride = dbpl / 2;
261 int sstride = sbpl / 2;
262
263 quint16 *dst = (quint16 *) destPixels;
264 quint16 *src = (quint16 *) srcPixels;
265
266 switch (w) {
267#define BLOCKBLIT(n) case n: blockBlit16<n>(dst, src, dstride, sstride, h); return;
268 BLOCKBLIT(1);
269 BLOCKBLIT(2);
270 BLOCKBLIT(3);
271 BLOCKBLIT(4);
272 BLOCKBLIT(5);
273 BLOCKBLIT(6);
274 BLOCKBLIT(7);
275 BLOCKBLIT(8);
276 BLOCKBLIT(9);
277 BLOCKBLIT(10);
278 BLOCKBLIT(11);
279 BLOCKBLIT(12);
280 BLOCKBLIT(13);
281 BLOCKBLIT(14);
282 BLOCKBLIT(15);
283#undef BLOCKBLIT
284 default:
285 break;
286 }
287
288 pixman_composite_src_0565_0565_asm_neon (w, h, dst, dstride, src, sstride);
289}
290
291extern "C" void blend_8_pixels_argb32_on_rgb16_neon(quint16 *dst, const quint32 *src, int const_alpha);
292
293void qt_blend_argb32_on_rgb16_neon(uchar *destPixels, int dbpl,
294 const uchar *srcPixels, int sbpl,
295 int w, int h,
296 int const_alpha)
297{
298 quint16 *dst = (quint16 *) destPixels;
299 quint32 *src = (quint32 *) srcPixels;
300
301 if (const_alpha != 256) {
302 for (int y=0; y<h; ++y) {
303 int i = 0;
304 for (; i < w-7; i += 8)
305 blend_8_pixels_argb32_on_rgb16_neon(&dst[i], &src[i], const_alpha);
306
307 if (i < w) {
308 int tail = w - i;
309
310 quint16 dstBuffer[8];
311 quint32 srcBuffer[8];
312
313 for (int j = 0; j < tail; ++j) {
314 dstBuffer[j] = dst[i + j];
315 srcBuffer[j] = src[i + j];
316 }
317
318 blend_8_pixels_argb32_on_rgb16_neon(dstBuffer, srcBuffer, const_alpha);
319
320 for (int j = 0; j < tail; ++j)
321 dst[i + j] = dstBuffer[j];
322 }
323
324 dst = (quint16 *)(((uchar *) dst) + dbpl);
325 src = (quint32 *)(((uchar *) src) + sbpl);
326 }
327 return;
328 }
329
330 pixman_composite_over_8888_0565_asm_neon(w, h, dst, dbpl / 2, src, sbpl / 4);
331}
332#endif
333
334void qt_blend_argb32_on_argb32_scanline_neon(uint *dest, const uint *src, int length, uint const_alpha)
335{
336 if (const_alpha == 255) {
337#if defined(ENABLE_PIXMAN_DRAWHELPERS)
338 pixman_composite_scanline_over_asm_neon(length, dest, src);
339#else
340 qt_blend_argb32_on_argb32_neon((uchar *)dest, 4 * length, (uchar *)src, 4 * length, length, 1, 256);
341#endif
342 } else {
343 qt_blend_argb32_on_argb32_neon((uchar *)dest, 4 * length, (uchar *)src, 4 * length, length, 1, (const_alpha * 256) / 255);
344 }
345}
346
// Source-over blend of an ARGB32 rect onto ARGB32.
// const_alpha is in 0..256: 256 = plain source-over, otherwise the source
// is modulated by const_alpha first. Strides (dbpl/sbpl) are in bytes.
void qt_blend_argb32_on_argb32_neon(uchar *destPixels, int dbpl,
                                    const uchar *srcPixels, int sbpl,
                                    int w, int h,
                                    int const_alpha)
{
    const uint *src = (const uint *) srcPixels;
    uint *dst = (uint *) destPixels;
    uint16x8_t half = vdupq_n_u16(0x80);   // rounding constant for /255
    uint16x8_t full = vdupq_n_u16(0xff);
    if (const_alpha == 256) {
#if defined(ENABLE_PIXMAN_DRAWHELPERS)
        pixman_composite_over_8888_8888_asm_neon(w, h, (uint32_t *)destPixels, dbpl / 4, (uint32_t *)srcPixels, sbpl / 4);
#else
        for (int y=0; y<h; ++y) {
            int x = 0;
            // Vector path: 4 pixels at a time; skip groups that are all
            // fully transparent (all four words zero).
            for (; x < w-3; x += 4) {
                if (src[x] | src[x+1] | src[x+2] | src[x+3]) {
                    uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
                    uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);

                    // Widen each byte to a 16-bit lane so the blend math
                    // cannot overflow.
                    const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
                    const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);

                    const uint8x8_t src8_low = vget_low_u8(src8);
                    const uint8x8_t dst8_low = vget_low_u8(dst8);

                    const uint8x8_t src8_high = vget_high_u8(src8);
                    const uint8x8_t dst8_high = vget_high_u8(dst8);

                    const uint16x8_t src16_low = vmovl_u8(src8_low);
                    const uint16x8_t dst16_low = vmovl_u8(dst8_low);

                    const uint16x8_t src16_high = vmovl_u8(src8_high);
                    const uint16x8_t dst16_high = vmovl_u8(dst8_high);

                    const uint16x8_t result16_low = qvsource_over_u16(src16_low, dst16_low, half, full);
                    const uint16x8_t result16_high = qvsource_over_u16(src16_high, dst16_high, half, full);

                    // Narrow back to bytes and store.
                    const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
                    const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

                    vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
                }
            }
            // Scalar tail: opaque source overwrites, transparent skips,
            // otherwise classic premultiplied source-over.
            for (; x<w; ++x) {
                uint s = src[x];
                if (s >= 0xff000000)
                    dst[x] = s;
                else if (s != 0)
                    dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s));
            }
            dst = (quint32 *)(((uchar *) dst) + dbpl);
            src = (const quint32 *)(((const uchar *) src) + sbpl);
        }
#endif
    } else if (const_alpha != 0) {
        // Rescale 0..256 alpha to 0..255 for the byte math below.
        const_alpha = (const_alpha * 255) >> 8;
        uint16x8_t const_alpha16 = vdupq_n_u16(const_alpha);
        for (int y = 0; y < h; ++y) {
            int x = 0;
            for (; x < w-3; x += 4) {
                if (src[x] | src[x+1] | src[x+2] | src[x+3]) {
                    uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
                    uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);

                    const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
                    const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);

                    const uint8x8_t src8_low = vget_low_u8(src8);
                    const uint8x8_t dst8_low = vget_low_u8(dst8);

                    const uint8x8_t src8_high = vget_high_u8(src8);
                    const uint8x8_t dst8_high = vget_high_u8(dst8);

                    const uint16x8_t src16_low = vmovl_u8(src8_low);
                    const uint16x8_t dst16_low = vmovl_u8(dst8_low);

                    const uint16x8_t src16_high = vmovl_u8(src8_high);
                    const uint16x8_t dst16_high = vmovl_u8(dst8_high);

                    // Modulate source by const_alpha, then source-over.
                    const uint16x8_t srcalpha16_low = qvbyte_mul_u16(src16_low, const_alpha16, half);
                    const uint16x8_t srcalpha16_high = qvbyte_mul_u16(src16_high, const_alpha16, half);

                    const uint16x8_t result16_low = qvsource_over_u16(srcalpha16_low, dst16_low, half, full);
                    const uint16x8_t result16_high = qvsource_over_u16(srcalpha16_high, dst16_high, half, full);

                    const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
                    const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

                    vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
                }
            }
            // Scalar tail, same modulate-then-source-over math.
            for (; x<w; ++x) {
                uint s = src[x];
                if (s != 0) {
                    s = BYTE_MUL(s, const_alpha);
                    dst[x] = s + BYTE_MUL(dst[x], qAlpha(~s));
                }
            }
            dst = (quint32 *)(((uchar *) dst) + dbpl);
            src = (const quint32 *)(((const uchar *) src) + sbpl);
        }
    }
}
451
452// qblendfunctions.cpp
453void qt_blend_rgb32_on_rgb32(uchar *destPixels, int dbpl,
454 const uchar *srcPixels, int sbpl,
455 int w, int h,
456 int const_alpha);
457
// Blend an opaque RGB32 rect onto RGB32 with a constant alpha.
// const_alpha == 256 is a straight copy (generic path); 0 is a no-op;
// anything in between interpolates src and dst per channel.
void qt_blend_rgb32_on_rgb32_neon(uchar *destPixels, int dbpl,
                                  const uchar *srcPixels, int sbpl,
                                  int w, int h,
                                  int const_alpha)
{
    if (const_alpha != 256) {
        if (const_alpha != 0) {
            const uint *src = (const uint *) srcPixels;
            uint *dst = (uint *) destPixels;
            uint16x8_t half = vdupq_n_u16(0x80);   // rounding constant for /255
            // Rescale 0..256 alpha to 0..255 for byte math.
            const_alpha = (const_alpha * 255) >> 8;
            int one_minus_const_alpha = 255 - const_alpha;
            uint16x8_t const_alpha16 = vdupq_n_u16(const_alpha);
            uint16x8_t one_minus_const_alpha16 = vdupq_n_u16(255 - const_alpha);
            for (int y = 0; y < h; ++y) {
                int x = 0;
                // Vector path: widen 4 pixels to 16-bit lanes, interpolate,
                // narrow back.
                for (; x < w-3; x += 4) {
                    uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
                    uint32x4_t dst32 = vld1q_u32((uint32_t *)&dst[x]);

                    const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
                    const uint8x16_t dst8 = vreinterpretq_u8_u32(dst32);

                    const uint8x8_t src8_low = vget_low_u8(src8);
                    const uint8x8_t dst8_low = vget_low_u8(dst8);

                    const uint8x8_t src8_high = vget_high_u8(src8);
                    const uint8x8_t dst8_high = vget_high_u8(dst8);

                    const uint16x8_t src16_low = vmovl_u8(src8_low);
                    const uint16x8_t dst16_low = vmovl_u8(dst8_low);

                    const uint16x8_t src16_high = vmovl_u8(src8_high);
                    const uint16x8_t dst16_high = vmovl_u8(dst8_high);

                    const uint16x8_t result16_low = qvinterpolate_pixel_255(src16_low, const_alpha16, dst16_low, one_minus_const_alpha16, half);
                    const uint16x8_t result16_high = qvinterpolate_pixel_255(src16_high, const_alpha16, dst16_high, one_minus_const_alpha16, half);

                    const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
                    const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));

                    vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
                }
                // Scalar tail for the last 0..3 pixels.
                for (; x<w; ++x) {
                    dst[x] = INTERPOLATE_PIXEL_255(src[x], const_alpha, dst[x], one_minus_const_alpha);
                }
                dst = (quint32 *)(((uchar *) dst) + dbpl);
                src = (const quint32 *)(((const uchar *) src) + sbpl);
            }
        }
    } else {
        qt_blend_rgb32_on_rgb32(destPixels, dbpl, srcPixels, sbpl, w, h, const_alpha);
    }
}
512
513#if defined(ENABLE_PIXMAN_DRAWHELPERS)
514extern void qt_alphamapblit_quint16(QRasterBuffer *rasterBuffer,
515 int x, int y, const QRgba64 &color,
516 const uchar *map,
517 int mapWidth, int mapHeight, int mapStride,
518 const QClipData *clip, bool useGammaCorrection);
519
520void qt_alphamapblit_quint16_neon(QRasterBuffer *rasterBuffer,
521 int x, int y, const QRgba64 &color,
522 const uchar *bitmap,
523 int mapWidth, int mapHeight, int mapStride,
524 const QClipData *clip, bool useGammaCorrection)
525{
526 if (clip || useGammaCorrection) {
527 qt_alphamapblit_quint16(rasterBuffer, x, y, color, bitmap, mapWidth, mapHeight, mapStride, clip, useGammaCorrection);
528 return;
529 }
530
531 quint16 *dest = reinterpret_cast<quint16*>(rasterBuffer->scanLine(y)) + x;
532 const int destStride = rasterBuffer->bytesPerLine() / sizeof(quint16);
533
534 uchar *mask = const_cast<uchar *>(bitmap);
535 const uint c = color.toArgb32();
536
537 pixman_composite_over_n_8_0565_asm_neon(mapWidth, mapHeight, dest, destStride, c, 0, mask, mapStride);
538}
539
540extern "C" void blend_8_pixels_rgb16_on_rgb16_neon(quint16 *dst, const quint16 *src, int const_alpha);
541
// Adapter that batches single-pixel writes into groups of eight so they can
// be fed to an 8-pixel NEON blend function. The caller is expected to call
// write() with a destination pointer that advances by one pixel per call
// (so when the eighth pixel arrives, dst - 7 is the start of the group),
// and flush() once at the end to drain any partial group.
template <typename SRC, typename BlendFunc>
struct Blend_on_RGB16_SourceAndConstAlpha_Neon {
    Blend_on_RGB16_SourceAndConstAlpha_Neon(BlendFunc blender, int const_alpha)
        : m_index(0)
        , m_blender(blender)
        , m_const_alpha(const_alpha)
    {
    }

    // Buffer one source pixel; when eight are pending, blend them into the
    // eight destination pixels ending at `dst`.
    inline void write(quint16 *dst, quint32 src)
    {
        srcBuffer[m_index++] = src;

        if (m_index == 8) {
            m_blender(dst - 7, srcBuffer, m_const_alpha);
            m_index = 0;
        }
    }

    // Blend any remaining (< 8) pixels. `dst` points one past the last
    // destination pixel written; the pending pixels end at dst - 1.
    inline void flush(quint16 *dst)
    {
        if (m_index > 0) {
            // Stage through a stack buffer since the blender always
            // processes exactly eight pixels.
            quint16 dstBuffer[8];
            for (int i = 0; i < m_index; ++i)
                dstBuffer[i] = dst[i - m_index];

            m_blender(dstBuffer, srcBuffer, m_const_alpha);

            for (int i = 0; i < m_index; ++i)
                dst[i - m_index] = dstBuffer[i];

            m_index = 0;
        }
    }

    SRC srcBuffer[8];       // pending source pixels

    int m_index;            // number of pending pixels (0..7)
    BlendFunc m_blender;    // 8-pixel NEON blend routine
    int m_const_alpha;
};
583
584template <typename SRC, typename BlendFunc>
585Blend_on_RGB16_SourceAndConstAlpha_Neon<SRC, BlendFunc>
586Blend_on_RGB16_SourceAndConstAlpha_Neon_create(BlendFunc blender, int const_alpha)
587{
588 return Blend_on_RGB16_SourceAndConstAlpha_Neon<SRC, BlendFunc>(blender, const_alpha);
589}
590
591void qt_scale_image_argb32_on_rgb16_neon(uchar *destPixels, int dbpl,
592 const uchar *srcPixels, int sbpl, int srch,
593 const QRectF &targetRect,
594 const QRectF &sourceRect,
595 const QRect &clip,
596 int const_alpha)
597{
598 if (const_alpha == 0)
599 return;
600
601 qt_scale_image_16bit<quint32>(destPixels, dbpl, srcPixels, sbpl, srch, targetRect, sourceRect, clip,
602 Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint32>(blend_8_pixels_argb32_on_rgb16_neon, const_alpha));
603}
604
605void qt_scale_image_rgb16_on_rgb16(uchar *destPixels, int dbpl,
606 const uchar *srcPixels, int sbpl, int srch,
607 const QRectF &targetRect,
608 const QRectF &sourceRect,
609 const QRect &clip,
610 int const_alpha);
611
612void qt_scale_image_rgb16_on_rgb16_neon(uchar *destPixels, int dbpl,
613 const uchar *srcPixels, int sbpl, int srch,
614 const QRectF &targetRect,
615 const QRectF &sourceRect,
616 const QRect &clip,
617 int const_alpha)
618{
619 if (const_alpha == 0)
620 return;
621
622 if (const_alpha == 256) {
623 qt_scale_image_rgb16_on_rgb16(destPixels, dbpl, srcPixels, sbpl, srch, targetRect, sourceRect, clip, const_alpha);
624 return;
625 }
626
627 qt_scale_image_16bit<quint16>(destPixels, dbpl, srcPixels, sbpl, srch, targetRect, sourceRect, clip,
628 Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint16>(blend_8_pixels_rgb16_on_rgb16_neon, const_alpha));
629}
630
631extern void qt_transform_image_rgb16_on_rgb16(uchar *destPixels, int dbpl,
632 const uchar *srcPixels, int sbpl,
633 const QRectF &targetRect,
634 const QRectF &sourceRect,
635 const QRect &clip,
636 const QTransform &targetRectTransform,
637 int const_alpha);
638
639void qt_transform_image_rgb16_on_rgb16_neon(uchar *destPixels, int dbpl,
640 const uchar *srcPixels, int sbpl,
641 const QRectF &targetRect,
642 const QRectF &sourceRect,
643 const QRect &clip,
644 const QTransform &targetRectTransform,
645 int const_alpha)
646{
647 if (const_alpha == 0)
648 return;
649
650 if (const_alpha == 256) {
651 qt_transform_image_rgb16_on_rgb16(destPixels, dbpl, srcPixels, sbpl, targetRect, sourceRect, clip, targetRectTransform, const_alpha);
652 return;
653 }
654
655 qt_transform_image(reinterpret_cast<quint16 *>(destPixels), dbpl,
656 reinterpret_cast<const quint16 *>(srcPixels), sbpl, targetRect, sourceRect, clip, targetRectTransform,
657 Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint16>(blend_8_pixels_rgb16_on_rgb16_neon, const_alpha));
658}
659
660void qt_transform_image_argb32_on_rgb16_neon(uchar *destPixels, int dbpl,
661 const uchar *srcPixels, int sbpl,
662 const QRectF &targetRect,
663 const QRectF &sourceRect,
664 const QRect &clip,
665 const QTransform &targetRectTransform,
666 int const_alpha)
667{
668 if (const_alpha == 0)
669 return;
670
671 qt_transform_image(reinterpret_cast<quint16 *>(destPixels), dbpl,
672 reinterpret_cast<const quint32 *>(srcPixels), sbpl, targetRect, sourceRect, clip, targetRectTransform,
673 Blend_on_RGB16_SourceAndConstAlpha_Neon_create<quint32>(blend_8_pixels_argb32_on_rgb16_neon, const_alpha));
674}
675
// Expand exactly eight RGB16 (r5g6b5) pixels at `src` into fully opaque
// ARGB32 pixels at `dst`. 32-bit ARM inline assembly; the low bits of each
// channel are replicated from the high bits when widening 5/6-bit values
// to 8 bits (vsri instructions).
static inline void convert_8_pixels_rgb16_to_argb32(quint32 *dst, const quint16 *src)
{
    asm volatile (
        "vld1.16     { d0, d1 }, [%[SRC]]\n\t"

        /* convert 8 r5g6b5 pixel data from {d0, d1} to planar 8-bit format
           and put data into d4 - red, d3 - green, d2 - blue */
        "vshrn.u16   d4,  q0, #8\n\t"
        "vshrn.u16   d3,  q0, #3\n\t"
        "vsli.u16    q0,  q0, #5\n\t"
        "vsri.u8     d4,  d4, #5\n\t"
        "vsri.u8     d3,  d3, #6\n\t"
        "vshrn.u16   d2,  q0, #2\n\t"

        /* fill d5 - alpha with 0xff */
        "mov         r2, #255\n\t"
        "vdup.8      d5, r2\n\t"

        "vst4.8      { d2, d3, d4, d5 }, [%[DST]]"
        : : [DST]"r" (dst), [SRC]"r" (src)
        : "memory", "r2", "d0", "d1", "d2", "d3", "d4", "d5"
    );
}
699
700uint * QT_FASTCALL qt_destFetchRGB16_neon(uint *buffer, QRasterBuffer *rasterBuffer, int x, int y, int length)
701{
702 const ushort *data = (const ushort *)rasterBuffer->scanLine(y) + x;
703
704 int i = 0;
705 for (; i < length - 7; i += 8)
706 convert_8_pixels_rgb16_to_argb32(&buffer[i], &data[i]);
707
708 if (i < length) {
709 quint16 srcBuffer[8];
710 quint32 dstBuffer[8];
711
712 int tail = length - i;
713 for (int j = 0; j < tail; ++j)
714 srcBuffer[j] = data[i + j];
715
716 convert_8_pixels_rgb16_to_argb32(dstBuffer, srcBuffer);
717
718 for (int j = 0; j < tail; ++j)
719 buffer[i + j] = dstBuffer[j];
720 }
721
722 return buffer;
723}
724
// Pack exactly eight ARGB32 pixels at `src` into RGB16 (r5g6b5) at `dst`.
// 32-bit ARM inline assembly; the source alpha (d3) is loaded but ignored.
static inline void convert_8_pixels_argb32_to_rgb16(quint16 *dst, const quint32 *src)
{
    asm volatile (
        "vld4.8      { d0, d1, d2, d3 }, [%[SRC]]\n\t"

        /* convert to r5g6b5 and store it into {d28, d29} */
        "vshll.u8    q14, d2, #8\n\t"
        "vshll.u8    q8,  d1, #8\n\t"
        "vshll.u8    q9,  d0, #8\n\t"
        "vsri.u16    q14, q8, #5\n\t"
        "vsri.u16    q14, q9, #11\n\t"

        "vst1.16     { d28, d29 }, [%[DST]]"
        : : [DST]"r" (dst), [SRC]"r" (src)
        : "memory", "d0", "d1", "d2", "d3", "d16", "d17", "d18", "d19", "d28", "d29"
    );
}
742
743void QT_FASTCALL qt_destStoreRGB16_neon(QRasterBuffer *rasterBuffer, int x, int y, const uint *buffer, int length)
744{
745 quint16 *data = (quint16*)rasterBuffer->scanLine(y) + x;
746
747 int i = 0;
748 for (; i < length - 7; i += 8)
749 convert_8_pixels_argb32_to_rgb16(&data[i], &buffer[i]);
750
751 if (i < length) {
752 quint32 srcBuffer[8];
753 quint16 dstBuffer[8];
754
755 int tail = length - i;
756 for (int j = 0; j < tail; ++j)
757 srcBuffer[j] = buffer[i + j];
758
759 convert_8_pixels_argb32_to_rgb16(dstBuffer, srcBuffer);
760
761 for (int j = 0; j < tail; ++j)
762 data[i + j] = dstBuffer[j];
763 }
764}
765#endif
766
767void QT_FASTCALL comp_func_solid_SourceOver_neon(uint *destPixels, int length, uint color, uint const_alpha)
768{
769 if ((const_alpha & qAlpha(color)) == 255) {
770 qt_memfill32(destPixels, color, length);
771 } else {
772 if (const_alpha != 255)
773 color = BYTE_MUL(color, const_alpha);
774
775 const quint32 minusAlphaOfColor = qAlpha(~color);
776 int x = 0;
777
778 uint32_t *dst = (uint32_t *) destPixels;
779 const uint32x4_t colorVector = vdupq_n_u32(color);
780 uint16x8_t half = vdupq_n_u16(0x80);
781 const uint16x8_t minusAlphaOfColorVector = vdupq_n_u16(minusAlphaOfColor);
782
783 for (; x < length-3; x += 4) {
784 uint32x4_t dstVector = vld1q_u32(&dst[x]);
785
786 const uint8x16_t dst8 = vreinterpretq_u8_u32(dstVector);
787
788 const uint8x8_t dst8_low = vget_low_u8(dst8);
789 const uint8x8_t dst8_high = vget_high_u8(dst8);
790
791 const uint16x8_t dst16_low = vmovl_u8(dst8_low);
792 const uint16x8_t dst16_high = vmovl_u8(dst8_high);
793
794 const uint16x8_t result16_low = qvbyte_mul_u16(dst16_low, minusAlphaOfColorVector, half);
795 const uint16x8_t result16_high = qvbyte_mul_u16(dst16_high, minusAlphaOfColorVector, half);
796
797 const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result16_low));
798 const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result16_high));
799
800 uint32x4_t blendedPixels = vcombine_u32(result32_low, result32_high);
801 uint32x4_t colorPlusBlendedPixels = vaddq_u32(colorVector, blendedPixels);
802 vst1q_u32(&dst[x], colorPlusBlendedPixels);
803 }
804
806 destPixels[x] = color + BYTE_MUL(destPixels[x], minusAlphaOfColor);
807 }
808}
809
810void QT_FASTCALL comp_func_Plus_neon(uint *dst, const uint *src, int length, uint const_alpha)
811{
812 if (const_alpha == 255) {
813 uint *const end = dst + length;
814 uint *const neonEnd = end - 3;
815
816 while (dst < neonEnd) {
817 uint8x16_t vs = vld1q_u8((const uint8_t*)src);
818 const uint8x16_t vd = vld1q_u8((uint8_t*)dst);
819 vs = vqaddq_u8(vs, vd);
820 vst1q_u8((uint8_t*)dst, vs);
821 src += 4;
822 dst += 4;
823 };
824
825 while (dst != end) {
827 ++dst;
828 ++src;
829 }
830 } else {
831 int x = 0;
832 const int one_minus_const_alpha = 255 - const_alpha;
833 const uint16x8_t constAlphaVector = vdupq_n_u16(const_alpha);
834 const uint16x8_t oneMinusconstAlphaVector = vdupq_n_u16(one_minus_const_alpha);
835
836 const uint16x8_t half = vdupq_n_u16(0x80);
837 for (; x < length - 3; x += 4) {
838 const uint32x4_t src32 = vld1q_u32((uint32_t *)&src[x]);
839 const uint8x16_t src8 = vreinterpretq_u8_u32(src32);
840 uint8x16_t dst8 = vld1q_u8((uint8_t *)&dst[x]);
841 uint8x16_t result = vqaddq_u8(dst8, src8);
842
843 uint16x8_t result_low = vmovl_u8(vget_low_u8(result));
844 uint16x8_t result_high = vmovl_u8(vget_high_u8(result));
845
846 uint16x8_t dst_low = vmovl_u8(vget_low_u8(dst8));
847 uint16x8_t dst_high = vmovl_u8(vget_high_u8(dst8));
848
849 result_low = qvinterpolate_pixel_255(result_low, constAlphaVector, dst_low, oneMinusconstAlphaVector, half);
850 result_high = qvinterpolate_pixel_255(result_high, constAlphaVector, dst_high, oneMinusconstAlphaVector, half);
851
852 const uint32x2_t result32_low = vreinterpret_u32_u8(vmovn_u16(result_low));
853 const uint32x2_t result32_high = vreinterpret_u32_u8(vmovn_u16(result_high));
854 vst1q_u32((uint32_t *)&dst[x], vcombine_u32(result32_low, result32_high));
855 }
856
858 dst[x] = comp_func_Plus_one_pixel_const_alpha(dst[x], src[x], const_alpha, one_minus_const_alpha);
859 }
860}
861
862#if defined(ENABLE_PIXMAN_DRAWHELPERS)
863static const int tileSize = 32;
864
865extern "C" void qt_rotate90_16_neon(quint16 *dst, const quint16 *src, int sstride, int dstride, int count);
866
// Rotate a w x h block of 16-bit pixels by 90 degrees into destPixels.
// Works in tileSize x tileSize tiles for cache locality. Rows are first
// brought to 4-byte destination alignment (`unaligned` leading rows), then
// the NEON routine handles 8-column groups, and scalar loops cover the
// remainders. Strides arrive in bytes and are converted to pixels.
void qt_memrotate90_16_neon(const uchar *srcPixels, int w, int h, int sstride, uchar *destPixels, int dstride)
{
    const ushort *src = (const ushort *)srcPixels;
    ushort *dest = (ushort *)destPixels;

    sstride /= sizeof(ushort);
    dstride /= sizeof(ushort);

    // Pixels per 32-bit word (2).
    const int pack = sizeof(quint32) / sizeof(ushort);
    // Leading rows needed to bring `dest` to 4-byte alignment (capped at h).
    const int unaligned =
        qMin(uint((quintptr(dest) & (sizeof(quint32)-1)) / sizeof(ushort)), uint(h));
    const int restX = w % tileSize;
    const int restY = (h - unaligned) % tileSize;
    // Trailing rows that cannot form a full 32-bit packed store.
    const int unoptimizedY = restY % pack;
    const int numTilesX = w / tileSize + (restX > 0);
    const int numTilesY = (h - unaligned) / tileSize + (restY >= pack);

    for (int tx = 0; tx < numTilesX; ++tx) {
        // Columns are walked right-to-left for the 90-degree rotation.
        const int startx = w - tx * tileSize - 1;
        const int stopx = qMax(startx - tileSize, 0);

        if (unaligned) {
            // Scalar copy of the alignment rows.
            for (int x = startx; x >= stopx; --x) {
                ushort *d = dest + (w - x - 1) * dstride;
                for (int y = 0; y < unaligned; ++y) {
                    *d++ = src[y * sstride + x];
                }
            }
        }

        for (int ty = 0; ty < numTilesY; ++ty) {
            const int starty = ty * tileSize + unaligned;
            const int stopy = qMin(starty + tileSize, h - unoptimizedY);

            int x = startx;
            // qt_rotate90_16_neon writes to eight rows, four pixels at a time
            for (; x >= stopx + 7; x -= 8) {
                ushort *d = dest + (w - x - 1) * dstride + starty;
                const ushort *s = &src[starty * sstride + x - 7];
                qt_rotate90_16_neon(d, s, sstride * 2, dstride * 2, stopy - starty);
            }

            // Remaining (< 8) columns: pack `pack` pixels per 32-bit store.
            for (; x >= stopx; --x) {
                quint32 *d = reinterpret_cast<quint32*>(dest + (w - x - 1) * dstride + starty);
                for (int y = starty; y < stopy; y += pack) {
                    quint32 c = src[y * sstride + x];
                    for (int i = 1; i < pack; ++i) {
                        // Endianness-dependent shift to place the next pixel
                        // in the upper half of the word.
                        const int shift = (sizeof(int) * 8 / pack * i);
                        const ushort color = src[(y + i) * sstride + x];
                        c |= color << shift;
                    }
                    *d++ = c;
                }
            }
        }

        if (unoptimizedY) {
            // Scalar copy of the trailing rows that don't fill a word.
            const int starty = h - unoptimizedY;
            for (int x = startx; x >= stopx; --x) {
                ushort *d = dest + (w - x - 1) * dstride + starty;
                for (int y = starty; y < h; ++y) {
                    *d++ = src[y * sstride + x];
                }
            }
        }
    }
}
934
935extern "C" void qt_rotate270_16_neon(quint16 *dst, const quint16 *src, int sstride, int dstride, int count);
936
937void qt_memrotate270_16_neon(const uchar *srcPixels, int w, int h,
938 int sstride,
939 uchar *destPixels, int dstride)
940{
941 const ushort *src = (const ushort *)srcPixels;
942 ushort *dest = (ushort *)destPixels;
943
944 sstride /= sizeof(ushort);
945 dstride /= sizeof(ushort);
946
947 const int pack = sizeof(quint32) / sizeof(ushort);
948 const int unaligned =
949 qMin(uint((long(dest) & (sizeof(quint32)-1)) / sizeof(ushort)), uint(h));
950 const int restX = w % tileSize;
951 const int restY = (h - unaligned) % tileSize;
952 const int unoptimizedY = restY % pack;
953 const int numTilesX = w / tileSize + (restX > 0);
954 const int numTilesY = (h - unaligned) / tileSize + (restY >= pack);
955
956 for (int tx = 0; tx < numTilesX; ++tx) {
957 const int startx = tx * tileSize;
958 const int stopx = qMin(startx + tileSize, w);
959
960 if (unaligned) {
961 for (int x = startx; x < stopx; ++x) {
962 ushort *d = dest + x * dstride;
963 for (int y = h - 1; y >= h - unaligned; --y) {
964 *d++ = src[y * sstride + x];
965 }
966 }
967 }
968
969 for (int ty = 0; ty < numTilesY; ++ty) {
970 const int starty = h - 1 - unaligned - ty * tileSize;
971 const int stopy = qMax(starty - tileSize, unoptimizedY);
972
973 int x = startx;
974 // qt_rotate90_16_neon writes to eight rows, four pixels at a time
975 for (; x < stopx - 7; x += 8) {
976 ushort *d = dest + x * dstride + h - 1 - starty;
977 const ushort *s = &src[starty * sstride + x];
978 qt_rotate90_16_neon(d + 7 * dstride, s, -sstride * 2, -dstride * 2, starty - stopy);
979 }
980
981 for (; x < stopx; ++x) {
982 quint32 *d = reinterpret_cast<quint32*>(dest + x * dstride
983 + h - 1 - starty);
984 for (int y = starty; y > stopy; y -= pack) {
985 quint32 c = src[y * sstride + x];
986 for (int i = 1; i < pack; ++i) {
987 const int shift = (sizeof(int) * 8 / pack * i);
988 const ushort color = src[(y - i) * sstride + x];
989 c |= color << shift;
990 }
991 *d++ = c;
992 }
993 }
994 }
995 if (unoptimizedY) {
996 const int starty = unoptimizedY - 1;
997 for (int x = startx; x < stopx; ++x) {
998 ushort *d = dest + x * dstride + h - 1 - starty;
999 for (int y = starty; y >= 0; --y) {
1000 *d++ = src[y * sstride + x];
1001 }
1002 }
1003 }
1004 }
1005}
1006#endif
1007
// Thin NEON backend implementing the SIMD interface consumed by
// qt_fetch_radial_gradient_template via QRadialFetchSimd: four-lane
// integer/float vector types plus the small operation set the radial
// gradient fetcher needs.
class QSimdNeon
{
public:
    typedef int32x4_t Int32x4;
    typedef float32x4_t Float32x4;

    // Unions for moving individual lanes between vector and scalar code.
    union Vect_buffer_i { Int32x4 v; int i[4]; };
    union Vect_buffer_f { Float32x4 v; float f[4]; };

    // Broadcast a scalar into all four lanes (double is narrowed to float).
    static inline Float32x4 v_dup(double x) { return vdupq_n_f32(float(x)); }
    static inline Float32x4 v_dup(float x) { return vdupq_n_f32(x); }
    static inline Int32x4 v_dup(int x) { return vdupq_n_s32(x); }
    static inline Int32x4 v_dup(uint x) { return vdupq_n_s32(x); } // uint is passed through as the same bit pattern

    static inline Float32x4 v_add(Float32x4 a, Float32x4 b) { return vaddq_f32(a, b); }
    static inline Int32x4 v_add(Int32x4 a, Int32x4 b) { return vaddq_s32(a, b); }

    static inline Float32x4 v_max(Float32x4 a, Float32x4 b) { return vmaxq_f32(a, b); }
    static inline Float32x4 v_min(Float32x4 a, Float32x4 b) { return vminq_f32(a, b); }
    static inline Int32x4 v_min_16(Int32x4 a, Int32x4 b) { return vminq_s32(a, b); }

    static inline Int32x4 v_and(Int32x4 a, Int32x4 b) { return vandq_s32(a, b); }

    static inline Float32x4 v_sub(Float32x4 a, Float32x4 b) { return vsubq_f32(a, b); }
    static inline Int32x4 v_sub(Int32x4 a, Int32x4 b) { return vsubq_s32(a, b); }

    static inline Float32x4 v_mul(Float32x4 a, Float32x4 b) { return vmulq_f32(a, b); }

    // sqrt(x) computed as x * (1/sqrt(x)): reciprocal-sqrt estimate (vrsqrteq)
    // refined by one Newton-Raphson step (vrsqrtsq), then multiplied back by x.
    static inline Float32x4 v_sqrt(Float32x4 x) { Float32x4 y = vrsqrteq_f32(x); y = vmulq_f32(y, vrsqrtsq_f32(x, vmulq_f32(y, y))); return vmulq_f32(x, y); }

    // Truncating float -> int conversion.
    static inline Int32x4 v_toInt(Float32x4 x) { return vcvtq_s32_f32(x); }

    // Lane-wise a >= b; each lane becomes all-ones (true) or all-zeros (false).
    static inline Int32x4 v_greaterOrEqual(Float32x4 a, Float32x4 b) { return vreinterpretq_s32_u32(vcgeq_f32(a, b)); }
};
1042
// NEON-accelerated radial gradient span fetch: delegates to the shared
// template implementation with QSimdNeon as the SIMD backend.
const uint * QT_FASTCALL qt_fetch_radial_gradient_neon(uint *buffer, const Operator *op, const QSpanData *data,
                                                       int y, int x, int length)
{
    return qt_fetch_radial_gradient_template<QRadialFetchSimd<QSimdNeon>,uint>(buffer, op, data, y, x, length);
}
1048
1050
1051const uint * QT_FASTCALL qt_fetchUntransformed_888_neon(uint *buffer, const Operator *, const QSpanData *data,
1052 int y, int x, int length)
1053{
1054 const uchar *line = data->texture.scanLine(y) + x * 3;
1056 return buffer;
1057}
1058
1059#if Q_BYTE_ORDER == Q_LITTLE_ENDIAN
// Swap the R and B bytes of each of the four 32-bit lanes (byte offsets 0 and
// 2; A and G stay put), converting between RGBA and ARGB byte orders. The
// shuffle is its own inverse, so it works in either direction.
static inline uint32x4_t vrgba2argb(uint32x4_t srcVector)
{
#if defined(Q_PROCESSOR_ARM_64)
    // AArch64 has a full 16-byte table lookup (vqtbl1q_u8).
    const uint8x16_t rgbaMask = { 2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8, 11, 14, 13, 12, 15};
#else
    const uint8x8_t rgbaMask = { 2, 1, 0, 3, 6, 5, 4, 7 };
#endif
#if defined(Q_PROCESSOR_ARM_64)
    srcVector = vreinterpretq_u32_u8(vqtbl1q_u8(vreinterpretq_u8_u32(srcVector), rgbaMask));
#else
    // no vqtbl1q_u8, so use two vtbl1_u8
    const uint8x8_t low = vtbl1_u8(vreinterpret_u8_u32(vget_low_u32(srcVector)), rgbaMask);
    const uint8x8_t high = vtbl1_u8(vreinterpret_u8_u32(vget_high_u32(srcVector)), rgbaMask);
    srcVector = vcombine_u32(vreinterpret_u32_u8(low), vreinterpret_u32_u8(high));
#endif
    return srcVector;
}
1077
// Premultiply 'count' 32-bit pixels from 'src' into 'buffer' (in-place when
// buffer == src), first converting RGBA byte order to ARGB when RGBA is true.
// Four pixels are handled per vector iteration, with fast paths for groups
// that are all-opaque or all-transparent; a scalar epilogue finishes the tail.
template<bool RGBA>
static inline void convertARGBToARGB32PM_neon(uint *buffer, const uint *src, int count)
{
    int i = 0;
    // Replicates each lane's alpha byte (byte offsets 3 and 7) across the lane.
    const uint8x8_t shuffleMask = { 3, 3, 3, 3, 7, 7, 7, 7};
    // Selects the alpha byte of each 32-bit lane.
    const uint32x4_t blendMask = vdupq_n_u32(0xff000000);

    for (; i < count - 3; i += 4) {
        uint32x4_t srcVector = vld1q_u32(src + i);
        uint32x4_t alphaVector = vshrq_n_u32(srcVector, 24);
#if defined(Q_PROCESSOR_ARM_64)
        uint32_t alphaSum = vaddvq_u32(alphaVector);
#else
        // no vaddvq_u32; horizontal add via two pairwise adds
        uint32x2_t tmp = vpadd_u32(vget_low_u32(alphaVector), vget_high_u32(alphaVector));
        uint32_t alphaSum = vget_lane_u32(vpadd_u32(tmp, tmp), 0);
#endif
        if (alphaSum) {
            if (alphaSum != 255 * 4) {
                // Mixed alpha: multiply every color byte by its pixel's alpha.
                if (RGBA)
                    srcVector = vrgba2argb(srcVector);
                const uint8x8_t s1 = vreinterpret_u8_u32(vget_low_u32(srcVector));
                const uint8x8_t s2 = vreinterpret_u8_u32(vget_high_u32(srcVector));
                const uint8x8_t alpha1 = vtbl1_u8(s1, shuffleMask);
                const uint8x8_t alpha2 = vtbl1_u8(s2, shuffleMask);
                uint16x8_t src1 = vmull_u8(s1, alpha1);
                uint16x8_t src2 = vmull_u8(s2, alpha2);
                // Divide by 255: shift-accumulate (x += x >> 8) followed by a
                // rounding narrowing shift implements (x + (x>>8) + 0x80) >> 8.
                src1 = vsraq_n_u16(src1, src1, 8);
                src2 = vsraq_n_u16(src2, src2, 8);
                const uint8x8_t d1 = vrshrn_n_u16(src1, 8);
                const uint8x8_t d2 = vrshrn_n_u16(src2, 8);
                // Keep the original alpha byte, take the premultiplied RGB.
                const uint32x4_t d = vbslq_u32(blendMask, srcVector, vreinterpretq_u32_u8(vcombine_u8(d1, d2)));
                vst1q_u32(buffer + i, d);
            } else {
                // All four pixels opaque: premultiplication is a no-op,
                // only the optional byte-order swap is needed.
                if (RGBA)
                    vst1q_u32(buffer + i, vrgba2argb(srcVector));
                else if (buffer != src)
                    vst1q_u32(buffer + i, srcVector);
            }
        } else {
            // All four pixels fully transparent: premultiplied value is 0.
            vst1q_u32(buffer + i, vdupq_n_u32(0));
        }
    }

    // Scalar tail for the last (count % 4) pixels.
    SIMD_EPILOGUE(i, count, 3) {
        uint v = qPremultiply(src[i]);
        buffer[i] = RGBA ? RGBA2ARGB(v) : v;
    }
}
1127
// Convert 'count' 32-bit pixels (ARGB32 when RGBA is false, RGBA8888 when
// true) to premultiplied 16-bit-per-channel QRgba64 values in 'buffer'.
// Four pixels per vector iteration, with fast paths for all-opaque and
// all-transparent groups; a scalar epilogue handles the remainder.
template<bool RGBA>
static inline void convertARGB32ToRGBA64PM_neon(QRgba64 *buffer, const uint *src, int count)
{
    if (count <= 0)
        return;

    // Replicates each lane's alpha byte (byte offsets 3 and 7) across the lane.
    const uint8x8_t shuffleMask = { 3, 3, 3, 3, 7, 7, 7, 7};
    // Selects the 16-bit alpha channel of each 64-bit output pixel.
    const uint64x2_t blendMask = vdupq_n_u64(Q_UINT64_C(0xffff000000000000));

    int i = 0;
    for (; i < count-3; i += 4) {
        uint32x4_t vs32 = vld1q_u32(src + i);
        uint32x4_t alphaVector = vshrq_n_u32(vs32, 24);
#if defined(Q_PROCESSOR_ARM_64)
        uint32_t alphaSum = vaddvq_u32(alphaVector);
#else
        // no vaddvq_u32; horizontal add via two pairwise adds
        uint32x2_t tmp = vpadd_u32(vget_low_u32(alphaVector), vget_high_u32(alphaVector));
        uint32_t alphaSum = vget_lane_u32(vpadd_u32(tmp, tmp), 0);
#endif
        if (alphaSum) {
            // QRgba64 stores channels in RGBA order, so ARGB32 input gets its
            // R/B bytes swapped first; RGBA8888 input is already in order.
            if (!RGBA)
                vs32 = vrgba2argb(vs32);
            // Zipping a byte vector with itself duplicates every byte, i.e.
            // widens each 8-bit channel x to the 16-bit value x*257
            // (the exact 8-to-16-bit range expansion).
            const uint8x16_t vs8 = vreinterpretq_u8_u32(vs32);
            const uint8x16x2_t v = vzipq_u8(vs8, vs8);
            if (alphaSum != 255 * 4) {
                const uint8x8_t s1 = vreinterpret_u8_u32(vget_low_u32(vs32));
                const uint8x8_t s2 = vreinterpret_u8_u32(vget_high_u32(vs32));
                const uint8x8_t alpha1 = vtbl1_u8(s1, shuffleMask);
                const uint8x8_t alpha2 = vtbl1_u8(s2, shuffleMask);
                uint16x8_t src1 = vmull_u8(s1, alpha1);
                uint16x8_t src2 = vmull_u8(s2, alpha2);
                // convert from 0->(255x255) to 0->(255x257)
                src1 = vsraq_n_u16(src1, src1, 7);
                src2 = vsraq_n_u16(src2, src2, 7);

                // now restore alpha from the trivial conversion
                const uint64x2_t d1 = vbslq_u64(blendMask, vreinterpretq_u64_u8(v.val[0]), vreinterpretq_u64_u16(src1));
                const uint64x2_t d2 = vbslq_u64(blendMask, vreinterpretq_u64_u8(v.val[1]), vreinterpretq_u64_u16(src2));

                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u64(d1));
                buffer += 2;
                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u64(d2));
                buffer += 2;
            } else {
                // All four pixels opaque: the widened x*257 values are final.
                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u8(v.val[0]));
                buffer += 2;
                vst1q_u16((uint16_t *)buffer, vreinterpretq_u16_u8(v.val[1]));
                buffer += 2;
            }
        } else {
            // All four pixels fully transparent: premultiplied value is 0.
            vst1q_u16((uint16_t *)buffer, vdupq_n_u16(0));
            buffer += 2;
            vst1q_u16((uint16_t *)buffer, vdupq_n_u16(0));
            buffer += 2;
        }
    }

    // Scalar tail for the last (count % 4) pixels.
    SIMD_EPILOGUE(i, count, 3) {
        uint s = src[i];
        if (RGBA)
            s = RGBA2ARGB(s);
        *buffer++ = QRgba64::fromArgb32(s).premultiplied();
    }
}
1193
// Approximate (mul / a) per lane: reciprocal estimate (vrecpeq) refined by
// one Newton-Raphson step (vrecpsq), with the scalar factor 'mul' folded in.
static inline float32x4_t reciprocal_mul_ps(float32x4_t a, float mul)
{
    float32x4_t ia = vrecpeq_f32(a); // estimate 1/a
    ia = vmulq_f32(vrecpsq_f32(a, ia), vmulq_n_f32(ia, mul)); // estimate improvement step * mul
    return ia;
}
1200
1201template<bool RGBA, bool RGBx>
1202static inline void convertARGBFromARGB32PM_neon(uint *buffer, const uint *src, int count)
1203{
1204 int i = 0;
1205 const uint32x4_t alphaMask = vdupq_n_u32(0xff000000);
1206
1207 for (; i < count - 3; i += 4) {
1208 uint32x4_t srcVector = vld1q_u32(src + i);
1209 uint32x4_t alphaVector = vshrq_n_u32(srcVector, 24);
1210#if defined(Q_PROCESSOR_ARM_64)
1211 uint32_t alphaSum = vaddvq_u32(alphaVector);
1212#else
1213 // no vaddvq_u32
1214 uint32x2_t tmp = vpadd_u32(vget_low_u32(alphaVector), vget_high_u32(alphaVector));
1215 uint32_t alphaSum = vget_lane_u32(vpadd_u32(tmp, tmp), 0);
1216#endif
1217 if (alphaSum) {
1218 if (alphaSum != 255 * 4) {
1219 if (RGBA)
1220 srcVector = vrgba2argb(srcVector);
1221 const float32x4_t a = vcvtq_f32_u32(alphaVector);
1222 const float32x4_t ia = reciprocal_mul_ps(a, 255.0f);
1223 // Convert 4x(4xU8) to 4x(4xF32)
1224 uint16x8_t tmp1 = vmovl_u8(vget_low_u8(vreinterpretq_u8_u32(srcVector)));
1225 uint16x8_t tmp3 = vmovl_u8(vget_high_u8(vreinterpretq_u8_u32(srcVector)));
1226 float32x4_t src1 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(tmp1)));
1227 float32x4_t src2 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(tmp1)));
1228 float32x4_t src3 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(tmp3)));
1229 float32x4_t src4 = vcvtq_f32_u32(vmovl_u16(vget_high_u16(tmp3)));
1230 src1 = vmulq_lane_f32(src1, vget_low_f32(ia), 0);
1231 src2 = vmulq_lane_f32(src2, vget_low_f32(ia), 1);
1232 src3 = vmulq_lane_f32(src3, vget_high_f32(ia), 0);
1233 src4 = vmulq_lane_f32(src4, vget_high_f32(ia), 1);
1234 // Convert 4x(4xF32) back to 4x(4xU8) (over a 8.1 fixed point format to get rounding)
1235 tmp1 = vcombine_u16(vrshrn_n_u32(vcvtq_n_u32_f32(src1, 1), 1),
1236 vrshrn_n_u32(vcvtq_n_u32_f32(src2, 1), 1));
1237 tmp3 = vcombine_u16(vrshrn_n_u32(vcvtq_n_u32_f32(src3, 1), 1),
1238 vrshrn_n_u32(vcvtq_n_u32_f32(src4, 1), 1));
1239 uint32x4_t dstVector = vreinterpretq_u32_u8(vcombine_u8(vmovn_u16(tmp1), vmovn_u16(tmp3)));
1240 // Overwrite any undefined results from alpha==0 with zeros:
1241#if defined(Q_PROCESSOR_ARM_64)
1242 uint32x4_t srcVectorAlphaMask = vceqzq_u32(alphaVector);
1243#else
1244 uint32x4_t srcVectorAlphaMask = vceqq_u32(alphaVector, vdupq_n_u32(0));
1245#endif
1246 dstVector = vbicq_u32(dstVector, srcVectorAlphaMask);
1247 // Restore or mask alpha values:
1248 if (RGBx)
1249 dstVector = vorrq_u32(alphaMask, dstVector);
1250 else
1251 dstVector = vbslq_u32(alphaMask, srcVector, dstVector);
1252 vst1q_u32(&buffer[i], dstVector);
1253 } else {
1254 // 4xAlpha==255, no change except if we are doing RGBA->ARGB:
1255 if (RGBA)
1256 vst1q_u32(&buffer[i], vrgba2argb(srcVector));
1257 else if (buffer != src)
1258 vst1q_u32(&buffer[i], srcVector);
1259 }
1260 } else {
1261 // 4xAlpha==0, always zero, except if output is RGBx:
1262 if (RGBx)
1263 vst1q_u32(&buffer[i], alphaMask);
1264 else
1265 vst1q_u32(&buffer[i], vdupq_n_u32(0));
1266 }
1267 }
1268
1269 SIMD_EPILOGUE(i, count, 3) {
1271 if (RGBx)
1272 v = 0xff000000 | v;
1273 if (RGBA)
1274 v = ARGB2RGBA(v);
1275 buffer[i] = v;
1276 }
1277}
1278
// In-place ARGB32 -> premultiplied ARGB32 conversion (palette unused).
void QT_FASTCALL convertARGB32ToARGB32PM_neon(uint *buffer, int count, const QList<QRgb> *)
{
    convertARGBToARGB32PM_neon<false>(buffer, buffer, count);
}
1283
// In-place RGBA8888 -> premultiplied ARGB32 conversion (palette unused).
void QT_FASTCALL convertRGBA8888ToARGB32PM_neon(uint *buffer, int count, const QList<QRgb> *)
{
    convertARGBToARGB32PM_neon<true>(buffer, buffer, count);
}
1288
1289const uint *QT_FASTCALL fetchARGB32ToARGB32PM_neon(uint *buffer, const uchar *src, int index, int count,
1290 const QList<QRgb> *, QDitherInfo *)
1291{
1292 convertARGBToARGB32PM_neon<false>(buffer, reinterpret_cast<const uint *>(src) + index, count);
1293 return buffer;
1294}
1295
1296const uint *QT_FASTCALL fetchRGBA8888ToARGB32PM_neon(uint *buffer, const uchar *src, int index, int count,
1297 const QList<QRgb> *, QDitherInfo *)
1298{
1299 convertARGBToARGB32PM_neon<true>(buffer, reinterpret_cast<const uint *>(src) + index, count);
1300 return buffer;
1301}
1302
// Convert 'count' ARGB32 pixels from 'src' to premultiplied RGBA64 in
// 'buffer' (palette and dither unused); returns 'buffer'.
const QRgba64 * QT_FASTCALL convertARGB32ToRGBA64PM_neon(QRgba64 *buffer, const uint *src, int count,
                                                         const QList<QRgb> *, QDitherInfo *)
{
    convertARGB32ToRGBA64PM_neon<false>(buffer, src, count);
    return buffer;
}
1309
// Convert 'count' RGBA8888 pixels from 'src' to premultiplied RGBA64 in
// 'buffer' (palette and dither unused); returns 'buffer'.
const QRgba64 * QT_FASTCALL convertRGBA8888ToRGBA64PM_neon(QRgba64 *buffer, const uint *src, int count,
                                                           const QList<QRgb> *, QDitherInfo *)
{
    convertARGB32ToRGBA64PM_neon<true>(buffer, src, count);
    return buffer;
}
1316
1317const QRgba64 *QT_FASTCALL fetchARGB32ToRGBA64PM_neon(QRgba64 *buffer, const uchar *src, int index, int count,
1318 const QList<QRgb> *, QDitherInfo *)
1319{
1320 convertARGB32ToRGBA64PM_neon<false>(buffer, reinterpret_cast<const uint *>(src) + index, count);
1321 return buffer;
1322}
1323
1324const QRgba64 *QT_FASTCALL fetchRGBA8888ToRGBA64PM_neon(QRgba64 *buffer, const uchar *src, int index, int count,
1325 const QList<QRgb> *, QDitherInfo *)
1326{
1327 convertARGB32ToRGBA64PM_neon<true>(buffer, reinterpret_cast<const uint *>(src) + index, count);
1328 return buffer;
1329}
1330
1331void QT_FASTCALL storeRGB32FromARGB32PM_neon(uchar *dest, const uint *src, int index, int count,
1332 const QList<QRgb> *, QDitherInfo *)
1333{
1334 uint *d = reinterpret_cast<uint *>(dest) + index;
1335 convertARGBFromARGB32PM_neon<false,true>(d, src, count);
1336}
1337
1338void QT_FASTCALL storeARGB32FromARGB32PM_neon(uchar *dest, const uint *src, int index, int count,
1339 const QList<QRgb> *, QDitherInfo *)
1340{
1341 uint *d = reinterpret_cast<uint *>(dest) + index;
1342 convertARGBFromARGB32PM_neon<false,false>(d, src, count);
1343}
1344
1345void QT_FASTCALL storeRGBA8888FromARGB32PM_neon(uchar *dest, const uint *src, int index, int count,
1346 const QList<QRgb> *, QDitherInfo *)
1347{
1348 uint *d = reinterpret_cast<uint *>(dest) + index;
1349 convertARGBFromARGB32PM_neon<true,false>(d, src, count);
1350}
1351
1352void QT_FASTCALL storeRGBXFromARGB32PM_neon(uchar *dest, const uint *src, int index, int count,
1353 const QList<QRgb> *, QDitherInfo *)
1354{
1355 uint *d = reinterpret_cast<uint *>(dest) + index;
1356 convertARGBFromARGB32PM_neon<true,true>(d, src, count);
1357}
1358
1359#endif // Q_BYTE_ORDER == Q_LITTLE_ENDIAN
1360
1362
1363#endif // __ARM_NEON__
1364
qsizetype bytesPerLine() const
uchar * scanLine(int y)
\inmodule QtCore\reentrant
Definition qrect.h:484
\inmodule QtCore\reentrant
Definition qrect.h:30
static constexpr QRgba64 fromArgb32(uint rgb)
Definition qrgba64.h:56
The QTransform class specifies 2D transformations of a coordinate system.
Definition qtransform.h:20
QMap< QString, QString > map
[6]
Combined button and popup list for selecting options.
QTextStream & flush(QTextStream &stream)
Calls QTextStream::flush() on stream and returns stream.
static QT_WARNING_DISABLE_FLOAT_COMPARE ShiftResult shift(const QBezier *orig, QBezier *shifted, qreal offset, qreal threshold)
Definition qbezier.cpp:207
void qt_blend_argb32_on_rgb16_const_alpha(uchar *destPixels, int dbpl, const uchar *srcPixels, int sbpl, int w, int h, int const_alpha)
void qt_transform_image_rgb16_on_rgb16(uchar *destPixels, int dbpl, const uchar *srcPixels, int sbpl, const QRectF &targetRect, const QRectF &sourceRect, const QRect &clip, const QTransform &targetRectTransform, int const_alpha)
void qt_blend_rgb32_on_rgb32(uchar *destPixels, int dbpl, const uchar *srcPixels, int sbpl, int w, int h, int const_alpha)
void qt_scale_image_rgb16_on_rgb16(uchar *destPixels, int dbpl, const uchar *srcPixels, int sbpl, int srch, const QRectF &targetRect, const QRectF &sourceRect, const QRect &clip, int const_alpha)
void qt_blend_rgb16_on_rgb16(uchar *dst, int dbpl, const uchar *src, int sbpl, int w, int h, int const_alpha)
void qt_transform_image(DestT *destPixels, int dbpl, const SrcT *srcPixels, int sbpl, const QRectF &targetRect, const QRectF &sourceRect, const QRect &clip, const QTransform &targetRectTransform, Blender blender)
#define Q_FALLTHROUGH()
#define QT_FASTCALL
void qt_memfill32(quint32 *dest, quint32 color, qsizetype count)
void qt_alphamapblit_quint16(QRasterBuffer *rasterBuffer, int x, int y, const QRgba64 &color, const uchar *map, int mapWidth, int mapHeight, int mapStride, const QClipData *clip, bool useGammaCorrection)
uint comp_func_Plus_one_pixel_const_alpha(uint d, const uint s, const uint const_alpha, const uint one_minus_const_alpha)
static uint INTERPOLATE_PIXEL_255(uint x, uint a, uint y, uint b)
static uint BYTE_MUL(uint x, uint a)
uint comp_func_Plus_one_pixel(uint d, const uint s)
QRgb qConvertRgb16To32(uint c)
EGLOutputLayerEXT EGLint EGLAttrib value
[5]
Q_GUI_EXPORT void QT_FASTCALL qt_convert_rgb888_to_rgb32_neon(quint32 *dst, const uchar *src, int len)
static QT_BEGIN_NAMESPACE const int tileSize
Definition qmemrotate.cpp:9
constexpr const T & qMin(const T &a, const T &b)
Definition qminmax.h:40
constexpr const T & qMax(const T &a, const T &b)
Definition qminmax.h:42
GLboolean GLboolean GLboolean b
GLsizei const GLfloat * v
[13]
GLint GLint GLint GLint GLint x
[0]
GLfloat GLfloat GLfloat w
[0]
GLboolean GLboolean GLboolean GLboolean a
[7]
GLuint index
[2]
GLuint GLuint end
GLuint GLfloat GLfloat GLfloat GLfloat GLfloat GLfloat GLfloat GLfloat s1
GLenum GLuint GLenum GLsizei length
GLenum GLenum GLsizei count
GLint GLsizei GLsizei GLenum GLenum GLsizei void * data
GLfloat GLfloat f
GLenum src
GLenum GLuint buffer
GLuint color
[2]
GLenum GLenum dst
GLint GLint GLint GLint GLint GLint GLint GLbitfield mask
GLint y
GLfloat GLfloat GLfloat GLfloat h
GLdouble s
[6]
Definition qopenglext.h:235
const GLubyte * c
GLsizei GLfixed GLfixed GLfixed GLfixed const GLubyte * bitmap
GLdouble GLdouble t
Definition qopenglext.h:243
GLuint GLuint64EXT address
GLsizei const void * pointer
Definition qopenglext.h:384
GLuint64EXT * result
[6]
GLenum GLsizei len
GLfloat GLfloat GLfloat alpha
Definition qopenglext.h:418
GLbyte ty
static quint32 RGBA2ARGB(quint32 x)
static quint32 ARGB2RGBA(quint32 x)
QRgb qUnpremultiply(QRgb p)
Definition qrgb.h:60
constexpr QRgb qPremultiply(QRgb x)
Definition qrgb.h:45
constexpr int qAlpha(QRgb rgb)
Definition qrgb.h:27
#define SIMD_EPILOGUE(i, length, max)
Definition qsimd_p.h:33
#define s2
#define Q_UINT64_C(c)
Definition qtypes.h:58
unsigned int quint32
Definition qtypes.h:50
unsigned char uchar
Definition qtypes.h:32
unsigned short quint16
Definition qtypes.h:48
size_t quintptr
Definition qtypes.h:167
ptrdiff_t qsizetype
Definition qtypes.h:165
unsigned int uint
Definition qtypes.h:34
unsigned short ushort
Definition qtypes.h:33
unsigned char quint8
Definition qtypes.h:46
gzip write("uncompressed data")
QDate d1(1995, 5, 17)
[0]
QDate d2(1995, 5, 20)