texlive[64747] Build/source/libs: pixman 0.42.0

commits+kakuto at tug.org
Tue Oct 18 23:13:30 CEST 2022


Revision: 64747
          http://tug.org/svn/texlive?view=revision&revision=64747
Author:   kakuto
Date:     2022-10-18 23:13:29 +0200 (Tue, 18 Oct 2022)
Log Message:
-----------
pixman 0.42.0

Modified Paths:
--------------
    trunk/Build/source/libs/README
    trunk/Build/source/libs/pixman/ChangeLog
    trunk/Build/source/libs/pixman/README
    trunk/Build/source/libs/pixman/TLpatches/ChangeLog
    trunk/Build/source/libs/pixman/TLpatches/TL-Changes
    trunk/Build/source/libs/pixman/TLpatches/patch-01-hide-symbols
    trunk/Build/source/libs/pixman/configure
    trunk/Build/source/libs/pixman/pixman-src/README
    trunk/Build/source/libs/pixman/pixman-src/config.h.in
    trunk/Build/source/libs/pixman/pixman-src/configure.ac
    trunk/Build/source/libs/pixman/pixman-src/meson.build
    trunk/Build/source/libs/pixman/pixman-src/meson_options.txt
    trunk/Build/source/libs/pixman/pixman-src/pixman/Makefile.am
    trunk/Build/source/libs/pixman/pixman-src/pixman/meson.build
    trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arm-neon.c
    trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arm.c
    trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-bits-image.c
    trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-compiler.h
    trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-fast-path.c
    trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-filter.c
    trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-implementation.c
    trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-ppc.c
    trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-private.h
    trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-region.c
    trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-sse2.c
    trunk/Build/source/libs/pixman/pixman-src/pixman/pixman.h
    trunk/Build/source/libs/pixman/version.ac

Added Paths:
-----------
    trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arma64-neon-asm-bilinear.S
    trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arma64-neon-asm.S
    trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arma64-neon-asm.h

Modified: trunk/Build/source/libs/README
===================================================================
--- trunk/Build/source/libs/README	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/README	2022-10-18 21:13:29 UTC (rev 64747)
@@ -49,7 +49,7 @@
 mpfr 4.1.0 - checked 12jul20
   http://ftp.gnu.org/gnu/mpfr/
 
-pixman 0.40.0 - checked 21apr20
+pixman 0.42.0 - checked 19oct22
   http://cairographics.org/releases/
 
 potrace 1.16 - checked 29aug22

Modified: trunk/Build/source/libs/pixman/ChangeLog
===================================================================
--- trunk/Build/source/libs/pixman/ChangeLog	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/ChangeLog	2022-10-18 21:13:29 UTC (rev 64747)
@@ -1,3 +1,8 @@
+2022-10-19  Akira Kakuto  <kakuto at jcom.zaq.ne.jp>
+
+	Import pixman-0.42.0.
+	* version.ac: Adapted.
+
 2020-04-21  Akira Kakuto  <kakuto at w32tex.org>
 
 	Import pixman-0.40.0.

Modified: trunk/Build/source/libs/pixman/README
===================================================================
--- trunk/Build/source/libs/pixman/README	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/README	2022-10-18 21:13:29 UTC (rev 64747)
@@ -1,4 +1,4 @@
-	Building pixman-0.40.0 as part of the TL tree
+	Building pixman-0.42.0 as part of the TL tree
 	=============================================
 
 This directory libs/pixman/ uses a proxy Makefile.am to build the pixman
@@ -12,4 +12,4 @@
 
 2012-11-10	Taco Hoekwater <taco at metatex.org>
 2012-11-15	Peter Breitenlohner <peb at mppmu.mpg.de>
-2020-04-21	Akira Kakuto <kakuto at w32tex.org>
+2022-10-19	Akira Kakuto <kakuto at jcom.zaq.ne.jp>

Modified: trunk/Build/source/libs/pixman/TLpatches/ChangeLog
===================================================================
--- trunk/Build/source/libs/pixman/TLpatches/ChangeLog	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/TLpatches/ChangeLog	2022-10-18 21:13:29 UTC (rev 64747)
@@ -1,3 +1,8 @@
+2022-10-19  Akira Kakuto  <kakuto at jcom.zaq.ne.jp>
+
+	Import pixman-0.42.0.
+	* patch-01-hide-symbols: Adapted.
+
 2020-04-21  Akira Kakuto  <kakuto at w32tex.org>
 
 	Import pixman-0.40.0.

Modified: trunk/Build/source/libs/pixman/TLpatches/TL-Changes
===================================================================
--- trunk/Build/source/libs/pixman/TLpatches/TL-Changes	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/TLpatches/TL-Changes	2022-10-18 21:13:29 UTC (rev 64747)
@@ -1,4 +1,4 @@
-Changes applied to the pixman-0.40.0/ tree as obtained from:
+Changes applied to the pixman-0.42.0/ tree as obtained from:
 	http://cairographics.org/releases/
 
 Removed:

Modified: trunk/Build/source/libs/pixman/TLpatches/patch-01-hide-symbols
===================================================================
--- trunk/Build/source/libs/pixman/TLpatches/patch-01-hide-symbols	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/TLpatches/patch-01-hide-symbols	2022-10-18 21:13:29 UTC (rev 64747)
@@ -1,6 +1,6 @@
-diff -ur pixman-0.40.0/pixman/pixman-compiler.h pixman-src/pixman/pixman-compiler.h
---- pixman-0.40.0/pixman/pixman-compiler.h	Sat Mar 21 04:36:42 2020
-+++ pixman-src/pixman/pixman-compiler.h	Tue Apr 21 06:45:24 2020
+diff -ur pixman-0.42.0/pixman/pixman-compiler.h pixman-src/pixman/pixman-compiler.h
+--- pixman-0.42.0/pixman/pixman-compiler.h	Mon Mar 01 22:49:17 2021
++++ pixman-src/pixman/pixman-compiler.h	Wed Oct 19 05:24:10 2022
 @@ -91,7 +91,7 @@
  
  /* GCC visibility */

Modified: trunk/Build/source/libs/pixman/configure
===================================================================
--- trunk/Build/source/libs/pixman/configure	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/configure	2022-10-18 21:13:29 UTC (rev 64747)
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.71 for pixman (TeX Live) 0.40.0.
+# Generated by GNU Autoconf 2.71 for pixman (TeX Live) 0.42.0.
 #
 # Report bugs to <tex-k at tug.org>.
 #
@@ -611,8 +611,8 @@
 # Identity of this package.
 PACKAGE_NAME='pixman (TeX Live)'
 PACKAGE_TARNAME='pixman--tex-live-'
-PACKAGE_VERSION='0.40.0'
-PACKAGE_STRING='pixman (TeX Live) 0.40.0'
+PACKAGE_VERSION='0.42.0'
+PACKAGE_STRING='pixman (TeX Live) 0.42.0'
 PACKAGE_BUGREPORT='tex-k at tug.org'
 PACKAGE_URL=''
 
@@ -1316,7 +1316,7 @@
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures pixman (TeX Live) 0.40.0 to adapt to many kinds of systems.
+\`configure' configures pixman (TeX Live) 0.42.0 to adapt to many kinds of systems.
 
 Usage: $0 [OPTION]... [VAR=VALUE]...
 
@@ -1384,7 +1384,7 @@
 
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of pixman (TeX Live) 0.40.0:";;
+     short | recursive ) echo "Configuration of pixman (TeX Live) 0.42.0:";;
    esac
   cat <<\_ACEOF
 
@@ -1481,7 +1481,7 @@
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-pixman (TeX Live) configure 0.40.0
+pixman (TeX Live) configure 0.42.0
 generated by GNU Autoconf 2.71
 
 Copyright (C) 2021 Free Software Foundation, Inc.
@@ -1983,7 +1983,7 @@
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
-It was created by pixman (TeX Live) $as_me 0.40.0, which was
+It was created by pixman (TeX Live) $as_me 0.42.0, which was
 generated by GNU Autoconf 2.71.  Invocation command line was
 
   $ $0$ac_configure_args_raw
@@ -4526,7 +4526,7 @@
 
 # Define the identity of the package.
  PACKAGE='pixman--tex-live-'
- VERSION='0.40.0'
+ VERSION='0.42.0'
 
 
 printf "%s\n" "#define PACKAGE \"$PACKAGE\"" >>confdefs.h
@@ -4742,7 +4742,7 @@
 
 
 PIXMAN_VERSION_MAJOR=0
-PIXMAN_VERSION_MINOR=40
+PIXMAN_VERSION_MINOR=42
 PIXMAN_VERSION_MICRO=0
 
 test_CFLAGS=${CFLAGS+set} # We may override autoconf default CFLAGS.
@@ -7179,7 +7179,7 @@
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by pixman (TeX Live) $as_me 0.40.0, which was
+This file was extended by pixman (TeX Live) $as_me 0.42.0, which was
 generated by GNU Autoconf 2.71.  Invocation command line was
 
   CONFIG_FILES    = $CONFIG_FILES
@@ -7247,7 +7247,7 @@
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config='$ac_cs_config_escaped'
 ac_cs_version="\\
-pixman (TeX Live) config.status 0.40.0
+pixman (TeX Live) config.status 0.42.0
 configured by $0, generated by GNU Autoconf 2.71,
   with options \\"\$ac_cs_config\\"
 

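The version bump above is also observable at run time: pixman.h carries the
PIXMAN_VERSION / PIXMAN_VERSION_STRING / PIXMAN_VERSION_ENCODE macros, and the
library exports pixman_version() and pixman_version_string(), the same symbol
configure.ac probes for further down. A minimal sanity-check sketch, assuming
only the stock pixman-1 API (the file name check.c is hypothetical):

    #include <stdio.h>
    #include <pixman.h>

    int main (void)
    {
        /* Header we compiled against vs. library we actually linked. */
        printf ("built with pixman %s, running with %s\n",
                PIXMAN_VERSION_STRING, pixman_version_string ());
        /* Nonzero exit if the runtime library is older than 0.42.0. */
        return pixman_version () >= PIXMAN_VERSION_ENCODE (0, 42, 0) ? 0 : 1;
    }

It can be built with something like: cc check.c $(pkg-config --cflags --libs pixman-1)
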
Modified: trunk/Build/source/libs/pixman/pixman-src/README
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/README	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/README	2022-10-18 21:13:29 UTC (rev 64747)
@@ -1,15 +1,21 @@
+Pixman
+======
+
 Pixman is a library that provides low-level pixel manipulation
 features such as image compositing and trapezoid rasterization.
 
-Questions, bug reports and patches should be directed to the pixman
-mailing list:
+Questions should be directed to the pixman mailing list:
 
-        http://lists.freedesktop.org/mailman/listinfo/pixman
+    https://lists.freedesktop.org/mailman/listinfo/pixman
 
 You can also file bugs at
 
-        https://bugs.freedesktop.org/enter_bug.cgi?product=pixman
+    https://gitlab.freedesktop.org/pixman/pixman/-/issues/new
 
+or submit improvements in the form of a Merge Request via
+
+    https://gitlab.freedesktop.org/pixman/pixman/-/merge_requests
+
 For real time discussions about pixman, feel free to join the IRC
 channels #cairo and #xorg-devel on the FreeNode IRC network.
 
@@ -21,54 +27,66 @@
 the git version control system. For a quick getting started guide,
 there is the "Everyday Git With 20 Commands Or So guide"
 
-        http://www.kernel.org/pub/software/scm/git/docs/everyday.html
+    https://www.kernel.org/pub/software/scm/git/docs/everyday.html
 
 from the Git homepage. For more in depth git documentation, see the
 resources on the Git community documentation page:
 
-        http://git-scm.com/documentation
+    https://git-scm.com/documentation
 
 Pixman uses the infrastructure from the freedesktop.org umbrella
 project. For instructions about how to use the git service on
 freedesktop.org, see:
 
-        http://www.freedesktop.org/wiki/Infrastructure/git/Developers
+    https://www.freedesktop.org/wiki/Infrastructure/git/Developers
 
 The Pixman master repository can be found at:
 
-	git://anongit.freedesktop.org/git/pixman
+    https://gitlab.freedesktop.org/pixman/pixman
 
-and browsed on the web here:
 
-	http://cgit.freedesktop.org/pixman/
-
-
 Sending patches
 ---------------
 
-The general workflow for sending patches is to first make sure that
-git can send mail on your system. Then, 
+Patches should be submitted in the form of Merge Requests via Gitlab.
 
- - create a branch off of master in your local git repository
+You will first need to create a fork of the main pixman repository at
 
- - make your changes as one or more commits
+    https://gitlab.freedesktop.org/pixman/pixman
 
- - use the 
+via the Fork button on the top right. Once that is done you can add your
+personal repository as a remote to your local pixman development git checkout:
 
-        git send-email
+    git remote add my-gitlab git@gitlab.freedesktop.org:YOURUSERNAME/pixman.git
 
-   command to send the patch series to pixman at lists.freedesktop.org.
+    git fetch my-gitlab
 
-In order for your patches to be accepted, please consider the
-following guidelines:
+Make sure to have added ssh keys to your gitlab profile at
 
- - This link:
+    https://gitlab.freedesktop.org/profile/keys
 
-        http://www.kernel.org/pub/software/scm/git/docs/user-manual.html#patch-series
+Once that is set up, the general workflow for sending patches is to create a
+new local branch with your improvements and once it's ready push it to your
+personal pixman fork:
 
-   describes how what a good patch series is, and to create one with
-   git.
+    git checkout -b fix-some-bug
+    ...
+    git push my-gitlab
 
+The output of the `git push` command will include a link that allows you to
+create a Merge Request against the official pixman repository.
+
+Whenever you make changes to your branch (add new commits or fix up commits)
+you push them back to your personal pixman fork:
+
+    git push -f my-gitlab
+
+If there is an open Merge Request, Gitlab will automatically pick up the
+changes from your branch and pixman developers can review them anew.
+
+In order for your patches to be accepted, please consider the
+following guidelines:
+
  - At each point in the series, pixman should compile and the test
    suite should pass.
 
@@ -81,6 +99,12 @@
 
         make check
 
+   if you built pixman with autotools or
+
+       meson test -C builddir
+
+   if you built pixman with meson.
+
    It will take around two minutes to run on a modern PC.
 
  - Follow the coding style described in the CODING_STYLE file
@@ -101,7 +125,7 @@
 	- If review comments were incorporated, a brief version
           history describing what those changes were.
 
- - For big patch series, send an introductory email with an overall
+ - For big patch series, write an introductory post with an overall
    description of the patch series, including benchmarks and
    motivation. Each commit message should still be descriptive and
    include enough information to understand why this particular commit
@@ -111,6 +135,6 @@
 should expect to have the first versions of their patches rejected.
 
 If you think that the reviewers are wrong about something, or that the
-guidelines above are wrong, feel free to discuss the issue on the
-list. The purpose of the guidelines and code review is to ensure high
-code quality; it is not an exercise in compliance.
+guidelines above are wrong, feel free to discuss the issue. The purpose
+of the guidelines and code review is to ensure high code quality; it is
+not an exercise in compliance.

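The "image compositing" the README above leads with is exposed mainly through
pixman_image_composite32(). A minimal self-contained sketch, assuming only the
public pixman-1 API (error handling omitted; the values are arbitrary):

    #include <stdio.h>
    #include <stdint.h>
    #include <pixman.h>

    int main (void)
    {
        /* 4x4 opaque white destination in a8r8g8b8; stride is in bytes. */
        uint32_t bits[4 * 4];
        for (int i = 0; i < 4 * 4; i++)
            bits[i] = 0xffffffff;
        pixman_image_t *dest =
            pixman_image_create_bits (PIXMAN_a8r8g8b8, 4, 4, bits, 4 * 4);

        /* Half-transparent red source; channels are 16 bit. */
        pixman_color_t red = { 0xffff, 0x0000, 0x0000, 0x8000 };
        pixman_image_t *src = pixman_image_create_solid_fill (&red);

        /* OVER-composite the whole 4x4 area, with no mask image. */
        pixman_image_composite32 (PIXMAN_OP_OVER, src, NULL, dest,
                                  0, 0,   /* src x, y */
                                  0, 0,   /* mask x, y */
                                  0, 0,   /* dest x, y */
                                  4, 4);  /* width, height */

        printf ("top-left pixel is now %08x\n", bits[0]);

        pixman_image_unref (src);
        pixman_image_unref (dest);
        return 0;
    }

The NEON fast paths added elsewhere in this commit are selected transparently
behind exactly this kind of call.
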
Modified: trunk/Build/source/libs/pixman/pixman-src/config.h.in
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/config.h.in	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/config.h.in	2022-10-18 21:13:29 UTC (rev 64747)
@@ -45,9 +45,6 @@
 /* Whether we have libpng */
 #undef HAVE_LIBPNG
 
-/* Define to 1 if you have the <memory.h> header file. */
-#undef HAVE_MEMORY_H
-
 /* Whether we have mmap() */
 #undef HAVE_MMAP
 
@@ -66,6 +63,9 @@
 /* Define to 1 if you have the <stdint.h> header file. */
 #undef HAVE_STDINT_H
 
+/* Define to 1 if you have the <stdio.h> header file. */
+#undef HAVE_STDIO_H
+
 /* Define to 1 if you have the <stdlib.h> header file. */
 #undef HAVE_STDLIB_H
 
@@ -120,7 +120,9 @@
 /* The size of `long', as computed by sizeof. */
 #undef SIZEOF_LONG
 
-/* Define to 1 if you have the ANSI C header files. */
+/* Define to 1 if all of the C90 standard headers exist (not just the ones
+   required in a freestanding environment). This macro is provided for
+   backward compatibility; new code need not use it. */
 #undef STDC_HEADERS
 
 /* The compiler supported TLS storage class */
@@ -129,6 +131,9 @@
 /* Whether the tool chain supports __attribute__((constructor)) */
 #undef TOOLCHAIN_SUPPORTS_ATTRIBUTE_CONSTRUCTOR
 
+/* use ARM A64_NEON assembly optimizations */
+#undef USE_ARM_A64_NEON
+
 /* use ARM IWMMXT compiler intrinsics */
 #undef USE_ARM_IWMMXT
 

Modified: trunk/Build/source/libs/pixman/pixman-src/configure.ac
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/configure.ac	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/configure.ac	2022-10-18 21:13:29 UTC (rev 64747)
@@ -53,7 +53,7 @@
 #
 
 m4_define([pixman_major], 0)
-m4_define([pixman_minor], 40)
+m4_define([pixman_minor], 42)
 m4_define([pixman_micro], 0)
 
 m4_define([pixman_version],[pixman_major.pixman_minor.pixman_micro])
@@ -503,19 +503,19 @@
 AM_CONDITIONAL(USE_SSSE3, test $have_ssse3_intrinsics = yes)
 
 dnl ===========================================================================
-dnl Other special flags needed when building code using MMX or SSE instructions
+dnl Other special flags needed when building code using x86 ISA extensions
 case $host_os in
    solaris*)
-      # When building 32-bit binaries, apply a mapfile to ensure that the
-      # binaries aren't flagged as only able to run on MMX+SSE capable CPUs
-      # since they check at runtime before using those instructions.
+      # When building Solaris binaries, apply a mapfile to ensure that the
+      # binaries aren't flagged as only able to run on MMX/SSE/SSSE3 capable
+      # CPUs since they check at runtime before using those instructions.
       # Not all linkers grok the mapfile format so we check for that first.
-      if test "$AMD64_ABI" = "no" ; then
+      if test "$host_cpu" == "i386" -o "$host_cpu" == "x86_64"; then
 	 use_hwcap_mapfile=no
 	 AC_MSG_CHECKING(whether to use a hardware capability map file)
 	 hwcap_save_LDFLAGS="$LDFLAGS"
 	 HWCAP_LDFLAGS='-Wl,-M,$(srcdir)/solaris-hwcap.mapfile'
-	 LDFLAGS="$LDFLAGS -Wl,-M,pixman/solaris-hwcap.mapfile"
+	 LDFLAGS="$LDFLAGS -Wl,-M,${srcdir}/pixman/solaris-hwcap.mapfile"
 	 AC_LINK_IFELSE([AC_LANG_SOURCE([[int main() { return 0; }]])],
 			use_hwcap_mapfile=yes,
 			HWCAP_LDFLAGS="")
@@ -528,6 +528,9 @@
       if test "x$SSE2_LDFLAGS" = "x" ; then
 	 SSE2_LDFLAGS="$HWCAP_LDFLAGS"
       fi
+      if test "x$SSSE3_LDFLAGS" = "x" ; then
+	 SSSE3_LDFLAGS="$HWCAP_LDFLAGS"
+      fi
       ;;
 esac
 
@@ -538,6 +541,7 @@
 AC_SUBST(SSE2_CFLAGS)
 AC_SUBST(SSE2_LDFLAGS)
 AC_SUBST(SSSE3_CFLAGS)
+AC_SUBST(SSSE3_LDFLAGS)
 
 dnl ===========================================================================
 dnl Check for VMX/Altivec
@@ -667,6 +671,40 @@
    AC_MSG_ERROR([ARM NEON intrinsics not detected])
 fi
 
+dnl ==========================================================================
+dnl Check if assembler is gas compatible and supports ARM-a64 NEON instructions
+have_arm_a64_neon=no
+AC_MSG_CHECKING(whether to use ARM A64 NEON assembler)
+xserver_save_CFLAGS=$CFLAGS
+CFLAGS="-x assembler-with-cpp $CFLAGS"
+AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
+.text
+.arch armv8-a
+.altmacro
+prfm pldl2strm, [x0]
+xtn v0.8b, v0.8h]])], have_arm_a64_neon=yes)
+CFLAGS=$xserver_save_CFLAGS
+
+AC_ARG_ENABLE(arm-a64-neon,
+   [AC_HELP_STRING([--disable-arm-a64-neon],
+                   [disable ARM A64 NEON fast paths])],
+   [enable_arm_a64_neon=$enableval], [enable_arm_a64_neon=auto])
+
+if test $enable_arm_a64_neon = no ; then
+   have_arm_a64_neon=disabled
+fi
+
+if test $have_arm_a64_neon = yes ; then
+   AC_DEFINE(USE_ARM_A64_NEON, 1, [use ARM A64_NEON assembly optimizations])
+fi
+
+AM_CONDITIONAL(USE_ARM_A64_NEON, test $have_arm_a64_neon = yes)
+
+AC_MSG_RESULT($have_arm_a64_neon)
+if test $enable_arm_a64_neon = yes && test $have_arm_a64_neon = no ; then
+   AC_MSG_ERROR([ARM A64 NEON intrinsics not detected])
+fi
+
 dnl ===========================================================================
 dnl Check for IWMMXT
 
@@ -858,7 +896,7 @@
 
 if test $enable_gtk = yes ; then
    AC_CHECK_LIB([pixman-1], [pixman_version_string])
-   PKG_CHECK_MODULES(GTK, [gtk+-2.0 >= 2.16 pixman-1])
+   PKG_CHECK_MODULES(GTK, [gtk+-3.0 pixman-1])
 fi
 
 if test $enable_gtk = auto ; then
@@ -866,7 +904,7 @@
 fi
 
 if test $enable_gtk = auto ; then
-   PKG_CHECK_MODULES(GTK, [gtk+-2.0 >= 2.16 pixman-1], [enable_gtk=yes], [enable_gtk=no])
+   PKG_CHECK_MODULES(GTK, [gtk+-3.0 pixman-1], [enable_gtk=yes], [enable_gtk=no])
 fi
 
 AM_CONDITIONAL(HAVE_GTK, [test "x$enable_gtk" = xyes])
@@ -1148,7 +1186,7 @@
    echo
    echo "      or by filing a bug at "
    echo
-   echo "          https://bugs.freedesktop.org/enter_bug.cgi?product=pixman "
+   echo "          https://gitlab.freedesktop.org/pixman/pixman/-/issues/new "
    echo
    echo "      If you are looking for a stable release of pixman, please note "
    echo "      that stable releases have _even_ minor version numbers. Ie., "

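The --disable-arm-a64-neon switch added above follows the usual autoconf
enable/disable pattern, so builders who need to rule the new assembly out
entirely can do so at configure time:

    ./configure --disable-arm-a64-neon
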
Modified: trunk/Build/source/libs/pixman/pixman-src/meson.build
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/meson.build	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/meson.build	2022-10-18 21:13:29 UTC (rev 64747)
@@ -21,9 +21,9 @@
 project(
   'pixman',
   ['c'],
-  version : '0.40.0',
+  version : '0.42.0',
   license : 'MIT',
-  meson_version : '>= 0.50.0',
+  meson_version : '>= 0.52.0',
   default_options : ['buildtype=debugoptimized'],
 )
 
@@ -37,6 +37,12 @@
     '-fno-strict-aliasing',
     '-fvisibility=hidden',
     '-Wundef',
+    # -ftrapping-math is the default for gcc, but -fno-trapping-math is the
+    # default for clang.  The FLOAT_IS_ZERO macro is used to guard against
+    # floating-point exceptions, however with -fno-trapping-math, the compiler
+    # can reorder floating-point operations so that they occur before the guard.
+    # Note, this option is ignored in clang < 10.0.0.
+    '-ftrapping-math'
   ]),
   language : ['c']
 )
@@ -275,6 +281,22 @@
   error('NEON Support unavailable, but required')
 endif
 
+use_a64neon = get_option('a64-neon')
+have_a64neon = false
+if not use_a64neon.disabled()
+  if host_machine.cpu_family() == 'aarch64'
+    if cc.compiles(files('a64-neon-test.S'), name : 'NEON A64 Intrinsic Support')
+      have_a64neon = true
+    endif
+  endif
+endif
+
+if have_a64neon
+  config.set10('USE_ARM_A64_NEON', true)
+elif use_a64neon.enabled()
+  error('A64 NEON Support unavailable, but required')
+endif
+
 use_iwmmxt = get_option('iwmmxt')
 have_iwmmxt = false
 iwmmxt_flags = ['-flax-vector-conversions', '-Winline']
@@ -390,10 +412,8 @@
   dep_openmp = null_dep
 endif
 
-dep_gtk = dependency('gtk+-2.0', version : '>= 2.16', required : get_option('gtk'))
+dep_gtk = dependency('gtk+-3.0', required : get_option('gtk'))
 dep_glib = dependency('glib-2.0', required : get_option('gtk'))
-dep_pixman = dependency('pixman-1', required : get_option('gtk'),
-                        version : '>= ' + meson.project_version())
 
 dep_png = null_dep
 if not get_option('libpng').disabled()
@@ -472,17 +492,27 @@
   endif
 endforeach
 
-# gcc on Windows only warns that __declspec(thread) isn't supported,
-# passing -Werror=attributes makes it fail.
-if (host_machine.system() == 'windows' and
-    cc.compiles('int __declspec(thread) foo;',
-                args : cc.get_supported_arguments(['-Werror=attributes']),
-                name : 'TLS via __declspec(thread)'))
-  config.set('TLS', '__declspec(thread)')
-elif cc.compiles('int __thread foo;', name : 'TLS via __thread')
-  config.set('TLS', '__thread')
+use_tls = get_option('tls')
+have_tls = ''
+if not use_tls.disabled()
+  # gcc on Windows only warns that __declspec(thread) isn't supported,
+  # passing -Werror=attributes makes it fail.
+  if (host_machine.system() == 'windows' and
+      cc.compiles('int __declspec(thread) foo;',
+                  args : cc.get_supported_arguments(['-Werror=attributes']),
+                  name : 'TLS via __declspec(thread)'))
+    have_tls = '__declspec(thread)'
+  elif cc.compiles('int __thread foo;', name : 'TLS via __thread')
+    have_tls = '__thread'
+  endif
 endif
 
+if have_tls != ''
+  config.set('TLS', have_tls)
+elif use_tls.enabled()
+  error('Compiler TLS Support unavailable, but required')
+endif
+
 if cc.links('''
     static int x = 1;
     static void __attribute__((constructor)) constructor_function () { x = 0; }
@@ -528,15 +558,17 @@
 add_project_arguments('-DHAVE_CONFIG_H', language : ['c'])
 
 subdir('pixman')
-subdir('test')
-subdir('demos')
 
+if not get_option('tests').disabled()
+  subdir('test')
+  subdir('demos')
+endif
+
 pkg = import('pkgconfig')
-pkg.generate(
+pkg.generate(libpixman,
   name : 'Pixman',
   filebase : 'pixman-1',
   description : 'The pixman library (version 1)',
-  libraries : libpixman,
   subdirs: 'pixman-1',
   version : meson.project_version(),
 )

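The -ftrapping-math comment added above refers to guards of the FLOAT_IS_ZERO
kind in the pixman sources. A minimal illustration of the pattern being
protected, with the macro body modeled on the usual -FLT_MIN < f < FLT_MIN
range test (a sketch, not a copy of pixman's code):

    #include <float.h>

    /* Modeled on the FLOAT_IS_ZERO guard the meson comment refers to. */
    #define FLOAT_IS_ZERO(f) (-FLT_MIN < (f) && (f) < FLT_MIN)

    static double
    safe_ratio (double n, double d)
    {
        /* Under -fno-trapping-math the compiler may assume floating-point
         * operations cannot trap and speculate the division above this
         * test; -ftrapping-math keeps the guard ahead of the divide. */
        if (FLOAT_IS_ZERO (d))
            return 0.0;
        return n / d;
    }
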
Modified: trunk/Build/source/libs/pixman/pixman-src/meson_options.txt
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/meson_options.txt	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/meson_options.txt	2022-10-18 21:13:29 UTC (rev 64747)
@@ -54,6 +54,11 @@
   description : 'Use ARM NEON intrinsic optimized paths',
 )
 option(
+  'a64-neon',
+  type : 'feature',
+  description : 'Use ARM A64 NEON intrinsic optimized paths',
+)
+option(
   'iwmmxt',
   type : 'feature',
   description : 'Use ARM IWMMXT intrinsic optimized paths',
@@ -75,9 +80,19 @@
   description : 'Use GNU style inline assembler',
 )
 option(
+  'tls',
+  type : 'feature',
+  description : 'Use compiler support for thread-local storage',
+)
+option(
+  'cpu-features-path',
+  type : 'string',
+  description : 'Path to platform-specific cpu-features.[ch] for systems that do not provide it (e.g. Android)',
+)
+option(
   'openmp',
   type : 'feature',
-  description : 'Enable openmp support',
+  description : 'Enable OpenMP for tests',
 )
 option(
   'timers',
@@ -94,10 +109,15 @@
 option(
   'gtk',
   type : 'feature',
-  description : 'Enable tests using GTK',
+  description : 'Enable demos using GTK',
 )
 option(
   'libpng',
   type : 'feature',
-  description : 'Use libpng'
+  description : 'Use libpng in tests'
 )
+option(
+  'tests',
+  type : 'feature',
+  description : 'Build tests and demos'
+)

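The new options use the standard meson feature/string option syntax, so a
build that forces the A64 NEON paths on and skips the tests and demos would
look something like this (builddir is an arbitrary name):

    meson setup builddir -Da64-neon=enabled -Dtests=disabled
    ninja -C builddir
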
Modified: trunk/Build/source/libs/pixman/pixman-src/pixman/Makefile.am
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/pixman/Makefile.am	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/pixman/Makefile.am	2022-10-18 21:13:29 UTC (rev 64747)
@@ -96,6 +96,21 @@
 ASM_CFLAGS_arm_neon=
 endif
 
+# arm a64 neon code
+if USE_ARM_A64_NEON
+noinst_LTLIBRARIES += libpixman-arma64-neon.la
+libpixman_arma64_neon_la_SOURCES = \
+        pixman-arm-neon.c        \
+        pixman-arm-common.h      \
+        pixman-arma64-neon-asm.S \
+        pixman-arma64-neon-asm-bilinear.S \
+        pixman-arm-asm.h         \
+        pixman-arma64-neon-asm.h
+libpixman_1_la_LIBADD += libpixman-arma64-neon.la
+
+ASM_CFLAGS_arm_neon=
+endif
+
 # iwmmxt code
 if USE_ARM_IWMMXT
 libpixman_iwmmxt_la_SOURCES = pixman-mmx.c

Modified: trunk/Build/source/libs/pixman/pixman-src/pixman/meson.build
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/pixman/meson.build	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/pixman/meson.build	2022-10-18 21:13:29 UTC (rev 64747)
@@ -54,6 +54,8 @@
    ['pixman-arm-simd-asm.S', 'pixman-arm-simd-asm-scaled.S']],
   ['arm-neon', have_neon, [],
    ['pixman-arm-neon-asm.S', 'pixman-arm-neon-asm-bilinear.S']],
+  ['arm-neon', have_a64neon, [],
+   ['pixman-arma64-neon-asm.S', 'pixman-arma64-neon-asm-bilinear.S']],
   ['mips-dspr2', have_mips_dspr2, mips_dspr2_flags,
    ['pixman-mips-dspr2-asm.S', 'pixman-mips-memcpy-asm.S']],
 ]
@@ -102,19 +104,26 @@
   'pixman-utils.c',
 )
 
-# We cannot use 'link_with' or 'link_whole' because meson wont do the right
-# thing for static archives.
-_obs = []
-foreach l : pixman_simd_libs
-  _obs += l.extract_all_objects()
-endforeach
+# Android cpu-features
+cpu_features_path = get_option('cpu-features-path')
+cpu_features_sources = []
+cpu_features_inc = []
+if cpu_features_path != ''
+  message('Using cpu-features.[ch] from ' + cpu_features_path)
+  cpu_features_sources = files(
+    cpu_features_path / 'cpu-features.h',
+    cpu_features_path / 'cpu-features.c',
+  )
+  cpu_features_inc = include_directories(cpu_features_path)
+endif
 
 libpixman = library(
   'pixman-1',
-  [pixman_files, config_h, version_h],
-  objects : _obs,
+  [pixman_files, config_h, version_h, cpu_features_sources],
+  link_with: pixman_simd_libs,
   c_args : libpixman_extra_cargs,
   dependencies : [dep_m, dep_threads],
+  include_directories : cpu_features_inc,
   version : meson.project_version(),
   install : true,
 )

Modified: trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arm-neon.c
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arm-neon.c	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arm-neon.c	2022-10-18 21:13:29 UTC (rev 64747)
@@ -194,7 +194,7 @@
 	       uint32_t                 _xor)
 {
     /* stride is always multiple of 32bit units in pixman */
-    uint32_t byte_stride = stride * sizeof(uint32_t);
+    int32_t byte_stride = stride * sizeof(uint32_t);
 
     switch (bpp)
     {
@@ -331,6 +331,7 @@
     PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, a8,       b5g6r5,   neon_composite_over_8888_8_0565),
     PIXMAN_STD_FAST_PATH (OVER, r5g6b5,   a8,       r5g6b5,   neon_composite_over_0565_8_0565),
     PIXMAN_STD_FAST_PATH (OVER, b5g6r5,   a8,       b5g6r5,   neon_composite_over_0565_8_0565),
+    PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, x8r8g8b8, neon_composite_over_8888_8888_8888),
     PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, a8r8g8b8, neon_composite_over_8888_8888_8888),
     PIXMAN_STD_FAST_PATH (OVER, a8r8g8b8, null,     r5g6b5,   neon_composite_over_8888_0565),
     PIXMAN_STD_FAST_PATH (OVER, a8b8g8r8, null,     b5g6r5,   neon_composite_over_8888_0565),
@@ -341,17 +342,33 @@
     PIXMAN_STD_FAST_PATH (OVER, x8r8g8b8, null,     a8r8g8b8, neon_composite_src_x888_8888),
     PIXMAN_STD_FAST_PATH (OVER, x8b8g8r8, null,     a8b8g8r8, neon_composite_src_x888_8888),
     PIXMAN_STD_FAST_PATH (ADD,  solid,    a8,       a8,       neon_composite_add_n_8_8),
+    PIXMAN_STD_FAST_PATH (ADD,  solid,    a8,       x8r8g8b8, neon_composite_add_n_8_8888),
     PIXMAN_STD_FAST_PATH (ADD,  solid,    a8,       a8r8g8b8, neon_composite_add_n_8_8888),
+    PIXMAN_STD_FAST_PATH (ADD,  solid,    a8,       x8b8g8r8, neon_composite_add_n_8_8888),
     PIXMAN_STD_FAST_PATH (ADD,  solid,    a8,       a8b8g8r8, neon_composite_add_n_8_8888),
     PIXMAN_STD_FAST_PATH (ADD,  a8,       a8,       a8,       neon_composite_add_8_8_8),
     PIXMAN_STD_FAST_PATH (ADD,  r5g6b5,   a8,       r5g6b5,   neon_composite_add_0565_8_0565),
     PIXMAN_STD_FAST_PATH (ADD,  b5g6r5,   a8,       b5g6r5,   neon_composite_add_0565_8_0565),
+    PIXMAN_STD_FAST_PATH (ADD,  x8r8g8b8, a8,       x8r8g8b8, neon_composite_add_8888_8_8888),
+    PIXMAN_STD_FAST_PATH (ADD,  a8r8g8b8, a8,       x8r8g8b8, neon_composite_add_8888_8_8888),
+    PIXMAN_STD_FAST_PATH (ADD,  x8b8g8r8, a8,       x8b8g8r8, neon_composite_add_8888_8_8888),
+    PIXMAN_STD_FAST_PATH (ADD,  a8b8g8r8, a8,       x8b8g8r8, neon_composite_add_8888_8_8888),
     PIXMAN_STD_FAST_PATH (ADD,  a8r8g8b8, a8,       a8r8g8b8, neon_composite_add_8888_8_8888),
     PIXMAN_STD_FAST_PATH (ADD,  a8b8g8r8, a8,       a8b8g8r8, neon_composite_add_8888_8_8888),
+    PIXMAN_STD_FAST_PATH (ADD,  x8r8g8b8, a8r8g8b8, x8r8g8b8, neon_composite_add_8888_8888_8888),
+    PIXMAN_STD_FAST_PATH (ADD,  a8r8g8b8, a8r8g8b8, x8r8g8b8, neon_composite_add_8888_8888_8888),
     PIXMAN_STD_FAST_PATH (ADD,  a8r8g8b8, a8r8g8b8, a8r8g8b8, neon_composite_add_8888_8888_8888),
+    PIXMAN_STD_FAST_PATH (ADD,  x8r8g8b8, solid,    x8r8g8b8, neon_composite_add_8888_n_8888),
+    PIXMAN_STD_FAST_PATH (ADD,  a8r8g8b8, solid,    x8r8g8b8, neon_composite_add_8888_n_8888),
+    PIXMAN_STD_FAST_PATH (ADD,  x8b8g8r8, solid,    x8b8g8r8, neon_composite_add_8888_n_8888),
+    PIXMAN_STD_FAST_PATH (ADD,  a8b8g8r8, solid,    x8b8g8r8, neon_composite_add_8888_n_8888),
     PIXMAN_STD_FAST_PATH (ADD,  a8r8g8b8, solid,    a8r8g8b8, neon_composite_add_8888_n_8888),
     PIXMAN_STD_FAST_PATH (ADD,  a8b8g8r8, solid,    a8b8g8r8, neon_composite_add_8888_n_8888),
     PIXMAN_STD_FAST_PATH (ADD,  a8,       null,     a8,       neon_composite_add_8_8),
+    PIXMAN_STD_FAST_PATH (ADD,  x8r8g8b8, null,     x8r8g8b8, neon_composite_add_8888_8888),
+    PIXMAN_STD_FAST_PATH (ADD,  a8r8g8b8, null,     x8r8g8b8, neon_composite_add_8888_8888),
+    PIXMAN_STD_FAST_PATH (ADD,  x8b8g8r8, null,     x8b8g8r8, neon_composite_add_8888_8888),
+    PIXMAN_STD_FAST_PATH (ADD,  a8b8g8r8, null,     x8b8g8r8, neon_composite_add_8888_8888),
     PIXMAN_STD_FAST_PATH (ADD,  a8r8g8b8, null,     a8r8g8b8, neon_composite_add_8888_8888),
     PIXMAN_STD_FAST_PATH (ADD,  a8b8g8r8, null,     a8b8g8r8, neon_composite_add_8888_8888),
     PIXMAN_STD_FAST_PATH (IN,   solid,    null,     a8,       neon_composite_in_n_8),
@@ -359,7 +376,9 @@
     PIXMAN_STD_FAST_PATH (OVER_REVERSE, solid, null, a8b8g8r8, neon_composite_over_reverse_n_8888),
     PIXMAN_STD_FAST_PATH (OUT_REVERSE,  a8,    null, r5g6b5,   neon_composite_out_reverse_8_0565),
     PIXMAN_STD_FAST_PATH (OUT_REVERSE,  a8,    null, b5g6r5,   neon_composite_out_reverse_8_0565),
+    PIXMAN_STD_FAST_PATH (OUT_REVERSE,  a8,    null, x8r8g8b8, neon_composite_out_reverse_8_8888),
     PIXMAN_STD_FAST_PATH (OUT_REVERSE,  a8,    null, a8r8g8b8, neon_composite_out_reverse_8_8888),
+    PIXMAN_STD_FAST_PATH (OUT_REVERSE,  a8,    null, x8b8g8r8, neon_composite_out_reverse_8_8888),
     PIXMAN_STD_FAST_PATH (OUT_REVERSE,  a8,    null, a8b8g8r8, neon_composite_out_reverse_8_8888),
 
     SIMPLE_NEAREST_FAST_PATH (OVER, a8r8g8b8, a8r8g8b8, neon_8888_8888),
@@ -404,6 +423,7 @@
 
     SIMPLE_BILINEAR_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, neon_8888_8888),
     SIMPLE_BILINEAR_FAST_PATH (ADD, a8r8g8b8, x8r8g8b8, neon_8888_8888),
+    SIMPLE_BILINEAR_FAST_PATH (ADD, x8r8g8b8, x8r8g8b8, neon_8888_8888),
 
     SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, a8r8g8b8, a8r8g8b8, neon_8888_8_8888),
     SIMPLE_BILINEAR_A8_MASK_FAST_PATH (SRC, a8r8g8b8, x8r8g8b8, neon_8888_8_8888),
@@ -420,6 +440,7 @@
 
     SIMPLE_BILINEAR_A8_MASK_FAST_PATH (ADD, a8r8g8b8, a8r8g8b8, neon_8888_8_8888),
     SIMPLE_BILINEAR_A8_MASK_FAST_PATH (ADD, a8r8g8b8, x8r8g8b8, neon_8888_8_8888),
+    SIMPLE_BILINEAR_A8_MASK_FAST_PATH (ADD, x8r8g8b8, x8r8g8b8, neon_8888_8_8888),
 
     { PIXMAN_OP_NONE },
 };

Modified: trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arm.c
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arm.c	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arm.c	2022-10-18 21:13:29 UTC (rev 64747)
@@ -246,5 +246,11 @@
 	imp = _pixman_implementation_create_arm_neon (imp);
 #endif
 
+#ifdef USE_ARM_A64_NEON
+    /* neon is a part of aarch64 */
+    if (!_pixman_disabled ("arm-neon"))
+        imp = _pixman_implementation_create_arm_neon (imp);
+#endif
+
     return imp;
 }

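Since NEON is a mandatory part of the AArch64 ISA, the hunk above registers
the NEON implementation without the runtime CPU probing the 32-bit ARM path
needs. It still goes through _pixman_disabled(), which consults the
PIXMAN_DISABLE environment variable, so the fast paths can be switched off
for debugging with something like (the program name is hypothetical):

    PIXMAN_DISABLE="arm-neon" ./pixman-client
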
Added: trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arma64-neon-asm-bilinear.S
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arma64-neon-asm-bilinear.S	                        (rev 0)
+++ trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arma64-neon-asm-bilinear.S	2022-10-18 21:13:29 UTC (rev 64747)
@@ -0,0 +1,1275 @@
+/*
+ * Copyright © 2011 SCore Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author:  Siarhei Siamashka (siarhei.siamashka at nokia.com)
+ * Author:  Taekyun Kim (tkq.kim at samsung.com)
+ */
+
+/*
+ * This file contains scaled bilinear scanline functions implemented
+ * using Siarhei's older bilinear macro template.
+ *
+ * << General scanline function procedures >>
+ *  1. bilinear interpolate source pixels
+ *  2. load mask pixels
+ *  3. load destination pixels
+ *  4. duplicate mask to fill whole register
+ *  5. interleave source & destination pixels
+ *  6. apply mask to source pixels
+ *  7. combine source & destination pixels
+ *  8. deinterleave final result
+ *  9. store destination pixels
+ *
+ * All registers with a single number (i.e. src0, tmp0) are 64-bit registers.
+ * Registers with double numbers (src01, dst01) are 128-bit registers.
+ * All temp registers can be used freely outside the code block.
+ * Assume that the symbols (register .req) OUT and MASK are defined at the caller of these macro blocks.
+ *
+ * Remarks
+ *  There can be lots of pipeline stalls inside code blocks and between code blocks.
+ *  Further optimizations will be done by new macro templates using head/tail_head/tail scheme.
+ */
+
+/* Prevent the stack from becoming executable for no reason... */
+#if defined(__linux__) && defined (__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+.text
+.arch armv8-a
+.altmacro
+.p2align 2
+
+#include "pixman-private.h"
+#include "pixman-arm-asm.h"
+#include "pixman-arma64-neon-asm.h"
+
+/*
+ * Bilinear macros from pixman-arm-neon-asm.S
+ */
+
+/*
+ * Bilinear scaling support code which tries to provide pixel fetching, color
+ * format conversion, and interpolation as separate macros which can be used
+ * as the basic building blocks for constructing bilinear scanline functions.
+ */
+
+.macro bilinear_load_8888 reg1, reg2, tmp
+    asr       WTMP1, X, #16
+    add       X, X, UX
+    add       TMP1, TOP, TMP1, lsl #2
+    ld1       {&reg1&.2s}, [TMP1], STRIDE
+    ld1       {&reg2&.2s}, [TMP1]
+.endm
+
+.macro bilinear_load_0565 reg1, reg2, tmp
+    asr       WTMP1, X, #16
+    add       X, X, UX
+    add       TMP1, TOP, TMP1, lsl #1
+    ld1       {&reg2&.s}[0], [TMP1], STRIDE
+    ld1       {&reg2&.s}[1], [TMP1]
+    convert_four_0565_to_x888_packed reg2, reg1, reg2, tmp
+.endm
+
+.macro bilinear_load_and_vertical_interpolate_two_8888 \
+                    acc1, acc2, reg1, reg2, reg3, reg4, tmp1, tmp2
+
+    bilinear_load_8888 reg1, reg2, tmp1
+    umull     &acc1&.8h, &reg1&.8b, v28.8b
+    umlal     &acc1&.8h, &reg2&.8b, v29.8b
+    bilinear_load_8888 reg3, reg4, tmp2
+    umull     &acc2&.8h, &reg3&.8b, v28.8b
+    umlal     &acc2&.8h, &reg4&.8b, v29.8b
+.endm
+
+.macro bilinear_load_and_vertical_interpolate_four_8888 \
+                xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
+                yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
+
+    bilinear_load_and_vertical_interpolate_two_8888 \
+                xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi
+    bilinear_load_and_vertical_interpolate_two_8888 \
+                yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
+.endm
+
+.macro vzip reg1, reg2
+    zip1      v24.8b, reg1, reg2
+    zip2      reg2,   reg1, reg2
+    mov       reg1,   v24.8b
+.endm
+
+.macro vuzp reg1, reg2
+    uzp1     v24.8b, reg1, reg2
+    uzp2     reg2,   reg1, reg2
+    mov      reg1,   v24.8b
+.endm
+
+.macro bilinear_load_and_vertical_interpolate_two_0565 \
+                acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi
+    asr       WTMP1, X, #16
+    add       X, X, UX
+    add       TMP1, TOP, TMP1, lsl #1
+    asr       WTMP2, X, #16
+    add       X, X, UX
+    add       TMP2, TOP, TMP2, lsl #1
+    ld1       {&acc2&.s}[0], [TMP1], STRIDE
+    ld1       {&acc2&.s}[2], [TMP2], STRIDE
+    ld1       {&acc2&.s}[1], [TMP1]
+    ld1       {&acc2&.s}[3], [TMP2]
+    convert_0565_to_x888 acc2, reg3, reg2, reg1
+    vzip      &reg1&.8b, &reg3&.8b
+    vzip      &reg2&.8b, &reg4&.8b
+    vzip      &reg3&.8b, &reg4&.8b
+    vzip      &reg1&.8b, &reg2&.8b
+    umull     &acc1&.8h, &reg1&.8b, v28.8b
+    umlal     &acc1&.8h, &reg2&.8b, v29.8b
+    umull     &acc2&.8h, &reg3&.8b, v28.8b
+    umlal     &acc2&.8h, &reg4&.8b, v29.8b
+.endm
+
+.macro bilinear_load_and_vertical_interpolate_four_0565 \
+                xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
+                yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
+
+    asr       WTMP1, X, #16
+    add       X, X, UX
+    add       TMP1, TOP, TMP1, lsl #1
+    asr       WTMP2, X, #16
+    add       X, X, UX
+    add       TMP2, TOP, TMP2, lsl #1
+    ld1       {&xacc2&.s}[0], [TMP1], STRIDE
+    ld1       {&xacc2&.s}[2], [TMP2], STRIDE
+    ld1       {&xacc2&.s}[1], [TMP1]
+    ld1       {&xacc2&.s}[3], [TMP2]
+    convert_0565_to_x888 xacc2, xreg3, xreg2, xreg1
+    asr       WTMP1, X, #16
+    add       X, X, UX
+    add       TMP1, TOP, TMP1, lsl #1
+    asr       WTMP2, X, #16
+    add       X, X, UX
+    add       TMP2, TOP, TMP2, lsl #1
+    ld1       {&yacc2&.s}[0], [TMP1], STRIDE
+    vzip      &xreg1&.8b, &xreg3&.8b
+    ld1       {&yacc2&.s}[2], [TMP2], STRIDE
+    vzip      &xreg2&.8b, &xreg4&.8b
+    ld1       {&yacc2&.s}[1], [TMP1]
+    vzip      &xreg3&.8b, &xreg4&.8b
+    ld1       {&yacc2&.s}[3], [TMP2]
+    vzip      &xreg1&.8b, &xreg2&.8b
+    convert_0565_to_x888 yacc2, yreg3, yreg2, yreg1
+    umull     &xacc1&.8h, &xreg1&.8b, v28.8b
+    vzip      &yreg1&.8b, &yreg3&.8b
+    umlal     &xacc1&.8h, &xreg2&.8b, v29.8b
+    vzip      &yreg2&.8b, &yreg4&.8b
+    umull     &xacc2&.8h, &xreg3&.8b, v28.8b
+    vzip      &yreg3&.8b, &yreg4&.8b
+    umlal     &xacc2&.8h, &xreg4&.8b, v29.8b
+    vzip      &yreg1&.8b, &yreg2&.8b
+    umull     &yacc1&.8h, &yreg1&.8b, v28.8b
+    umlal     &yacc1&.8h, &yreg2&.8b, v29.8b
+    umull     &yacc2&.8h, &yreg3&.8b, v28.8b
+    umlal     &yacc2&.8h, &yreg4&.8b, v29.8b
+.endm
+
+.macro bilinear_store_8888 numpix, tmp1, tmp2
+.if numpix == 4
+    st1       {v0.2s, v1.2s}, [OUT], #16
+.elseif numpix == 2
+    st1       {v0.2s}, [OUT], #8
+.elseif numpix == 1
+    st1       {v0.s}[0], [OUT], #4
+.else
+    .error bilinear_store_8888 numpix is unsupported
+.endif
+.endm
+
+.macro bilinear_store_0565 numpix, tmp1, tmp2
+    vuzp    v0.8b, v1.8b
+    vuzp    v2.8b, v3.8b
+    vuzp    v1.8b, v3.8b
+    vuzp    v0.8b, v2.8b
+    convert_8888_to_0565 v2, v1, v0, v1, tmp1, tmp2
+.if numpix == 4
+    st1       {v1.4h}, [OUT], #8
+.elseif numpix == 2
+    st1       {v1.s}[0], [OUT], #4
+.elseif numpix == 1
+    st1       {v1.h}[0], [OUT], #2
+.else
+    .error bilinear_store_0565 numpix is unsupported
+.endif
+.endm
+
+
+/*
+ * Macros for loading mask pixels into register 'mask'.
+ * dup must be done somewhere else.
+ */
+.macro bilinear_load_mask_x numpix, mask
+.endm
+
+.macro bilinear_load_mask_8 numpix, mask
+.if numpix == 4
+    ld1         {&mask&.s}[0], [MASK], #4
+.elseif numpix == 2
+    ld1         {&mask&.h}[0], [MASK], #2
+.elseif numpix == 1
+    ld1         {&mask&.b}[0], [MASK], #1
+.else
+    .error bilinear_load_mask_8 numpix is unsupported
+.endif
+    prfm        PREFETCH_MODE, [MASK, #prefetch_offset]
+.endm
+
+.macro bilinear_load_mask mask_fmt, numpix, mask
+    bilinear_load_mask_&mask_fmt numpix, mask
+.endm
+
+
+/*
+ * Macros for loading destination pixels into registers 'dst0' and 'dst1'.
+ * Interleave should be done somewhere else.
+ */
+.macro bilinear_load_dst_0565_src numpix, dst0, dst1, dst01
+.endm
+
+.macro bilinear_load_dst_8888_src numpix, dst0, dst1, dst01
+.endm
+
+.macro bilinear_load_dst_8888 numpix, dst0, dst1, dst01
+.if numpix == 4
+    ld1         {&dst0&.2s, &dst1&.2s}, [OUT]
+.elseif numpix == 2
+    ld1         {&dst0&.2s}, [OUT]
+.elseif numpix == 1
+    ld1         {&dst0&.s}[0], [OUT]
+.else
+    .error bilinear_load_dst_8888 numpix is unsupported
+.endif
+    mov         &dst01&.d[0], &dst0&.d[0]
+    mov         &dst01&.d[1], &dst1&.d[0]
+    prfm        PREFETCH_MODE, [OUT, #(prefetch_offset * 4)]
+.endm
+
+.macro bilinear_load_dst_8888_over numpix, dst0, dst1, dst01
+    bilinear_load_dst_8888 numpix, dst0, dst1, dst01
+.endm
+
+.macro bilinear_load_dst_8888_add numpix, dst0, dst1, dst01
+    bilinear_load_dst_8888 numpix, dst0, dst1, dst01
+.endm
+
+.macro bilinear_load_dst dst_fmt, op, numpix, dst0, dst1, dst01
+    bilinear_load_dst_&dst_fmt&_&op numpix, dst0, dst1, dst01
+.endm
+
+/*
+ * Macros for duplicating a partially loaded mask to fill an entire register.
+ * We will apply the mask to interleaved source pixels, that is
+ *  (r0, r1, r2, r3, g0, g1, g2, g3) x (m0, m1, m2, m3, m0, m1, m2, m3)
+ *  (b0, b1, b2, b3, a0, a1, a2, a3) x (m0, m1, m2, m3, m0, m1, m2, m3)
+ * So we need to duplicate the loaded mask across the whole register.
+ *
+ * For the two-pixel case
+ *  (r0, r1, x, x, g0, g1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1)
+ *  (b0, b1, x, x, a0, a1, x, x) x (m0, m1, m0, m1, m0, m1, m0, m1)
+ * We can do some optimizations for this, including the last pixel cases.
+ */
+.macro bilinear_duplicate_mask_x numpix, mask
+.endm
+
+.macro bilinear_duplicate_mask_8 numpix, mask
+.if numpix == 4
+    dup         &mask&.2s, &mask&.s[0]
+.elseif numpix == 2
+    dup         &mask&.4h, &mask&.h[0]
+.elseif numpix == 1
+    dup         &mask&.8b, &mask&.b[0]
+.else
+    .error bilinear_duplicate_mask_8 is unsupported
+.endif
+.endm
+
+.macro bilinear_duplicate_mask mask_fmt, numpix, mask
+    bilinear_duplicate_mask_&mask_fmt numpix, mask
+.endm
+
+/*
+ * Macros for interleaving src and dst pixels to rrrr gggg bbbb aaaa form.
+ * Interleave should be done when a mask is enabled or the operator is 'over'.
+ */
+.macro bilinear_interleave src0, src1, src01, dst0, dst1, dst01
+    vuzp       &src0&.8b, &src1&.8b
+    vuzp       &dst0&.8b, &dst1&.8b
+    vuzp       &src0&.8b, &src1&.8b
+    vuzp       &dst0&.8b, &dst1&.8b
+    mov        &src01&.d[1], &src1&.d[0]
+    mov        &src01&.d[0], &src0&.d[0]
+    mov        &dst01&.d[1], &dst1&.d[0]
+    mov        &dst01&.d[0], &dst0&.d[0]
+.endm
+
+.macro bilinear_interleave_src_dst_x_src \
+                numpix, src0, src1, src01, dst0, dst1, dst01
+.endm
+
+.macro bilinear_interleave_src_dst_x_over \
+                numpix, src0, src1, src01, dst0, dst1, dst01
+
+    bilinear_interleave src0, src1, src01, dst0, dst1, dst01
+.endm
+
+.macro bilinear_interleave_src_dst_x_add \
+                numpix, src0, src1, src01, dst0, dst1, dst01
+    bilinear_interleave src0, src1, src01, dst0, dst1, dst01
+.endm
+
+.macro bilinear_interleave_src_dst_8_src \
+                numpix, src0, src1, src01, dst0, dst1, dst01
+
+    bilinear_interleave src0, src1, src01, dst0, dst1, dst01
+.endm
+
+.macro bilinear_interleave_src_dst_8_over \
+                numpix, src0, src1, src01, dst0, dst1, dst01
+
+    bilinear_interleave src0, src1, src01, dst0, dst1, dst01
+.endm
+
+.macro bilinear_interleave_src_dst_8_add \
+                numpix, src0, src1, src01, dst0, dst1, dst01
+
+    bilinear_interleave src0, src1, src01, dst0, dst1, dst01
+.endm
+
+.macro bilinear_interleave_src_dst \
+                mask_fmt, op, numpix, src0, src1, src01, dst0, dst1, dst01
+
+    bilinear_interleave_src_dst_&mask_fmt&_&op \
+                numpix, src0, src1, src01, dst0, dst1, dst01
+.endm
+
+
+/*
+ * Macros for applying masks to src pixels. (see combine_mask_u() function)
+ * src, dst should be in interleaved form.
+ * mask register should be in form (m0, m1, m2, m3).
+ */
+.macro bilinear_apply_mask_to_src_x \
+                numpix, src0, src1, src01, mask, \
+                tmp01, tmp23, tmp45, tmp67
+.endm
+
+.macro bilinear_apply_mask_to_src_8 \
+                numpix, src0, src1, src01, mask, \
+                tmp01, tmp23, tmp45, tmp67
+
+    umull           &tmp01&.8h, &src0&.8b, &mask&.8b
+    umull           &tmp23&.8h, &src1&.8b, &mask&.8b
+    /* bubbles */
+    urshr           &tmp45&.8h, &tmp01&.8h, #8
+    urshr           &tmp67&.8h, &tmp23&.8h, #8
+    /* bubbles */
+    raddhn          &src0&.8b, &tmp45&.8h, &tmp01&.8h
+    raddhn          &src1&.8b, &tmp67&.8h, &tmp23&.8h
+    mov             &src01&.d[0], &src0&.d[0]
+    mov             &src01&.d[1], &src1&.d[0]
+.endm
+
+.macro bilinear_apply_mask_to_src \
+                mask_fmt, numpix, src0, src1, src01, mask, \
+                tmp01, tmp23, tmp45, tmp67
+
+    bilinear_apply_mask_to_src_&mask_fmt \
+                numpix, src0, src1, src01, mask, \
+                tmp01, tmp23, tmp45, tmp67
+.endm
+
+
+/*
+ * Macros for combining src and destination pixels.
+ * Whether to interleave depends on the operator 'op'.
+ */
+.macro bilinear_combine_src \
+                numpix, src0, src1, src01, dst0, dst1, dst01, \
+                tmp01, tmp23, tmp45, tmp67, tmp8
+.endm
+
+.macro bilinear_combine_over \
+                numpix, src0, src1, src01, dst0, dst1, dst01, \
+                tmp01, tmp23, tmp45, tmp67, tmp8
+
+    dup         &tmp8&.2s, &src1&.s[1]
+    /* bubbles */
+    mvn         &tmp8&.8b, &tmp8&.8b
+    /* bubbles */
+    umull       &tmp01&.8h, &dst0&.8b, &tmp8&.8b
+    /* bubbles */
+    umull       &tmp23&.8h, &dst1&.8b, &tmp8&.8b
+    /* bubbles */
+    urshr       &tmp45&.8h, &tmp01&.8h, #8
+    urshr       &tmp67&.8h, &tmp23&.8h, #8
+    /* bubbles */
+    raddhn      &dst0&.8b, &tmp45&.8h, &tmp01&.8h
+    raddhn      &dst1&.8b, &tmp67&.8h, &tmp23&.8h
+    mov         &dst01&.d[0], &dst0&.d[0]
+    mov         &dst01&.d[1], &dst1&.d[0]
+    /* bubbles */
+    uqadd       &src0&.8b, &dst0&.8b, &src0&.8b
+    uqadd       &src1&.8b, &dst1&.8b, &src1&.8b
+    mov         &src01&.d[0], &src0&.d[0]
+    mov         &src01&.d[1], &src1&.d[0]
+.endm
+
+.macro bilinear_combine_add \
+                numpix, src0, src1, src01, dst0, dst1, dst01, \
+                tmp01, tmp23, tmp45, tmp67, tmp8
+
+    uqadd       &src0&.8b, &dst0&.8b, &src0&.8b
+    uqadd       &src1&.8b, &dst1&.8b, &src1&.8b
+    mov         &src01&.d[0], &src0&.d[0]
+    mov         &src01&.d[1], &src1&.d[0]
+.endm
+
+.macro bilinear_combine \
+                op, numpix, src0, src1, src01, dst0, dst1, dst01, \
+                tmp01, tmp23, tmp45, tmp67, tmp8
+
+    bilinear_combine_&op \
+                numpix, src0, src1, src01, dst0, dst1, dst01, \
+                tmp01, tmp23, tmp45, tmp67, tmp8
+.endm
+
+/*
+ * Macros for final deinterleaving of destination pixels if needed.
+ */
+.macro bilinear_deinterleave numpix, dst0, dst1, dst01
+    vuzp       &dst0&.8b, &dst1&.8b
+    /* bubbles */
+    vuzp       &dst0&.8b, &dst1&.8b
+    mov        &dst01&.d[0], &dst0&.d[0]
+    mov        &dst01&.d[1], &dst1&.d[0]
+.endm
+
+.macro bilinear_deinterleave_dst_x_src numpix, dst0, dst1, dst01
+.endm
+
+.macro bilinear_deinterleave_dst_x_over numpix, dst0, dst1, dst01
+    bilinear_deinterleave numpix, dst0, dst1, dst01
+.endm
+
+.macro bilinear_deinterleave_dst_x_add numpix, dst0, dst1, dst01
+    bilinear_deinterleave numpix, dst0, dst1, dst01
+.endm
+
+.macro bilinear_deinterleave_dst_8_src numpix, dst0, dst1, dst01
+    bilinear_deinterleave numpix, dst0, dst1, dst01
+.endm
+
+.macro bilinear_deinterleave_dst_8_over numpix, dst0, dst1, dst01
+    bilinear_deinterleave numpix, dst0, dst1, dst01
+.endm
+
+.macro bilinear_deinterleave_dst_8_add numpix, dst0, dst1, dst01
+    bilinear_deinterleave numpix, dst0, dst1, dst01
+.endm
+
+.macro bilinear_deinterleave_dst mask_fmt, op, numpix, dst0, dst1, dst01
+    bilinear_deinterleave_dst_&mask_fmt&_&op numpix, dst0, dst1, dst01
+.endm
+
+
+.macro bilinear_interpolate_last_pixel src_fmt, mask_fmt, dst_fmt, op
+    bilinear_load_&src_fmt v0, v1, v2
+    bilinear_load_mask mask_fmt, 1, v4
+    bilinear_load_dst dst_fmt, op, 1, v18, v19, v9
+    umull     v2.8h, v0.8b, v28.8b
+    umlal     v2.8h, v1.8b, v29.8b
+    /* 5 cycles bubble */
+    ushll     v0.4s, v2.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v0.4s, v2.4h, v15.h[0]
+    umlal2    v0.4s, v2.8h, v15.h[0]
+    /* 5 cycles bubble */
+    bilinear_duplicate_mask mask_fmt, 1, v4
+    shrn      v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    /* 3 cycles bubble */
+    xtn       v0.8b, v0.8h
+    /* 1 cycle bubble */
+    bilinear_interleave_src_dst \
+                mask_fmt, op, 1, v0, v1, v0, v18, v19, v9
+    bilinear_apply_mask_to_src \
+                mask_fmt, 1, v0, v1, v0, v4, \
+                v3, v8, v10, v11
+    bilinear_combine \
+                op, 1, v0, v1, v0, v18, v19, v9, \
+                v3, v8, v10, v11, v5
+    bilinear_deinterleave_dst mask_fmt, op, 1, v0, v1, v0
+    bilinear_store_&dst_fmt 1, v17, v18
+.endm
+
+.macro bilinear_interpolate_two_pixels src_fmt, mask_fmt, dst_fmt, op
+    bilinear_load_and_vertical_interpolate_two_&src_fmt \
+                v1, v11, v18, v19, v20, v21, v22, v23
+    bilinear_load_mask mask_fmt, 2, v4
+    bilinear_load_dst dst_fmt, op, 2, v18, v19, v9
+    ushll     v0.4s, v1.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v0.4s, v1.4h, v15.h[0]
+    umlal2    v0.4s, v1.8h, v15.h[0]
+    ushll     v10.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v10.4s, v11.4h, v15.h[4]
+    umlal2    v10.4s, v11.8h, v15.h[4]
+    shrn      v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    shrn2     v0.8h, v10.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    bilinear_duplicate_mask mask_fmt, 2, v4
+    ushr      v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+    add       v12.8h, v12.8h, v13.8h
+    xtn       v0.8b, v0.8h
+    bilinear_interleave_src_dst \
+                mask_fmt, op, 2, v0, v1, v0, v18, v19, v9
+    bilinear_apply_mask_to_src \
+                mask_fmt, 2, v0, v1, v0, v4, \
+                v3, v8, v10, v11
+    bilinear_combine \
+                op, 2, v0, v1, v0, v18, v19, v9, \
+                v3, v8, v10, v11, v5
+    bilinear_deinterleave_dst mask_fmt, op, 2, v0, v1, v0
+    bilinear_store_&dst_fmt 2, v16, v17
+.endm
+
+.macro bilinear_interpolate_four_pixels src_fmt, mask_fmt, dst_fmt, op
+    bilinear_load_and_vertical_interpolate_four_&src_fmt \
+                v1, v11, v4,  v5,  v6,  v7,  v22, v23 \
+                v3, v9,  v16, v17, v20, v21, v18, v19
+    prfm      PREFETCH_MODE, [TMP1, PF_OFFS]
+    sub       TMP1, TMP1, STRIDE
+    prfm      PREFETCH_MODE, [TMP1, PF_OFFS]
+    ushll     v0.4s, v1.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v0.4s, v1.4h, v15.h[0]
+    umlal2    v0.4s, v1.8h, v15.h[0]
+    ushll     v10.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v10.4s, v11.4h, v15.h[4]
+    umlal2    v10.4s, v11.8h, v15.h[4]
+    ushr      v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+    ushll     v2.4s, v3.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v2.4s, v3.4h, v15.h[0]
+    umlal2    v2.4s, v3.8h, v15.h[0]
+    ushll     v8.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v8.4s, v9.4h, v15.h[4]
+    umlal2    v8.4s, v9.8h, v15.h[4]
+    add       v12.8h, v12.8h, v13.8h
+    shrn      v0.4h,  v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    shrn2     v0.8h, v10.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    shrn      v2.4h,  v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    shrn2     v2.8h,  v8.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    bilinear_load_mask mask_fmt, 4, v4
+    bilinear_duplicate_mask mask_fmt, 4, v4
+    ushr      v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+    xtn       v0.8b, v0.8h
+    xtn       v1.8b, v2.8h
+    add       v12.8h, v12.8h, v13.8h
+    bilinear_load_dst dst_fmt, op, 4, v2, v3, v21
+    bilinear_interleave_src_dst \
+                mask_fmt, op, 4, v0, v1, v0, v2, v3, v11
+    bilinear_apply_mask_to_src \
+                mask_fmt, 4, v0, v1, v0, v4, \
+                v6, v8, v9, v10
+    bilinear_combine \
+                op, 4, v0, v1, v0, v2, v3, v1, \
+                v6, v8, v9, v10, v23
+    bilinear_deinterleave_dst mask_fmt, op, 4, v0, v1, v0
+    bilinear_store_&dst_fmt 4, v6, v7
+.endm
+
+.set BILINEAR_FLAG_USE_MASK,        1
+.set BILINEAR_FLAG_USE_ALL_NEON_REGS,    2
+
+/*
+ * Main template macro for generating NEON optimized bilinear scanline functions.
+ *
+ * The bilinear scanline generator macro takes the following arguments:
+ *  fname            - name of the function to generate
+ *  src_fmt            - source color format (8888 or 0565)
+ *  dst_fmt            - destination color format (8888 or 0565)
+ *  src/dst_bpp_shift        - (1 << bpp_shift) is the size of a src/dst pixel in bytes
+ *  process_last_pixel        - code block that interpolates one pixel and does not
+ *                  update the horizontal weight
+ *  process_two_pixels        - code block that interpolates two pixels and updates
+ *                  the horizontal weight
+ *  process_four_pixels        - code block that interpolates four pixels and updates
+ *                  the horizontal weight
+ *  process_pixblock_head    - head part of the middle loop
+ *  process_pixblock_tail    - tail part of the middle loop
+ *  process_pixblock_tail_head    - fused tail + head of the middle loop
+ *  pixblock_size        - number of pixels processed in a single middle loop
+ *                  iteration
+ *  prefetch_distance        - prefetch the source image by that many pixels ahead
+ */
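+
+/*
+ * As a rough, hedged C model of the control flow this template emits
+ * (the helper names are illustrative stand-ins for the process_* code
+ * blocks passed as arguments, not real pixman symbols; the real code
+ * advances OUT and updates the weight vectors inside those blocks):
+ *
+ *   #include <stdint.h>
+ *
+ *   extern void last_pixel (void), two_pixels (void), four_pixels (void),
+ *               pixblock_head (void), pixblock_tail (void),
+ *               pixblock_tail_head (void);
+ *
+ *   static void
+ *   scanline (uintptr_t out, int width, int dst_bpp_shift, int pixblock_size)
+ *   {
+ *       // leading pixels, until the output pointer is block aligned
+ *       if (width >= 1 && (out & ((uintptr_t) 1 << dst_bpp_shift)))
+ *           { last_pixel ();  width -= 1; }
+ *       if (width >= 2 && (out & ((uintptr_t) 1 << (dst_bpp_shift + 1))))
+ *           { two_pixels ();  width -= 2; }
+ *       if (pixblock_size == 8 && width >= 4 &&
+ *           (out & ((uintptr_t) 1 << (dst_bpp_shift + 2))))
+ *           { four_pixels ();  width -= 4; }
+ *
+ *       // software-pipelined middle loop: head, tail_head ..., tail
+ *       if ((width -= pixblock_size) >= 0)
+ *       {
+ *           pixblock_head ();
+ *           while ((width -= pixblock_size) >= 0)
+ *               pixblock_tail_head ();
+ *           pixblock_tail ();
+ *       }
+ *       width += pixblock_size;
+ *
+ *       // trailing pixels
+ *       if (pixblock_size == 8 && (width & 4)) four_pixels ();
+ *       if (width & 2) two_pixels ();
+ *       if (width & 1) last_pixel ();
+ *   }
+ */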
+
+.macro generate_bilinear_scanline_func \
+    fname, \
+    src_fmt, dst_fmt, src_bpp_shift, dst_bpp_shift, \
+    bilinear_process_last_pixel, \
+    bilinear_process_two_pixels, \
+    bilinear_process_four_pixels, \
+    bilinear_process_pixblock_head, \
+    bilinear_process_pixblock_tail, \
+    bilinear_process_pixblock_tail_head, \
+    pixblock_size, \
+    prefetch_distance, \
+    flags
+
+pixman_asm_function fname
+.if pixblock_size == 8
+.elseif pixblock_size == 4
+.else
+    .error "unsupported pixblock size"
+.endif
+
+.if ((flags) & BILINEAR_FLAG_USE_MASK) == 0
+    OUT       .req    x0
+    TOP       .req    x1
+    BOTTOM    .req    x2
+    WT        .req    x3
+    WWT       .req    w3
+    WB        .req    x4
+    WWB       .req    w4
+    X         .req    w5
+    UX        .req    w6
+    WIDTH     .req    x7
+    TMP1      .req    x10
+    WTMP1     .req    w10
+    TMP2      .req    x11
+    WTMP2     .req    w11
+    PF_OFFS   .req    x12
+    TMP3      .req    x13
+    WTMP3     .req    w13
+    TMP4      .req    x14
+    WTMP4     .req    w14
+    STRIDE    .req    x15
+    DUMMY     .req    x30
+
+    stp       x29, x30, [sp, -16]!
+    mov       x29, sp
+    sub       sp, sp, 112
+    sub       x29, x29, 64
+    st1       {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32
+    st1       {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32
+    stp       x10, x11, [x29, -80]
+    stp       x12, x13, [x29, -96]
+    stp       x14, x15, [x29, -112]
+.else
+    OUT       .req      x0
+    MASK      .req      x1
+    TOP       .req      x2
+    BOTTOM    .req      x3
+    WT        .req      x4
+    WWT       .req      w4
+    WB        .req      x5
+    WWB       .req      w5
+    X         .req      w6
+    UX        .req      w7
+    WIDTH     .req      x8
+    TMP1      .req      x10
+    WTMP1     .req      w10
+    TMP2      .req      x11
+    WTMP2     .req      w11
+    PF_OFFS   .req      x12
+    TMP3      .req      x13
+    WTMP3     .req      w13
+    TMP4      .req      x14
+    WTMP4     .req      w14
+    STRIDE    .req      x15
+    DUMMY     .req      x30
+
+    .set prefetch_offset, prefetch_distance
+
+    stp      x29, x30, [sp, -16]!
+    mov      x29, sp
+    sub      x29, x29, 64
+    st1      {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32
+    st1      {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32
+    stp      x10, x11, [x29, -80]
+    stp      x12, x13, [x29, -96]
+    stp      x14, x15, [x29, -112]
+    str      x8, [x29, -120]
+    ldr      w8, [x29, 16]
+    sub      sp, sp, 120
+.endif
+
+    mov      WTMP1, #prefetch_distance
+    umull    PF_OFFS, WTMP1, UX
+
+    sub      STRIDE, BOTTOM, TOP
+    .unreq   BOTTOM
+
+    cmp      WIDTH, #0
+    ble      300f
+
+    dup      v12.8h, X
+    dup      v13.8h, UX
+    dup      v28.8b, WWT
+    dup      v29.8b, WWB
+    mov      v25.d[0], v12.d[1]
+    mov      v26.d[0], v13.d[0]
+    add      v25.4h, v25.4h, v26.4h
+    mov      v12.d[1], v25.d[0]
+
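+    /*
+     * At this point v12 roughly holds the fractional parts of the
+     * horizontal positions for the two pixels in flight (lanes 0-3 = X,
+     * lanes 4-7 = X + UX) and v13 holds the per-step increment. Below,
+     * 'ushr #(16 - BILINEAR_INTERPOLATION_BITS)' extracts the top bits
+     * of each lane into v15, the horizontal weights consumed by the
+     * bilinear_interpolate_* macros.
+     */
+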
+    /* ensure good destination alignment  */
+    cmp       WIDTH, #1
+    blt       100f
+    tst       OUT, #(1 << dst_bpp_shift)
+    beq       100f
+    ushr      v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+    add       v12.8h, v12.8h, v13.8h
+    bilinear_process_last_pixel
+    sub       WIDTH, WIDTH, #1
+100:
+    add       v13.8h, v13.8h, v13.8h
+    ushr      v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+    add       v12.8h, v12.8h, v13.8h
+
+    cmp       WIDTH, #2
+    blt       100f
+    tst       OUT, #(1 << (dst_bpp_shift + 1))
+    beq       100f
+    bilinear_process_two_pixels
+    sub       WIDTH, WIDTH, #2
+100:
+.if pixblock_size == 8
+    cmp       WIDTH, #4
+    blt       100f
+    tst       OUT, #(1 << (dst_bpp_shift + 2))
+    beq       100f
+    bilinear_process_four_pixels
+    sub       WIDTH, WIDTH, #4
+100:
+.endif
+    subs      WIDTH, WIDTH, #pixblock_size
+    blt       100f
+    asr       PF_OFFS, PF_OFFS, #(16 - src_bpp_shift)
+    bilinear_process_pixblock_head
+    subs      WIDTH, WIDTH, #pixblock_size
+    blt       500f
+0:
+    bilinear_process_pixblock_tail_head
+    subs      WIDTH, WIDTH, #pixblock_size
+    bge       0b
+500:
+    bilinear_process_pixblock_tail
+100:
+.if pixblock_size == 8
+    tst       WIDTH, #4
+    beq       200f
+    bilinear_process_four_pixels
+200:
+.endif
+    /* handle the remaining trailing pixels */
+    tst       WIDTH, #2
+    beq       200f
+    bilinear_process_two_pixels
+200:
+    tst       WIDTH, #1
+    beq       300f
+    bilinear_process_last_pixel
+300:
+
+.if ((flags) & BILINEAR_FLAG_USE_MASK) == 0
+    sub       x29, x29, 64
+    ld1       {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32
+    ld1       {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32
+    ldp       x10, x11, [x29, -80]
+    ldp       x12, x13, [x29, -96]
+    ldp       x14, x15, [x29, -112]
+    mov       sp, x29
+    ldp       x29, x30, [sp], 16
+.else
+    sub       x29, x29, 64
+    ld1       {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32
+    ld1       {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32
+    ldp       x10, x11, [x29, -80]
+    ldp       x12, x13, [x29, -96]
+    ldp       x14, x15, [x29, -112]
+    ldr       x8, [x29, -120]
+    mov       sp, x29
+    ldp       x29, x30, [sp], 16
+.endif
+    ret
+
+    .unreq    OUT
+    .unreq    TOP
+    .unreq    WT
+    .unreq    WWT
+    .unreq    WB
+    .unreq    WWB
+    .unreq    X
+    .unreq    UX
+    .unreq    WIDTH
+    .unreq    TMP1
+    .unreq    WTMP1
+    .unreq    TMP2
+    .unreq    PF_OFFS
+    .unreq    TMP3
+    .unreq    TMP4
+    .unreq    STRIDE
+.if ((flags) & BILINEAR_FLAG_USE_MASK) != 0
+    .unreq    MASK
+.endif
+
+.endfunc
+
+.endm
+
+/* src_8888_8_8888 */
+.macro bilinear_src_8888_8_8888_process_last_pixel
+    bilinear_interpolate_last_pixel 8888, 8, 8888, src
+.endm
+
+.macro bilinear_src_8888_8_8888_process_two_pixels
+    bilinear_interpolate_two_pixels 8888, 8, 8888, src
+.endm
+
+.macro bilinear_src_8888_8_8888_process_four_pixels
+    bilinear_interpolate_four_pixels 8888, 8, 8888, src
+.endm
+
+.macro bilinear_src_8888_8_8888_process_pixblock_head
+    bilinear_src_8888_8_8888_process_four_pixels
+.endm
+
+.macro bilinear_src_8888_8_8888_process_pixblock_tail
+.endm
+
+.macro bilinear_src_8888_8_8888_process_pixblock_tail_head
+    bilinear_src_8888_8_8888_process_pixblock_tail
+    bilinear_src_8888_8_8888_process_pixblock_head
+.endm
+
+/* src_8888_8_0565 */
+.macro bilinear_src_8888_8_0565_process_last_pixel
+    bilinear_interpolate_last_pixel 8888, 8, 0565, src
+.endm
+
+.macro bilinear_src_8888_8_0565_process_two_pixels
+    bilinear_interpolate_two_pixels 8888, 8, 0565, src
+.endm
+
+.macro bilinear_src_8888_8_0565_process_four_pixels
+    bilinear_interpolate_four_pixels 8888, 8, 0565, src
+.endm
+
+.macro bilinear_src_8888_8_0565_process_pixblock_head
+    bilinear_src_8888_8_0565_process_four_pixels
+.endm
+
+.macro bilinear_src_8888_8_0565_process_pixblock_tail
+.endm
+
+.macro bilinear_src_8888_8_0565_process_pixblock_tail_head
+    bilinear_src_8888_8_0565_process_pixblock_tail
+    bilinear_src_8888_8_0565_process_pixblock_head
+.endm
+
+/* src_0565_8_x888 */
+.macro bilinear_src_0565_8_x888_process_last_pixel
+    bilinear_interpolate_last_pixel 0565, 8, 8888, src
+.endm
+
+.macro bilinear_src_0565_8_x888_process_two_pixels
+    bilinear_interpolate_two_pixels 0565, 8, 8888, src
+.endm
+
+.macro bilinear_src_0565_8_x888_process_four_pixels
+    bilinear_interpolate_four_pixels 0565, 8, 8888, src
+.endm
+
+.macro bilinear_src_0565_8_x888_process_pixblock_head
+    bilinear_src_0565_8_x888_process_four_pixels
+.endm
+
+.macro bilinear_src_0565_8_x888_process_pixblock_tail
+.endm
+
+.macro bilinear_src_0565_8_x888_process_pixblock_tail_head
+    bilinear_src_0565_8_x888_process_pixblock_tail
+    bilinear_src_0565_8_x888_process_pixblock_head
+.endm
+
+/* src_0565_8_0565 */
+.macro bilinear_src_0565_8_0565_process_last_pixel
+    bilinear_interpolate_last_pixel 0565, 8, 0565, src
+.endm
+
+.macro bilinear_src_0565_8_0565_process_two_pixels
+    bilinear_interpolate_two_pixels 0565, 8, 0565, src
+.endm
+
+.macro bilinear_src_0565_8_0565_process_four_pixels
+    bilinear_interpolate_four_pixels 0565, 8, 0565, src
+.endm
+
+.macro bilinear_src_0565_8_0565_process_pixblock_head
+    bilinear_src_0565_8_0565_process_four_pixels
+.endm
+
+.macro bilinear_src_0565_8_0565_process_pixblock_tail
+.endm
+
+.macro bilinear_src_0565_8_0565_process_pixblock_tail_head
+    bilinear_src_0565_8_0565_process_pixblock_tail
+    bilinear_src_0565_8_0565_process_pixblock_head
+.endm
+
+/* over_8888_8888 */
+.macro bilinear_over_8888_8888_process_last_pixel
+    bilinear_interpolate_last_pixel 8888, x, 8888, over
+.endm
+
+.macro bilinear_over_8888_8888_process_two_pixels
+    bilinear_interpolate_two_pixels 8888, x, 8888, over
+.endm
+
+.macro bilinear_over_8888_8888_process_four_pixels
+    bilinear_interpolate_four_pixels 8888, x, 8888, over
+.endm
+
+.macro bilinear_over_8888_8888_process_pixblock_head
+    asr         WTMP1, X, #16
+    add         X, X, UX
+    add         TMP1, TOP, TMP1, lsl #2
+    asr         WTMP2, X, #16
+    add         X, X, UX
+    add         TMP2, TOP, TMP2, lsl #2
+
+    ld1         {v22.2s}, [TMP1], STRIDE
+    ld1         {v23.2s}, [TMP1]
+    asr         WTMP3, X, #16
+    add         X, X, UX
+    add         TMP3, TOP, TMP3, lsl #2
+    umull       v8.8h, v22.8b, v28.8b
+    umlal       v8.8h, v23.8b, v29.8b
+
+    ld1         {v22.2s}, [TMP2], STRIDE
+    ld1         {v23.2s}, [TMP2]
+    asr         WTMP4, X, #16
+    add         X, X, UX
+    add         TMP4, TOP, TMP4, lsl #2
+    umull       v9.8h, v22.8b, v28.8b
+    umlal       v9.8h, v23.8b, v29.8b
+
+    ld1         {v22.2s}, [TMP3], STRIDE
+    ld1         {v23.2s}, [TMP3]
+    umull       v10.8h, v22.8b, v28.8b
+    umlal       v10.8h, v23.8b, v29.8b
+
+    ushll       v0.4s, v8.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl       v0.4s, v8.4h, v15.h[0]
+    umlal2      v0.4s, v8.8h, v15.h[0]
+
+    prfm        PREFETCH_MODE, [TMP4, PF_OFFS]
+    ld1         {v16.2s}, [TMP4], STRIDE
+    ld1         {v17.2s}, [TMP4]
+    prfm        PREFETCH_MODE, [TMP4, PF_OFFS]
+    umull       v11.8h, v16.8b, v28.8b
+    umlal       v11.8h, v17.8b, v29.8b
+
+    ushll       v1.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl       v1.4s, v9.4h, v15.h[4]
+    umlal2      v1.4s, v9.8h, v15.h[4]
+    ushr        v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+    add         v12.8h, v12.8h, v13.8h
+.endm
+
+.macro bilinear_over_8888_8888_process_pixblock_tail
+    ushll       v2.4s, v10.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl       v2.4s, v10.4h, v15.h[0]
+    umlal2      v2.4s, v10.8h, v15.h[0]
+    ushll       v3.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl       v3.4s, v11.4h, v15.h[4]
+    umlal2      v3.4s, v11.8h, v15.h[4]
+    shrn        v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    shrn2       v0.8h, v1.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    shrn        v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    ushr        v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+    shrn2       v2.8h, v3.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    xtn         v6.8b, v0.8h
+    xtn         v7.8b, v2.8h
+    ld1         {v2.2s, v3.2s}, [OUT]
+    prfm        PREFETCH_MODE, [OUT, #(prefetch_offset * 4)]
+    vuzp        v6.8b, v7.8b
+    vuzp        v2.8b, v3.8b
+    vuzp        v6.8b, v7.8b
+    vuzp        v2.8b, v3.8b
+    dup         v4.2s, v7.s[1]
+    mvn         v4.8b, v4.8b
+    umull       v11.8h, v2.8b, v4.8b
+    umull       v2.8h,  v3.8b, v4.8b
+    urshr       v1.8h, v11.8h, #8
+    urshr       v10.8h, v2.8h, #8
+    raddhn      v3.8b, v10.8h, v2.8h
+    raddhn      v2.8b, v1.8h, v11.8h
+    uqadd       v6.8b, v2.8b,  v6.8b
+    uqadd       v7.8b, v3.8b,  v7.8b
+    vuzp        v6.8b, v7.8b
+    vuzp        v6.8b, v7.8b
+    add         v12.8h, v12.8h, v13.8h
+    st1         {v6.2s, v7.2s}, [OUT], #16
+.endm
+
+.macro bilinear_over_8888_8888_process_pixblock_tail_head
+                                            ushll       v2.4s, v10.4h, #BILINEAR_INTERPOLATION_BITS
+    asr         WTMP1, X, #16
+    add         X, X, UX
+    add         TMP1, TOP, TMP1, lsl #2
+                                            umlsl       v2.4s, v10.4h, v15.h[0]
+    asr         WTMP2, X, #16
+    add         X, X, UX
+    add         TMP2, TOP, TMP2, lsl #2
+                                            umlal2      v2.4s, v10.8h, v15.h[0]
+                                            ushll       v3.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS
+    ld1         {v20.2s}, [TMP1], STRIDE
+                                            umlsl       v3.4s, v11.4h, v15.h[4]
+                                            umlal2      v3.4s, v11.8h, v15.h[4]
+    ld1         {v21.2s}, [TMP1]
+    umull       v8.8h, v20.8b, v28.8b
+    umlal       v8.8h, v21.8b, v29.8b
+                                            shrn        v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+                                            shrn2       v0.8h, v1.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+                                            shrn        v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+                                            ushr        v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+    ld1         {v22.2s}, [TMP2], STRIDE
+                                            shrn2       v2.8h, v3.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+                                            xtn         v6.8b, v0.8h
+    ld1         {v23.2s}, [TMP2]
+    umull       v9.8h, v22.8b, v28.8b
+    asr         WTMP3, X, #16
+    add         X, X, UX
+    add         TMP3, TOP, TMP3, lsl #2
+    asr         WTMP4, X, #16
+    add         X, X, UX
+    add         TMP4, TOP, TMP4, lsl #2
+    umlal       v9.8h, v23.8b, v29.8b
+                                            xtn         v7.8b, v2.8h
+                                            ld1         {v2.2s, v3.2s}, [OUT]
+                                            prfm        PREFETCH_MODE, [OUT, PF_OFFS]
+    ld1         {v22.2s}, [TMP3], STRIDE
+                                            vuzp        v6.8b, v7.8b
+                                            vuzp        v2.8b, v3.8b
+                                            vuzp        v6.8b, v7.8b
+                                            vuzp        v2.8b, v3.8b
+                                            dup         v4.2s, v7.s[1]
+    ld1         {v23.2s}, [TMP3]
+                                            mvn         v4.8b, v4.8b
+    umull       v10.8h, v22.8b, v28.8b
+    umlal       v10.8h, v23.8b, v29.8b
+                                            umull       v11.8h, v2.8b, v4.8b
+                                            umull        v2.8h, v3.8b, v4.8b
+    ushll       v0.4s, v8.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl       v0.4s, v8.4h, v15.h[0]
+                                            urshr       v1.8h, v11.8h, #8
+    umlal2      v0.4s, v8.8h, v15.h[0]
+                                            urshr       v8.8h, v2.8h, #8
+                                            raddhn      v3.8b, v8.8h, v2.8h
+                                            raddhn      v2.8b, v1.8h, v11.8h
+    prfm        PREFETCH_MODE, [TMP4, PF_OFFS]
+    ld1         {v16.2s}, [TMP4], STRIDE
+                                            uqadd       v6.8b, v2.8b, v6.8b
+                                            uqadd       v7.8b, v3.8b, v7.8b
+    ld1         {v17.2s}, [TMP4]
+    prfm        PREFETCH_MODE, [TMP4, PF_OFFS]
+    umull       v11.8h, v16.8b, v28.8b
+    umlal       v11.8h, v17.8b, v29.8b
+                                            vuzp        v6.8b, v7.8b
+    ushll       v1.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS
+                                            vuzp        v6.8b, v7.8b
+    umlsl       v1.4s, v9.4h, v15.h[4]
+                                            add         v12.8h, v12.8h, v13.8h
+    umlal2      v1.4s, v9.8h, v15.h[4]
+    ushr        v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+    add         v12.8h, v12.8h, v13.8h
+                                            st1         {v6.2s, v7.2s}, [OUT], #16
+.endm
+
+/* over_8888_8_8888 */
+.macro bilinear_over_8888_8_8888_process_last_pixel
+    bilinear_interpolate_last_pixel 8888, 8, 8888, over
+.endm
+
+.macro bilinear_over_8888_8_8888_process_two_pixels
+    bilinear_interpolate_two_pixels 8888, 8, 8888, over
+.endm
+
+.macro bilinear_over_8888_8_8888_process_four_pixels
+    bilinear_interpolate_two_pixels 8888, 8, 8888, over
+    bilinear_interpolate_two_pixels 8888, 8, 8888, over
+.endm
+
+.macro bilinear_over_8888_8_8888_process_pixblock_head
+    bilinear_over_8888_8_8888_process_four_pixels
+.endm
+
+.macro bilinear_over_8888_8_8888_process_pixblock_tail
+.endm
+
+.macro bilinear_over_8888_8_8888_process_pixblock_tail_head
+     bilinear_over_8888_8_8888_process_pixblock_tail
+     bilinear_over_8888_8_8888_process_pixblock_head
+.endm
+
+/* add_8888_8888 */
+.macro bilinear_add_8888_8888_process_last_pixel
+    bilinear_interpolate_last_pixel 8888, x, 8888, add
+.endm
+
+.macro bilinear_add_8888_8888_process_two_pixels
+    bilinear_interpolate_two_pixels 8888, x, 8888, add
+.endm
+
+.macro bilinear_add_8888_8888_process_four_pixels
+    bilinear_interpolate_two_pixels 8888, x, 8888, add
+    bilinear_interpolate_two_pixels 8888, x, 8888, add
+.endm
+
+.macro bilinear_add_8888_8888_process_pixblock_head
+    bilinear_add_8888_8888_process_four_pixels
+.endm
+
+.macro bilinear_add_8888_8888_process_pixblock_tail
+.endm
+
+.macro bilinear_add_8888_8888_process_pixblock_tail_head
+    bilinear_add_8888_8888_process_pixblock_tail
+    bilinear_add_8888_8888_process_pixblock_head
+.endm
+
+/* add_8888_8_8888 */
+.macro bilinear_add_8888_8_8888_process_last_pixel
+    bilinear_interpolate_last_pixel 8888, 8, 8888, add
+.endm
+
+.macro bilinear_add_8888_8_8888_process_two_pixels
+    bilinear_interpolate_two_pixels 8888, 8, 8888, add
+.endm
+
+.macro bilinear_add_8888_8_8888_process_four_pixels
+    bilinear_interpolate_four_pixels 8888, 8, 8888, add
+.endm
+
+.macro bilinear_add_8888_8_8888_process_pixblock_head
+    bilinear_add_8888_8_8888_process_four_pixels
+.endm
+
+.macro bilinear_add_8888_8_8888_process_pixblock_tail
+.endm
+
+.macro bilinear_add_8888_8_8888_process_pixblock_tail_head
+    bilinear_add_8888_8_8888_process_pixblock_tail
+    bilinear_add_8888_8_8888_process_pixblock_head
+.endm
+
+
+/* Bilinear scanline functions */
+generate_bilinear_scanline_func \
+    pixman_scaled_bilinear_scanline_8888_8_8888_SRC_asm_neon, \
+    8888, 8888, 2, 2, \
+    bilinear_src_8888_8_8888_process_last_pixel, \
+    bilinear_src_8888_8_8888_process_two_pixels, \
+    bilinear_src_8888_8_8888_process_four_pixels, \
+    bilinear_src_8888_8_8888_process_pixblock_head, \
+    bilinear_src_8888_8_8888_process_pixblock_tail, \
+    bilinear_src_8888_8_8888_process_pixblock_tail_head, \
+    4, 28, BILINEAR_FLAG_USE_MASK
+
+generate_bilinear_scanline_func \
+    pixman_scaled_bilinear_scanline_8888_8_0565_SRC_asm_neon, \
+    8888, 0565, 2, 1, \
+    bilinear_src_8888_8_0565_process_last_pixel, \
+    bilinear_src_8888_8_0565_process_two_pixels, \
+    bilinear_src_8888_8_0565_process_four_pixels, \
+    bilinear_src_8888_8_0565_process_pixblock_head, \
+    bilinear_src_8888_8_0565_process_pixblock_tail, \
+    bilinear_src_8888_8_0565_process_pixblock_tail_head, \
+    4, 28, BILINEAR_FLAG_USE_MASK
+
+generate_bilinear_scanline_func \
+    pixman_scaled_bilinear_scanline_0565_8_x888_SRC_asm_neon, \
+    0565, 8888, 1, 2, \
+    bilinear_src_0565_8_x888_process_last_pixel, \
+    bilinear_src_0565_8_x888_process_two_pixels, \
+    bilinear_src_0565_8_x888_process_four_pixels, \
+    bilinear_src_0565_8_x888_process_pixblock_head, \
+    bilinear_src_0565_8_x888_process_pixblock_tail, \
+    bilinear_src_0565_8_x888_process_pixblock_tail_head, \
+    4, 28, BILINEAR_FLAG_USE_MASK
+
+generate_bilinear_scanline_func \
+    pixman_scaled_bilinear_scanline_0565_8_0565_SRC_asm_neon, \
+    0565, 0565, 1, 1, \
+    bilinear_src_0565_8_0565_process_last_pixel, \
+    bilinear_src_0565_8_0565_process_two_pixels, \
+    bilinear_src_0565_8_0565_process_four_pixels, \
+    bilinear_src_0565_8_0565_process_pixblock_head, \
+    bilinear_src_0565_8_0565_process_pixblock_tail, \
+    bilinear_src_0565_8_0565_process_pixblock_tail_head, \
+    4, 28, BILINEAR_FLAG_USE_MASK
+
+generate_bilinear_scanline_func \
+    pixman_scaled_bilinear_scanline_8888_8888_OVER_asm_neon, \
+    8888, 8888, 2, 2, \
+    bilinear_over_8888_8888_process_last_pixel, \
+    bilinear_over_8888_8888_process_two_pixels, \
+    bilinear_over_8888_8888_process_four_pixels, \
+    bilinear_over_8888_8888_process_pixblock_head, \
+    bilinear_over_8888_8888_process_pixblock_tail, \
+    bilinear_over_8888_8888_process_pixblock_tail_head, \
+    4, 28, 0
+
+generate_bilinear_scanline_func \
+    pixman_scaled_bilinear_scanline_8888_8_8888_OVER_asm_neon, \
+    8888, 8888, 2, 2, \
+    bilinear_over_8888_8_8888_process_last_pixel, \
+    bilinear_over_8888_8_8888_process_two_pixels, \
+    bilinear_over_8888_8_8888_process_four_pixels, \
+    bilinear_over_8888_8_8888_process_pixblock_head, \
+    bilinear_over_8888_8_8888_process_pixblock_tail, \
+    bilinear_over_8888_8_8888_process_pixblock_tail_head, \
+    4, 28, BILINEAR_FLAG_USE_MASK
+
+generate_bilinear_scanline_func \
+    pixman_scaled_bilinear_scanline_8888_8888_ADD_asm_neon, \
+    8888, 8888, 2, 2, \
+    bilinear_add_8888_8888_process_last_pixel, \
+    bilinear_add_8888_8888_process_two_pixels, \
+    bilinear_add_8888_8888_process_four_pixels, \
+    bilinear_add_8888_8888_process_pixblock_head, \
+    bilinear_add_8888_8888_process_pixblock_tail, \
+    bilinear_add_8888_8888_process_pixblock_tail_head, \
+    4, 28, 0
+
+generate_bilinear_scanline_func \
+    pixman_scaled_bilinear_scanline_8888_8_8888_ADD_asm_neon, \
+    8888, 8888, 2, 2, \
+    bilinear_add_8888_8_8888_process_last_pixel, \
+    bilinear_add_8888_8_8888_process_two_pixels, \
+    bilinear_add_8888_8_8888_process_four_pixels, \
+    bilinear_add_8888_8_8888_process_pixblock_head, \
+    bilinear_add_8888_8_8888_process_pixblock_tail, \
+    bilinear_add_8888_8_8888_process_pixblock_tail_head, \
+    4, 28, BILINEAR_FLAG_USE_MASK

Added: trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arma64-neon-asm.S
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arma64-neon-asm.S	                        (rev 0)
+++ trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arma64-neon-asm.S	2022-10-18 21:13:29 UTC (rev 64747)
@@ -0,0 +1,3704 @@
+/*
+ * Copyright © 2009 Nokia Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author:  Siarhei Siamashka (siarhei.siamashka at nokia.com)
+ */
+
+/*
+ * This file contains implementations of NEON optimized pixel processing
+ * functions. There is no full and detailed tutorial, but some functions
+ * (those which expose some new or interesting features) are
+ * extensively commented and can be used as examples.
+ *
+ * You may want to have a look at the comments for the following functions:
+ *  - pixman_composite_over_8888_0565_asm_neon
+ *  - pixman_composite_over_n_8_0565_asm_neon
+ */
+
+/* Prevent the stack from becoming executable for no reason... */
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+.text
+.arch armv8-a
+
+.altmacro
+.p2align 2
+
+#include "pixman-private.h"
+#include "pixman-arm-asm.h"
+#include "pixman-arma64-neon-asm.h"
+
+/* Global configuration options and preferences */
+
+/*
+ * The code can optionally make use of unaligned memory accesses to improve
+ * performance of handling leading/trailing pixels for each scanline.
+ * The configuration variable RESPECT_STRICT_ALIGNMENT can be set to 0, for
+ * example on Linux, if unaligned memory accesses are not configured to
+ * generate exceptions.
+ */
+.set RESPECT_STRICT_ALIGNMENT, 1
+
+/*
+ * Set default prefetch type. There is a choice between the following options:
+ *
+ * PREFETCH_TYPE_NONE (may be useful for ARM cores where PLD is set to work
+ * as a NOP to work around some HW bugs, or for whatever other reason)
+ *
+ * PREFETCH_TYPE_SIMPLE (may be useful for simple single-issue ARM cores where
+ * advanced prefetch introduces heavy overhead)
+ *
+ * PREFETCH_TYPE_ADVANCED (useful for superscalar cores such as ARM Cortex-A8
+ * which can run ARM and NEON instructions simultaneously so that extra ARM
+ * instructions do not add (many) extra cycles, but improve prefetch efficiency)
+ *
+ * Note: some types of function can't support advanced prefetch and fall back
+ *       to the simple one (those which handle 24bpp pixels)
+ */
+.set PREFETCH_TYPE_DEFAULT, PREFETCH_TYPE_ADVANCED
+
+/* Prefetch distance in pixels for simple prefetch */
+.set PREFETCH_DISTANCE_SIMPLE, 64
+
+/*
+ * Implementation of pixman_composite_over_8888_0565_asm_neon
+ *
+ * This function takes an a8r8g8b8 source buffer and an r5g6b5 destination
+ * buffer and performs the OVER compositing operation. The function
+ * fast_composite_over_8888_0565 from pixman-fast-path.c does the same in C
+ * and can be used as a reference.
+ *
+ * First we need to have some NEON assembly code which can do the actual
+ * operation on the pixels and provide it to the template macro.
+ *
+ * The template macro quite conveniently takes care of emitting all the necessary
+ * code for memory reading and writing (including quite tricky cases of
+ * handling unaligned leading/trailing pixels), so we only need to deal with
+ * the data in NEON registers.
+ *
+ * The recommended general NEON register allocation is the following:
+ * v0,  v1,  v2,  v3  - contain loaded source pixel data
+ * v4,  v5,  v6,  v7  - contain loaded destination pixels (if they are needed)
+ * v24, v25, v26, v27 - contain loaded mask pixel data (if a mask is used)
+ * v28, v29, v30, v31 - place for storing the result (destination pixels)
+ *
+ * As can be seen above, four 64-bit NEON registers are used for keeping
+ * intermediate pixel data and up to 8 pixels can be processed in one step
+ * for 32bpp formats (16 pixels for 16bpp, 32 pixels for 8bpp).
+ *
+ * This particular function uses the following register allocation:
+ * v0,  v1,  v2,  v3  - contain loaded source pixel data
+ * v4,  v5            - contain loaded destination pixels (they are needed)
+ * v28, v29           - place for storing the result (destination pixels)
+ */
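+
+/*
+ * For orientation, a simplified scalar C sketch of the per-pixel math
+ * (the real C reference is fast_composite_over_8888_0565 in
+ * pixman-fast-path.c; the helper names here are purely illustrative):
+ *
+ *   #include <stdint.h>
+ *
+ *   static uint8_t
+ *   mul_div_255 (unsigned a, unsigned b) // what umull + urshr + raddhn do
+ *   {
+ *       unsigned t = a * b + 0x80;
+ *       return (uint8_t) ((t + (t >> 8)) >> 8);
+ *   }
+ *
+ *   static uint16_t
+ *   over_8888_0565 (uint32_t src, uint16_t dst)
+ *   {
+ *       unsigned ia = 255 - (src >> 24);  // inverted source alpha (mvn)
+ *       // expand the r5g6b5 destination to 8-bit channels
+ *       unsigned dr = (dst >> 8) & 0xf8;  dr |= dr >> 5;
+ *       unsigned dg = (dst >> 3) & 0xfc;  dg |= dg >> 6;
+ *       unsigned db = (dst << 3) & 0xf8;  db |= db >> 5;
+ *       // OVER (premultiplied): src + dst * (255 - alpha) / 255,
+ *       // saturating as uqadd does
+ *       unsigned r = ((src >> 16) & 0xff) + mul_div_255 (dr, ia);
+ *       unsigned g = ((src >>  8) & 0xff) + mul_div_255 (dg, ia);
+ *       unsigned b = ( src        & 0xff) + mul_div_255 (db, ia);
+ *       if (r > 255) r = 255;
+ *       if (g > 255) g = 255;
+ *       if (b > 255) b = 255;
+ *       return (uint16_t) (((r & 0xf8) << 8) | ((g & 0xfc) << 3) | (b >> 3));
+ *   }
+ */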
+
+/*
+ * Step one. We need to have some code to do the arithmetic on the pixel data.
+ * This is implemented as a pair of macros: '*_head' and '*_tail'. When used
+ * back-to-back, they take pixel data from {v0, v1, v2, v3} and {v4, v5},
+ * perform all the needed calculations and write the result to {v28, v29}.
+ * The rationale for having two macros and not just one will be explained
+ * later. In practice, any single monolithic function which does the work can
+ * be split into two parts in any arbitrary way without affecting correctness.
+ *
+ * There is one special trick here too. Common template macro can optionally
+ * make our life a bit easier by doing R, G, B, A color components
+ * deinterleaving for 32bpp pixel formats (and this feature is used in
+ * 'pixman_composite_over_8888_0565_asm_neon' function). So it means that
+ * instead of having 8 packed pixels in {v0, v1, v2, v3} registers, we
+ * actually use v0 register for blue channel (a vector of eight 8-bit
+ * values), v1 register for green, v2 for red and v3 for alpha. This
+ * simple conversion can be also done with a few NEON instructions:
+ *
+ * Packed to planar conversion: // vuzp8 is a wrapper macro
+ *  vuzp8 v0, v1
+ *  vuzp8 v2, v3
+ *  vuzp8 v1, v3
+ *  vuzp8 v0, v2
+ *
+ * Planar to packed conversion: // vzip8 is a wrapper macro
+ *  vzip8 v0, v2
+ *  vzip8 v1, v3
+ *  vzip8 v2, v3
+ *  vzip8 v0, v1
+ *
+ * But pixels can be loaded directly in planar format using the LD4 / b NEON
+ * instruction. It is 1 cycle slower than LD1 / s, so this is not always
+ * desirable; that's why deinterleaving is optional.
+ *
+ * But anyway, here is the code:
+ */
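+
+/*
+ * In scalar C terms, the planar layout for eight a8r8g8b8 pixels simply
+ * means the following (a hedged sketch; names are illustrative):
+ *
+ *   #include <stdint.h>
+ *
+ *   static void
+ *   deinterleave_8888 (const uint32_t p[8], uint8_t b[8], uint8_t g[8],
+ *                      uint8_t r[8], uint8_t a[8])
+ *   {
+ *       for (int i = 0; i < 8; i++)
+ *       {
+ *           b[i] = (uint8_t) (p[i]      );   // -> v0
+ *           g[i] = (uint8_t) (p[i] >>  8);   // -> v1
+ *           r[i] = (uint8_t) (p[i] >> 16);   // -> v2
+ *           a[i] = (uint8_t) (p[i] >> 24);   // -> v3
+ *       }
+ *   }
+ */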
+
+.macro pixman_composite_over_8888_0565_process_pixblock_head
+    /* convert 8 r5g6b5 pixel data from {v4} to planar 8-bit format
+       and put data into v6 - red, v7 - green, v30 - blue */
+    mov         v4.d[1], v5.d[0]
+    shrn        v6.8b, v4.8h, #8
+    shrn        v7.8b, v4.8h, #3
+    sli         v4.8h, v4.8h, #5
+    sri         v6.8b, v6.8b, #5
+    mvn         v3.8b, v3.8b      /* invert source alpha */
+    sri         v7.8b, v7.8b, #6
+    shrn        v30.8b, v4.8h, #2
+    /* now do alpha blending, storing results in 8-bit planar format
+       into v20 - red, v23 - green, v22 - blue */
+    umull       v10.8h, v3.8b, v6.8b
+    umull       v11.8h, v3.8b, v7.8b
+    umull       v12.8h, v3.8b, v30.8b
+    urshr       v17.8h, v10.8h, #8
+    urshr       v18.8h, v11.8h, #8
+    urshr       v19.8h, v12.8h, #8
+    raddhn      v20.8b, v10.8h, v17.8h
+    raddhn      v23.8b, v11.8h, v18.8h
+    raddhn      v22.8b, v12.8h, v19.8h
+.endm
+
+.macro pixman_composite_over_8888_0565_process_pixblock_tail
+    /* ... continue alpha blending */
+    uqadd       v17.8b, v2.8b, v20.8b
+    uqadd       v18.8b, v0.8b, v22.8b
+    uqadd       v19.8b, v1.8b, v23.8b
+    /* convert the result to r5g6b5 and store it into {v14} */
+    ushll       v14.8h, v17.8b, #7
+    sli         v14.8h, v14.8h, #1
+    ushll       v8.8h, v19.8b, #7
+    sli         v8.8h, v8.8h, #1
+    ushll       v9.8h, v18.8b, #7
+    sli         v9.8h, v9.8h, #1
+    sri         v14.8h, v8.8h, #5
+    sri         v14.8h, v9.8h, #11
+    mov         v28.d[0], v14.d[0]
+    mov         v29.d[0], v14.d[1]
+.endm
+
+/*
+ * OK, now we have almost everything that we need. Using the above two
+ * macros, the work can be done. But now we want to optimize
+ * it a bit. The ARM Cortex-A8 is an in-order core and benefits
+ * a lot from good code scheduling and software pipelining.
+ *
+ * Let's construct some code, which will run in the core main loop.
+ * Some pseudo-code of the main loop will look like this:
+ *   head
+ *   while (...) {
+ *     tail
+ *     head
+ *   }
+ *   tail
+ *
+ * It may look a bit weird, but this setup allows hiding instruction
+ * latencies better and also utilizing the dual-issue capability more
+ * efficiently (pairing load-store and ALU instructions).
+ *
+ * So what we need now is a '*_tail_head' macro, which will be used
+ * in the core main loop. A trivial straightforward implementation
+ * of this macro would look like this:
+ *
+ *   pixman_composite_over_8888_0565_process_pixblock_tail
+ *   st1         {v28.4h, v29.4h}, [DST_W], #32
+ *   ld1         {v4.4h, v5.4h}, [DST_R], #16
+ *   ld4         {v0.2s, v1.2s, v2.2s, v3.2s}, [SRC], #32
+ *   pixman_composite_over_8888_0565_process_pixblock_head
+ *   cache_preload 8, 8
+ *
+ * Now it has also gained some LD/ST instructions. We simply can't move from
+ * processing one block of pixels to the next one with just arithmetic.
+ * The previously processed data needs to be written to memory and new
+ * data needs to be fetched. Fortunately, this main loop does not deal
+ * with partial leading/trailing pixels and can load/store a full block
+ * of pixels in bulk. Additionally, the destination buffer is already
+ * 16-byte aligned here (which is good for performance).
+ *
+ * The new things here are the DST_R, DST_W, SRC and MASK identifiers. These
+ * are aliases for ARM registers which are used as pointers for
+ * accessing data. We maintain separate pointers for reading and writing
+ * destination buffer (DST_R and DST_W).
+ *
+ * Another new thing is the 'cache_preload' macro. It is used for prefetching
+ * data into the CPU L2 cache and improves performance when dealing with
+ * images which are far larger than the cache size. It takes one argument
+ * (actually two, but they need to be the same here) - the number of pixels
+ * in a block. Looking into 'pixman-arma64-neon-asm.h' can provide some
+ * details about this macro. Moreover, if good performance is needed
+ * the code from this macro needs to be copied into the '*_tail_head' macro
+ * and mixed with the rest of the code for optimal instruction scheduling.
+ * We are actually doing that below.
+ *
+ * Now, after all the explanations, here is the optimized code.
+ * Different instruction streams (originating from the '*_head', '*_tail'
+ * and 'cache_preload' macros) use different indentation levels for
+ * better readability. Actually taking the code from one of these
+ * indentation levels and ignoring a few LD/ST instructions would
+ * result in exactly the code from '*_head', '*_tail' or 'cache_preload'
+ * macro!
+ */
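+
+/*
+ * As a hedged C model of what the interleaved PF (prefetch) instruction
+ * stream in the optimized macro below amounts to, ignoring the PF_CTL
+ * block counter (names follow the register aliases but are illustrative;
+ * the same next-line touch is also done for PF_DST when the destination
+ * is read):
+ *
+ *   #include <stdint.h>
+ *   #define prefetch(p) __builtin_prefetch ((const void *) (p))
+ *   #define touch(p)    (void) *(volatile const char *) (p)
+ *
+ *   static void
+ *   pf_step (uintptr_t *pf_src, uintptr_t pf_dst, int *pf_x, int orig_w,
+ *            int src_bpp_shift, int dst_bpp_shift, intptr_t src_stride,
+ *            int pixblock_size)
+ *   {
+ *       *pf_x += pixblock_size;              // PF add PF_X, PF_X, #8
+ *       prefetch (*pf_src + ((uintptr_t) *pf_x << src_bpp_shift));
+ *       prefetch (pf_dst  + ((uintptr_t) *pf_x << dst_bpp_shift));
+ *       if (*pf_x > orig_w)                  // PF cmp PF_X, ORIG_W
+ *       {
+ *           *pf_x -= orig_w;                 // PF sub PF_X, PF_X, ORIG_W
+ *           // touch one byte on the next scanline so it is pulled in
+ *           // early (PF ldrsb [PF_SRC, SRC_STRIDE << src_bpp_shift])
+ *           touch (*pf_src + (src_stride << src_bpp_shift));
+ *           (*pf_src)++;                     // PF add PF_SRC, PF_SRC, #1
+ *       }
+ *   }
+ */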
+
+#if 1
+
+.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
+        uqadd       v17.8b, v2.8b, v20.8b
+    ld1         {v4.4h, v5.4h}, [DST_R], #16
+    mov         v4.d[1], v5.d[0]
+        uqadd       v18.8b, v0.8b, v22.8b
+        uqadd       v19.8b, v1.8b, v23.8b
+    shrn        v6.8b, v4.8h, #8
+    fetch_src_pixblock
+    shrn        v7.8b, v4.8h, #3
+    sli         v4.8h, v4.8h, #5
+        ushll       v14.8h, v17.8b, #7
+        sli         v14.8h, v14.8h, #1
+                                    PF add PF_X, PF_X, #8
+        ushll       v8.8h, v19.8b, #7
+        sli         v8.8h, v8.8h,  #1
+                                    PF tst PF_CTL, #0xF
+    sri         v6.8b, v6.8b, #5
+                                    PF beq 10f
+                                    PF add PF_X, PF_X, #8
+10:
+    mvn         v3.8b, v3.8b
+                                    PF beq 10f
+                                    PF sub PF_CTL, PF_CTL, #1
+10:
+    sri         v7.8b, v7.8b, #6
+    shrn        v30.8b, v4.8h, #2
+    umull       v10.8h, v3.8b, v6.8b
+                                    PF lsl DUMMY, PF_X, #src_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_SRC, DUMMY]
+    umull       v11.8h, v3.8b, v7.8b
+    umull       v12.8h, v3.8b, v30.8b
+                                    PF lsl DUMMY, PF_X, #dst_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_DST, DUMMY]
+        sri         v14.8h, v8.8h, #5
+                                    PF cmp PF_X, ORIG_W
+        ushll       v9.8h, v18.8b, #7
+        sli         v9.8h, v9.8h, #1
+    urshr       v17.8h, v10.8h, #8
+                                    PF ble 10f
+                                    PF sub PF_X, PF_X, ORIG_W
+10:
+    urshr       v19.8h, v11.8h, #8
+    urshr       v18.8h, v12.8h, #8
+                                    PF ble 10f
+                                    PF subs PF_CTL, PF_CTL, #0x10
+10:
+        sri         v14.8h, v9.8h, #11
+        mov         v28.d[0], v14.d[0]
+        mov         v29.d[0], v14.d[1]
+                                    PF ble 10f
+                                    PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
+                                    PF ldrsb DUMMY, [PF_SRC, DUMMY]
+                                    PF add PF_SRC, PF_SRC, #1
+10:
+    raddhn      v20.8b, v10.8h, v17.8h
+    raddhn      v23.8b, v11.8h, v19.8h
+                                    PF ble 10f
+                                    PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift
+                                    PF ldrsb DUMMY, [PF_DST, DUMMY]
+                                    PF add PF_DST, PF_DST, #1
+10:
+    raddhn      v22.8b, v12.8h, v18.8h
+        st1         {v14.8h}, [DST_W], #16
+.endm
+
+#else
+
+/* If we did not care much about performance, we would just use this... */
+.macro pixman_composite_over_8888_0565_process_pixblock_tail_head
+    pixman_composite_over_8888_0565_process_pixblock_tail
+    st1         {v14.8h}, [DST_W], #16
+    ld1         {v4.4h, v5.4h}, [DST_R], #16
+    fetch_src_pixblock
+    pixman_composite_over_8888_0565_process_pixblock_head
+    cache_preload 8, 8
+.endm
+
+#endif
+
+/*
+ * And now the final part. We are using the 'generate_composite_function'
+ * macro to put all the stuff together. We specify the name of the function
+ * which we want to get, the number of bits per pixel for the source, mask and
+ * destination (0 if unused, like the mask in this case). Next come some bit
+ * flags:
+ *   FLAG_DST_READWRITE      - tells that the destination buffer is both read
+ *                             and written, for write-only buffer we would use
+ *                             FLAG_DST_WRITEONLY flag instead
+ *   FLAG_DEINTERLEAVE_32BPP - tells that we prefer to work with planar data
+ *                             and separate color channels for 32bpp format.
+ * The next things are:
+ *  - the number of pixels processed per iteration (8 in this case, because
+ *    that's the maximum that can fit into four 64-bit NEON registers).
+ *  - the prefetch distance, measured in pixel blocks. In this case it is
+ *    5 blocks of 8 pixels, i.e. 40 pixels, or up to 160 bytes. The optimal
+ *    prefetch distance can be selected by running some benchmarks.
+ *
+ * After that we specify some macros; these are 'default_init' and
+ * 'default_cleanup' here, which are empty (but it is possible to have custom
+ * init/cleanup macros to be able to save/restore some extra NEON registers
+ * like d8-d15 or do anything else), followed by
+ * 'pixman_composite_over_8888_0565_process_pixblock_head',
+ * 'pixman_composite_over_8888_0565_process_pixblock_tail' and
+ * 'pixman_composite_over_8888_0565_process_pixblock_tail_head'
+ * which we got implemented above.
+ *
+ * The last part is the NEON register allocation scheme.
+ */
+generate_composite_function \
+    pixman_composite_over_8888_0565_asm_neon, 32, 0, 16, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_over_8888_0565_process_pixblock_head, \
+    pixman_composite_over_8888_0565_process_pixblock_tail, \
+    pixman_composite_over_8888_0565_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    24  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_over_n_0565_process_pixblock_head
+    /* convert 8 r5g6b5 pixel data from {v4} to planar 8-bit format
+       and put data into v6 - red, v7 - green, v30 - blue */
+    mov         v4.d[1], v5.d[0]
+    shrn        v6.8b, v4.8h, #8
+    shrn        v7.8b, v4.8h, #3
+    sli         v4.8h, v4.8h, #5
+    sri         v6.8b, v6.8b, #5
+    sri         v7.8b, v7.8b, #6
+    shrn        v30.8b, v4.8h, #2
+    /* now do alpha blending, storing results in 8-bit planar format
+       into v20 - red, v23 - green, v22 - blue */
+    umull       v10.8h, v3.8b, v6.8b
+    umull       v11.8h, v3.8b, v7.8b
+    umull       v12.8h, v3.8b, v30.8b
+    urshr       v13.8h, v10.8h, #8
+    urshr       v14.8h, v11.8h, #8
+    urshr       v15.8h, v12.8h, #8
+    raddhn      v20.8b, v10.8h, v13.8h
+    raddhn      v23.8b, v11.8h, v14.8h
+    raddhn      v22.8b, v12.8h, v15.8h
+.endm
+
+.macro pixman_composite_over_n_0565_process_pixblock_tail
+    /* ... continue alpha blending */
+    uqadd       v17.8b, v2.8b, v20.8b
+    uqadd       v18.8b, v0.8b, v22.8b
+    uqadd       v19.8b, v1.8b, v23.8b
+    /* convert the result to r5g6b5 and store it into {v14} */
+    ushll       v14.8h, v17.8b, #7
+    sli         v14.8h, v14.8h, #1
+    ushll       v8.8h, v19.8b, #7
+    sli         v8.8h, v8.8h, #1
+    ushll       v9.8h, v18.8b, #7
+    sli         v9.8h, v9.8h, #1
+    sri         v14.8h, v8.8h, #5
+    sri         v14.8h, v9.8h, #11
+    mov         v28.d[0], v14.d[0]
+    mov         v29.d[0], v14.d[1]
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_over_n_0565_process_pixblock_tail_head
+    pixman_composite_over_n_0565_process_pixblock_tail
+    ld1         {v4.4h, v5.4h}, [DST_R], #16
+    st1         {v14.8h}, [DST_W], #16
+    pixman_composite_over_n_0565_process_pixblock_head
+    cache_preload 8, 8
+.endm
+
+.macro pixman_composite_over_n_0565_init
+    mov         v3.s[0], w4
+    dup         v0.8b, v3.b[0]
+    dup         v1.8b, v3.b[1]
+    dup         v2.8b, v3.b[2]
+    dup         v3.8b, v3.b[3]
+    mvn         v3.8b, v3.8b      /* invert source alpha */
+.endm
+
+generate_composite_function \
+    pixman_composite_over_n_0565_asm_neon, 0, 0, 16, \
+    FLAG_DST_READWRITE, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_n_0565_init, \
+    default_cleanup, \
+    pixman_composite_over_n_0565_process_pixblock_head, \
+    pixman_composite_over_n_0565_process_pixblock_tail, \
+    pixman_composite_over_n_0565_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    24  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_8888_0565_process_pixblock_head
+    ushll       v8.8h,  v1.8b,  #7
+    sli         v8.8h,  v8.8h,  #1
+    ushll       v14.8h, v2.8b,  #7
+    sli         v14.8h, v14.8h, #1
+    ushll       v9.8h,  v0.8b,  #7
+    sli         v9.8h,  v9.8h,  #1
+.endm
+
+.macro pixman_composite_src_8888_0565_process_pixblock_tail
+    sri         v14.8h, v8.8h, #5
+    sri         v14.8h, v9.8h, #11
+    mov         v28.d[0], v14.d[0]
+    mov         v29.d[0], v14.d[1]
+.endm
+
+.macro pixman_composite_src_8888_0565_process_pixblock_tail_head
+        sri         v14.8h, v8.8h, #5
+                                    PF add PF_X, PF_X, #8
+                                    PF tst PF_CTL, #0xF
+    fetch_src_pixblock
+                                    PF beq 10f
+                                    PF add PF_X, PF_X, #8
+                                    PF sub PF_CTL, PF_CTL, #1
+10:
+        sri         v14.8h, v9.8h, #11
+        mov         v28.d[0], v14.d[0]
+        mov         v29.d[0], v14.d[1]
+                                    PF cmp PF_X, ORIG_W
+                                    PF lsl DUMMY, PF_X, #src_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_SRC, DUMMY]
+    ushll       v8.8h, v1.8b, #7
+    sli         v8.8h, v8.8h, #1
+        st1        {v14.8h}, [DST_W], #16
+                                    PF ble 10f
+                                    PF sub PF_X, PF_X, ORIG_W
+                                    PF subs PF_CTL, PF_CTL, #0x10
+10:
+    ushll       v14.8h, v2.8b, #7
+    sli         v14.8h, v14.8h, #1
+                                    PF ble 10f
+                                    PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
+                                    PF ldrsb DUMMY, [PF_SRC, DUMMY]
+                                    PF add PF_SRC, PF_SRC, #1
+10:
+    ushll       v9.8h, v0.8b, #7
+    sli         v9.8h, v9.8h, #1
+.endm
+
+generate_composite_function \
+    pixman_composite_src_8888_0565_asm_neon, 32, 0, 16, \
+    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_8888_0565_process_pixblock_head, \
+    pixman_composite_src_8888_0565_process_pixblock_tail, \
+    pixman_composite_src_8888_0565_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_src_0565_8888_process_pixblock_head
+    mov         v0.d[1], v1.d[0]
+    shrn        v30.8b, v0.8h, #8
+    shrn        v29.8b, v0.8h, #3
+    sli         v0.8h,  v0.8h, #5
+    movi        v31.8b, #255
+    sri         v30.8b, v30.8b, #5
+    sri         v29.8b, v29.8b, #6
+    shrn        v28.8b, v0.8h, #2
+.endm
+
+.macro pixman_composite_src_0565_8888_process_pixblock_tail
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_src_0565_8888_process_pixblock_tail_head
+    pixman_composite_src_0565_8888_process_pixblock_tail
+    st4         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+    fetch_src_pixblock
+    pixman_composite_src_0565_8888_process_pixblock_head
+    cache_preload 8, 8
+.endm
+
+generate_composite_function \
+    pixman_composite_src_0565_8888_asm_neon, 16, 0, 32, \
+    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_0565_8888_process_pixblock_head, \
+    pixman_composite_src_0565_8888_process_pixblock_tail, \
+    pixman_composite_src_0565_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_add_8_8_process_pixblock_head
+    uqadd       v28.8b, v0.8b, v4.8b
+    uqadd       v29.8b, v1.8b, v5.8b
+    uqadd       v30.8b, v2.8b, v6.8b
+    uqadd       v31.8b, v3.8b, v7.8b
+.endm
+
+.macro pixman_composite_add_8_8_process_pixblock_tail
+.endm
+
+.macro pixman_composite_add_8_8_process_pixblock_tail_head
+    fetch_src_pixblock
+                                    PF add PF_X, PF_X, #32
+                                    PF tst PF_CTL, #0xF
+    ld1         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
+                                    PF beq 10f
+                                    PF add PF_X, PF_X, #32
+                                    PF sub PF_CTL, PF_CTL, #1
+10:
+        st1     {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+                                    PF cmp PF_X, ORIG_W
+                                    PF lsl DUMMY, PF_X, #src_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_SRC, DUMMY]
+                                    PF lsl DUMMY, PF_X, #dst_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_DST, DUMMY]
+                                    PF ble 10f
+                                    PF sub PF_X, PF_X, ORIG_W
+                                    PF subs PF_CTL, PF_CTL, #0x10
+10:
+    uqadd       v28.8b, v0.8b, v4.8b
+                                    PF ble 10f
+                                    PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
+                                    PF ldrsb DUMMY, [PF_SRC, DUMMY]
+                                    PF add PF_SRC, PF_SRC, #1
+                                    PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift
+                                    PF ldrsb DUMMY, [PF_DST, DUMMY]
+                                    PF add PF_DST, PF_DST, #1
+10:
+    uqadd       v29.8b, v1.8b, v5.8b
+    uqadd       v30.8b, v2.8b, v6.8b
+    uqadd       v31.8b, v3.8b, v7.8b
+.endm
+
+generate_composite_function \
+    pixman_composite_add_8_8_asm_neon, 8, 0, 8, \
+    FLAG_DST_READWRITE, \
+    32, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_add_8_8_process_pixblock_head, \
+    pixman_composite_add_8_8_process_pixblock_tail, \
+    pixman_composite_add_8_8_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_add_8888_8888_process_pixblock_tail_head
+    fetch_src_pixblock
+                                    PF add PF_X, PF_X, #8
+                                    PF tst PF_CTL, #0xF
+    ld1         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
+                                    PF beq 10f
+                                    PF add PF_X, PF_X, #8
+                                    PF sub PF_CTL, PF_CTL, #1
+10:
+        st1     {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+                                    PF cmp PF_X, ORIG_W
+                                    PF lsl DUMMY, PF_X, #src_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_SRC, DUMMY]
+                                    PF lsl DUMMY, PF_X, #dst_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_DST, DUMMY]
+                                    PF ble 10f
+                                    PF sub PF_X, PF_X, ORIG_W
+                                    PF subs PF_CTL, PF_CTL, #0x10
+10:
+    uqadd       v28.8b, v0.8b, v4.8b
+                                    PF ble 10f
+                                    PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
+                                    PF ldrsb DUMMY, [PF_SRC, DUMMY]
+                                    PF add PF_SRC, PF_SRC, #1
+                                    PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift
+                                    PF ldrsb DUMMY, [PF_DST, DUMMY]
+                                    PF add PF_DST, PF_DST, #1
+10:
+    uqadd       v29.8b, v1.8b, v5.8b
+    uqadd       v30.8b, v2.8b, v6.8b
+    uqadd       v31.8b, v3.8b, v7.8b
+.endm
+
+generate_composite_function \
+    pixman_composite_add_8888_8888_asm_neon, 32, 0, 32, \
+    FLAG_DST_READWRITE, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_add_8_8_process_pixblock_head, \
+    pixman_composite_add_8_8_process_pixblock_tail, \
+    pixman_composite_add_8888_8888_process_pixblock_tail_head
+
+generate_composite_function_single_scanline \
+    pixman_composite_scanline_add_asm_neon, 32, 0, 32, \
+    FLAG_DST_READWRITE, \
+    8, /* number of pixels, processed in a single block */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_add_8_8_process_pixblock_head, \
+    pixman_composite_add_8_8_process_pixblock_tail, \
+    pixman_composite_add_8888_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_out_reverse_8888_8888_process_pixblock_head
+    mvn         v24.8b, v3.8b  /* get inverted alpha */
+    /* do alpha blending */
+    umull       v8.8h, v24.8b, v4.8b
+    umull       v9.8h, v24.8b, v5.8b
+    umull       v10.8h, v24.8b, v6.8b
+    umull       v11.8h, v24.8b, v7.8b
+.endm
+
+.macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail
+    urshr       v14.8h, v8.8h, #8
+    urshr       v15.8h, v9.8h, #8
+    urshr       v16.8h, v10.8h, #8
+    urshr       v17.8h, v11.8h, #8
+    raddhn      v28.8b, v14.8h, v8.8h
+    raddhn      v29.8b, v15.8h, v9.8h
+    raddhn      v30.8b, v16.8h, v10.8h
+    raddhn      v31.8b, v17.8h, v11.8h
+.endm
+
+.macro pixman_composite_out_reverse_8888_8888_process_pixblock_tail_head
+     ld4        {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
+        urshr       v14.8h, v8.8h, #8
+                                    PF add PF_X, PF_X, #8
+                                    PF tst PF_CTL, #0xF
+        urshr       v15.8h, v9.8h, #8
+        urshr       v16.8h, v10.8h, #8
+        urshr       v17.8h, v11.8h, #8
+                                    PF beq 10f
+                                    PF add PF_X, PF_X, #8
+                                    PF sub PF_CTL, PF_CTL, #1
+10:
+        raddhn      v28.8b, v14.8h, v8.8h
+        raddhn      v29.8b, v15.8h, v9.8h
+                                    PF cmp PF_X, ORIG_W
+        raddhn      v30.8b, v16.8h, v10.8h
+        raddhn      v31.8b, v17.8h, v11.8h
+    fetch_src_pixblock
+                                    PF lsl DUMMY, PF_X, #src_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_SRC, DUMMY]
+    mvn         v22.8b, v3.8b
+                                    PF lsl DUMMY, PF_X, #dst_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_DST, DUMMY]
+         st4        {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+                                    PF ble 10f
+                                    PF sub PF_X, PF_X, ORIG_W
+10:
+    umull      v8.8h, v22.8b, v4.8b
+                                    PF ble 10f
+                                    PF subs PF_CTL, PF_CTL, #0x10
+10:
+    umull      v9.8h, v22.8b, v5.8b
+                                    PF ble 10f
+                                    PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
+                                    PF ldrsb DUMMY, [PF_SRC, DUMMY]
+                                    PF add PF_SRC, PF_SRC, #1
+10:
+    umull      v10.8h, v22.8b, v6.8b
+                                    PF ble 10f
+                                    PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift
+                                    PF ldrsb DUMMY, [PF_DST, DUMMY]
+                                    PF add PF_DST, PF_DST, #1
+10:
+     umull     v11.8h, v22.8b, v7.8b
+.endm
+
+generate_composite_function_single_scanline \
+    pixman_composite_scanline_out_reverse_asm_neon, 32, 0, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_out_reverse_8888_8888_process_pixblock_head, \
+    pixman_composite_out_reverse_8888_8888_process_pixblock_tail, \
+    pixman_composite_out_reverse_8888_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_over_8888_8888_process_pixblock_head
+    pixman_composite_out_reverse_8888_8888_process_pixblock_head
+.endm
+
+.macro pixman_composite_over_8888_8888_process_pixblock_tail
+    pixman_composite_out_reverse_8888_8888_process_pixblock_tail
+    uqadd       v28.8b, v0.8b, v28.8b
+    uqadd       v29.8b, v1.8b, v29.8b
+    uqadd       v30.8b, v2.8b, v30.8b
+    uqadd       v31.8b, v3.8b, v31.8b
+.endm
+
+.macro pixman_composite_over_8888_8888_process_pixblock_tail_head
+     ld4        {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
+        urshr       v14.8h, v8.8h, #8
+                                    PF add PF_X, PF_X, #8
+                                    PF tst PF_CTL, #0xF
+        urshr       v15.8h, v9.8h, #8
+        urshr       v16.8h, v10.8h, #8
+        urshr       v17.8h, v11.8h, #8
+                                    PF beq 10f
+                                    PF add PF_X, PF_X, #8
+                                    PF sub PF_CTL, PF_CTL, #1
+10:
+        raddhn      v28.8b, v14.8h, v8.8h
+        raddhn      v29.8b, v15.8h, v9.8h
+                                    PF cmp PF_X, ORIG_W
+        raddhn      v30.8b, v16.8h, v10.8h
+        raddhn      v31.8b, v17.8h, v11.8h
+        uqadd       v28.8b, v0.8b, v28.8b
+        uqadd       v29.8b, v1.8b, v29.8b
+        uqadd       v30.8b, v2.8b, v30.8b
+        uqadd       v31.8b, v3.8b, v31.8b
+    fetch_src_pixblock
+                                    PF lsl DUMMY, PF_X, #src_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_SRC, DUMMY]
+    mvn        v22.8b, v3.8b
+                                    PF lsl DUMMY, PF_X, #dst_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_DST, DUMMY]
+         st4        {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+                                    PF ble 10f
+                                    PF sub PF_X, PF_X, ORIG_W
+10:
+    umull      v8.8h, v22.8b, v4.8b
+                                    PF ble 10f
+                                    PF subs PF_CTL, PF_CTL, #0x10
+10:
+    umull      v9.8h, v22.8b, v5.8b
+                                    PF ble 10f
+                                    PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
+                                    PF ldrsb DUMMY, [PF_SRC, DUMMY]
+                                    PF add PF_SRC, PF_SRC, #1
+10:
+    umull      v10.8h, v22.8b, v6.8b
+                                    PF ble 10f
+                                    PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift
+                                    PF ldrsb DUMMY, [PF_DST, DUMMY]
+                                    PF add PF_DST, PF_DST, #1
+10:
+    umull      v11.8h, v22.8b, v7.8b
+.endm
+
+generate_composite_function \
+    pixman_composite_over_8888_8888_asm_neon, 32, 0, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_over_8888_8888_process_pixblock_head, \
+    pixman_composite_over_8888_8888_process_pixblock_tail, \
+    pixman_composite_over_8888_8888_process_pixblock_tail_head
+
+generate_composite_function_single_scanline \
+    pixman_composite_scanline_over_asm_neon, 32, 0, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_over_8888_8888_process_pixblock_head, \
+    pixman_composite_over_8888_8888_process_pixblock_tail, \
+    pixman_composite_over_8888_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_over_n_8888_process_pixblock_head
+    /* deinterleaved source pixels in {v0, v1, v2, v3} */
+    /* inverted alpha in {v24} */
+    /* destination pixels in {v4, v5, v6, v7} */
+    umull       v8.8h, v24.8b, v4.8b
+    umull       v9.8h, v24.8b, v5.8b
+    umull       v10.8h, v24.8b, v6.8b
+    umull       v11.8h, v24.8b, v7.8b
+.endm
+
+.macro pixman_composite_over_n_8888_process_pixblock_tail
+    urshr       v14.8h, v8.8h, #8
+    urshr       v15.8h, v9.8h, #8
+    urshr       v16.8h, v10.8h, #8
+    urshr       v17.8h, v11.8h, #8
+    raddhn      v28.8b, v14.8h, v8.8h
+    raddhn      v29.8b, v15.8h, v9.8h
+    raddhn      v30.8b, v16.8h, v10.8h
+    raddhn      v31.8b, v17.8h, v11.8h
+    uqadd       v28.8b, v0.8b, v28.8b
+    uqadd       v29.8b, v1.8b, v29.8b
+    uqadd       v30.8b, v2.8b, v30.8b
+    uqadd       v31.8b, v3.8b, v31.8b
+.endm
+
+.macro pixman_composite_over_n_8888_process_pixblock_tail_head
+        urshr       v14.8h, v8.8h, #8
+        urshr       v15.8h, v9.8h, #8
+        urshr       v16.8h, v10.8h, #8
+        urshr       v17.8h, v11.8h, #8
+        raddhn      v28.8b, v14.8h, v8.8h
+        raddhn      v29.8b, v15.8h, v9.8h
+        raddhn      v30.8b, v16.8h, v10.8h
+        raddhn      v31.8b, v17.8h, v11.8h
+    ld4         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
+        uqadd       v28.8b, v0.8b, v28.8b
+                                    PF add PF_X, PF_X, #8
+                                    PF tst PF_CTL, #0x0F
+                                    PF beq 10f
+                                    PF add PF_X, PF_X, #8
+                                    PF sub PF_CTL, PF_CTL, #1
+10:
+        uqadd       v29.8b, v1.8b, v29.8b
+        uqadd       v30.8b, v2.8b, v30.8b
+        uqadd       v31.8b, v3.8b, v31.8b
+                                    PF cmp PF_X, ORIG_W
+    umull       v8.8h, v24.8b, v4.8b
+                                    PF lsl DUMMY, PF_X, #dst_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_DST, DUMMY]
+    umull       v9.8h, v24.8b, v5.8b
+                                    PF ble 10f
+                                    PF sub PF_X, PF_X, ORIG_W
+10:
+    umull       v10.8h, v24.8b, v6.8b
+                                    PF subs PF_CTL, PF_CTL, #0x10
+    umull       v11.8h, v24.8b, v7.8b
+                                    PF ble 10f
+                                    PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift
+                                    PF ldrsb DUMMY, [PF_DST, DUMMY]
+                                    PF add PF_DST, PF_DST, #1
+10:
+        st4         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+.endm
+
+.macro pixman_composite_over_n_8888_init
+    mov         v3.s[0], w4
+    dup         v0.8b, v3.b[0]
+    dup         v1.8b, v3.b[1]
+    dup         v2.8b, v3.b[2]
+    dup         v3.8b, v3.b[3]
+    mvn         v24.8b, v3.8b  /* get inverted alpha */
+.endm
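+
+/*
+ * The solid color arrives packed in w4; placing it in lane 0 of v3
+ * lets dup splat each of its four bytes across a whole vector, so
+ * v0-v3 hold eight copies of blue, green, red and alpha respectively,
+ * and the inverted alpha in v24 is computed only once per call.
+ */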
+
+generate_composite_function \
+    pixman_composite_over_n_8888_asm_neon, 0, 0, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_n_8888_init, \
+    default_cleanup, \
+    pixman_composite_over_8888_8888_process_pixblock_head, \
+    pixman_composite_over_8888_8888_process_pixblock_tail, \
+    pixman_composite_over_n_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_over_reverse_n_8888_process_pixblock_tail_head
+        urshr       v14.8h, v8.8h, #8
+                                    PF add PF_X, PF_X, #8
+                                    PF tst PF_CTL, #0xF
+        urshr       v15.8h, v9.8h, #8
+        urshr       v12.8h, v10.8h, #8
+        urshr       v13.8h, v11.8h, #8
+                                    PF beq 10f
+                                    PF add PF_X, PF_X, #8
+                                    PF sub PF_CTL, PF_CTL, #1
+10:
+        raddhn      v28.8b, v14.8h, v8.8h
+        raddhn      v29.8b, v15.8h, v9.8h
+                                    PF cmp PF_X, ORIG_W
+        raddhn      v30.8b, v12.8h, v10.8h
+        raddhn      v31.8b, v13.8h, v11.8h
+        uqadd       v28.8b, v0.8b, v28.8b
+        uqadd       v29.8b, v1.8b, v29.8b
+        uqadd       v30.8b, v2.8b, v30.8b
+        uqadd       v31.8b, v3.8b, v31.8b
+    ld4         {v0.8b, v1.8b, v2.8b, v3.8b}, [DST_R], #32
+    mvn         v22.8b, v3.8b
+                                    PF lsl DUMMY, PF_X, #dst_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_DST, DUMMY]
+        st4         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+                                    PF blt 10f
+                                    PF sub PF_X, PF_X, ORIG_W
+10:
+    umull       v8.8h, v22.8b, v4.8b
+                                    PF blt 10f
+                                    PF subs PF_CTL, PF_CTL, #0x10
+10:
+    umull       v9.8h, v22.8b, v5.8b
+    umull       v10.8h, v22.8b, v6.8b
+                                    PF blt 10f
+                                    PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift
+                                    PF ldrsb DUMMY, [PF_DST, DUMMY]
+                                    PF add PF_DST, PF_DST, #1
+10:
+    umull       v11.8h, v22.8b, v7.8b
+.endm
+
+.macro pixman_composite_over_reverse_n_8888_init
+    mov         v7.s[0], w4
+    dup         v4.8b, v7.b[0]
+    dup         v5.8b, v7.b[1]
+    dup         v6.8b, v7.b[2]
+    dup         v7.8b, v7.b[3]
+.endm
+
+generate_composite_function \
+    pixman_composite_over_reverse_n_8888_asm_neon, 0, 0, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_reverse_n_8888_init, \
+    default_cleanup, \
+    pixman_composite_over_8888_8888_process_pixblock_head, \
+    pixman_composite_over_8888_8888_process_pixblock_tail, \
+    pixman_composite_over_reverse_n_8888_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    0,  /* dst_r_basereg */ \
+    4,  /* src_basereg   */ \
+    24  /* mask_basereg  */
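+
+/*
+ * OVER_REVERSE swaps the register banks around: the init splats the
+ * solid color into v4-v7 (src_basereg = 4) while the destination is
+ * loaded into v0-v3 (dst_r_basereg = 0), so the shared
+ * over_8888_8888 head/tail end up computing dest OVER solid-source
+ * without any extra register moves.
+ */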
+
+/******************************************************************************/
+
+.macro pixman_composite_over_8888_8_0565_process_pixblock_head
+    umull       v0.8h,  v24.8b, v8.8b    /* IN for SRC pixels (part1) */
+    umull       v1.8h,  v24.8b, v9.8b
+    umull       v2.8h,  v24.8b, v10.8b
+    umull       v3.8h,  v24.8b, v11.8b
+        mov         v4.d[1], v5.d[0]
+        shrn        v25.8b,  v4.8h, #8 /* convert DST_R data to 32-bpp (part1) */
+        shrn        v26.8b,  v4.8h, #3
+        sli         v4.8h,   v4.8h, #5
+    urshr       v17.8h, v0.8h,  #8    /* IN for SRC pixels (part2) */
+    urshr       v18.8h, v1.8h,  #8
+    urshr       v19.8h, v2.8h,  #8
+    urshr       v20.8h, v3.8h,  #8
+    raddhn      v0.8b,  v0.8h,  v17.8h
+    raddhn      v1.8b,  v1.8h,  v18.8h
+    raddhn      v2.8b,  v2.8h,  v19.8h
+    raddhn      v3.8b,  v3.8h,  v20.8h
+        sri         v25.8b, v25.8b, #5 /* convert DST_R data to 32-bpp (part2) */
+        sri         v26.8b, v26.8b, #6
+    mvn         v3.8b,  v3.8b
+        shrn        v30.8b, v4.8h,  #2
+    umull       v18.8h, v3.8b, v25.8b     /* now do alpha blending */
+    umull       v19.8h, v3.8b, v26.8b
+    umull       v20.8h, v3.8b, v30.8b
+.endm
+
+.macro pixman_composite_over_8888_8_0565_process_pixblock_tail
+    /* 3 cycle bubble (after umull) */
+    urshr       v5.8h, v18.8h, #8
+    urshr       v6.8h, v19.8h, #8
+    urshr       v7.8h, v20.8h, #8
+    raddhn      v17.8b, v18.8h, v5.8h
+    raddhn      v19.8b, v19.8h, v6.8h
+    raddhn      v18.8b, v20.8h, v7.8h
+    uqadd       v5.8b, v2.8b,  v17.8b
+    /* 1 cycle bubble */
+    uqadd       v6.8b, v0.8b,  v18.8b
+    uqadd       v7.8b, v1.8b,  v19.8b
+    ushll       v14.8h, v5.8b, #7    /* convert to 16bpp */
+    sli         v14.8h, v14.8h, #1
+    ushll       v18.8h, v7.8b, #7
+    sli         v18.8h, v18.8h, #1
+    ushll       v19.8h, v6.8b, #7
+    sli         v19.8h, v19.8h, #1
+    sri         v14.8h, v18.8h, #5
+    /* 1 cycle bubble */
+    sri         v14.8h, v19.8h, #11
+    mov         v28.d[0], v14.d[0]
+    mov         v29.d[0], v14.d[1]
+.endm
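+
+/*
+ * The r5g6b5 destination is first widened to planar 8 bits with bit
+ * replication (shrn/sri: each 5- or 6-bit field is shifted up and its
+ * top bits are replicated into the low bits, so 0x1f/0x3f map to
+ * 0xff), blended at 8-bit precision, and then packed back with
+ * ushll/sli/sri, which drop each channel's top bits into the right
+ * r5g6b5 field.
+ */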
+
+.macro pixman_composite_over_8888_8_0565_process_pixblock_tail_head
+#if 0
+    ld1         {v4.8h}, [DST_R], #16
+    shrn        v25.8b,  v4.8h,  #8
+    fetch_mask_pixblock
+    shrn        v26.8b,  v4.8h,  #3
+    fetch_src_pixblock
+    umull       v22.8h,  v24.8b, v10.8b
+        urshr       v13.8h, v18.8h, #8
+        urshr       v11.8h, v19.8h, #8
+        urshr       v15.8h, v20.8h, #8
+        raddhn      v17.8b, v18.8h, v13.8h
+        raddhn      v19.8b, v19.8h, v11.8h
+        raddhn      v18.8b, v20.8h, v15.8h
+        uqadd       v17.8b, v2.8b, v17.8b
+    umull       v21.8h,  v24.8b, v9.8b
+        uqadd       v18.8b, v0.8b, v18.8b
+        uqadd       v19.8b, v1.8b, v19.8b
+        ushll       v14.8h, v17.8b, #7
+        sli         v14.8h, v14.8h, #1
+    umull       v20.8h,  v24.8b, v8.8b
+        ushll       v18.8h,  v18.8b, #7
+        sli         v18.8h,  v18.8h, #1
+        ushll       v19.8h,  v19.8b, #7
+        sli         v19.8h,  v19.8h, #1
+        sri         v14.8h,  v18.8h, #5
+    umull       v23.8h,  v24.8b, v11.8b
+        sri         v14.8h,  v19.8h, #11
+        mov         v28.d[0], v14.d[0]
+        mov         v29.d[0], v14.d[1]
+
+    cache_preload 8, 8
+
+    sli         v4.8h,  v4.8h,   #5
+    urshr       v16.8h, v20.8h,  #8
+    urshr       v17.8h, v21.8h,  #8
+    urshr       v18.8h, v22.8h,  #8
+    urshr       v19.8h, v23.8h,  #8
+    raddhn      v0.8b,  v20.8h, v16.8h
+    raddhn      v1.8b,  v21.8h, v17.8h
+    raddhn      v2.8b,  v22.8h, v18.8h
+    raddhn      v3.8b,  v23.8h, v19.8h
+    sri         v25.8b,  v25.8b,  #5
+    sri         v26.8b,  v26.8b,  #6
+    mvn         v3.8b,  v3.8b
+    shrn        v30.8b, v4.8h,  #2
+    st1         {v14.8h}, [DST_W], #16
+    umull       v18.8h, v3.8b, v25.8b
+    umull       v19.8h, v3.8b, v26.8b
+    umull       v20.8h, v3.8b, v30.8b
+#else
+    pixman_composite_over_8888_8_0565_process_pixblock_tail
+    st1         {v28.4h, v29.4h}, [DST_W], #16
+    ld1         {v4.4h, v5.4h}, [DST_R], #16
+    fetch_mask_pixblock
+    fetch_src_pixblock
+    pixman_composite_over_8888_8_0565_process_pixblock_head
+#endif
+.endm
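+
+/*
+ * The '#if 0' branch above is a hand-interleaved scheduling of the
+ * same computation and is currently disabled; the '#else' branch
+ * falls back to the plain tail / store / load / head sequence.
+ */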
+
+generate_composite_function \
+    pixman_composite_over_8888_8_0565_asm_neon, 32, 8, 16, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    default_init_need_all_regs, \
+    default_cleanup_need_all_regs, \
+    pixman_composite_over_8888_8_0565_process_pixblock_head, \
+    pixman_composite_over_8888_8_0565_process_pixblock_tail, \
+    pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    8,  /* src_basereg   */ \
+    24  /* mask_basereg  */
+
+/******************************************************************************/
+
+/*
+ * This function needs a special initialization of solid mask.
+ * Solid source pixel data is taken from the w4 argument register,
+ * split into color components and replicated in the v8-v11
+ * registers. Additionally, this function needs all the NEON
+ * registers, including v8-v15 whose low halves are callee saved
+ * according to the ABI, so they have to be saved and restored
+ * around the compositing loop. All the other NEON registers are
+ * caller saved, so they can be clobbered freely without
+ * introducing any problems.
+ */
+.macro pixman_composite_over_n_8_0565_init
+    mov         v11.s[0], w4
+    dup         v8.8b, v11.b[0]
+    dup         v9.8b, v11.b[1]
+    dup         v10.8b, v11.b[2]
+    dup         v11.8b, v11.b[3]
+.endm
+
+.macro pixman_composite_over_n_8_0565_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_over_n_8_0565_asm_neon, 0, 8, 16, \
+    FLAG_DST_READWRITE, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_n_8_0565_init, \
+    pixman_composite_over_n_8_0565_cleanup, \
+    pixman_composite_over_8888_8_0565_process_pixblock_head, \
+    pixman_composite_over_8888_8_0565_process_pixblock_tail, \
+    pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    8,  /* src_basereg   */ \
+    24  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_over_8888_n_0565_init
+    mov         v24.s[0], w6
+    dup         v24.8b, v24.b[3]
+.endm
+
+.macro pixman_composite_over_8888_n_0565_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_over_8888_n_0565_asm_neon, 32, 0, 16, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_8888_n_0565_init, \
+    pixman_composite_over_8888_n_0565_cleanup, \
+    pixman_composite_over_8888_8_0565_process_pixblock_head, \
+    pixman_composite_over_8888_8_0565_process_pixblock_tail, \
+    pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    8,  /* src_basereg   */ \
+    24  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_0565_0565_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_0565_0565_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_0565_0565_process_pixblock_tail_head
+    st1     {v0.4h, v1.4h, v2.4h, v3.4h}, [DST_W], #32
+    fetch_src_pixblock
+    cache_preload 16, 16
+.endm
+
+generate_composite_function \
+    pixman_composite_src_0565_0565_asm_neon, 16, 0, 16, \
+    FLAG_DST_WRITEONLY, \
+    16, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_0565_0565_process_pixblock_head, \
+    pixman_composite_src_0565_0565_process_pixblock_tail, \
+    pixman_composite_src_0565_0565_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
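+
+/*
+ * A plain 16bpp copy: head and tail are empty, so each loop iteration
+ * just stores the previously fetched block while prefetching the next
+ * one, 16 pixels (32 bytes) at a time.
+ */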
+
+/******************************************************************************/
+
+.macro pixman_composite_src_n_8_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_n_8_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_n_8_process_pixblock_tail_head
+    st1         {v0.8b, v1.8b, v2.8b, v3.8b}, [DST_W], #32
+.endm
+
+.macro pixman_composite_src_n_8_init
+    mov         v0.s[0], w4
+    dup         v3.8b, v0.b[0]
+    dup         v2.8b, v0.b[0]
+    dup         v1.8b, v0.b[0]
+    dup         v0.8b, v0.b[0]
+.endm
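+
+/*
+ * Solid fills splat the constant from w4 across v0-v3 and then only
+ * store: with byte lanes this covers 32 a8 pixels per block, and the
+ * 0565 and 8888 variants below do the same with 16-bit and 32-bit
+ * lanes (16 and 8 pixels per block respectively).
+ */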
+
+.macro pixman_composite_src_n_8_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_src_n_8_asm_neon, 0, 0, 8, \
+    FLAG_DST_WRITEONLY, \
+    32, /* number of pixels, processed in a single block */ \
+    0,  /* prefetch distance */ \
+    pixman_composite_src_n_8_init, \
+    pixman_composite_src_n_8_cleanup, \
+    pixman_composite_src_n_8_process_pixblock_head, \
+    pixman_composite_src_n_8_process_pixblock_tail, \
+    pixman_composite_src_n_8_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_n_0565_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_n_0565_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_n_0565_process_pixblock_tail_head
+    st1     {v0.4h, v1.4h, v2.4h, v3.4h}, [DST_W], #32
+.endm
+
+.macro pixman_composite_src_n_0565_init
+    mov         v0.s[0], w4
+    dup         v3.4h, v0.h[0]
+    dup         v2.4h, v0.h[0]
+    dup         v1.4h, v0.h[0]
+    dup         v0.4h, v0.h[0]
+.endm
+
+.macro pixman_composite_src_n_0565_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_src_n_0565_asm_neon, 0, 0, 16, \
+    FLAG_DST_WRITEONLY, \
+    16, /* number of pixels, processed in a single block */ \
+    0,  /* prefetch distance */ \
+    pixman_composite_src_n_0565_init, \
+    pixman_composite_src_n_0565_cleanup, \
+    pixman_composite_src_n_0565_process_pixblock_head, \
+    pixman_composite_src_n_0565_process_pixblock_tail, \
+    pixman_composite_src_n_0565_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_n_8888_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_n_8888_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_n_8888_process_pixblock_tail_head
+    st1         {v0.2s, v1.2s, v2.2s, v3.2s}, [DST_W], #32
+.endm
+
+.macro pixman_composite_src_n_8888_init
+    mov         v0.s[0], w4
+    dup         v3.2s, v0.s[0]
+    dup         v2.2s, v0.s[0]
+    dup         v1.2s, v0.s[0]
+    dup         v0.2s, v0.s[0]
+.endm
+
+.macro pixman_composite_src_n_8888_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_src_n_8888_asm_neon, 0, 0, 32, \
+    FLAG_DST_WRITEONLY, \
+    8, /* number of pixels, processed in a single block */ \
+    0, /* prefetch distance */ \
+    pixman_composite_src_n_8888_init, \
+    pixman_composite_src_n_8888_cleanup, \
+    pixman_composite_src_n_8888_process_pixblock_head, \
+    pixman_composite_src_n_8888_process_pixblock_tail, \
+    pixman_composite_src_n_8888_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_8888_8888_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_8888_8888_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_8888_8888_process_pixblock_tail_head
+    st1  {v0.2s, v1.2s, v2.2s, v3.2s}, [DST_W], #32
+    fetch_src_pixblock
+    cache_preload 8, 8
+.endm
+
+generate_composite_function \
+    pixman_composite_src_8888_8888_asm_neon, 32, 0, 32, \
+    FLAG_DST_WRITEONLY, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_8888_8888_process_pixblock_head, \
+    pixman_composite_src_8888_8888_process_pixblock_tail, \
+    pixman_composite_src_8888_8888_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_x888_8888_process_pixblock_head
+    orr      v0.8b, v0.8b, v4.8b
+    orr      v1.8b, v1.8b, v4.8b
+    orr      v2.8b, v2.8b, v4.8b
+    orr      v3.8b, v3.8b, v4.8b
+.endm
+
+.macro pixman_composite_src_x888_8888_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_x888_8888_process_pixblock_tail_head
+    st1      {v0.2s, v1.2s, v2.2s, v3.2s}, [DST_W], #32
+    fetch_src_pixblock
+    orr      v0.8b, v0.8b, v4.8b
+    orr      v1.8b, v1.8b, v4.8b
+    orr      v2.8b, v2.8b, v4.8b
+    orr      v3.8b, v3.8b, v4.8b
+    cache_preload 8, 8
+.endm
+
+.macro pixman_composite_src_x888_8888_init
+    movi    v4.2s, #0xff, lsl 24
+.endm
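+
+/*
+ * movi v4.2s, #0xff, lsl 24 builds 0xff000000 in each 32-bit lane;
+ * OR-ing that into x8r8g8b8 source pixels forces the undefined alpha
+ * byte to fully opaque, which is all this conversion requires.
+ */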
+
+generate_composite_function \
+    pixman_composite_src_x888_8888_asm_neon, 32, 0, 32, \
+    FLAG_DST_WRITEONLY, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    pixman_composite_src_x888_8888_init, \
+    default_cleanup, \
+    pixman_composite_src_x888_8888_process_pixblock_head, \
+    pixman_composite_src_x888_8888_process_pixblock_tail, \
+    pixman_composite_src_x888_8888_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_n_8_8888_process_pixblock_head
+    /* expecting solid source in {v0, v1, v2, v3} */
+    /* mask is in v24 (v25, v26, v27 are unused) */
+
+    /* in */
+    umull       v8.8h,  v24.8b, v0.8b
+    umull       v9.8h,  v24.8b, v1.8b
+    umull       v10.8h, v24.8b, v2.8b
+    umull       v11.8h, v24.8b, v3.8b
+    ursra       v8.8h,  v8.8h, #8
+    ursra       v9.8h,  v9.8h, #8
+    ursra       v10.8h, v10.8h, #8
+    ursra       v11.8h, v11.8h, #8
+.endm
+
+.macro pixman_composite_src_n_8_8888_process_pixblock_tail
+    rshrn       v28.8b, v8.8h, #8
+    rshrn       v29.8b, v9.8h, #8
+    rshrn       v30.8b, v10.8h, #8
+    rshrn       v31.8b, v11.8h, #8
+.endm
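+
+/*
+ * The same rounded /255 as elsewhere, split differently to help the
+ * pipelined loop: ursra folds t += (t + 128) >> 8 into the head, and
+ * the tail's rshrn performs the final (t + 128) >> 8 narrowing.
+ */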
+
+.macro pixman_composite_src_n_8_8888_process_pixblock_tail_head
+    fetch_mask_pixblock
+                                    PF add PF_X, PF_X, #8
+        rshrn       v28.8b, v8.8h, #8
+                                    PF tst PF_CTL, #0x0F
+        rshrn       v29.8b, v9.8h, #8
+                                    PF beq 10f
+                                    PF add PF_X, PF_X, #8
+10:
+        rshrn      v30.8b, v10.8h, #8
+                                    PF beq 10f
+                                    PF sub PF_CTL, PF_CTL, #1
+10:
+        rshrn      v31.8b, v11.8h, #8
+                                    PF cmp PF_X, ORIG_W
+    umull          v8.8h, v24.8b, v0.8b
+                                    PF lsl DUMMY, PF_X, #mask_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_MASK, DUMMY]
+    umull          v9.8h, v24.8b, v1.8b
+                                    PF ble 10f
+                                    PF sub PF_X, PF_X, ORIG_W
+10:
+    umull          v10.8h, v24.8b, v2.8b
+                                    PF ble 10f
+                                    PF subs PF_CTL, PF_CTL, #0x10
+10:
+    umull          v11.8h, v24.8b, v3.8b
+                                    PF ble 10f
+                                    PF lsl DUMMY, MASK_STRIDE, #mask_bpp_shift
+                                    PF ldrsb DUMMY, [PF_MASK, DUMMY]
+                                    PF add PF_MASK, PF_MASK, #1
+10:
+        st4        {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+    ursra       v8.8h, v8.8h, #8
+    ursra       v9.8h, v9.8h, #8
+    ursra       v10.8h, v10.8h, #8
+    ursra       v11.8h, v11.8h, #8
+.endm
+
+.macro pixman_composite_src_n_8_8888_init
+    mov         v3.s[0], w4
+    dup         v0.8b, v3.b[0]
+    dup         v1.8b, v3.b[1]
+    dup         v2.8b, v3.b[2]
+    dup         v3.8b, v3.b[3]
+.endm
+
+.macro pixman_composite_src_n_8_8888_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_src_n_8_8888_asm_neon, 0, 8, 32, \
+    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_src_n_8_8888_init, \
+    pixman_composite_src_n_8_8888_cleanup, \
+    pixman_composite_src_n_8_8888_process_pixblock_head, \
+    pixman_composite_src_n_8_8888_process_pixblock_tail, \
+    pixman_composite_src_n_8_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_src_n_8_8_process_pixblock_head
+    umull       v0.8h, v24.8b, v16.8b
+    umull       v1.8h, v25.8b, v16.8b
+    umull       v2.8h, v26.8b, v16.8b
+    umull       v3.8h, v27.8b, v16.8b
+    ursra       v0.8h, v0.8h,  #8
+    ursra       v1.8h, v1.8h,  #8
+    ursra       v2.8h, v2.8h,  #8
+    ursra       v3.8h, v3.8h,  #8
+.endm
+
+.macro pixman_composite_src_n_8_8_process_pixblock_tail
+    rshrn       v28.8b, v0.8h, #8
+    rshrn       v29.8b, v1.8h, #8
+    rshrn       v30.8b, v2.8h, #8
+    rshrn       v31.8b, v3.8h, #8
+.endm
+
+.macro pixman_composite_src_n_8_8_process_pixblock_tail_head
+    fetch_mask_pixblock
+                                    PF add PF_X, PF_X, #8
+        rshrn       v28.8b, v0.8h, #8
+                                    PF tst PF_CTL, #0x0F
+        rshrn       v29.8b, v1.8h, #8
+                                    PF beq 10f
+                                    PF add PF_X, PF_X, #8
+10:
+        rshrn       v30.8b, v2.8h, #8
+                                    PF beq 10f
+                                    PF sub PF_CTL, PF_CTL, #1
+10:
+        rshrn       v31.8b, v3.8h, #8
+                                    PF cmp PF_X, ORIG_W
+    umull       v0.8h,  v24.8b, v16.8b
+                                    PF lsl DUMMY, PF_X, #mask_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_MASK, DUMMY]
+    umull       v1.8h,  v25.8b, v16.8b
+                                    PF ble 10f
+                                    PF sub PF_X, PF_X, ORIG_W
+10:
+    umull       v2.8h,  v26.8b, v16.8b
+                                    PF ble 10f
+                                    PF subs PF_CTL, PF_CTL, #0x10
+10:
+    umull       v3.8h,  v27.8b, v16.8b
+                                    PF ble 10f
+                                    PF lsl DUMMY, MASK_STRIDE, #mask_bpp_shift
+                                    PF ldrsb DUMMY, [PF_MASK, DUMMY]
+                                    PF add PF_MASK, PF_MASK, #1
+10:
+        st1         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+    ursra       v0.8h, v0.8h,  #8
+    ursra       v1.8h, v1.8h,  #8
+    ursra       v2.8h, v2.8h,  #8
+    ursra       v3.8h, v3.8h,  #8
+.endm
+
+.macro pixman_composite_src_n_8_8_init
+    mov         v16.s[0], w4
+    dup         v16.8b, v16.b[3]
+.endm
+
+.macro pixman_composite_src_n_8_8_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_src_n_8_8_asm_neon, 0, 8, 8, \
+    FLAG_DST_WRITEONLY, \
+    32, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_src_n_8_8_init, \
+    pixman_composite_src_n_8_8_cleanup, \
+    pixman_composite_src_n_8_8_process_pixblock_head, \
+    pixman_composite_src_n_8_8_process_pixblock_tail, \
+    pixman_composite_src_n_8_8_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_over_n_8_8888_process_pixblock_head
+    /* expecting deinterleaved source data in {v8, v9, v10, v11} */
+    /* v8 - blue, v9 - green, v10 - red, v11 - alpha */
+    /* and destination data in {v4, v5, v6, v7} */
+    /* mask is in v24 (v25, v26, v27 are unused) */
+
+    /* in */
+    umull       v12.8h, v24.8b, v8.8b
+    umull       v13.8h, v24.8b, v9.8b
+    umull       v14.8h, v24.8b, v10.8b
+    umull       v15.8h, v24.8b, v11.8b
+    urshr       v16.8h, v12.8h, #8
+    urshr       v17.8h, v13.8h, #8
+    urshr       v18.8h, v14.8h, #8
+    urshr       v19.8h, v15.8h, #8
+    raddhn      v0.8b, v12.8h, v16.8h
+    raddhn      v1.8b, v13.8h, v17.8h
+    raddhn      v2.8b, v14.8h, v18.8h
+    raddhn      v3.8b, v15.8h, v19.8h
+    mvn         v25.8b, v3.8b  /* get inverted alpha */
+    /* source:      v0 - blue, v1 - green, v2 - red, v3 - alpha */
+    /* destination: v4 - blue, v5 - green, v6 - red, v7 - alpha */
+    /* now do alpha blending */
+    umull       v12.8h, v25.8b, v4.8b
+    umull       v13.8h, v25.8b, v5.8b
+    umull       v14.8h, v25.8b, v6.8b
+    umull       v15.8h, v25.8b, v7.8b
+.endm
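+
+/*
+ * (solid IN mask) OVER dest, per channel:
+ *     s' = s * m / 255                        (the first four umulls)
+ *     dest' = s' + dest * (255 - a') / 255    (alpha blending)
+ * where a' is the masked source alpha produced in v3 and every
+ * division is rounded as usual.
+ */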
+
+.macro pixman_composite_over_n_8_8888_process_pixblock_tail
+    urshr       v16.8h, v12.8h, #8
+    urshr       v17.8h, v13.8h, #8
+    urshr       v18.8h, v14.8h, #8
+    urshr       v19.8h, v15.8h, #8
+    raddhn      v28.8b, v16.8h, v12.8h
+    raddhn      v29.8b, v17.8h, v13.8h
+    raddhn      v30.8b, v18.8h, v14.8h
+    raddhn      v31.8b, v19.8h, v15.8h
+    uqadd       v28.8b, v0.8b, v28.8b
+    uqadd       v29.8b, v1.8b, v29.8b
+    uqadd       v30.8b, v2.8b, v30.8b
+    uqadd       v31.8b, v3.8b, v31.8b
+.endm
+
+.macro pixman_composite_over_n_8_8888_process_pixblock_tail_head
+        urshr       v16.8h, v12.8h, #8
+     ld4        {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
+        urshr       v17.8h, v13.8h, #8
+    fetch_mask_pixblock
+        urshr       v18.8h, v14.8h, #8
+                                    PF add PF_X, PF_X, #8
+        urshr       v19.8h, v15.8h, #8
+                                    PF tst PF_CTL, #0x0F
+        raddhn      v28.8b, v16.8h, v12.8h
+                                    PF beq 10f
+                                    PF add PF_X, PF_X, #8
+10:
+        raddhn      v29.8b, v17.8h, v13.8h
+                                    PF beq 10f
+                                    PF sub PF_CTL, PF_CTL, #1
+10:
+        raddhn      v30.8b, v18.8h, v14.8h
+                                    PF cmp PF_X, ORIG_W
+        raddhn      v31.8b, v19.8h, v15.8h
+                                    PF lsl DUMMY, PF_X, #dst_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_DST, DUMMY]
+    umull       v16.8h, v24.8b, v8.8b
+                                    PF lsl DUMMY, PF_X, #mask_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_MASK, DUMMY]
+    umull       v17.8h, v24.8b, v9.8b
+                                    PF ble 10f
+                                    PF sub PF_X, PF_X, ORIG_W
+10:
+    umull       v18.8h, v24.8b, v10.8b
+                                    PF ble 10f
+                                    PF subs PF_CTL, PF_CTL, #0x10
+10:
+    umull       v19.8h, v24.8b, v11.8b
+                                    PF ble 10f
+                                    PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift
+                                    PF ldrsb DUMMY, [PF_DST, DUMMY]
+                                    PF add PF_DST, PF_DST, #1
+10:
+        uqadd       v28.8b, v0.8b, v28.8b
+                                    PF ble 10f
+                                    PF lsl DUMMY, MASK_STRIDE, #mask_bpp_shift
+                                    PF ldrsb DUMMY, [PF_MASK, DUMMY]
+                                    PF add PF_MASK, PF_MASK, #1
+10:
+        uqadd        v29.8b, v1.8b, v29.8b
+        uqadd        v30.8b, v2.8b, v30.8b
+        uqadd        v31.8b, v3.8b, v31.8b
+    urshr       v12.8h, v16.8h, #8
+    urshr       v13.8h, v17.8h, #8
+    urshr       v14.8h, v18.8h, #8
+    urshr       v15.8h, v19.8h, #8
+    raddhn      v0.8b, v16.8h, v12.8h
+    raddhn      v1.8b, v17.8h, v13.8h
+    raddhn      v2.8b, v18.8h, v14.8h
+    raddhn      v3.8b, v19.8h, v15.8h
+        st4          {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+    mvn         v25.8b, v3.8b
+    umull       v12.8h, v25.8b, v4.8b
+    umull       v13.8h, v25.8b, v5.8b
+    umull       v14.8h, v25.8b, v6.8b
+    umull       v15.8h, v25.8b, v7.8b
+.endm
+
+.macro pixman_composite_over_n_8_8888_init
+    mov         v11.s[0], w4
+    dup         v8.8b, v11.b[0]
+    dup         v9.8b, v11.b[1]
+    dup         v10.8b, v11.b[2]
+    dup         v11.8b, v11.b[3]
+.endm
+
+.macro pixman_composite_over_n_8_8888_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_over_n_8_8888_asm_neon, 0, 8, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_n_8_8888_init, \
+    pixman_composite_over_n_8_8888_cleanup, \
+    pixman_composite_over_n_8_8888_process_pixblock_head, \
+    pixman_composite_over_n_8_8888_process_pixblock_tail, \
+    pixman_composite_over_n_8_8888_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_over_n_8_8_process_pixblock_head
+    umull       v0.8h,  v24.8b, v8.8b
+    umull       v1.8h,  v25.8b, v8.8b
+    umull       v2.8h,  v26.8b, v8.8b
+    umull       v3.8h,  v27.8b, v8.8b
+    urshr       v10.8h, v0.8h,  #8
+    urshr       v11.8h, v1.8h,  #8
+    urshr       v12.8h, v2.8h,  #8
+    urshr       v13.8h, v3.8h,  #8
+    raddhn      v0.8b,  v0.8h,  v10.8h
+    raddhn      v1.8b,  v1.8h,  v11.8h
+    raddhn      v2.8b,  v2.8h,  v12.8h
+    raddhn      v3.8b,  v3.8h,  v13.8h
+    mvn         v24.8b, v0.8b
+    mvn         v25.8b, v1.8b
+    mvn         v26.8b, v2.8b
+    mvn         v27.8b, v3.8b
+    umull       v10.8h, v24.8b, v4.8b
+    umull       v11.8h, v25.8b, v5.8b
+    umull       v12.8h, v26.8b, v6.8b
+    umull       v13.8h, v27.8b, v7.8b
+.endm
+
+.macro pixman_composite_over_n_8_8_process_pixblock_tail
+    urshr       v14.8h, v10.8h,  #8
+    urshr       v15.8h, v11.8h,  #8
+    urshr       v16.8h, v12.8h, #8
+    urshr       v17.8h, v13.8h, #8
+    raddhn      v28.8b, v14.8h, v10.8h
+    raddhn      v29.8b, v15.8h, v11.8h
+    raddhn      v30.8b, v16.8h, v12.8h
+    raddhn      v31.8b, v17.8h, v13.8h
+    uqadd       v28.8b, v0.8b,  v28.8b
+    uqadd       v29.8b, v1.8b,  v29.8b
+    uqadd       v30.8b, v2.8b,  v30.8b
+    uqadd       v31.8b, v3.8b,  v31.8b
+.endm
+
+/* TODO: expand macros and do better instructions scheduling */
+.macro pixman_composite_over_n_8_8_process_pixblock_tail_head
+    ld1         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
+    pixman_composite_over_n_8_8_process_pixblock_tail
+    fetch_mask_pixblock
+    cache_preload 32, 32
+    st1         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+    pixman_composite_over_n_8_8_process_pixblock_head
+.endm
+
+.macro pixman_composite_over_n_8_8_init
+    mov         v8.s[0], w4
+    dup         v8.8b, v8.b[3]
+.endm
+
+.macro pixman_composite_over_n_8_8_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_over_n_8_8_asm_neon, 0, 8, 8, \
+    FLAG_DST_READWRITE, \
+    32, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_n_8_8_init, \
+    pixman_composite_over_n_8_8_cleanup, \
+    pixman_composite_over_n_8_8_process_pixblock_head, \
+    pixman_composite_over_n_8_8_process_pixblock_tail, \
+    pixman_composite_over_n_8_8_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_over_n_8888_8888_ca_process_pixblock_head
+    /*
+     * 'combine_mask_ca' replacement
+     *
+     * input:  solid src (n) in {v8,  v9,  v10, v11}
+     *         dest in          {v4,  v5,  v6,  v7 }
+     *         mask in          {v24, v25, v26, v27}
+     * output: updated src in   {v0,  v1,  v2,  v3 }
+     *         updated mask in  {v24, v25, v26, v3 }
+     */
+    umull       v0.8h,  v24.8b, v8.8b
+    umull       v1.8h,  v25.8b, v9.8b
+    umull       v2.8h,  v26.8b, v10.8b
+    umull       v3.8h,  v27.8b, v11.8b
+    umull       v12.8h, v11.8b, v25.8b
+    umull       v13.8h, v11.8b, v24.8b
+    umull       v14.8h, v11.8b, v26.8b
+    urshr       v15.8h, v0.8h,  #8
+    urshr       v16.8h, v1.8h,  #8
+    urshr       v17.8h, v2.8h,  #8
+    raddhn      v0.8b,  v0.8h,  v15.8h
+    raddhn      v1.8b,  v1.8h,  v16.8h
+    raddhn      v2.8b,  v2.8h,  v17.8h
+    urshr       v15.8h, v13.8h, #8
+    urshr       v16.8h, v12.8h, #8
+    urshr       v17.8h, v14.8h, #8
+    urshr       v18.8h, v3.8h,  #8
+    raddhn      v24.8b, v13.8h, v15.8h
+    raddhn      v25.8b, v12.8h, v16.8h
+    raddhn      v26.8b, v14.8h, v17.8h
+    raddhn      v3.8b,  v3.8h,  v18.8h
+    /*
+     * 'combine_over_ca' replacement
+     *
+     * output: updated dest in {v28, v29, v30, v31}
+     */
+    mvn         v24.8b, v24.8b
+    mvn         v25.8b, v25.8b
+    mvn         v26.8b, v26.8b
+    mvn         v27.8b, v3.8b
+    umull       v12.8h, v24.8b, v4.8b
+    umull       v13.8h, v25.8b, v5.8b
+    umull       v14.8h, v26.8b, v6.8b
+    umull       v15.8h, v27.8b, v7.8b
+.endm
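+
+/*
+ * Component alpha: both the source and its alpha get scaled per
+ * channel by the mask,
+ *     s'[c] = s[c] * m[c] / 255,    a'[c] = a_s * m[c] / 255,
+ * and the blend then uses a per-channel inverted alpha:
+ *     dest'[c] = s'[c] + dest[c] * (255 - a'[c]) / 255
+ */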
+
+.macro pixman_composite_over_n_8888_8888_ca_process_pixblock_tail
+    /* ... continue 'combine_over_ca' replacement */
+    urshr       v16.8h, v12.8h, #8
+    urshr       v17.8h, v13.8h, #8
+    urshr       v18.8h, v14.8h, #8
+    urshr       v19.8h, v15.8h, #8
+    raddhn      v28.8b, v16.8h, v12.8h
+    raddhn      v29.8b, v17.8h, v13.8h
+    raddhn      v30.8b, v18.8h, v14.8h
+    raddhn      v31.8b, v19.8h, v15.8h
+    uqadd       v28.8b, v0.8b,  v28.8b
+    uqadd       v29.8b, v1.8b,  v29.8b
+    uqadd       v30.8b, v2.8b,  v30.8b
+    uqadd       v31.8b, v3.8b,  v31.8b
+.endm
+
+.macro pixman_composite_over_n_8888_8888_ca_process_pixblock_tail_head
+        urshr       v16.8h, v12.8h, #8
+        urshr       v17.8h, v13.8h, #8
+    ld4         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
+        urshr       v18.8h, v14.8h, #8
+        urshr       v19.8h, v15.8h, #8
+        raddhn      v28.8b, v16.8h, v12.8h
+        raddhn      v29.8b, v17.8h, v13.8h
+        raddhn      v30.8b, v18.8h, v14.8h
+        raddhn      v31.8b, v19.8h, v15.8h
+    fetch_mask_pixblock
+        uqadd       v28.8b, v0.8b, v28.8b
+        uqadd       v29.8b, v1.8b, v29.8b
+        uqadd       v30.8b, v2.8b, v30.8b
+        uqadd       v31.8b, v3.8b, v31.8b
+    cache_preload 8, 8
+    pixman_composite_over_n_8888_8888_ca_process_pixblock_head
+    st4         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+.endm
+
+.macro pixman_composite_over_n_8888_8888_ca_init
+    mov         v13.s[0], w4
+    dup         v8.8b, v13.b[0]
+    dup         v9.8b, v13.b[1]
+    dup         v10.8b, v13.b[2]
+    dup         v11.8b, v13.b[3]
+.endm
+
+.macro pixman_composite_over_n_8888_8888_ca_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_over_n_8888_8888_ca_asm_neon, 0, 32, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_n_8888_8888_ca_init, \
+    pixman_composite_over_n_8888_8888_ca_cleanup, \
+    pixman_composite_over_n_8888_8888_ca_process_pixblock_head, \
+    pixman_composite_over_n_8888_8888_ca_process_pixblock_tail, \
+    pixman_composite_over_n_8888_8888_ca_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_head
+    /*
+     * 'combine_mask_ca' replacement
+     *
+     * input:  solid src (n) in {v8,  v9,  v10, v11}  [B, G, R, A]
+     *         mask in          {v24, v25, v26}       [B, G, R]
+     * output: updated src in   {v0,  v1,  v2 }       [B, G, R]
+     *         updated mask in  {v24, v25, v26}       [B, G, R]
+     */
+    umull       v0.8h,  v24.8b, v8.8b
+    umull       v1.8h,  v25.8b, v9.8b
+    umull       v2.8h,  v26.8b, v10.8b
+    umull       v12.8h, v11.8b, v24.8b
+    umull       v13.8h, v11.8b, v25.8b
+    umull       v14.8h, v11.8b, v26.8b
+    urshr       v15.8h, v0.8h,  #8
+    urshr       v16.8h, v1.8h,  #8
+    urshr       v17.8h, v2.8h,  #8
+    raddhn      v0.8b,  v0.8h,  v15.8h
+    raddhn      v1.8b,  v1.8h,  v16.8h
+    raddhn      v2.8b,  v2.8h,  v17.8h
+    urshr       v19.8h, v12.8h, #8
+    urshr       v20.8h, v13.8h, #8
+    urshr       v21.8h, v14.8h, #8
+    raddhn      v24.8b, v12.8h, v19.8h
+    raddhn      v25.8b, v13.8h, v20.8h
+    /*
+     * convert 8 r5g6b5 pixel data from {v4} to planar 8-bit format
+     * and put data into v16 - blue, v17 - green, v18 - red
+     */
+       mov         v4.d[1], v5.d[0]
+       shrn        v17.8b, v4.8h,  #3
+       shrn        v18.8b, v4.8h,  #8
+    raddhn      v26.8b, v14.8h, v21.8h
+       sli         v4.8h,  v4.8h,  #5
+       sri         v18.8b, v18.8b, #5
+       sri         v17.8b, v17.8b, #6
+    /*
+     * 'combine_over_ca' replacement
+     *
+     * output: updated dest in v16 - blue, v17 - green, v18 - red
+     */
+    mvn         v24.8b, v24.8b
+    mvn         v25.8b, v25.8b
+       shrn       v16.8b, v4.8h,  #2
+    mvn         v26.8b, v26.8b
+    umull       v5.8h, v16.8b, v24.8b
+    umull       v6.8h, v17.8b, v25.8b
+    umull       v7.8h, v18.8b, v26.8b
+.endm
+
+.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail
+    /* ... continue 'combine_over_ca' replacement */
+    urshr       v13.8h, v5.8h, #8
+    urshr       v14.8h, v6.8h, #8
+    urshr       v15.8h, v7.8h, #8
+    raddhn      v16.8b, v13.8h, v5.8h
+    raddhn      v17.8b, v14.8h, v6.8h
+    raddhn      v18.8b, v15.8h, v7.8h
+    uqadd       v16.8b, v0.8b, v16.8b
+    uqadd       v17.8b, v1.8b, v17.8b
+    uqadd       v18.8b, v2.8b, v18.8b
+    /*
+     * convert the results in v16, v17, v18 to r5g6b5 and store
+     * them into {v14}
+     */
+    ushll       v14.8h, v18.8b, #7
+    sli         v14.8h, v14.8h, #1
+    ushll       v12.8h, v17.8b, #7
+    sli         v12.8h, v12.8h, #1
+    ushll       v13.8h, v16.8b, #7
+    sli         v13.8h, v13.8h, #1
+    sri         v14.8h, v12.8h, #5
+    sri         v14.8h, v13.8h, #11
+    mov         v28.d[0], v14.d[0]
+    mov         v29.d[0], v14.d[1]
+.endm
+
+.macro pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head
+    fetch_mask_pixblock
+        urshr       v13.8h, v5.8h, #8
+        urshr       v14.8h, v6.8h, #8
+    ld1         {v4.8h}, [DST_R], #16
+        urshr       v15.8h, v7.8h, #8
+        raddhn      v16.8b, v13.8h, v5.8h
+        raddhn      v17.8b, v14.8h, v6.8h
+        raddhn      v18.8b, v15.8h, v7.8h
+    mov         v5.d[0], v4.d[1]
+            /* process_pixblock_head */
+            /*
+             * 'combine_mask_ca' replacement
+             *
+             * input:  solid src (n) in {v8,  v9,  v10, v11}  [B, G, R, A]
+             *         mask in          {v24, v25, v26}       [B, G, R]
+             * output: updated src in   {v0,  v1,  v2 }       [B, G, R]
+             *         updated mask in  {v24, v25, v26}       [B, G, R]
+             */
+        uqadd       v16.8b, v0.8b, v16.8b
+        uqadd       v17.8b, v1.8b, v17.8b
+        uqadd       v18.8b, v2.8b, v18.8b
+            umull       v0.8h,  v24.8b, v8.8b
+            umull       v1.8h,  v25.8b, v9.8b
+            umull       v2.8h,  v26.8b, v10.8b
+        /*
+         * convert the result in v16, v17, v18 to r5g6b5 and store
+         * it into {v14}
+         */
+        ushll       v14.8h, v18.8b, #7
+        sli         v14.8h, v14.8h, #1
+        ushll       v18.8h, v16.8b, #7
+        sli         v18.8h, v18.8h, #1
+        ushll       v19.8h, v17.8b, #7
+        sli         v19.8h, v19.8h, #1
+            umull       v12.8h, v11.8b, v24.8b
+        sri         v14.8h, v19.8h, #5
+            umull       v13.8h, v11.8b, v25.8b
+            umull       v15.8h, v11.8b, v26.8b
+        sri         v14.8h, v18.8h, #11
+        mov         v28.d[0], v14.d[0]
+        mov         v29.d[0], v14.d[1]
+    cache_preload 8, 8
+            urshr       v16.8h, v0.8h,  #8
+            urshr       v17.8h, v1.8h,  #8
+            urshr       v18.8h, v2.8h,  #8
+            raddhn      v0.8b,  v0.8h,  v16.8h
+            raddhn      v1.8b,  v1.8h,  v17.8h
+            raddhn      v2.8b,  v2.8h,  v18.8h
+            urshr       v19.8h, v12.8h, #8
+            urshr       v20.8h, v13.8h, #8
+            urshr       v21.8h, v15.8h, #8
+            raddhn      v24.8b, v12.8h, v19.8h
+            raddhn      v25.8b, v13.8h, v20.8h
+                /*
+                 * convert 8 r5g6b5 pixel data from {v4, v5} to planar
+                 * 8-bit format and put data into v16 - blue, v17 - green,
+                 * v18 - red
+                 */
+                mov         v4.d[1], v5.d[0]
+                shrn        v17.8b, v4.8h,  #3
+                shrn        v18.8b, v4.8h,  #8
+            raddhn      v26.8b, v15.8h, v21.8h
+                sli         v4.8h,  v4.8h,  #5
+                sri         v17.8b, v17.8b, #6
+                sri         v18.8b, v18.8b, #5
+            /*
+             * 'combine_over_ca' replacement
+             *
+             * output: updated dest in v16 - blue, v17 - green, v18 - red
+             */
+            mvn         v24.8b, v24.8b
+            mvn         v25.8b, v25.8b
+                shrn        v16.8b, v4.8h,  #2
+            mvn         v26.8b, v26.8b
+            umull       v5.8h, v16.8b, v24.8b
+            umull       v6.8h, v17.8b, v25.8b
+            umull       v7.8h, v18.8b, v26.8b
+    st1         {v14.8h}, [DST_W], #16
+.endm
+
+.macro pixman_composite_over_n_8888_0565_ca_init
+    mov         v13.s[0], w4
+    dup         v8.8b, v13.b[0]
+    dup         v9.8b, v13.b[1]
+    dup         v10.8b, v13.b[2]
+    dup         v11.8b, v13.b[3]
+.endm
+
+.macro pixman_composite_over_n_8888_0565_ca_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_over_n_8888_0565_ca_asm_neon, 0, 32, 16, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_n_8888_0565_ca_init, \
+    pixman_composite_over_n_8888_0565_ca_cleanup, \
+    pixman_composite_over_n_8888_0565_ca_process_pixblock_head, \
+    pixman_composite_over_n_8888_0565_ca_process_pixblock_tail, \
+    pixman_composite_over_n_8888_0565_ca_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_in_n_8_process_pixblock_head
+    /* expecting source data in {v0, v1, v2, v3} */
+    /* and destination data in {v4, v5, v6, v7} */
+    umull       v8.8h,  v4.8b,  v3.8b
+    umull       v9.8h,  v5.8b,  v3.8b
+    umull       v10.8h, v6.8b,  v3.8b
+    umull       v11.8h, v7.8b,  v3.8b
+.endm
+
+.macro pixman_composite_in_n_8_process_pixblock_tail
+    urshr       v14.8h, v8.8h,  #8
+    urshr       v15.8h, v9.8h,  #8
+    urshr       v12.8h, v10.8h, #8
+    urshr       v13.8h, v11.8h, #8
+    raddhn      v28.8b, v8.8h,  v14.8h
+    raddhn      v29.8b, v9.8h,  v15.8h
+    raddhn      v30.8b, v10.8h, v12.8h
+    raddhn      v31.8b, v11.8h, v13.8h
+.endm
+
+.macro pixman_composite_in_n_8_process_pixblock_tail_head
+    pixman_composite_in_n_8_process_pixblock_tail
+    ld1         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
+    cache_preload 32, 32
+    pixman_composite_in_n_8_process_pixblock_head
+    st1         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+.endm
+
+.macro pixman_composite_in_n_8_init
+    mov         v3.s[0], w4
+    dup         v3.8b, v3.b[3]
+.endm
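+
+/*
+ * IN with a solid source only needs the source alpha: the init above
+ * splats a_s from w4 into v3, and the loop computes
+ *     dest' = dest * a_s / 255
+ * on 32 a8 pixels per block.
+ */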
+
+.macro pixman_composite_in_n_8_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_in_n_8_asm_neon, 0, 0, 8, \
+    FLAG_DST_READWRITE, \
+    32, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_in_n_8_init, \
+    pixman_composite_in_n_8_cleanup, \
+    pixman_composite_in_n_8_process_pixblock_head, \
+    pixman_composite_in_n_8_process_pixblock_tail, \
+    pixman_composite_in_n_8_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    24  /* mask_basereg  */
+
+.macro pixman_composite_add_n_8_8_process_pixblock_head
+    /* expecting source data in {v8, v9, v10, v11} */
+    /* v8 - blue, v9 - green, v10 - red, v11 - alpha */
+    /* and destination data in {v4, v5, v6, v7} */
+    /* mask is in v24, v25, v26, v27 */
+    umull       v0.8h, v24.8b, v11.8b
+    umull       v1.8h, v25.8b, v11.8b
+    umull       v2.8h, v26.8b, v11.8b
+    umull       v3.8h, v27.8b, v11.8b
+    urshr       v12.8h, v0.8h, #8
+    urshr       v13.8h, v1.8h, #8
+    urshr       v14.8h, v2.8h, #8
+    urshr       v15.8h, v3.8h, #8
+    raddhn      v0.8b, v0.8h, v12.8h
+    raddhn      v1.8b, v1.8h, v13.8h
+    raddhn      v2.8b, v2.8h, v14.8h
+    raddhn      v3.8b, v3.8h, v15.8h
+    uqadd       v28.8b, v0.8b, v4.8b
+    uqadd       v29.8b, v1.8b, v5.8b
+    uqadd       v30.8b, v2.8b, v6.8b
+    uqadd       v31.8b, v3.8b, v7.8b
+.endm
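+
+/*
+ * ADD with a solid source and an a8 mask:
+ *     dest' = saturate(dest + a_s * m / 255)
+ * 32 bytes at a time, with uqadd providing the saturation.
+ */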
+
+.macro pixman_composite_add_n_8_8_process_pixblock_tail
+.endm
+
+/* TODO: expand macros and do better instructions scheduling */
+.macro pixman_composite_add_n_8_8_process_pixblock_tail_head
+    pixman_composite_add_n_8_8_process_pixblock_tail
+    st1         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+    ld1         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
+    fetch_mask_pixblock
+    cache_preload 32, 32
+    pixman_composite_add_n_8_8_process_pixblock_head
+.endm
+
+.macro pixman_composite_add_n_8_8_init
+    mov         v11.s[0], w4
+    dup         v11.8b, v11.b[3]
+.endm
+
+.macro pixman_composite_add_n_8_8_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_add_n_8_8_asm_neon, 0, 8, 8, \
+    FLAG_DST_READWRITE, \
+    32, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_add_n_8_8_init, \
+    pixman_composite_add_n_8_8_cleanup, \
+    pixman_composite_add_n_8_8_process_pixblock_head, \
+    pixman_composite_add_n_8_8_process_pixblock_tail, \
+    pixman_composite_add_n_8_8_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_add_8_8_8_process_pixblock_head
+    /* expecting source data in {v0, v1, v2, v3} */
+    /* destination data in {v4, v5, v6, v7} */
+    /* mask in {v24, v25, v26, v27} */
+    umull       v8.8h, v24.8b, v0.8b
+    umull       v9.8h, v25.8b, v1.8b
+    umull       v10.8h, v26.8b, v2.8b
+    umull       v11.8h, v27.8b, v3.8b
+    urshr       v0.8h, v8.8h, #8
+    urshr       v1.8h, v9.8h, #8
+    urshr       v12.8h, v10.8h, #8
+    urshr       v13.8h, v11.8h, #8
+    raddhn      v0.8b, v0.8h, v8.8h
+    raddhn      v1.8b, v1.8h, v9.8h
+    raddhn      v2.8b, v12.8h, v10.8h
+    raddhn      v3.8b, v13.8h, v11.8h
+    uqadd       v28.8b, v0.8b, v4.8b
+    uqadd       v29.8b, v1.8b, v5.8b
+    uqadd       v30.8b, v2.8b, v6.8b
+    uqadd       v31.8b, v3.8b, v7.8b
+.endm
+
+.macro pixman_composite_add_8_8_8_process_pixblock_tail
+.endm
+
+/* TODO: expand macros and do better instructions scheduling */
+.macro pixman_composite_add_8_8_8_process_pixblock_tail_head
+    pixman_composite_add_8_8_8_process_pixblock_tail
+    st1         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+    ld1         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
+    fetch_mask_pixblock
+    fetch_src_pixblock
+    cache_preload 32, 32
+    pixman_composite_add_8_8_8_process_pixblock_head
+.endm
+
+.macro pixman_composite_add_8_8_8_init
+.endm
+
+.macro pixman_composite_add_8_8_8_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_add_8_8_8_asm_neon, 8, 8, 8, \
+    FLAG_DST_READWRITE, \
+    32, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_add_8_8_8_init, \
+    pixman_composite_add_8_8_8_cleanup, \
+    pixman_composite_add_8_8_8_process_pixblock_head, \
+    pixman_composite_add_8_8_8_process_pixblock_tail, \
+    pixman_composite_add_8_8_8_process_pixblock_tail_head
+
+/******************************************************************************/
+
+.macro pixman_composite_add_8888_8888_8888_process_pixblock_head
+    /* expecting source data in {v0, v1, v2, v3} */
+    /* destination data in {v4, v5, v6, v7} */
+    /* mask in {v24, v25, v26, v27} */
+    umull       v8.8h,  v27.8b, v0.8b
+    umull       v9.8h,  v27.8b, v1.8b
+    umull       v10.8h, v27.8b, v2.8b
+    umull       v11.8h, v27.8b, v3.8b
+    /* 1 cycle bubble */
+    ursra       v8.8h,  v8.8h,  #8
+    ursra       v9.8h,  v9.8h,  #8
+    ursra       v10.8h, v10.8h, #8
+    ursra       v11.8h, v11.8h, #8
+.endm
+
+.macro pixman_composite_add_8888_8888_8888_process_pixblock_tail
+    /* 2 cycle bubble */
+    rshrn       v28.8b, v8.8h,  #8
+    rshrn       v29.8b, v9.8h,  #8
+    rshrn       v30.8b, v10.8h, #8
+    rshrn       v31.8b, v11.8h, #8
+    uqadd       v28.8b, v4.8b,  v28.8b
+    uqadd       v29.8b, v5.8b,  v29.8b
+    uqadd       v30.8b, v6.8b,  v30.8b
+    uqadd       v31.8b, v7.8b,  v31.8b
+.endm
+
+.macro pixman_composite_add_8888_8888_8888_process_pixblock_tail_head
+    fetch_src_pixblock
+        rshrn       v28.8b, v8.8h,  #8
+    fetch_mask_pixblock
+        rshrn       v29.8b, v9.8h,  #8
+    umull       v8.8h,  v27.8b, v0.8b
+        rshrn       v30.8b, v10.8h, #8
+    umull       v9.8h,  v27.8b, v1.8b
+        rshrn       v31.8b, v11.8h, #8
+    umull       v10.8h, v27.8b, v2.8b
+    umull       v11.8h, v27.8b, v3.8b
+        uqadd       v28.8b, v4.8b,  v28.8b
+        uqadd       v29.8b, v5.8b,  v29.8b
+        uqadd       v30.8b, v6.8b,  v30.8b
+        uqadd       v31.8b, v7.8b,  v31.8b
+    ursra       v8.8h,  v8.8h,  #8
+    ld4         {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
+    ursra       v9.8h,  v9.8h,  #8
+        st4         {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+    ursra       v10.8h, v10.8h, #8
+
+    cache_preload 8, 8
+
+    ursra       v11.8h, v11.8h, #8
+.endm
+
+generate_composite_function \
+    pixman_composite_add_8888_8888_8888_asm_neon, 32, 32, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_head, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    24  /* mask_basereg  */
+
+generate_composite_function_single_scanline \
+    pixman_composite_scanline_add_mask_asm_neon, 32, 32, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_head, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    24  /* mask_basereg  */
+
+/******************************************************************************/
+
+generate_composite_function \
+    pixman_composite_add_8888_8_8888_asm_neon, 32, 8, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_head, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    27  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_add_n_8_8888_init
+    mov         v3.s[0], w4
+    dup         v0.8b, v3.b[0]
+    dup         v1.8b, v3.b[1]
+    dup         v2.8b, v3.b[2]
+    dup         v3.8b, v3.b[3]
+.endm
+
+.macro pixman_composite_add_n_8_8888_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_add_n_8_8888_asm_neon, 0, 8, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_add_n_8_8888_init, \
+    pixman_composite_add_n_8_8888_cleanup, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_head, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    27  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_add_8888_n_8888_init
+    mov         v27.s[0], w6
+    dup         v27.8b, v27.b[3]
+.endm
+
+.macro pixman_composite_add_8888_n_8888_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_add_8888_n_8888_asm_neon, 32, 0, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_add_8888_n_8888_init, \
+    pixman_composite_add_8888_n_8888_cleanup, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_head, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_tail, \
+    pixman_composite_add_8888_8888_8888_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    27  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_out_reverse_8888_n_8888_process_pixblock_head
+    /* expecting source data in {v0, v1, v2, v3} */
+    /* destination data in {v4, v5, v6, v7} */
+    /* solid mask is in v15 */
+
+    /* 'in' */
+    umull       v11.8h, v15.8b, v3.8b
+    umull       v10.8h, v15.8b, v2.8b
+    umull       v9.8h,  v15.8b, v1.8b
+    umull       v8.8h,  v15.8b, v0.8b
+    urshr       v16.8h, v11.8h, #8
+    urshr       v14.8h, v10.8h, #8
+    urshr       v13.8h,  v9.8h, #8
+    urshr       v12.8h,  v8.8h, #8
+    raddhn      v3.8b, v11.8h, v16.8h
+    raddhn      v2.8b, v10.8h, v14.8h
+    raddhn      v1.8b,  v9.8h, v13.8h
+    raddhn      v0.8b,  v8.8h, v12.8h
+    mvn         v24.8b, v3.8b  /* get inverted alpha */
+    /* now do alpha blending */
+    umull       v8.8h, v24.8b, v4.8b
+    umull       v9.8h, v24.8b, v5.8b
+    umull       v10.8h, v24.8b, v6.8b
+    umull       v11.8h, v24.8b, v7.8b
+.endm
+
+.macro pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail
+    urshr       v16.8h, v8.8h, #8
+    urshr       v17.8h, v9.8h, #8
+    urshr       v18.8h, v10.8h, #8
+    urshr       v19.8h, v11.8h, #8
+    raddhn      v28.8b, v16.8h, v8.8h
+    raddhn      v29.8b, v17.8h, v9.8h
+    raddhn      v30.8b, v18.8h, v10.8h
+    raddhn      v31.8b, v19.8h, v11.8h
+.endm
+
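+/*
+ * In compositing terms the head/tail pair above implements the
+ * OUT_REVERSE operator with a solid mask: the source is first multiplied
+ * by the mask alpha ('in'), and each destination channel is then scaled
+ * by the inverted alpha of that result. Per channel, roughly:
+ *
+ *     s = mul_255 (s, mask_a);          /* the 'in' part            */
+ *     d = mul_255 (d, 255 - s_alpha);   /* mvn + umull/urshr/raddhn */
+ *
+ * where the urshr/raddhn pair performs the same rounded division by 255
+ * as the ursra/rshrn idiom used elsewhere in this file.
+ */
+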
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_out_reverse_8888_8888_8888_process_pixblock_tail_head
+    ld4        {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
+    pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail
+    fetch_src_pixblock
+    cache_preload 8, 8
+    fetch_mask_pixblock
+    pixman_composite_out_reverse_8888_n_8888_process_pixblock_head
+    st4        {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+.endm
+
+generate_composite_function_single_scanline \
+    pixman_composite_scanline_out_reverse_mask_asm_neon, 32, 32, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    default_init_need_all_regs, \
+    default_cleanup_need_all_regs, \
+    pixman_composite_out_reverse_8888_n_8888_process_pixblock_head, \
+    pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail, \
+    pixman_composite_out_reverse_8888_8888_8888_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    12  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_over_8888_n_8888_process_pixblock_head
+    pixman_composite_out_reverse_8888_n_8888_process_pixblock_head
+.endm
+
+.macro pixman_composite_over_8888_n_8888_process_pixblock_tail
+    pixman_composite_out_reverse_8888_n_8888_process_pixblock_tail
+    uqadd       v28.8b, v0.8b, v28.8b
+    uqadd       v29.8b, v1.8b, v29.8b
+    uqadd       v30.8b, v2.8b, v30.8b
+    uqadd       v31.8b, v3.8b, v31.8b
+.endm
+
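+/*
+ * Adding the masked source back in (the four uqadd instructions in the
+ * tail) turns OUT_REVERSE into the OVER operator; per channel, with
+ * premultiplied pixels, this is roughly:
+ *
+ *     s = mul_255 (s, mask_a);
+ *     d = s + mul_255 (d, 255 - s_alpha);
+ */
+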
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_over_8888_n_8888_process_pixblock_tail_head
+    ld4        {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
+    pixman_composite_over_8888_n_8888_process_pixblock_tail
+    fetch_src_pixblock
+    cache_preload 8, 8
+    pixman_composite_over_8888_n_8888_process_pixblock_head
+    st4        {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+.endm
+
+.macro pixman_composite_over_8888_n_8888_init
+    mov         v15.s[0], w6
+    dup         v15.8b, v15.b[3]
+.endm
+
+.macro pixman_composite_over_8888_n_8888_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_over_8888_n_8888_asm_neon, 32, 0, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_8888_n_8888_init, \
+    pixman_composite_over_8888_n_8888_cleanup, \
+    pixman_composite_over_8888_n_8888_process_pixblock_head, \
+    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
+    pixman_composite_over_8888_n_8888_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    12  /* mask_basereg  */
+
+/******************************************************************************/
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_over_8888_8888_8888_process_pixblock_tail_head
+    ld4        {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
+    pixman_composite_over_8888_n_8888_process_pixblock_tail
+    fetch_src_pixblock
+    cache_preload 8, 8
+    fetch_mask_pixblock
+    pixman_composite_over_8888_n_8888_process_pixblock_head
+    st4        {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+.endm
+
+generate_composite_function \
+    pixman_composite_over_8888_8888_8888_asm_neon, 32, 32, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    default_init_need_all_regs, \
+    default_cleanup_need_all_regs, \
+    pixman_composite_over_8888_n_8888_process_pixblock_head, \
+    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
+    pixman_composite_over_8888_8888_8888_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    12  /* mask_basereg  */
+
+generate_composite_function_single_scanline \
+    pixman_composite_scanline_over_mask_asm_neon, 32, 32, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    default_init_need_all_regs, \
+    default_cleanup_need_all_regs, \
+    pixman_composite_over_8888_n_8888_process_pixblock_head, \
+    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
+    pixman_composite_over_8888_8888_8888_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    12  /* mask_basereg  */
+
+/******************************************************************************/
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_over_8888_8_8888_process_pixblock_tail_head
+    ld4        {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
+    pixman_composite_over_8888_n_8888_process_pixblock_tail
+    fetch_src_pixblock
+    cache_preload 8, 8
+    fetch_mask_pixblock
+    pixman_composite_over_8888_n_8888_process_pixblock_head
+    st4        {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+.endm
+
+generate_composite_function \
+    pixman_composite_over_8888_8_8888_asm_neon, 32, 8, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    default_init_need_all_regs, \
+    default_cleanup_need_all_regs, \
+    pixman_composite_over_8888_n_8888_process_pixblock_head, \
+    pixman_composite_over_8888_n_8888_process_pixblock_tail, \
+    pixman_composite_over_8888_8_8888_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    15  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_0888_0888_process_pixblock_head
+.endm
+
+.macro pixman_composite_src_0888_0888_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_0888_0888_process_pixblock_tail_head
+    st3     {v0.8b, v1.8b, v2.8b}, [DST_W], #24
+    fetch_src_pixblock
+    cache_preload 8, 8
+.endm
+
+generate_composite_function \
+    pixman_composite_src_0888_0888_asm_neon, 24, 0, 24, \
+    FLAG_DST_WRITEONLY, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_0888_0888_process_pixblock_head, \
+    pixman_composite_src_0888_0888_process_pixblock_tail, \
+    pixman_composite_src_0888_0888_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_0888_8888_rev_process_pixblock_head
+    mov    v31.8b, v2.8b
+    mov    v2.8b, v0.8b
+    mov    v0.8b, v31.8b
+.endm
+
+.macro pixman_composite_src_0888_8888_rev_process_pixblock_tail
+.endm
+
+.macro pixman_composite_src_0888_8888_rev_process_pixblock_tail_head
+    st4    {v0.8b, v1.8b, v2.8b, v3.8b}, [DST_W], #32
+    fetch_src_pixblock
+    mov    v31.8b, v2.8b
+    mov    v2.8b, v0.8b
+    mov    v0.8b, v31.8b
+    cache_preload 8, 8
+.endm
+
+.macro pixman_composite_src_0888_8888_rev_init
+    eor    v3.8b, v3.8b, v3.8b
+.endm
+
+generate_composite_function \
+    pixman_composite_src_0888_8888_rev_asm_neon, 24, 0, 32, \
+    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    pixman_composite_src_0888_8888_rev_init, \
+    default_cleanup, \
+    pixman_composite_src_0888_8888_rev_process_pixblock_head, \
+    pixman_composite_src_0888_8888_rev_process_pixblock_tail, \
+    pixman_composite_src_0888_8888_rev_process_pixblock_tail_head, \
+    0, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_0888_0565_rev_process_pixblock_head
+    ushll       v8.8h, v1.8b, #7
+    sli         v8.8h, v8.8h, #1
+    ushll       v9.8h, v2.8b, #7
+    sli         v9.8h, v9.8h, #1
+.endm
+
+.macro pixman_composite_src_0888_0565_rev_process_pixblock_tail
+    ushll       v14.8h, v0.8b, #7
+    sli         v14.8h, v14.8h, #1
+    sri         v14.8h, v8.8h, #5
+    sri         v14.8h, v9.8h, #11
+    mov         v28.d[0], v14.d[0]
+    mov         v29.d[0], v14.d[1]
+.endm
+
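+/*
+ * The ushll #7 + sli #1 steps above place each 8-bit channel into the
+ * top byte of a 16-bit lane, and the two sri instructions then merge the
+ * three channels into the r5g6b5 layout. The packed value is the usual
+ *
+ *     uint16_t p = ((c0 & 0xF8) << 8)   /* bits 15..11 */
+ *                | ((c1 & 0xFC) << 3)   /* bits 10..5  */
+ *                |  (c2 >> 3);          /* bits  4..0  */
+ *
+ * where c0, c1 and c2 are the channels loaded into v0, v1 and v2 (which
+ * of them is red or blue depends on the '_rev' memory order).
+ */
+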
+.macro pixman_composite_src_0888_0565_rev_process_pixblock_tail_head
+        ushll       v14.8h, v0.8b, #7
+        sli         v14.8h, v14.8h, #1
+    fetch_src_pixblock
+        sri         v14.8h, v8.8h, #5
+        sri         v14.8h, v9.8h, #11
+        mov         v28.d[0], v14.d[0]
+        mov         v29.d[0], v14.d[1]
+    ushll       v8.8h, v1.8b, #7
+    sli         v8.8h, v8.8h, #1
+        st1     {v14.8h}, [DST_W], #16
+    ushll       v9.8h, v2.8b, #7
+    sli         v9.8h, v9.8h, #1
+.endm
+
+generate_composite_function \
+    pixman_composite_src_0888_0565_rev_asm_neon, 24, 0, 16, \
+    FLAG_DST_WRITEONLY, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_0888_0565_rev_process_pixblock_head, \
+    pixman_composite_src_0888_0565_rev_process_pixblock_tail, \
+    pixman_composite_src_0888_0565_rev_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_pixbuf_8888_process_pixblock_head
+    umull       v8.8h, v3.8b, v0.8b
+    umull       v9.8h, v3.8b, v1.8b
+    umull       v10.8h, v3.8b, v2.8b
+.endm
+
+.macro pixman_composite_src_pixbuf_8888_process_pixblock_tail
+    urshr       v11.8h, v8.8h, #8
+    mov         v30.8b, v31.8b
+    mov         v31.8b, v3.8b
+    mov         v3.8b, v30.8b
+    urshr       v12.8h, v9.8h, #8
+    urshr       v13.8h, v10.8h, #8
+    raddhn      v30.8b, v11.8h, v8.8h
+    raddhn      v29.8b, v12.8h, v9.8h
+    raddhn      v28.8b, v13.8h, v10.8h
+.endm
+
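+/*
+ * 'pixbuf' sources are not premultiplied, so the head/tail pair above
+ * multiplies every color channel by the alpha channel (v3) while the mov
+ * shuffle reorders red and blue for the destination. Per pixel, roughly:
+ *
+ *     a   = s.a;
+ *     d.r = mul_255 (s.b, a);   /* note the r/b swap */
+ *     d.g = mul_255 (s.g, a);
+ *     d.b = mul_255 (s.r, a);
+ *     d.a = a;
+ *
+ * The rpixbuf variant below performs the same premultiplication without
+ * the channel swap.
+ */
+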
+.macro pixman_composite_src_pixbuf_8888_process_pixblock_tail_head
+        urshr       v11.8h, v8.8h, #8
+        mov         v30.8b, v31.8b
+        mov         v31.8b, v3.8b
+        mov         v3.8b, v31.8b
+        urshr       v12.8h, v9.8h, #8
+        urshr       v13.8h, v10.8h, #8
+    fetch_src_pixblock
+        raddhn      v30.8b, v11.8h, v8.8h
+                                    PF add PF_X, PF_X, #8
+                                    PF tst PF_CTL, #0xF
+                                    PF beq 10f
+                                    PF add PF_X, PF_X, #8
+                                    PF sub PF_CTL, PF_CTL, #1
+10:
+        raddhn      v29.8b, v12.8h, v9.8h
+        raddhn      v28.8b, v13.8h, v10.8h
+    umull       v8.8h, v3.8b, v0.8b
+    umull       v9.8h, v3.8b, v1.8b
+    umull       v10.8h, v3.8b, v2.8b
+         st4    {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+                                    PF cmp PF_X, ORIG_W
+                                    PF lsl DUMMY, PF_X, src_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_SRC, DUMMY]
+                                    PF ble 10f
+                                    PF sub PF_X, PF_X, ORIG_W
+                                    PF subs PF_CTL, PF_CTL, #0x10
+                                    PF ble 10f
+                                    PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
+                                    PF ldrsb DUMMY, [PF_SRC, DUMMY]
+                                    PF add PF_SRC, PF_SRC, #1
+10:
+.endm
+
+generate_composite_function \
+    pixman_composite_src_pixbuf_8888_asm_neon, 32, 0, 32, \
+    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_pixbuf_8888_process_pixblock_head, \
+    pixman_composite_src_pixbuf_8888_process_pixblock_tail, \
+    pixman_composite_src_pixbuf_8888_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_src_rpixbuf_8888_process_pixblock_head
+    umull       v8.8h, v3.8b, v0.8b
+    umull       v9.8h, v3.8b, v1.8b
+    umull       v10.8h, v3.8b, v2.8b
+.endm
+
+.macro pixman_composite_src_rpixbuf_8888_process_pixblock_tail
+    urshr       v11.8h, v8.8h, #8
+    mov         v30.8b, v31.8b
+    mov         v31.8b, v3.8b
+    mov         v3.8b, v30.8b
+    urshr       v12.8h, v9.8h, #8
+    urshr       v13.8h, v10.8h, #8
+    raddhn      v28.8b, v11.8h, v8.8h
+    raddhn      v29.8b, v12.8h, v9.8h
+    raddhn      v30.8b, v13.8h, v10.8h
+.endm
+
+.macro pixman_composite_src_rpixbuf_8888_process_pixblock_tail_head
+        urshr       v11.8h, v8.8h, #8
+        mov         v30.8b, v31.8b
+        mov         v31.8b, v3.8b
+        mov         v3.8b, v30.8b
+        urshr       v12.8h, v9.8h, #8
+        urshr       v13.8h, v10.8h, #8
+    fetch_src_pixblock
+        raddhn      v28.8b, v11.8h, v8.8h
+                                    PF add PF_X, PF_X, #8
+                                    PF tst PF_CTL, #0xF
+                                    PF beq 10f
+                                    PF add PF_X, PF_X, #8
+                                    PF sub PF_CTL, PF_CTL, #1
+10:
+        raddhn      v29.8b, v12.8h, v9.8h
+        raddhn      v30.8b, v13.8h, v10.8h
+    umull       v8.8h, v3.8b, v0.8b
+    umull       v9.8h, v3.8b, v1.8b
+    umull       v10.8h, v3.8b, v2.8b
+         st4    {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+                                    PF cmp PF_X, ORIG_W
+                                    PF lsl DUMMY, PF_X, src_bpp_shift
+                                    PF prfm PREFETCH_MODE, [PF_SRC, DUMMY]
+                                    PF ble 10f
+                                    PF sub PF_X, PF_X, ORIG_W
+                                    PF subs PF_CTL, PF_CTL, #0x10
+                                    PF ble 10f
+                                    PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
+                                    PF ldrsb DUMMY, [PF_SRC, DUMMY]
+                                    PF add PF_SRC, PF_SRC, #1
+10:
+.endm
+
+generate_composite_function \
+    pixman_composite_src_rpixbuf_8888_asm_neon, 32, 0, 32, \
+    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    10, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_rpixbuf_8888_process_pixblock_head, \
+    pixman_composite_src_rpixbuf_8888_process_pixblock_tail, \
+    pixman_composite_src_rpixbuf_8888_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    0, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_over_0565_8_0565_process_pixblock_head
+    /* mask is in v15 */
+    mov         v4.d[0], v8.d[0]
+    mov         v4.d[1], v9.d[0]
+    mov         v13.d[0], v10.d[0]
+    mov         v13.d[1], v11.d[0]
+    convert_0565_to_x888 v4, v2, v1, v0
+    convert_0565_to_x888 v13, v6, v5, v4
+    /* source pixel data is in      {v0, v1, v2, XX} */
+    /* destination pixel data is in {v4, v5, v6, XX} */
+    mvn         v7.8b,  v15.8b
+    umull       v10.8h, v15.8b, v2.8b
+    umull       v9.8h,  v15.8b, v1.8b
+    umull       v8.8h,  v15.8b, v0.8b
+    umull       v11.8h, v7.8b,  v4.8b
+    umull       v12.8h, v7.8b,  v5.8b
+    umull       v13.8h, v7.8b,  v6.8b
+    urshr       v19.8h, v10.8h, #8
+    urshr       v18.8h, v9.8h,  #8
+    urshr       v17.8h, v8.8h,  #8
+    raddhn      v2.8b,  v10.8h, v19.8h
+    raddhn      v1.8b,  v9.8h,  v18.8h
+    raddhn      v0.8b,  v8.8h,  v17.8h
+.endm
+
+.macro pixman_composite_over_0565_8_0565_process_pixblock_tail
+    urshr       v17.8h, v11.8h,  #8
+    urshr       v18.8h, v12.8h,  #8
+    urshr       v19.8h, v13.8h,  #8
+    raddhn      v28.8b, v17.8h, v11.8h
+    raddhn      v29.8b, v18.8h, v12.8h
+    raddhn      v30.8b, v19.8h, v13.8h
+    uqadd       v0.8b,  v0.8b,  v28.8b
+    uqadd       v1.8b,  v1.8b,  v29.8b
+    uqadd       v2.8b,  v2.8b,  v30.8b
+    /* 32bpp result is in {v0, v1, v2, XX} */
+    convert_8888_to_0565 v2, v1, v0, v14, v30, v13
+    mov         v28.d[0], v14.d[0]
+    mov         v29.d[0], v14.d[1]
+.endm
+
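+/*
+ * For r5g6b5 destinations the pixels are first widened to 8 bits per
+ * channel (convert_0565_to_x888), blended exactly as in the 8888 case,
+ * and packed back (convert_8888_to_0565). Schematically, per channel,
+ * with an opaque source under an 8-bit mask m:
+ *
+ *     s8 = expand_0565 (s);   d8 = expand_0565 (d);
+ *     d8 = mul_255 (s8, m) + mul_255 (d8, 255 - m);
+ *     d  = pack_0565 (d8);
+ *
+ * (expand_0565 and pack_0565 are illustrative names for the conversion
+ * helpers.)
+ */
+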
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_over_0565_8_0565_process_pixblock_tail_head
+    fetch_mask_pixblock
+    pixman_composite_over_0565_8_0565_process_pixblock_tail
+    fetch_src_pixblock
+    ld1        {v10.4h, v11.4h}, [DST_R], #16
+    cache_preload 8, 8
+    pixman_composite_over_0565_8_0565_process_pixblock_head
+    st1        {v14.8h}, [DST_W], #16
+.endm
+
+generate_composite_function \
+    pixman_composite_over_0565_8_0565_asm_neon, 16, 8, 16, \
+    FLAG_DST_READWRITE, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    default_init_need_all_regs, \
+    default_cleanup_need_all_regs, \
+    pixman_composite_over_0565_8_0565_process_pixblock_head, \
+    pixman_composite_over_0565_8_0565_process_pixblock_tail, \
+    pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    10,  /* dst_r_basereg */ \
+    8,  /* src_basereg   */ \
+    15  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_over_0565_n_0565_init
+    mov         v15.s[0], w6
+    dup         v15.8b, v15.b[3]
+.endm
+
+.macro pixman_composite_over_0565_n_0565_cleanup
+.endm
+
+generate_composite_function \
+    pixman_composite_over_0565_n_0565_asm_neon, 16, 0, 16, \
+    FLAG_DST_READWRITE, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    pixman_composite_over_0565_n_0565_init, \
+    pixman_composite_over_0565_n_0565_cleanup, \
+    pixman_composite_over_0565_8_0565_process_pixblock_head, \
+    pixman_composite_over_0565_8_0565_process_pixblock_tail, \
+    pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    10,  /* dst_r_basereg */ \
+    8,  /* src_basereg   */ \
+    15  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_add_0565_8_0565_process_pixblock_head
+    /* mask is in v15 */
+    mov         v4.d[0], v8.d[0]
+    mov         v4.d[1], v9.d[0]
+    mov         v13.d[0], v10.d[0]
+    mov         v13.d[1], v11.d[0]
+    convert_0565_to_x888 v4,  v2, v1, v0
+    convert_0565_to_x888 v13, v6, v5, v4
+    /* source pixel data is in      {v0, v1, v2, XX} */
+    /* destination pixel data is in {v4, v5, v6, XX} */
+    umull       v9.8h,  v15.8b, v2.8b
+    umull       v8.8h,  v15.8b, v1.8b
+    umull       v7.8h,  v15.8b, v0.8b
+    urshr       v12.8h, v9.8h,  #8
+    urshr       v11.8h, v8.8h,  #8
+    urshr       v10.8h, v7.8h,  #8
+    raddhn      v2.8b,  v9.8h,  v12.8h
+    raddhn      v1.8b,  v8.8h,  v11.8h
+    raddhn      v0.8b,  v7.8h,  v10.8h
+.endm
+
+.macro pixman_composite_add_0565_8_0565_process_pixblock_tail
+    uqadd       v0.8b,  v0.8b,  v4.8b
+    uqadd       v1.8b,  v1.8b,  v5.8b
+    uqadd       v2.8b,  v2.8b,  v6.8b
+    /* 32bpp result is in {v0, v1, v2, XX} */
+    convert_8888_to_0565 v2, v1, v0, v14, v30, v13
+    mov         v28.d[0], v14.d[0]
+    mov         v29.d[0], v14.d[1]
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_add_0565_8_0565_process_pixblock_tail_head
+    fetch_mask_pixblock
+    pixman_composite_add_0565_8_0565_process_pixblock_tail
+    fetch_src_pixblock
+    ld1        {v10.4h, v11.4h}, [DST_R], #16
+    cache_preload 8, 8
+    pixman_composite_add_0565_8_0565_process_pixblock_head
+    st1        {v14.8h}, [DST_W], #16
+.endm
+
+generate_composite_function \
+    pixman_composite_add_0565_8_0565_asm_neon, 16, 8, 16, \
+    FLAG_DST_READWRITE, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    default_init_need_all_regs, \
+    default_cleanup_need_all_regs, \
+    pixman_composite_add_0565_8_0565_process_pixblock_head, \
+    pixman_composite_add_0565_8_0565_process_pixblock_tail, \
+    pixman_composite_add_0565_8_0565_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    10, /* dst_r_basereg */ \
+    8,  /* src_basereg   */ \
+    15  /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_out_reverse_8_0565_process_pixblock_head
+    /* mask is in v15 */
+    mov         v12.d[0], v10.d[0]
+    mov         v12.d[1], v11.d[0]
+    convert_0565_to_x888 v12, v6, v5, v4
+    /* destination pixel data is in {v4, v5, v6, xx} */
+    mvn         v24.8b, v15.8b /* get inverted alpha */
+    /* now do alpha blending */
+    umull       v8.8h,  v24.8b, v4.8b
+    umull       v9.8h,  v24.8b, v5.8b
+    umull       v10.8h, v24.8b, v6.8b
+.endm
+
+.macro pixman_composite_out_reverse_8_0565_process_pixblock_tail
+    urshr       v11.8h, v8.8h, #8
+    urshr       v12.8h, v9.8h, #8
+    urshr       v13.8h, v10.8h, #8
+    raddhn      v0.8b, v11.8h, v8.8h
+    raddhn      v1.8b, v12.8h, v9.8h
+    raddhn      v2.8b, v13.8h, v10.8h
+    /* 32bpp result is in {v0, v1, v2, XX} */
+    convert_8888_to_0565 v2, v1, v0, v14, v12, v3
+    mov         v28.d[0], v14.d[0]
+    mov         v29.d[0], v14.d[1]
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_out_reverse_8_0565_process_pixblock_tail_head
+    fetch_src_pixblock
+    pixman_composite_out_reverse_8_0565_process_pixblock_tail
+    ld1        {v10.4h, v11.4h}, [DST_R], #16
+    cache_preload 8, 8
+    pixman_composite_out_reverse_8_0565_process_pixblock_head
+    st1        {v14.8h}, [DST_W], #16
+.endm
+
+generate_composite_function \
+    pixman_composite_out_reverse_8_0565_asm_neon, 8, 0, 16, \
+    FLAG_DST_READWRITE, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    default_init_need_all_regs, \
+    default_cleanup_need_all_regs, \
+    pixman_composite_out_reverse_8_0565_process_pixblock_head, \
+    pixman_composite_out_reverse_8_0565_process_pixblock_tail, \
+    pixman_composite_out_reverse_8_0565_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    10,  /* dst_r_basereg */ \
+    15, /* src_basereg   */ \
+    0   /* mask_basereg  */
+
+/******************************************************************************/
+
+.macro pixman_composite_out_reverse_8_8888_process_pixblock_head
+    /* src is in v0 */
+    /* destination pixel data is in {v4, v5, v6, v7} */
+    mvn         v1.8b, v0.8b /* get inverted alpha */
+    /* now do alpha blending */
+    umull       v8.8h, v1.8b, v4.8b
+    umull       v9.8h, v1.8b, v5.8b
+    umull       v10.8h, v1.8b, v6.8b
+    umull       v11.8h, v1.8b, v7.8b
+.endm
+
+.macro pixman_composite_out_reverse_8_8888_process_pixblock_tail
+    urshr       v14.8h, v8.8h, #8
+    urshr       v15.8h, v9.8h, #8
+    urshr       v12.8h, v10.8h, #8
+    urshr       v13.8h, v11.8h, #8
+    raddhn      v28.8b, v14.8h, v8.8h
+    raddhn      v29.8b, v15.8h, v9.8h
+    raddhn      v30.8b, v12.8h, v10.8h
+    raddhn      v31.8b, v13.8h, v11.8h
+    /* 32bpp result is in {v28, v29, v30, v31} */
+.endm
+
+/* TODO: expand macros and do better instruction scheduling */
+.macro pixman_composite_out_reverse_8_8888_process_pixblock_tail_head
+    fetch_src_pixblock
+    pixman_composite_out_reverse_8_8888_process_pixblock_tail
+    ld4       {v4.8b, v5.8b, v6.8b, v7.8b}, [DST_R], #32
+    cache_preload 8, 8
+    pixman_composite_out_reverse_8_8888_process_pixblock_head
+    st4       {v28.8b, v29.8b, v30.8b, v31.8b}, [DST_W], #32
+.endm
+
+generate_composite_function \
+    pixman_composite_out_reverse_8_8888_asm_neon, 8, 0, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    5, /* prefetch distance */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_out_reverse_8_8888_process_pixblock_head, \
+    pixman_composite_out_reverse_8_8888_process_pixblock_tail, \
+    pixman_composite_out_reverse_8_8888_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4, /* dst_r_basereg */ \
+    0, /* src_basereg   */ \
+    0   /* mask_basereg  */
+
+/******************************************************************************/
+
+generate_composite_function_nearest_scanline \
+    pixman_scaled_nearest_scanline_8888_8888_OVER_asm_neon, 32, 0, 32, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_over_8888_8888_process_pixblock_head, \
+    pixman_composite_over_8888_8888_process_pixblock_tail, \
+    pixman_composite_over_8888_8888_process_pixblock_tail_head
+
+generate_composite_function_nearest_scanline \
+    pixman_scaled_nearest_scanline_8888_0565_OVER_asm_neon, 32, 0, 16, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_over_8888_0565_process_pixblock_head, \
+    pixman_composite_over_8888_0565_process_pixblock_tail, \
+    pixman_composite_over_8888_0565_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    0,  /* src_basereg   */ \
+    24  /* mask_basereg  */
+
+generate_composite_function_nearest_scanline \
+    pixman_scaled_nearest_scanline_8888_0565_SRC_asm_neon, 32, 0, 16, \
+    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_8888_0565_process_pixblock_head, \
+    pixman_composite_src_8888_0565_process_pixblock_tail, \
+    pixman_composite_src_8888_0565_process_pixblock_tail_head
+
+generate_composite_function_nearest_scanline \
+    pixman_scaled_nearest_scanline_0565_8888_SRC_asm_neon, 16, 0, 32, \
+    FLAG_DST_WRITEONLY | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    default_init, \
+    default_cleanup, \
+    pixman_composite_src_0565_8888_process_pixblock_head, \
+    pixman_composite_src_0565_8888_process_pixblock_tail, \
+    pixman_composite_src_0565_8888_process_pixblock_tail_head
+
+generate_composite_function_nearest_scanline \
+    pixman_scaled_nearest_scanline_8888_8_0565_OVER_asm_neon, 32, 8, 16, \
+    FLAG_DST_READWRITE | FLAG_DEINTERLEAVE_32BPP, \
+    8, /* number of pixels, processed in a single block */ \
+    default_init_need_all_regs, \
+    default_cleanup_need_all_regs, \
+    pixman_composite_over_8888_8_0565_process_pixblock_head, \
+    pixman_composite_over_8888_8_0565_process_pixblock_tail, \
+    pixman_composite_over_8888_8_0565_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    4,  /* dst_r_basereg */ \
+    8,  /* src_basereg   */ \
+    24  /* mask_basereg  */
+
+generate_composite_function_nearest_scanline \
+    pixman_scaled_nearest_scanline_0565_8_0565_OVER_asm_neon, 16, 8, 16, \
+    FLAG_DST_READWRITE, \
+    8, /* number of pixels, processed in a single block */ \
+    default_init_need_all_regs, \
+    default_cleanup_need_all_regs, \
+    pixman_composite_over_0565_8_0565_process_pixblock_head, \
+    pixman_composite_over_0565_8_0565_process_pixblock_tail, \
+    pixman_composite_over_0565_8_0565_process_pixblock_tail_head, \
+    28, /* dst_w_basereg */ \
+    10,  /* dst_r_basereg */ \
+    8,  /* src_basereg   */ \
+    15  /* mask_basereg  */
+
+/******************************************************************************/
+
+/*
+ * Bilinear scaling support code, which aims to provide pixel fetching,
+ * color format conversion, and interpolation as separate macros that can
+ * be used as the basic building blocks for constructing bilinear scanline
+ * functions.
+ */
+
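+/*
+ * Per channel, one output pixel is computed from the four neighbouring
+ * source pixels (tl, tr, bl, br) in two passes: vertical first (the
+ * umull/umlal pairs with the wt/wb weights kept in v28/v29), then
+ * horizontal (the ushll/umlsl/umlal2 triples with the wx weights kept in
+ * v15). A C sketch, with all weights in BILINEAR_INTERPOLATION_BITS bits:
+ *
+ *     #define BITS BILINEAR_INTERPOLATION_BITS
+ *     uint32_t l = tl * wt + bl * wb;   /* left column,  vertical pass */
+ *     uint32_t r = tr * wt + br * wb;   /* right column, vertical pass */
+ *     out = (l * ((1 << BITS) - wx) + r * wx) >> (2 * BITS);
+ *
+ * X is a 16.16 fixed-point source coordinate advanced by UX per output
+ * pixel; X >> 16 selects the source pixel pair and the top bits of the
+ * fractional part provide wx.
+ */
+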
+.macro bilinear_load_8888 reg1, reg2, tmp
+    asr       TMP1, X, #16
+    add       X, X, UX
+    add       TMP1, TOP, TMP1, lsl #2
+    ld1       {&reg1&.2s}, [TMP1], STRIDE
+    ld1       {&reg2&.2s}, [TMP1]
+.endm
+
+.macro bilinear_load_0565 reg1, reg2, tmp
+    asr       TMP1, X, #16
+    add       X, X, UX
+    add       TMP1, TOP, TMP1, lsl #1
+    ld1       {&reg2&.s}[0], [TMP1], STRIDE
+    ld1       {&reg2&.s}[1], [TMP1]
+    convert_four_0565_to_x888_packed reg2, reg1, reg2, tmp
+.endm
+
+.macro bilinear_load_and_vertical_interpolate_two_8888 \
+                    acc1, acc2, reg1, reg2, reg3, reg4, tmp1, tmp2
+
+    bilinear_load_8888 reg1, reg2, tmp1
+    umull     &acc1&.8h, &reg1&.8b, v28.8b
+    umlal     &acc1&.8h, &reg2&.8b, v29.8b
+    bilinear_load_8888 reg3, reg4, tmp2
+    umull     &acc2&.8h, &reg3&.8b, v28.8b
+    umlal     &acc2&.8h, &reg4&.8b, v29.8b
+.endm
+
+.macro bilinear_load_and_vertical_interpolate_four_8888 \
+                xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
+                yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
+
+    bilinear_load_and_vertical_interpolate_two_8888 \
+                xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi
+    bilinear_load_and_vertical_interpolate_two_8888 \
+                yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
+.endm
+
+.macro vzip reg1, reg2
+    umov      TMP4, v31.d[0]
+    zip1      v31.8b, reg1, reg2
+    zip2      reg2,   reg1, reg2
+    mov       reg1,   v31.8b
+    mov       v31.d[0], TMP4
+.endm
+
+.macro vuzp reg1, reg2
+    umov      TMP4, v31.d[0]
+    uzp1      v31.8b, reg1, reg2
+    uzp2      reg2,   reg1, reg2
+    mov       reg1,   v31.8b
+    mov       v31.d[0], TMP4
+.endm
+
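+/*
+ * The vzip/vuzp helpers above emulate the in-place AArch32 vzip/vuzp
+ * behavior on top of the AArch64 zip1/zip2 and uzp1/uzp2 instructions,
+ * using the low half of v31 as scratch (saved to and restored from
+ * TMP4). For vzip, reg1 = {a0,b0,a1,b1,...} and reg2 = {a4,b4,a5,b5,...}
+ * afterwards.
+ */
+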
+.macro bilinear_load_and_vertical_interpolate_two_0565 \
+                acc1, acc2, reg1, reg2, reg3, reg4, acc2lo, acc2hi
+    asr       TMP1, X, #16
+    add       X, X, UX
+    add       TMP1, TOP, TMP1, lsl #1
+    asr       TMP2, X, #16
+    add       X, X, UX
+    add       TMP2, TOP, TMP2, lsl #1
+    ld1       {&acc2&.s}[0], [TMP1], STRIDE
+    ld1       {&acc2&.s}[2], [TMP2], STRIDE
+    ld1       {&acc2&.s}[1], [TMP1]
+    ld1       {&acc2&.s}[3], [TMP2]
+    convert_0565_to_x888 acc2, reg3, reg2, reg1
+    vzip      &reg1&.8b, &reg3&.8b
+    vzip      &reg2&.8b, &reg4&.8b
+    vzip      &reg3&.8b, &reg4&.8b
+    vzip      &reg1&.8b, &reg2&.8b
+    umull     &acc1&.8h, &reg1&.8b, v28.8b
+    umlal     &acc1&.8h, &reg2&.8b, v29.8b
+    umull     &acc2&.8h, &reg3&.8b, v28.8b
+    umlal     &acc2&.8h, &reg4&.8b, v29.8b
+.endm
+
+.macro bilinear_load_and_vertical_interpolate_four_0565 \
+                xacc1, xacc2, xreg1, xreg2, xreg3, xreg4, xacc2lo, xacc2hi \
+                yacc1, yacc2, yreg1, yreg2, yreg3, yreg4, yacc2lo, yacc2hi
+    asr       TMP1, X, #16
+    add       X, X, UX
+    add       TMP1, TOP, TMP1, lsl #1
+    asr       TMP2, X, #16
+    add       X, X, UX
+    add       TMP2, TOP, TMP2, lsl #1
+    ld1       {&xacc2&.s}[0], [TMP1], STRIDE
+    ld1       {&xacc2&.s}[2], [TMP2], STRIDE
+    ld1       {&xacc2&.s}[1], [TMP1]
+    ld1       {&xacc2&.s}[3], [TMP2]
+    convert_0565_to_x888 xacc2, xreg3, xreg2, xreg1
+    asr       TMP1, X, #16
+    add       X, X, UX
+    add       TMP1, TOP, TMP1, lsl #1
+    asr       TMP2, X, #16
+    add       X, X, UX
+    add       TMP2, TOP, TMP2, lsl #1
+    ld1       {&yacc2&.s}[0], [TMP1], STRIDE
+    vzip      &xreg1&.8b, &xreg3&.8b
+    ld1       {&yacc2&.s}[2], [TMP2], STRIDE
+    vzip      &xreg2&.8b, &xreg4&.8b
+    ld1       {&yacc2&.s}[1], [TMP1]
+    vzip      &xreg3&.8b, &xreg4&.8b
+    ld1       {&yacc2&.s}[3], [TMP2]
+    vzip      &xreg1&.8b, &xreg2&.8b
+    convert_0565_to_x888 yacc2, yreg3, yreg2, yreg1
+    umull     &xacc1&.8h, &xreg1&.8b, v28.8b
+    vzip      &yreg1&.8b, &yreg3&.8b
+    umlal     &xacc1&.8h, &xreg2&.8b, v29.8b
+    vzip      &yreg2&.8b, &yreg4&.8b
+    umull     &xacc2&.8h, &xreg3&.8b, v28.8b
+    vzip      &yreg3&.8b, &yreg4&.8b
+    umlal     &xacc2&.8h, &xreg4&.8b, v29.8b
+    vzip      &yreg1&.8b, &yreg2&.8b
+    umull     &yacc1&.8h, &yreg1&.8b, v28.8b
+    umlal     &yacc1&.8h, &yreg2&.8b, v29.8b
+    umull     &yacc2&.8h, &yreg3&.8b, v28.8b
+    umlal     &yacc2&.8h, &yreg4&.8b, v29.8b
+.endm
+
+.macro bilinear_store_8888 numpix, tmp1, tmp2
+.if numpix == 4
+    st1       {v0.2s, v1.2s}, [OUT], #16
+.elseif numpix == 2
+    st1       {v0.2s}, [OUT], #8
+.elseif numpix == 1
+    st1       {v0.s}[0], [OUT], #4
+.else
+    .error "bilinear_store_8888 numpix is unsupported"
+.endif
+.endm
+
+.macro bilinear_store_0565 numpix, tmp1, tmp2
+    vuzp      v0.8b, v1.8b
+    vuzp      v2.8b, v3.8b
+    vuzp      v1.8b, v3.8b
+    vuzp      v0.8b, v2.8b
+    convert_8888_to_0565 v2, v1, v0, v1, tmp1, tmp2
+.if numpix == 4
+    st1       {v1.4h}, [OUT], #8
+.elseif numpix == 2
+    st1       {v1.s}[0], [OUT], #4
+.elseif numpix == 1
+    st1       {v1.h}[0], [OUT], #2
+.else
+    .error "bilinear_store_0565 numpix is unsupported"
+.endif
+.endm
+
+.macro bilinear_interpolate_last_pixel src_fmt, dst_fmt
+    bilinear_load_&src_fmt v0, v1, v2
+    umull     v2.8h, v0.8b, v28.8b
+    umlal     v2.8h, v1.8b, v29.8b
+    /* 5 cycles bubble */
+    ushll     v0.4s, v2.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v0.4s, v2.4h, v15.h[0]
+    umlal2    v0.4s, v2.8h, v15.h[0]
+    /* 5 cycles bubble */
+    shrn      v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    /* 3 cycles bubble */
+    xtn       v0.8b, v0.8h
+    /* 1 cycle bubble */
+    bilinear_store_&dst_fmt 1, v3, v4
+.endm
+
+.macro bilinear_interpolate_two_pixels src_fmt, dst_fmt
+    bilinear_load_and_vertical_interpolate_two_&src_fmt \
+                v1, v11, v2, v3, v20, v21, v22, v23
+    ushll     v0.4s, v1.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v0.4s, v1.4h, v15.h[0]
+    umlal2    v0.4s, v1.8h, v15.h[0]
+    ushll     v10.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v10.4s, v11.4h, v15.h[4]
+    umlal2    v10.4s, v11.8h, v15.h[4]
+    shrn      v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    shrn2     v0.8h, v10.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    ushr      v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+    add       v12.8h, v12.8h, v13.8h
+    xtn       v0.8b, v0.8h
+    bilinear_store_&dst_fmt 2, v3, v4
+.endm
+
+.macro bilinear_interpolate_four_pixels src_fmt, dst_fmt
+    bilinear_load_and_vertical_interpolate_four_&src_fmt \
+                v1, v11, v14, v20, v16, v17, v22, v23 \
+                v3, v9,  v24, v25, v26, v27, v18, v19
+    prfm      PREFETCH_MODE, [TMP1, PF_OFFS]
+    sub       TMP1, TMP1, STRIDE
+    ushll     v0.4s, v1.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v0.4s, v1.4h, v15.h[0]
+    umlal2    v0.4s, v1.8h, v15.h[0]
+    ushll     v10.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v10.4s, v11.4h, v15.h[4]
+    umlal2    v10.4s, v11.8h, v15.h[4]
+    ushr      v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+    ushll     v2.4s, v3.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v2.4s, v3.4h, v15.h[0]
+    umlal2    v2.4s, v3.8h, v15.h[0]
+    ushll     v8.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS
+    prfm      PREFETCH_MODE, [TMP2, PF_OFFS]
+    umlsl     v8.4s, v9.4h, v15.h[4]
+    umlal2    v8.4s, v9.8h, v15.h[4]
+    add       v12.8h, v12.8h, v13.8h
+    shrn      v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    shrn2     v0.8h, v10.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    shrn      v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    shrn2     v2.8h, v8.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    ushr      v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+    xtn       v0.8b, v0.8h
+    xtn       v1.8b, v2.8h
+    add       v12.8h, v12.8h, v13.8h
+    bilinear_store_&dst_fmt 4, v3, v4
+.endm
+
+.macro bilinear_interpolate_four_pixels_head src_fmt, dst_fmt
+.ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt
+    bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_head
+.else
+    bilinear_interpolate_four_pixels src_fmt, dst_fmt
+.endif
+.endm
+
+.macro bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt
+.ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt
+    bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_tail
+.endif
+.endm
+
+.macro bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
+.ifdef have_bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt
+    bilinear_interpolate_four_pixels_&src_fmt&_&dst_fmt&_tail_head
+.else
+    bilinear_interpolate_four_pixels src_fmt, dst_fmt
+.endif
+.endm
+
+.macro bilinear_interpolate_eight_pixels_head src_fmt, dst_fmt
+.ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt
+    bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_head
+.else
+    bilinear_interpolate_four_pixels_head src_fmt, dst_fmt
+    bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
+.endif
+.endm
+
+.macro bilinear_interpolate_eight_pixels_tail src_fmt, dst_fmt
+.ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt
+    bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_tail
+.else
+    bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt
+.endif
+.endm
+
+.macro bilinear_interpolate_eight_pixels_tail_head src_fmt, dst_fmt
+.ifdef have_bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt
+    bilinear_interpolate_eight_pixels_&src_fmt&_&dst_fmt&_tail_head
+.else
+    bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
+    bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
+.endif
+.endm
+
+.set BILINEAR_FLAG_UNROLL_4,          0
+.set BILINEAR_FLAG_UNROLL_8,          1
+.set BILINEAR_FLAG_USE_ALL_NEON_REGS, 2
+
+/*
+ * Main template macro for generating NEON optimized bilinear scanline
+ * functions.
+ *
+ * Bilinear scanline scaler macro template uses the following arguments:
+ *  fname             - name of the function to generate
+ *  src_fmt           - source color format (8888 or 0565)
+ *  dst_fmt           - destination color format (8888 or 0565)
+ *  src_bpp_shift     - (1 << src_bpp_shift) is the size of a source
+ *                      pixel in bytes
+ *  dst_bpp_shift     - (1 << dst_bpp_shift) is the size of a destination
+ *                      pixel in bytes
+ *  prefetch_distance - prefetch in the source image by that many
+ *                      pixels ahead
+ *  flags             - BILINEAR_FLAG_* bit flags selecting loop
+ *                      unrolling and NEON register usage
+ */
+
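+/*
+ * The generated function is essentially a driver around the
+ * interpolation macros; in C-like pseudocode (a sketch only):
+ *
+ *     while (dst is not block aligned && width > 0)
+ *         interpolate 1 or 2 pixels;        /* alignment head         */
+ *     while (width >= block size)           /* 4 pixels, or 8 with    */
+ *         interpolate one full block;       /* BILINEAR_FLAG_UNROLL_8 */
+ *     interpolate the remaining 2- and 1-pixel tails;
+ */
+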
+.macro generate_bilinear_scanline_func fname, src_fmt, dst_fmt, \
+                                       src_bpp_shift, dst_bpp_shift, \
+                                       prefetch_distance, flags
+
+pixman_asm_function fname
+    OUT       .req      x0
+    TOP       .req      x1
+    BOTTOM    .req      x2
+    WT        .req      x3
+    WB        .req      x4
+    X         .req      x5
+    UX        .req      x6
+    WIDTH     .req      x7
+    TMP1      .req      x8
+    TMP2      .req      x9
+    PF_OFFS   .req      x10
+    TMP3      .req      x11
+    TMP4      .req      x12
+    STRIDE    .req      x13
+
+    sxtw      x3, w3
+    sxtw      x4, w4
+    sxtw      x5, w5
+    sxtw      x6, w6
+    sxtw      x7, w7
+
+    stp       x29, x30, [sp, -16]!
+    mov       x29, sp
+    sub       sp,  sp, 112  /* make room for the saved registers */
+    sub       x29, x29, 64
+    st1       {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], #32
+    st1       {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], #32
+    stp        x8,  x9, [x29, -80]
+    stp       x10, x11, [x29, -96]
+    stp       x12, x13, [x29, -112]
+
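+    /*
+     * Frame layout below the saved x29/x30 pair (sp ends up at x29 - 112):
+     *   [x29 - 112, x29 -  96)  x12, x13
+     *   [x29 -  96, x29 -  80)  x10, x11
+     *   [x29 -  80, x29 -  64)  x8,  x9
+     *   [x29 -  64, x29)        low halves of v8-v15
+     * The epilogue at label 300 restores from the same offsets.
+     */
+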
+    mov       PF_OFFS, #prefetch_distance
+    mul       PF_OFFS, PF_OFFS, UX
+
+    subs      STRIDE, BOTTOM, TOP
+    .unreq    BOTTOM
+
+    cmp       WIDTH, #0
+    ble       300f
+
+    dup       v12.8h, w5
+    dup       v13.8h, w6
+    dup       v28.8b, w3
+    dup       v29.8b, w4
+    mov       v25.d[0], v12.d[1]
+    mov       v26.d[0], v13.d[0]
+    add       v25.4h, v25.4h, v26.4h
+    mov       v12.d[1], v25.d[0]
+
+    /* ensure good destination alignment  */
+    cmp       WIDTH, #1
+    blt       100f
+    tst       OUT, #(1 << dst_bpp_shift)
+    beq       100f
+    ushr      v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+    add       v12.8h, v12.8h, v13.8h
+    bilinear_interpolate_last_pixel src_fmt, dst_fmt
+    sub       WIDTH, WIDTH, #1
+100:
+    add       v13.8h, v13.8h, v13.8h
+    ushr      v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+    add       v12.8h, v12.8h, v13.8h
+
+    cmp       WIDTH, #2
+    blt       100f
+    tst       OUT, #(1 << (dst_bpp_shift + 1))
+    beq       100f
+    bilinear_interpolate_two_pixels src_fmt, dst_fmt
+    sub       WIDTH, WIDTH, #2
+100:
+.if ((flags) & BILINEAR_FLAG_UNROLL_8) != 0
+/*********** 8 pixels per iteration *****************/
+    cmp       WIDTH, #4
+    blt       100f
+    tst       OUT, #(1 << (dst_bpp_shift + 2))
+    beq       100f
+    bilinear_interpolate_four_pixels src_fmt, dst_fmt
+    sub       WIDTH, WIDTH, #4
+100:
+    subs      WIDTH, WIDTH, #8
+    blt       100f
+    asr       PF_OFFS, PF_OFFS, #(16 - src_bpp_shift)
+    bilinear_interpolate_eight_pixels_head src_fmt, dst_fmt
+    subs      WIDTH, WIDTH, #8
+    blt       500f
+1000:
+    bilinear_interpolate_eight_pixels_tail_head src_fmt, dst_fmt
+    subs      WIDTH, WIDTH, #8
+    bge       1000b
+500:
+    bilinear_interpolate_eight_pixels_tail src_fmt, dst_fmt
+100:
+    tst       WIDTH, #4
+    beq       200f
+    bilinear_interpolate_four_pixels src_fmt, dst_fmt
+200:
+.else
+/*********** 4 pixels per iteration *****************/
+    subs      WIDTH, WIDTH, #4
+    blt       100f
+    asr       PF_OFFS, PF_OFFS, #(16 - src_bpp_shift)
+    bilinear_interpolate_four_pixels_head src_fmt, dst_fmt
+    subs      WIDTH, WIDTH, #4
+    blt       500f
+1000:
+    bilinear_interpolate_four_pixels_tail_head src_fmt, dst_fmt
+    subs      WIDTH, WIDTH, #4
+    bge       1000b
+500:
+    bilinear_interpolate_four_pixels_tail src_fmt, dst_fmt
+100:
+/****************************************************/
+.endif
+    /* handle the remaining trailing pixels */
+    tst       WIDTH, #2
+    beq       200f
+    bilinear_interpolate_two_pixels src_fmt, dst_fmt
+200:
+    tst       WIDTH, #1
+    beq       300f
+    bilinear_interpolate_last_pixel src_fmt, dst_fmt
+300:
+    sub       x29, x29, 64
+    ld1       {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], #32
+    ld1       {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], #32
+    ldp        x8,  x9, [x29, -80]
+    ldp       x10, x11, [x29, -96]
+    ldp       x12, x13, [x29, -112]
+    mov       sp, x29
+    ldp       x29, x30, [sp], 16
+    ret
+
+    .unreq    OUT
+    .unreq    TOP
+    .unreq    WT
+    .unreq    WB
+    .unreq    X
+    .unreq    UX
+    .unreq    WIDTH
+    .unreq    TMP1
+    .unreq    TMP2
+    .unreq    PF_OFFS
+    .unreq    TMP3
+    .unreq    TMP4
+    .unreq    STRIDE
+.endfunc
+
+.endm
+
+/*****************************************************************************/
+
+.set have_bilinear_interpolate_four_pixels_8888_8888, 1
+
+.macro bilinear_interpolate_four_pixels_8888_8888_head
+    asr       TMP1, X, #16
+    add       X, X, UX
+    add       TMP1, TOP, TMP1, lsl #2
+    asr       TMP2, X, #16
+    add       X, X, UX
+    add       TMP2, TOP, TMP2, lsl #2
+
+    ld1       {v22.2s}, [TMP1], STRIDE
+    ld1       {v23.2s}, [TMP1]
+    asr       TMP3, X, #16
+    add       X, X, UX
+    add       TMP3, TOP, TMP3, lsl #2
+    umull     v8.8h, v22.8b, v28.8b
+    umlal     v8.8h, v23.8b, v29.8b
+
+    ld1       {v22.2s}, [TMP2], STRIDE
+    ld1       {v23.2s}, [TMP2]
+    asr       TMP4, X, #16
+    add       X, X, UX
+    add       TMP4, TOP, TMP4, lsl #2
+    umull     v9.8h, v22.8b, v28.8b
+    umlal     v9.8h, v23.8b, v29.8b
+
+    ld1       {v22.2s}, [TMP3], STRIDE
+    ld1       {v23.2s}, [TMP3]
+    umull     v10.8h, v22.8b, v28.8b
+    umlal     v10.8h, v23.8b, v29.8b
+
+    ushll     v0.4s, v8.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v0.4s, v8.4h, v15.h[0]
+    umlal2    v0.4s, v8.8h, v15.h[0]
+
+    prfm      PREFETCH_MODE, [TMP4, PF_OFFS]
+    ld1       {v16.2s}, [TMP4], STRIDE
+    ld1       {v17.2s}, [TMP4]
+    prfm      PREFETCH_MODE, [TMP4, PF_OFFS]
+    umull     v11.8h, v16.8b, v28.8b
+    umlal     v11.8h, v17.8b, v29.8b
+
+    ushll     v1.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v1.4s, v9.4h, v15.h[4]
+.endm
+
+.macro bilinear_interpolate_four_pixels_8888_8888_tail
+    umlal2    v1.4s, v9.8h, v15.h[4]
+    ushr      v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+    ushll     v2.4s, v10.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v2.4s, v10.4h, v15.h[0]
+    umlal2    v2.4s, v10.8h, v15.h[0]
+    ushll     v3.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v3.4s, v11.4h, v15.h[4]
+    umlal2    v3.4s, v11.8h, v15.h[4]
+    add       v12.8h, v12.8h, v13.8h
+    shrn      v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    shrn2     v0.8h, v1.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    shrn      v2.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    ushr      v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+    shrn2     v2.8h, v3.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    xtn       v6.8b, v0.8h
+    xtn       v7.8b, v2.8h
+    add       v12.8h, v12.8h, v13.8h
+    st1       {v6.2s, v7.2s}, [OUT], #16
+.endm
+
+.macro bilinear_interpolate_four_pixels_8888_8888_tail_head
+    asr       TMP1, X, #16
+    add       X, X, UX
+    add       TMP1, TOP, TMP1, lsl #2
+    asr       TMP2, X, #16
+    add       X, X, UX
+    add       TMP2, TOP, TMP2, lsl #2
+        umlal2    v1.4s, v9.8h, v15.h[4]
+        ushr      v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+        ushll     v2.4s, v10.4h, #BILINEAR_INTERPOLATION_BITS
+        umlsl     v2.4s, v10.4h, v15.h[0]
+        umlal2    v2.4s, v10.8h, v15.h[0]
+        ushll     v3.4s, v11.4h, #BILINEAR_INTERPOLATION_BITS
+    ld1       {v20.2s}, [TMP1], STRIDE
+        umlsl     v3.4s, v11.4h, v15.h[4]
+        umlal2    v3.4s, v11.8h, v15.h[4]
+    ld1       {v21.2s}, [TMP1]
+    umull     v8.8h, v20.8b, v28.8b
+    umlal     v8.8h, v21.8b, v29.8b
+        shrn      v0.4h, v0.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+        shrn2     v0.8h, v1.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+        shrn      v4.4h, v2.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+    ld1       {v22.2s}, [TMP2], STRIDE
+        shrn2     v4.8h, v3.4s, #(2 * BILINEAR_INTERPOLATION_BITS)
+        add       v12.8h, v12.8h, v13.8h
+    ld1       {v23.2s}, [TMP2]
+    umull     v9.8h, v22.8b, v28.8b
+    asr       TMP3, X, #16
+    add       X, X, UX
+    add       TMP3, TOP, TMP3, lsl #2
+    asr       TMP4, X, #16
+    add       X, X, UX
+    add       TMP4, TOP, TMP4, lsl #2
+    umlal     v9.8h, v23.8b, v29.8b
+    ld1       {v22.2s}, [TMP3], STRIDE
+        ushr      v15.8h, v12.8h, #(16 - BILINEAR_INTERPOLATION_BITS)
+    ld1       {v23.2s}, [TMP3]
+    umull     v10.8h, v22.8b, v28.8b
+    umlal     v10.8h, v23.8b, v29.8b
+        xtn       v6.8b, v0.8h
+    ushll     v0.4s, v8.4h, #BILINEAR_INTERPOLATION_BITS
+        xtn       v7.8b, v4.8h
+    umlsl     v0.4s, v8.4h, v15.h[0]
+    umlal2    v0.4s, v8.8h, v15.h[0]
+    prfm      PREFETCH_MODE, [TMP4, PF_OFFS]
+    ld1       {v16.2s}, [TMP4], STRIDE
+        add       v12.8h, v12.8h, v13.8h
+    ld1       {v17.2s}, [TMP4]
+    prfm      PREFETCH_MODE, [TMP4, PF_OFFS]
+    umull     v11.8h, v16.8b, v28.8b
+    umlal     v11.8h, v17.8b, v29.8b
+        st1       {v6.2s, v7.2s}, [OUT], #16
+    ushll     v1.4s, v9.4h, #BILINEAR_INTERPOLATION_BITS
+    umlsl     v1.4s, v9.4h, v15.h[4]
+.endm
+
+/*****************************************************************************/
+
+generate_bilinear_scanline_func \
+    pixman_scaled_bilinear_scanline_8888_8888_SRC_asm_neon, 8888, 8888, \
+    2, 2, 28, BILINEAR_FLAG_UNROLL_4
+
+generate_bilinear_scanline_func \
+    pixman_scaled_bilinear_scanline_8888_0565_SRC_asm_neon, 8888, 0565, \
+    2, 1, 28, BILINEAR_FLAG_UNROLL_8 | BILINEAR_FLAG_USE_ALL_NEON_REGS
+
+generate_bilinear_scanline_func \
+    pixman_scaled_bilinear_scanline_0565_x888_SRC_asm_neon, 0565, 8888, \
+    1, 2, 28, BILINEAR_FLAG_UNROLL_4
+
+generate_bilinear_scanline_func \
+    pixman_scaled_bilinear_scanline_0565_0565_SRC_asm_neon, 0565, 0565, \
+    1, 1, 28, BILINEAR_FLAG_UNROLL_4

Added: trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arma64-neon-asm.h
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arma64-neon-asm.h	                        (rev 0)
+++ trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-arma64-neon-asm.h	2022-10-18 21:13:29 UTC (rev 64747)
@@ -0,0 +1,1310 @@
+/*
+ * Copyright © 2009 Nokia Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author:  Siarhei Siamashka (siarhei.siamashka at nokia.com)
+ */
+
+/*
+ * This file contains a macro ('generate_composite_function') which can
+ * construct 2D image processing functions, based on a common template.
+ * Any combination of source, destination and mask images with 8bpp,
+ * 16bpp, 24bpp or 32bpp color formats is supported.
+ *
+ * This macro takes care of:
+ *  - handling of leading and trailing unaligned pixels
+ *  - doing most of the work related to L2 cache preload
+ *  - encouraging the use of software pipelining for better instruction
+ *    scheduling
+ *
+ * The user of this macro has to provide some configuration parameters
+ * (bit depths for the images, prefetch distance, etc.) and a set of
+ * macros, which should implement the basic code chunks responsible for
+ * pixel processing. See the 'pixman-arma64-neon-asm.S' file for usage
+ * examples.
+ *
+ * TODO:
+ *  - try overlapped pixel method (from Ian Rickards) when processing
+ *    exactly two blocks of pixels
+ *  - maybe add an option to do reverse scanline processing
+ */
+
+/*
+ * Bit flags for the 'generate_composite_function' macro, used to tune
+ * the behavior of the generated functions.
+ */
+.set FLAG_DST_WRITEONLY,       0
+.set FLAG_DST_READWRITE,       1
+.set FLAG_DEINTERLEAVE_32BPP,  2
+
+/*
+ * Constants for selecting the preferred prefetch type.
+ */
+.set PREFETCH_TYPE_NONE,       0 /* No prefetch at all */
+.set PREFETCH_TYPE_SIMPLE,     1 /* A simple, fixed-distance-ahead prefetch */
+.set PREFETCH_TYPE_ADVANCED,   2 /* Advanced fine-grained prefetch */
+
+/*
+ * Prefetch mode. Available modes are:
+ * pldl1keep
+ * pldl1strm
+ * pldl2keep
+ * pldl2strm
+ * pldl3keep
+ * pldl3strm
+ */
+#define PREFETCH_MODE pldl1keep
+
+/*
+ * Definitions of supplementary pixld/pixst macros (for partial load/store of
+ * pixel data).
+ */
+
+.macro pixldst1 op, elem_size, reg1, mem_operand, abits
+    op {v&reg1&.&elem_size}, [&mem_operand&], #8
+.endm
+
+.macro pixldst2 op, elem_size, reg1, reg2, mem_operand, abits
+    op {v&reg1&.&elem_size, v&reg2&.&elem_size}, [&mem_operand&], #16
+.endm
+
+.macro pixldst4 op, elem_size, reg1, reg2, reg3, reg4, mem_operand, abits
+    op {v&reg1&.&elem_size, v&reg2&.&elem_size, v&reg3&.&elem_size, v&reg4&.&elem_size}, [&mem_operand&], #32
+.endm
+
+.macro pixldst0 op, elem_size, reg1, idx, mem_operand, abits, bytes
+    op {v&reg1&.&elem_size}[idx], [&mem_operand&], #&bytes&
+.endm
+
+.macro pixldst3 op, elem_size, reg1, reg2, reg3, mem_operand
+    op {v&reg1&.&elem_size, v&reg2&.&elem_size, v&reg3&.&elem_size}, [&mem_operand&], #24
+.endm
+
+.macro pixldst30 op, elem_size, reg1, reg2, reg3, idx, mem_operand
+    op {v&reg1&.&elem_size, v&reg2&.&elem_size, v&reg3&.&elem_size}[idx], [&mem_operand&], #3
+.endm
+
+.macro pixldst numbytes, op, elem_size, basereg, mem_operand, abits
+.if numbytes == 32
+    .if elem_size==32
+        pixldst4 op, 2s, %(basereg+4), %(basereg+5), \
+                              %(basereg+6), %(basereg+7), mem_operand, abits
+    .elseif elem_size==16
+        pixldst4 op, 4h, %(basereg+4), %(basereg+5), \
+                              %(basereg+6), %(basereg+7), mem_operand, abits
+    .else
+        pixldst4 op, 8b, %(basereg+4), %(basereg+5), \
+                              %(basereg+6), %(basereg+7), mem_operand, abits
+    .endif
+.elseif numbytes == 16
+    .if elem_size==32
+          pixldst2 op, 2s, %(basereg+2), %(basereg+3), mem_operand, abits
+    .elseif elem_size==16
+          pixldst2 op, 4h, %(basereg+2), %(basereg+3), mem_operand, abits
+    .else
+          pixldst2 op, 8b, %(basereg+2), %(basereg+3), mem_operand, abits
+    .endif
+.elseif numbytes == 8
+    .if elem_size==32
+        pixldst1 op, 2s, %(basereg+1), mem_operand, abits
+    .elseif elem_size==16
+        pixldst1 op, 4h, %(basereg+1), mem_operand, abits
+    .else
+        pixldst1 op, 8b, %(basereg+1), mem_operand, abits
+    .endif
+.elseif numbytes == 4
+    .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 32)
+        pixldst0 op, s, %(basereg+0), 1, mem_operand, abits, 4
+    .elseif elem_size == 16
+        pixldst0 op, h, %(basereg+0), 2, mem_operand, abits, 2
+        pixldst0 op, h, %(basereg+0), 3, mem_operand, abits, 2
+    .else
+        pixldst0 op, b, %(basereg+0), 4, mem_operand, abits, 1
+        pixldst0 op, b, %(basereg+0), 5, mem_operand, abits, 1
+        pixldst0 op, b, %(basereg+0), 6, mem_operand, abits, 1
+        pixldst0 op, b, %(basereg+0), 7, mem_operand, abits, 1
+    .endif
+.elseif numbytes == 2
+    .if !RESPECT_STRICT_ALIGNMENT || (elem_size == 16)
+        pixldst0 op, h, %(basereg+0), 1, mem_operand, abits, 2
+    .else
+        pixldst0 op, b, %(basereg+0), 2, mem_operand, abits, 1
+        pixldst0 op, b, %(basereg+0), 3, mem_operand, abits, 1
+    .endif
+.elseif numbytes == 1
+        pixldst0 op, b, %(basereg+0), 1, mem_operand, abits, 1
+.else
+    .error "unsupported size: numbytes"
+.endif
+.endm
+
+.macro pixld numpix, bpp, basereg, mem_operand, abits=0
+.if bpp > 0
+.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+    pixldst4 ld4, 8b, %(basereg+4), %(basereg+5), \
+                      %(basereg+6), %(basereg+7), mem_operand, abits
+.elseif (bpp == 24) && (numpix == 8)
+    pixldst3 ld3, 8b, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand
+.elseif (bpp == 24) && (numpix == 4)
+    pixldst30 ld3, b, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand
+    pixldst30 ld3, b, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand
+    pixldst30 ld3, b, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand
+    pixldst30 ld3, b, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand
+.elseif (bpp == 24) && (numpix == 2)
+    pixldst30 ld3, b, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand
+    pixldst30 ld3, b, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand
+.elseif (bpp == 24) && (numpix == 1)
+    pixldst30 ld3, b, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand
+.else
+    pixldst %(numpix * bpp / 8), ld1, %(bpp), basereg, mem_operand, abits
+.endif
+.endif
+.endm
+
+.macro pixst numpix, bpp, basereg, mem_operand, abits=0
+.if bpp > 0
+.if (bpp == 32) && (numpix == 8) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+    pixldst4 st4, 8b, %(basereg+4), %(basereg+5), \
+                      %(basereg+6), %(basereg+7), mem_operand, abits
+.elseif (bpp == 24) && (numpix == 8)
+    pixldst3 st3, 8b, %(basereg+3), %(basereg+4), %(basereg+5), mem_operand
+.elseif (bpp == 24) && (numpix == 4)
+    pixldst30 st3, b, %(basereg+0), %(basereg+1), %(basereg+2), 4, mem_operand
+    pixldst30 st3, b, %(basereg+0), %(basereg+1), %(basereg+2), 5, mem_operand
+    pixldst30 st3, b, %(basereg+0), %(basereg+1), %(basereg+2), 6, mem_operand
+    pixldst30 st3, b, %(basereg+0), %(basereg+1), %(basereg+2), 7, mem_operand
+.elseif (bpp == 24) && (numpix == 2)
+    pixldst30 st3, b, %(basereg+0), %(basereg+1), %(basereg+2), 2, mem_operand
+    pixldst30 st3, b, %(basereg+0), %(basereg+1), %(basereg+2), 3, mem_operand
+.elseif (bpp == 24) && (numpix == 1)
+    pixldst30 st3, b, %(basereg+0), %(basereg+1), %(basereg+2), 1, mem_operand
+.elseif numpix * bpp == 32 && abits == 32
+    pixldst 4, st1, 32, basereg, mem_operand, abits
+.elseif numpix * bpp == 16 && abits == 16
+    pixldst 2, st1, 16, basereg, mem_operand, abits
+.else
+    pixldst %(numpix * bpp / 8), st1, %(bpp), basereg, mem_operand, abits
+.endif
+.endif
+.endm
+
+.macro pixld_a numpix, bpp, basereg, mem_operand
+.if (bpp * numpix) <= 128
+    pixld numpix, bpp, basereg, mem_operand, %(bpp * numpix)
+.else
+    pixld numpix, bpp, basereg, mem_operand, 128
+.endif
+.endm
+
+.macro pixst_a numpix, bpp, basereg, mem_operand
+.if (bpp * numpix) <= 128
+    pixst numpix, bpp, basereg, mem_operand, %(bpp * numpix)
+.else
+    pixst numpix, bpp, basereg, mem_operand, 128
+.endif
+.endm
+
+/*
+ * Pixel fetcher for nearest scaling (needs TMP1, TMP2, VX, UNIT_X register
+ * aliases to be defined)
+ */
+.macro pixld1_s elem_size, reg1, mem_operand
+.if elem_size == 16
+    asr     TMP1, VX, #16
+    adds    VX, VX, UNIT_X
+    bmi     55f
+5:  subs    VX, VX, SRC_WIDTH_FIXED
+    bpl     5b
+55:
+    add     TMP1, mem_operand, TMP1, lsl #1
+    asr     TMP2, VX, #16
+    adds    VX, VX, UNIT_X
+    bmi     55f
+5:  subs    VX, VX, SRC_WIDTH_FIXED
+    bpl     5b
+55:
+    add     TMP2, mem_operand, TMP2, lsl #1
+    ld1     {v&reg1&.h}[0], [TMP1]
+    asr     TMP1, VX, #16
+    adds    VX, VX, UNIT_X
+    bmi     55f
+5:  subs    VX, VX, SRC_WIDTH_FIXED
+    bpl     5b
+55:
+    add     TMP1, mem_operand, TMP1, lsl #1
+    ld1     {v&reg1&.h}[1], [TMP2]
+    asr     TMP2, VX, #16
+    adds    VX, VX, UNIT_X
+    bmi     55f
+5:  subs    VX, VX, SRC_WIDTH_FIXED
+    bpl     5b
+55:
+    add     TMP2, mem_operand, TMP2, lsl #1
+    ld1     {v&reg1&.h}[2], [TMP1]
+    ld1     {v&reg1&.h}[3], [TMP2]
+.elseif elem_size == 32
+    asr     TMP1, VX, #16
+    adds    VX, VX, UNIT_X
+    bmi     55f
+5:  subs    VX, VX, SRC_WIDTH_FIXED
+    bpl     5b
+55:
+    add     TMP1, mem_operand, TMP1, lsl #2
+    asr     TMP2, VX, #16
+    adds    VX, VX, UNIT_X
+    bmi     55f
+5:  subs    VX, VX, SRC_WIDTH_FIXED
+    bpl     5b
+55:
+    add     TMP2, mem_operand, TMP2, lsl #2
+    ld1     {v&reg1&.s}[0], [TMP1]
+    ld1     {v&reg1&.s}[1], [TMP2]
+.else
+    .error "unsupported"
+.endif
+.endm
+
+.macro pixld2_s elem_size, reg1, reg2, mem_operand
+.if 0 /* elem_size == 32 */
+    mov     TMP1, VX, asr #16
+    add     VX, VX, UNIT_X, asl #1
+    add     TMP1, mem_operand, TMP1, asl #2
+    mov     TMP2, VX, asr #16
+    sub     VX, VX, UNIT_X
+    add     TMP2, mem_operand, TMP2, asl #2
+    ld1     {v&reg1&.s}[0], [TMP1]
+    mov     TMP1, VX, asr #16
+    add     VX, VX, UNIT_X, asl #1
+    add     TMP1, mem_operand, TMP1, asl #2
+    ld1     {v&reg2&.s}[0], [TMP2, :32]
+    mov     TMP2, VX, asr #16
+    add     VX, VX, UNIT_X
+    add     TMP2, mem_operand, TMP2, asl #2
+    ld1     {v&reg1&.s}[1], [TMP1]
+    ld1     {v&reg2&.s}[1], [TMP2]
+.else
+    pixld1_s elem_size, reg1, mem_operand
+    pixld1_s elem_size, reg2, mem_operand
+.endif
+.endm
+
+.macro pixld0_s elem_size, reg1, idx, mem_operand
+.if elem_size == 16
+    asr     TMP1, VX, #16
+    adds    VX, VX, UNIT_X
+    bmi     55f
+5:  subs    VX, VX, SRC_WIDTH_FIXED
+    bpl     5b
+55:
+    add     TMP1, mem_operand, TMP1, lsl #1
+    ld1     {v&reg1&.h}[idx], [TMP1]
+.elseif elem_size == 32
+    asr     DUMMY, VX, #16
+    mov     TMP1, DUMMY
+    adds    VX, VX, UNIT_X
+    bmi     55f
+5:  subs    VX, VX, SRC_WIDTH_FIXED
+    bpl     5b
+55:
+    add     TMP1, mem_operand, TMP1, lsl #2
+    ld1     {v&reg1&.s}[idx], [TMP1]
+.endif
+.endm
+
+.macro pixld_s_internal numbytes, elem_size, basereg, mem_operand
+.if numbytes == 32
+    pixld2_s elem_size, %(basereg+4), %(basereg+5), mem_operand
+    pixld2_s elem_size, %(basereg+6), %(basereg+7), mem_operand
+    pixdeinterleave elem_size, %(basereg+4)
+.elseif numbytes == 16
+    pixld2_s elem_size, %(basereg+2), %(basereg+3), mem_operand
+.elseif numbytes == 8
+    pixld1_s elem_size, %(basereg+1), mem_operand
+.elseif numbytes == 4
+    .if elem_size == 32
+        pixld0_s elem_size, %(basereg+0), 1, mem_operand
+    .elseif elem_size == 16
+        pixld0_s elem_size, %(basereg+0), 2, mem_operand
+        pixld0_s elem_size, %(basereg+0), 3, mem_operand
+    .else
+        pixld0_s elem_size, %(basereg+0), 4, mem_operand
+        pixld0_s elem_size, %(basereg+0), 5, mem_operand
+        pixld0_s elem_size, %(basereg+0), 6, mem_operand
+        pixld0_s elem_size, %(basereg+0), 7, mem_operand
+    .endif
+.elseif numbytes == 2
+    .if elem_size == 16
+        pixld0_s elem_size, %(basereg+0), 1, mem_operand
+    .else
+        pixld0_s elem_size, %(basereg+0), 2, mem_operand
+        pixld0_s elem_size, %(basereg+0), 3, mem_operand
+    .endif
+.elseif numbytes == 1
+    pixld0_s elem_size, %(basereg+0), 1, mem_operand
+.else
+    .error "unsupported size: numbytes"
+.endif
+.endm
+
+.macro pixld_s numpix, bpp, basereg, mem_operand
+.if bpp > 0
+    pixld_s_internal %(numpix * bpp / 8), %(bpp), basereg, mem_operand
+.endif
+.endm
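
The pixld*_s fetchers above step a 16.16 fixed-point source coordinate
(VX) by UNIT_X for every fetched pixel and wrap it against the source
width for repeating sources. A minimal C model of the addressing scheme
(names are illustrative; the asm keeps the coordinate biased so the wrap
test is just a sign check, a detail omitted here):

    #include <stdint.h>

    /* Nearest-neighbor source fetch in 16.16 fixed point (sketch). */
    static uint32_t fetch_nearest_8888(const uint32_t *src_line,
                                       int32_t *vx,          /* 16.16 coordinate */
                                       int32_t unit_x,       /* 16.16 step       */
                                       int32_t width_fixed)  /* width << 16      */
    {
        uint32_t pixel = src_line[*vx >> 16];
        *vx += unit_x;
        while (*vx >= width_fixed)  /* wrap around for repeating sources */
            *vx -= width_fixed;
        return pixel;
    }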
+
+.macro vuzp8 reg1, reg2
+    umov DUMMY, v16.d[0]
+    uzp1 v16.8b,     v&reg1&.8b, v&reg2&.8b
+    uzp2 v&reg2&.8b, v&reg1&.8b, v&reg2&.8b
+    mov  v&reg1&.8b, v16.8b
+    mov  v16.d[0], DUMMY
+.endm
+
+.macro vzip8 reg1, reg2
+    umov DUMMY, v16.d[0]
+    zip1 v16.8b,     v&reg1&.8b, v&reg2&.8b
+    zip2 v&reg2&.8b, v&reg1&.8b, v&reg2&.8b
+    mov  v&reg1&.8b, v16.8b
+    mov  v16.d[0], DUMMY
+.endm
+
+/* deinterleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
+.macro pixdeinterleave bpp, basereg
+.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+    vuzp8 %(basereg+0), %(basereg+1)
+    vuzp8 %(basereg+2), %(basereg+3)
+    vuzp8 %(basereg+1), %(basereg+3)
+    vuzp8 %(basereg+0), %(basereg+2)
+.endif
+.endm
+
+/* interleave B, G, R, A channels for eight 32bpp pixels in 4 registers */
+.macro pixinterleave bpp, basereg
+.if (bpp == 32) && (DEINTERLEAVE_32BPP_ENABLED != 0)
+    vzip8 %(basereg+0), %(basereg+2)
+    vzip8 %(basereg+1), %(basereg+3)
+    vzip8 %(basereg+2), %(basereg+3)
+    vzip8 %(basereg+0), %(basereg+1)
+.endif
+.endm
+
+/*
+ * This is a macro for implementing cache preload. The main idea is that
+ * the cache preload logic is mostly independent of the rest of the pixel
+ * processing code. It starts at the top left pixel, moves forward
+ * across pixels, and can jump across scanlines. The prefetch distance is
+ * handled in an 'incremental' way: it starts from 0 and advances to the
+ * optimal distance over time. After reaching the optimal prefetch
+ * distance, it is kept constant. There are some checks which prevent
+ * prefetching unneeded pixel lines below the image (it can still prefetch
+ * a bit more data on the right side of the image - not a big issue, and
+ * it may actually be helpful when rendering text glyphs). An additional
+ * trick is the use of an LDR instruction for prefetch instead of PLD when
+ * moving to the next line; the point is that a TLB miss is very likely in
+ * this case, and PLD would be useless.
+ *
+ * This sounds like it may introduce a noticeable overhead (when working
+ * with fully cached data). But in reality, thanks to the separate pipeline
+ * and instruction queue for the NEON unit in the ARM Cortex-A8, normal ARM
+ * code can execute simultaneously with NEON code and be completely
+ * shadowed by it. Thus we get no performance overhead at all (*). This
+ * looks like a very nice feature of the Cortex-A8, if used wisely. We
+ * don't have a hardware prefetcher, but can still implement some rather
+ * advanced prefetch logic in software for almost zero cost!
+ *
+ * (*) The overhead of the prefetcher is visible when running some trivial
+ * pixel processing such as a simple copy. Anyway, having prefetch is a
+ * must when working with graphics data.
+ */
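
A rough C model of the control flow described above (a sketch only, with
illustrative names, not the pixman implementation):

    /* Incremental prefetch distance: pf_x runs ahead of the current pixel;
     * its lead grows by boost_inc until the budget in the low 4 bits of
     * pf_ctl is used up, and crossing the scanline end consumes one of the
     * remaining lines counted in the high bits. */
    static void cache_preload_model(int *pf_x, int *pf_ctl,
                                    int std_inc, int boost_inc, int orig_w)
    {
        *pf_x += std_inc;
        if (*pf_ctl & 0xF) {       /* still ramping up the distance */
            *pf_x += boost_inc;
            *pf_ctl -= 1;
        }
        /* a prefetch for pixel *pf_x would be issued here (prfm above) */
        if (*pf_x > orig_w) {      /* ran past the end of the scanline */
            *pf_x -= orig_w;
            *pf_ctl -= 0x10;       /* one less scanline left to prefetch */
        }
    }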
+.macro PF a, x:vararg
+.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_ADVANCED)
+    a x
+.endif
+.endm
+
+.macro cache_preload std_increment, boost_increment
+.if (src_bpp_shift >= 0) || (dst_r_bpp != 0) || (mask_bpp_shift >= 0)
+.if std_increment != 0
+    PF add PF_X, PF_X, #std_increment
+.endif
+    PF tst PF_CTL, #0xF
+    PF beq 71f
+    PF add PF_X, PF_X, #boost_increment
+    PF sub PF_CTL, PF_CTL, #1
+71:
+    PF cmp PF_X, ORIG_W
+.if src_bpp_shift >= 0
+    PF lsl DUMMY, PF_X, #src_bpp_shift
+    PF prfm PREFETCH_MODE, [PF_SRC, DUMMY]
+.endif
+.if dst_r_bpp != 0
+    PF lsl DUMMY, PF_X, #dst_bpp_shift
+    PF prfm PREFETCH_MODE, [PF_DST, DUMMY]
+.endif
+.if mask_bpp_shift >= 0
+    PF lsl DUMMY, PF_X, #mask_bpp_shift
+    PF prfm PREFETCH_MODE, [PF_MASK, DUMMY]
+.endif
+    PF ble 71f
+    PF sub PF_X, PF_X, ORIG_W
+    PF subs PF_CTL, PF_CTL, #0x10
+71:
+    PF ble 72f
+.if src_bpp_shift >= 0
+    PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
+    PF ldrsb DUMMY, [PF_SRC, DUMMY]
+    PF add PF_SRC, PF_SRC, #1
+.endif
+.if dst_r_bpp != 0
+    PF lsl DUMMY, DST_STRIDE, #dst_bpp_shift
+    PF ldrsb DUMMY, [PF_DST, DUMMY]
+    PF add PF_DST, PF_DST, #1
+.endif
+.if mask_bpp_shift >= 0
+    PF lsl DUMMY, MASK_STRIDE, #mask_bpp_shift
+    PF ldrsb DUMMY, [PF_MASK, DUMMY]
+    PF add PF_MASK, PF_MASK, #1
+.endif
+72:
+.endif
+.endm
+
+.macro cache_preload_simple
+.if (PREFETCH_TYPE_CURRENT == PREFETCH_TYPE_SIMPLE)
+.if src_bpp > 0
+    prfm PREFETCH_MODE, [SRC, #(PREFETCH_DISTANCE_SIMPLE * src_bpp / 8)]
+.endif
+.if dst_r_bpp > 0
+    prfm PREFETCH_MODE, [DST_R, #(PREFETCH_DISTANCE_SIMPLE * dst_r_bpp / 8)]
+.endif
+.if mask_bpp > 0
+    prfm PREFETCH_MODE, [MASK, #(PREFETCH_DISTANCE_SIMPLE * mask_bpp / 8)]
+.endif
+.endif
+.endm
+
+.macro fetch_mask_pixblock
+    pixld       pixblock_size, mask_bpp, \
+                (mask_basereg - pixblock_size * mask_bpp / 64), MASK
+.endm
+
+/*
+ * Macro which is used to process leading pixels until the destination
+ * pointer is properly aligned (at a 16-byte boundary). When the
+ * destination buffer uses a 16bpp format, this is unnecessary, or even
+ * pointless.
+ */
+.macro ensure_destination_ptr_alignment process_pixblock_head, \
+                                        process_pixblock_tail, \
+                                        process_pixblock_tail_head
+.if dst_w_bpp != 24
+    tst         DST_R, #0xF
+    beq         52f
+
+.if src_bpp > 0 || mask_bpp > 0 || dst_r_bpp > 0
+.irp lowbit, 1, 2, 4, 8, 16
+local skip1
+.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
+.if lowbit < 16 /* we don't need more than 16-byte alignment */
+    tst         DST_R, #lowbit
+    beq         51f
+.endif
+    pixld_src   (lowbit * 8 / dst_w_bpp), src_bpp, src_basereg, SRC
+    pixld       (lowbit * 8 / dst_w_bpp), mask_bpp, mask_basereg, MASK
+.if dst_r_bpp > 0
+    pixld_a     (lowbit * 8 / dst_r_bpp), dst_r_bpp, dst_r_basereg, DST_R
+.else
+    add         DST_R, DST_R, #lowbit
+.endif
+    PF add      PF_X, PF_X, #(lowbit * 8 / dst_w_bpp)
+    sub         W, W, #(lowbit * 8 / dst_w_bpp)
+51:
+.endif
+.endr
+.endif
+    pixdeinterleave src_bpp, src_basereg
+    pixdeinterleave mask_bpp, mask_basereg
+    pixdeinterleave dst_r_bpp, dst_r_basereg
+
+    process_pixblock_head
+    cache_preload 0, pixblock_size
+    cache_preload_simple
+    process_pixblock_tail
+
+    pixinterleave dst_w_bpp, dst_w_basereg
+
+.irp lowbit, 1, 2, 4, 8, 16
+.if (dst_w_bpp <= (lowbit * 8)) && ((lowbit * 8) < (pixblock_size * dst_w_bpp))
+.if lowbit < 16 /* we don't need more than 16-byte alignment */
+    tst         DST_W, #lowbit
+    beq         51f
+.endif
+.if src_bpp == 0 && mask_bpp == 0 && dst_r_bpp == 0
+    sub         W, W, #(lowbit * 8 / dst_w_bpp)
+.endif
+    pixst_a     (lowbit * 8 / dst_w_bpp), dst_w_bpp, dst_w_basereg, DST_W
+51:
+.endif
+.endr
+.endif
+52:
+.endm
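
The alignment code above peels off leading pixels in power-of-two chunks
until the destination pointer reaches a 16-byte boundary. The same idea in
C, reduced to a 32bpp destination for brevity (illustrative only):

    #include <stdint.h>

    /* Composite leading pixels one at a time until dst is 16-byte
     * aligned; a 32bpp buffer needs at most 3 such pixels. */
    static void align_leading_pixels(uint32_t **dst, int *w)
    {
        while (((uintptr_t)*dst & 15) && *w > 0) {
            /* load, composite and store one pixel here */
            (*dst)++;
            (*w)--;
        }
    }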
+
+/*
+ * Special code for processing up to (pixblock_size - 1) remaining
+ * trailing pixels. As SIMD processing performs operations on
+ * pixblock_size pixels, anything smaller than this has to be loaded
+ * and stored in a special way. Loading and storing of pixel data is
+ * performed in such a way that we fill some 'slots' in the NEON
+ * registers (some slots naturally remain unused), then perform the
+ * compositing operation as usual. In the end, the data is taken from
+ * these 'slots' and saved to memory.
+ *
+ * cache_preload_flag - set to 0 to suppress prefetch
+ * dst_aligned_flag   - selects whether the destination buffer
+ *                      is aligned
+ */
+.macro process_trailing_pixels cache_preload_flag, \
+                               dst_aligned_flag, \
+                               process_pixblock_head, \
+                               process_pixblock_tail, \
+                               process_pixblock_tail_head
+    tst         W, #(pixblock_size - 1)
+    beq         52f
+.if src_bpp > 0 || mask_bpp > 0 || dst_r_bpp > 0
+.irp chunk_size, 16, 8, 4, 2, 1
+.if pixblock_size > chunk_size
+    tst         W, #chunk_size
+    beq         51f
+    pixld_src   chunk_size, src_bpp, src_basereg, SRC
+    pixld       chunk_size, mask_bpp, mask_basereg, MASK
+.if dst_aligned_flag != 0
+    pixld_a     chunk_size, dst_r_bpp, dst_r_basereg, DST_R
+.else
+    pixld       chunk_size, dst_r_bpp, dst_r_basereg, DST_R
+.endif
+.if cache_preload_flag != 0
+    PF add      PF_X, PF_X, #chunk_size
+.endif
+51:
+.endif
+.endr
+.endif
+    pixdeinterleave src_bpp, src_basereg
+    pixdeinterleave mask_bpp, mask_basereg
+    pixdeinterleave dst_r_bpp, dst_r_basereg
+
+    process_pixblock_head
+.if cache_preload_flag != 0
+    cache_preload 0, pixblock_size
+    cache_preload_simple
+.endif
+    process_pixblock_tail
+    pixinterleave dst_w_bpp, dst_w_basereg
+.irp chunk_size, 16, 8, 4, 2, 1
+.if pixblock_size > chunk_size
+    tst         W, #chunk_size
+    beq         51f
+.if dst_aligned_flag != 0
+    pixst_a     chunk_size, dst_w_bpp, dst_w_basereg, DST_W
+.else
+    pixst       chunk_size, dst_w_bpp, dst_w_basereg, DST_W
+.endif
+51:
+.endif
+.endr
+52:
+.endm
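
The chunk cascade above decomposes the remainder W mod pixblock_size by
testing the bits of W, one power-of-two chunk per bit. A compact C
rendering of the decomposition (a sketch, not pixman code):

    /* Process a remainder (< pixblock_size) as 16/8/4/2/1 pixel chunks,
     * mirroring the 'tst W, #chunk_size' cascade above. */
    static void process_trailing(int w, int pixblock_size)
    {
        for (int chunk = 16; chunk >= 1; chunk >>= 1) {
            if (pixblock_size > chunk && (w & chunk)) {
                /* load, composite and store 'chunk' pixels here */
            }
        }
    }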
+
+/*
+ * Macro which performs all the operations needed to switch to the next
+ * scanline and start the next loop iteration, unless all the scanlines
+ * have already been processed.
+ */
+.macro advance_to_next_scanline start_of_loop_label
+    mov         W, ORIG_W
+    add         DST_W, DST_W, DST_STRIDE, lsl #dst_bpp_shift
+.if src_bpp != 0
+    add         SRC, SRC, SRC_STRIDE, lsl #src_bpp_shift
+.endif
+.if mask_bpp != 0
+    add         MASK, MASK, MASK_STRIDE, lsl #mask_bpp_shift
+.endif
+.if (dst_w_bpp != 24)
+    sub         DST_W, DST_W, W, lsl #dst_bpp_shift
+.endif
+.if (src_bpp != 24) && (src_bpp != 0)
+    sub         SRC, SRC, W, lsl #src_bpp_shift
+.endif
+.if (mask_bpp != 24) && (mask_bpp != 0)
+    sub         MASK, MASK, W, lsl #mask_bpp_shift
+.endif
+    subs        H, H, #1
+    mov         DST_R, DST_W
+    bge         start_of_loop_label
+.endm
+
+/*
+ * Registers are allocated in the following way by default:
+ * v0, v1, v2, v3     - reserved for loading source pixel data
+ * v4, v5, v6, v7     - reserved for loading destination pixel data
+ * v24, v25, v26, v27 - reserved for loading mask pixel data
+ * v28, v29, v30, v31 - final destination pixel data for writeback to memory
+ */
+.macro generate_composite_function fname, \
+                                   src_bpp_, \
+                                   mask_bpp_, \
+                                   dst_w_bpp_, \
+                                   flags, \
+                                   pixblock_size_, \
+                                   prefetch_distance, \
+                                   init, \
+                                   cleanup, \
+                                   process_pixblock_head, \
+                                   process_pixblock_tail, \
+                                   process_pixblock_tail_head, \
+                                   dst_w_basereg_ = 28, \
+                                   dst_r_basereg_ = 4, \
+                                   src_basereg_   = 0, \
+                                   mask_basereg_  = 24
+
+    pixman_asm_function fname
+    stp         x29, x30, [sp, -16]!
+    mov         x29, sp
+    sub         sp,   sp, 232  /* push all registers */
+    sub         x29, x29, 64
+    st1         {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], #32
+    st1         {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], #32
+    stp          x8,   x9, [x29, -80]
+    stp         x10,  x11, [x29, -96]
+    stp         x12,  x13, [x29, -112]
+    stp         x14,  x15, [x29, -128]
+    stp         x16,  x17, [x29, -144]
+    stp         x18,  x19, [x29, -160]
+    stp         x20,  x21, [x29, -176]
+    stp         x22,  x23, [x29, -192]
+    stp         x24,  x25, [x29, -208]
+    stp         x26,  x27, [x29, -224]
+    str         x28, [x29, -232]
+
+/*
+ * Select prefetch type for this function. If prefetch distance is
+ * set to 0 or one of the color formats is 24bpp, SIMPLE prefetch
+ * has to be used instead of ADVANCED.
+ */
+    .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_DEFAULT
+.if prefetch_distance == 0
+    .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE
+.elseif (PREFETCH_TYPE_CURRENT > PREFETCH_TYPE_SIMPLE) && \
+        ((src_bpp_ == 24) || (mask_bpp_ == 24) || (dst_w_bpp_ == 24))
+    .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_SIMPLE
+.endif
+
+/*
+ * Make some macro arguments globally visible and accessible
+ * from other macros
+ */
+    .set src_bpp, src_bpp_
+    .set mask_bpp, mask_bpp_
+    .set dst_w_bpp, dst_w_bpp_
+    .set pixblock_size, pixblock_size_
+    .set dst_w_basereg, dst_w_basereg_
+    .set dst_r_basereg, dst_r_basereg_
+    .set src_basereg, src_basereg_
+    .set mask_basereg, mask_basereg_
+
+    .macro pixld_src x:vararg
+        pixld x
+    .endm
+    .macro fetch_src_pixblock
+        pixld_src   pixblock_size, src_bpp, \
+                    (src_basereg - pixblock_size * src_bpp / 64), SRC
+    .endm
+/*
+ * Assign symbolic names to registers
+ */
+    W           .req       x0      /* width (is updated during processing) */
+    H           .req       x1      /* height (is updated during processing) */
+    DST_W       .req       x2      /* destination buffer pointer for writes */
+    DST_STRIDE  .req       x3      /* destination image stride */
+    SRC         .req       x4      /* source buffer pointer */
+    SRC_STRIDE  .req       x5      /* source image stride */
+    MASK        .req       x6      /* mask pointer */
+    MASK_STRIDE .req       x7      /* mask stride */
+
+    DST_R       .req       x8      /* destination buffer pointer for reads */
+
+    PF_CTL      .req       x9      /* combined lines counter and prefetch */
+                                    /* distance increment counter */
+    PF_X        .req       x10     /* pixel index in a scanline for current */
+                                    /* prefetch position */
+    PF_SRC      .req       x11     /* pointer to source scanline start */
+                                    /* for prefetch purposes */
+    PF_DST      .req       x12     /* pointer to destination scanline start */
+                                    /* for prefetch purposes */
+    PF_MASK     .req       x13     /* pointer to mask scanline start */
+                                    /* for prefetch purposes */
+
+    ORIG_W      .req       x14     /* saved original width */
+    DUMMY       .req       x15     /* temporary register */
+
+    sxtw        x0, w0
+    sxtw        x1, w1
+    sxtw        x3, w3
+    sxtw        x5, w5
+    sxtw        x7, w7
+
+    .set mask_bpp_shift, -1
+.if src_bpp == 32
+    .set src_bpp_shift, 2
+.elseif src_bpp == 24
+    .set src_bpp_shift, 0
+.elseif src_bpp == 16
+    .set src_bpp_shift, 1
+.elseif src_bpp == 8
+    .set src_bpp_shift, 0
+.elseif src_bpp == 0
+    .set src_bpp_shift, -1
+.else
+    .error "requested src bpp (src_bpp) is not supported"
+.endif
+.if mask_bpp == 32
+    .set mask_bpp_shift, 2
+.elseif mask_bpp == 24
+    .set mask_bpp_shift, 0
+.elseif mask_bpp == 8
+    .set mask_bpp_shift, 0
+.elseif mask_bpp == 0
+    .set mask_bpp_shift, -1
+.else
+    .error "requested mask bpp (mask_bpp) is not supported"
+.endif
+.if dst_w_bpp == 32
+    .set dst_bpp_shift, 2
+.elseif dst_w_bpp == 24
+    .set dst_bpp_shift, 0
+.elseif dst_w_bpp == 16
+    .set dst_bpp_shift, 1
+.elseif dst_w_bpp == 8
+    .set dst_bpp_shift, 0
+.else
+    .error "requested dst bpp (dst_w_bpp) is not supported"
+.endif
+
+.if (((flags) & FLAG_DST_READWRITE) != 0)
+    .set dst_r_bpp, dst_w_bpp
+.else
+    .set dst_r_bpp, 0
+.endif
+.if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0)
+    .set DEINTERLEAVE_32BPP_ENABLED, 1
+.else
+    .set DEINTERLEAVE_32BPP_ENABLED, 0
+.endif
+
+.if prefetch_distance < 0 || prefetch_distance > 15
+    .error "invalid prefetch distance (prefetch_distance)"
+.endif
+
+    PF mov      PF_X, #0
+    mov         DST_R, DST_W
+
+.if src_bpp == 24
+    sub         SRC_STRIDE, SRC_STRIDE, W
+    sub         SRC_STRIDE, SRC_STRIDE, W, lsl #1
+.endif
+.if mask_bpp == 24
+    sub         MASK_STRIDE, MASK_STRIDE, W
+    sub         MASK_STRIDE, MASK_STRIDE, W, lsl #1
+.endif
+.if dst_w_bpp == 24
+    sub         DST_STRIDE, DST_STRIDE, W
+    sub         DST_STRIDE, DST_STRIDE, W, lsl #1
+.endif
+
+/*
+ * Set up the advanced prefetcher's initial state
+ */
+    PF mov      PF_SRC, SRC
+    PF mov      PF_DST, DST_R
+    PF mov      PF_MASK, MASK
+    /* PF_CTL = prefetch_distance | ((h - 1) << 4) */
+    PF lsl      DUMMY, H, #4
+    PF mov      PF_CTL, DUMMY
+    PF add      PF_CTL, PF_CTL, #(prefetch_distance - 0x10)
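
As the comment notes, PF_CTL packs two counters into one register. In C
terms (illustrative only):

    /* PF_CTL keeps the prefetch distance budget in the low 4 bits and
     * the number of remaining scanlines in the higher bits. */
    static unsigned make_pf_ctl(unsigned prefetch_distance, unsigned h)
    {
        return prefetch_distance | ((h - 1) << 4);
    }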
+
+    init
+    subs        H, H, #1
+    mov         ORIG_W, W
+    blt         9f
+    cmp         W, #(pixblock_size * 2)
+    blt         800f
+/*
+ * This is the start of the pipelined loop, which is optimized for
+ * long scanlines
+ */
+0:
+    ensure_destination_ptr_alignment process_pixblock_head, \
+                                     process_pixblock_tail, \
+                                     process_pixblock_tail_head
+
+    /* Implement "head (tail_head) ... (tail_head) tail" loop pattern */
+    pixld_a     pixblock_size, dst_r_bpp, \
+                (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
+    fetch_src_pixblock
+    pixld       pixblock_size, mask_bpp, \
+                (mask_basereg - pixblock_size * mask_bpp / 64), MASK
+    PF add      PF_X, PF_X, #pixblock_size
+    process_pixblock_head
+    cache_preload 0, pixblock_size
+    cache_preload_simple
+    subs        W, W, #(pixblock_size * 2)
+    blt         200f
+
+100:
+    process_pixblock_tail_head
+    cache_preload_simple
+    subs        W, W, #pixblock_size
+    bge         100b
+
+200:
+    process_pixblock_tail
+    pixst_a     pixblock_size, dst_w_bpp, \
+                (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
+
+    /* Process the remaining trailing pixels in the scanline */
+    process_trailing_pixels 1, 1, \
+                            process_pixblock_head, \
+                            process_pixblock_tail, \
+                            process_pixblock_tail_head
+    advance_to_next_scanline 0b
+
+    cleanup
+1000:
+    /* pop all registers */
+    sub         x29, x29, 64
+    ld1         {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32
+    ld1         {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32
+    ldp          x8,   x9, [x29, -80]
+    ldp         x10,  x11, [x29, -96]
+    ldp         x12,  x13, [x29, -112]
+    ldp         x14,  x15, [x29, -128]
+    ldp         x16,  x17, [x29, -144]
+    ldp         x18,  x19, [x29, -160]
+    ldp         x20,  x21, [x29, -176]
+    ldp         x22,  x23, [x29, -192]
+    ldp         x24,  x25, [x29, -208]
+    ldp         x26,  x27, [x29, -224]
+    ldr         x28, [x29, -232]
+    mov         sp, x29
+    ldp         x29, x30, [sp], 16
+    ret  /* exit */
+/*
+ * This is the start of the loop designed to process images with small width
+ * (less than pixblock_size * 2 pixels). In this case neither pipelining
+ * nor prefetch is used.
+ */
+800:
+.if src_bpp_shift >= 0
+    PF lsl DUMMY, SRC_STRIDE, #src_bpp_shift
+    PF prfm PREFETCH_MODE, [SRC, DUMMY]
+.endif
+.if dst_r_bpp != 0
+    PF lsl  DUMMY, DST_STRIDE, #dst_bpp_shift
+    PF prfm PREFETCH_MODE, [DST_R, DUMMY]
+.endif
+.if mask_bpp_shift >= 0
+    PF lsl  DUMMY, MASK_STRIDE, #mask_bpp_shift
+    PF prfm PREFETCH_MODE, [MASK, DUMMY]
+.endif
+    /* Process exactly pixblock_size pixels if needed */
+    tst         W, #pixblock_size
+    beq         100f
+    pixld       pixblock_size, dst_r_bpp, \
+                (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
+    fetch_src_pixblock
+    pixld       pixblock_size, mask_bpp, \
+                (mask_basereg - pixblock_size * mask_bpp / 64), MASK
+    process_pixblock_head
+    process_pixblock_tail
+    pixst       pixblock_size, dst_w_bpp, \
+                (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
+100:
+    /* Process the remaining trailing pixels in the scanline */
+    process_trailing_pixels 0, 0, \
+                            process_pixblock_head, \
+                            process_pixblock_tail, \
+                            process_pixblock_tail_head
+    advance_to_next_scanline 800b
+9:
+    cleanup
+    /* pop all registers */
+    sub         x29, x29, 64
+    ld1         {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32
+    ld1         {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32
+    ldp          x8,   x9, [x29, -80]
+    ldp         x10,  x11, [x29, -96]
+    ldp         x12,  x13, [x29, -112]
+    ldp         x14,  x15, [x29, -128]
+    ldp         x16,  x17, [x29, -144]
+    ldp         x18,  x19, [x29, -160]
+    ldp         x20,  x21, [x29, -176]
+    ldp         x22,  x23, [x29, -192]
+    ldp         x24,  x25, [x29, -208]
+    ldp         x26,  x27, [x29, -224]
+    ldr         x28, [x29, -232]
+    mov         sp, x29
+    ldp         x29, x30, [sp], 16
+    ret  /* exit */
+
+    .purgem     fetch_src_pixblock
+    .purgem     pixld_src
+
+    .unreq      SRC
+    .unreq      MASK
+    .unreq      DST_R
+    .unreq      DST_W
+    .unreq      ORIG_W
+    .unreq      W
+    .unreq      H
+    .unreq      SRC_STRIDE
+    .unreq      DST_STRIDE
+    .unreq      MASK_STRIDE
+    .unreq      PF_CTL
+    .unreq      PF_X
+    .unreq      PF_SRC
+    .unreq      PF_DST
+    .unreq      PF_MASK
+    .unreq      DUMMY
+    .endfunc
+.endm
+
+/*
+ * A simplified variant of the function generation template for single
+ * scanline processing (for implementing pixman combine functions)
+ */
+.macro generate_composite_function_scanline        use_nearest_scaling, \
+                                                   fname, \
+                                                   src_bpp_, \
+                                                   mask_bpp_, \
+                                                   dst_w_bpp_, \
+                                                   flags, \
+                                                   pixblock_size_, \
+                                                   init, \
+                                                   cleanup, \
+                                                   process_pixblock_head, \
+                                                   process_pixblock_tail, \
+                                                   process_pixblock_tail_head, \
+                                                   dst_w_basereg_ = 28, \
+                                                   dst_r_basereg_ = 4, \
+                                                   src_basereg_   = 0, \
+                                                   mask_basereg_  = 24
+
+    pixman_asm_function fname
+    .set PREFETCH_TYPE_CURRENT, PREFETCH_TYPE_NONE
+
+/*
+ * Make some macro arguments globally visible and accessible
+ * from other macros
+ */
+    .set src_bpp, src_bpp_
+    .set mask_bpp, mask_bpp_
+    .set dst_w_bpp, dst_w_bpp_
+    .set pixblock_size, pixblock_size_
+    .set dst_w_basereg, dst_w_basereg_
+    .set dst_r_basereg, dst_r_basereg_
+    .set src_basereg, src_basereg_
+    .set mask_basereg, mask_basereg_
+
+.if use_nearest_scaling != 0
+    /*
+     * Assign symbolic names to registers for nearest scaling
+     */
+    W           .req        x0
+    DST_W       .req        x1
+    SRC         .req        x2
+    VX          .req        x3
+    UNIT_X      .req        x4
+    SRC_WIDTH_FIXED .req    x5
+    MASK        .req        x6
+    TMP1        .req        x8
+    TMP2        .req        x9
+    DST_R       .req        x10
+    DUMMY       .req        x30
+
+    .macro pixld_src x:vararg
+        pixld_s x
+    .endm
+
+    sxtw        x0, w0
+    sxtw        x3, w3
+    sxtw        x4, w4
+    sxtw        x5, w5
+
+    stp         x29, x30, [sp, -16]!
+    mov         x29, sp
+    sub         sp, sp, 88
+    sub         x29, x29, 64
+    st1         {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32
+    st1         {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32
+    stp         x8, x9, [x29, -80]
+    str         x10, [x29, -88]
+.else
+    /*
+     * Assign symbolic names to registers
+     */
+    W           .req        x0      /* width (is updated during processing) */
+    DST_W       .req        x1      /* destination buffer pointer for writes */
+    SRC         .req        x2      /* source buffer pointer */
+    MASK        .req        x3      /* mask pointer */
+    DST_R       .req        x4      /* destination buffer pointer for reads */
+    DUMMY       .req        x30
+
+    .macro pixld_src x:vararg
+        pixld x
+    .endm
+
+    sxtw        x0, w0
+
+    stp         x29, x30, [sp, -16]!
+    mov         x29, sp
+    sub         sp, sp, 64
+    sub         x29, x29, 64
+    st1         {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32
+    st1         {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32
+.endif
+
+.if (((flags) & FLAG_DST_READWRITE) != 0)
+    .set dst_r_bpp, dst_w_bpp
+.else
+    .set dst_r_bpp, 0
+.endif
+.if (((flags) & FLAG_DEINTERLEAVE_32BPP) != 0)
+    .set DEINTERLEAVE_32BPP_ENABLED, 1
+.else
+    .set DEINTERLEAVE_32BPP_ENABLED, 0
+.endif
+
+    .macro fetch_src_pixblock
+        pixld_src   pixblock_size, src_bpp, \
+                    (src_basereg - pixblock_size * src_bpp / 64), SRC
+    .endm
+
+    init
+    mov         DST_R, DST_W
+
+    cmp         W, #pixblock_size
+    blt         800f
+
+    ensure_destination_ptr_alignment process_pixblock_head, \
+                                     process_pixblock_tail, \
+                                     process_pixblock_tail_head
+
+    subs        W, W, #pixblock_size
+    blt         700f
+
+    /* Implement "head (tail_head) ... (tail_head) tail" loop pattern */
+    pixld_a     pixblock_size, dst_r_bpp, \
+                (dst_r_basereg - pixblock_size * dst_r_bpp / 64), DST_R
+    fetch_src_pixblock
+    pixld       pixblock_size, mask_bpp, \
+                (mask_basereg - pixblock_size * mask_bpp / 64), MASK
+    process_pixblock_head
+    subs        W, W, #pixblock_size
+    blt         200f
+100:
+    process_pixblock_tail_head
+    subs        W, W, #pixblock_size
+    bge         100b
+200:
+    process_pixblock_tail
+    pixst_a     pixblock_size, dst_w_bpp, \
+                (dst_w_basereg - pixblock_size * dst_w_bpp / 64), DST_W
+700:
+    /* Process the remaining trailing pixels in the scanline (dst aligned) */
+    process_trailing_pixels 0, 1, \
+                            process_pixblock_head, \
+                            process_pixblock_tail, \
+                            process_pixblock_tail_head
+
+    cleanup
+.if use_nearest_scaling != 0
+    sub         x29, x29, 64
+    ld1         {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32
+    ld1         {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32
+    ldp         x8, x9, [x29, -80]
+    ldr         x10, [x29, -96]
+    mov         sp, x29
+    ldp         x29, x30, [sp], 16
+    ret  /* exit */
+.else
+    sub         x29, x29, 64
+    ld1         {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32
+    ld1         {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32
+    mov         sp, x29
+    ldp         x29, x30, [sp], 16
+    ret  /* exit */
+.endif
+800:
+    /* Process the remaining trailing pixels in the scanline (dst unaligned) */
+    process_trailing_pixels 0, 0, \
+                            process_pixblock_head, \
+                            process_pixblock_tail, \
+                            process_pixblock_tail_head
+
+    cleanup
+.if use_nearest_scaling != 0
+    sub         x29, x29, 64
+    ld1         {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32
+    ld1         {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32
+    ldp         x8, x9, [x29, -80]
+    ldr         x10, [x29, -88]
+    mov         sp, x29
+    ldp         x29, x30, [sp], 16
+    ret  /* exit */
+
+    .unreq      DUMMY
+    .unreq      DST_R
+    .unreq      SRC
+    .unreq      W
+    .unreq      VX
+    .unreq      UNIT_X
+    .unreq      TMP1
+    .unreq      TMP2
+    .unreq      DST_W
+    .unreq      MASK
+    .unreq      SRC_WIDTH_FIXED
+
+.else
+    sub         x29, x29, 64
+    ld1         {v8.8b, v9.8b, v10.8b, v11.8b}, [x29], 32
+    ld1         {v12.8b, v13.8b, v14.8b, v15.8b}, [x29], 32
+    mov          sp, x29
+    ldp          x29, x30, [sp], 16
+    ret  /* exit */
+
+    .unreq      DUMMY
+    .unreq      SRC
+    .unreq      MASK
+    .unreq      DST_R
+    .unreq      DST_W
+    .unreq      W
+.endif
+
+    .purgem     fetch_src_pixblock
+    .purgem     pixld_src
+
+    .endfunc
+.endm
+
+.macro generate_composite_function_single_scanline x:vararg
+    generate_composite_function_scanline 0, x
+.endm
+
+.macro generate_composite_function_nearest_scanline x:vararg
+    generate_composite_function_scanline 1, x
+.endm
+
+/* Default prologue/epilogue, nothing special needs to be done */
+
+.macro default_init
+.endm
+
+.macro default_cleanup
+.endm
+
+/*
+ * Prologue/epilogue variant which additionally saves/restores v8-v15
+ * registers (they need to be saved/restored by the callee according to
+ * the ABI). This is required if the code needs to use all the NEON
+ * registers.
+ */
+
+.macro default_init_need_all_regs
+.endm
+
+.macro default_cleanup_need_all_regs
+.endm
+
+/******************************************************************************/
+
+/*
+ * Conversion of 8 r5g6b5 pixels packed in a 128-bit register (in)
+ * into a planar a8r8g8b8 format (with a, r, g, b color components
+ * stored in 64-bit registers out_a, out_r, out_g, out_b respectively).
+ *
+ * Warning: the conversion is destructive and the original
+ *          value (in) is lost.
+ */
+.macro convert_0565_to_8888 in, out_a, out_r, out_g, out_b
+    shrn        &out_r&.8b, &in&.8h,    #8
+    shrn        &out_g&.8b, &in&.8h,    #3
+    sli         &in&.8h,    &in&.8h,    #5
+    movi        &out_a&.8b, #255
+    sri         &out_r&.8b, &out_r&.8b, #5
+    sri         &out_g&.8b, &out_g&.8b, #6
+    shrn        &out_b&.8b, &in&.8h,    #2
+.endm
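
The shrn/sli/sri sequence above is a vectorized form of the usual
bit-replication expansion of r5g6b5. A scalar C equivalent for a single
pixel (a sketch for reference, not pixman's code):

    #include <stdint.h>

    /* Expand one r5g6b5 pixel to a8r8g8b8, replicating the top bits of
     * each channel into the low bits (what the sri instructions achieve
     * per lane). */
    static uint32_t convert_0565_to_8888_scalar(uint16_t p)
    {
        uint32_t r = (p >> 11) & 0x1f;
        uint32_t g = (p >> 5)  & 0x3f;
        uint32_t b =  p        & 0x1f;

        r = (r << 3) | (r >> 2);   /* 5 -> 8 bits */
        g = (g << 2) | (g >> 4);   /* 6 -> 8 bits */
        b = (b << 3) | (b >> 2);   /* 5 -> 8 bits */
        return 0xff000000u | (r << 16) | (g << 8) | b;
    }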
+
+.macro convert_0565_to_x888 in, out_r, out_g, out_b
+    shrn        &out_r&.8b, &in&.8h,    #8
+    shrn        &out_g&.8b, &in&.8h,    #3
+    sli         &in&.8h,    &in&.8h,    #5
+    sri         &out_r&.8b, &out_r&.8b, #5
+    sri         &out_g&.8b, &out_g&.8b, #6
+    shrn        &out_b&.8b, &in&.8h,    #2
+.endm
+
+/*
+ * Conversion from the planar a8r8g8b8 format (with r, g, b color
+ * components in 64-bit registers in_r, in_g, in_b respectively) into
+ * 8 r5g6b5 pixels packed in a 128-bit register (out). Requires two
+ * temporary 128-bit registers (tmp1, tmp2).
+ */
+.macro convert_8888_to_0565 in_r, in_g, in_b, out, tmp1, tmp2
+    ushll       &tmp1&.8h, &in_g&.8b, #7
+    shl         &tmp1&.8h, &tmp1&.8h, #1
+    ushll       &out&.8h,  &in_r&.8b, #7
+    shl         &out&.8h,  &out&.8h,  #1
+    ushll       &tmp2&.8h, &in_b&.8b, #7
+    shl         &tmp2&.8h, &tmp2&.8h, #1
+    sri         &out&.8h, &tmp1&.8h, #5
+    sri         &out&.8h, &tmp2&.8h, #11
+.endm
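
Packing goes the other way by keeping only the top bits of each channel;
the ushll/shl/sri sequence above performs the same truncation for eight
pixels at once. A scalar sketch:

    #include <stdint.h>

    /* Pack one a8r8g8b8 pixel into r5g6b5 by truncating each channel. */
    static uint16_t convert_8888_to_0565_scalar(uint32_t p)
    {
        return (uint16_t)(((p >> 8) & 0xf800) |   /* R: top 5 bits */
                          ((p >> 5) & 0x07e0) |   /* G: top 6 bits */
                          ((p >> 3) & 0x001f));   /* B: top 5 bits */
    }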
+
+/*
+ * Conversion of four r5g6b5 pixels (in) to four x8r8g8b8 pixels
+ * returned in the (out0, out1) register pair. Requires one temporary
+ * 64-bit register (tmp). 'out1' and 'in' may overlap; the original
+ * value in 'in' is lost.
+ */
+.macro convert_four_0565_to_x888_packed in, out0, out1, tmp
+    shl         &out0&.4h, &in&.4h,   #5  /* G top 6 bits */
+    shl         &tmp&.4h,  &in&.4h,   #11 /* B top 5 bits */
+    sri         &in&.4h,   &in&.4h,   #5  /* R is ready in top bits */
+    sri         &out0&.4h, &out0&.4h, #6  /* G is ready in top bits */
+    sri         &tmp&.4h,  &tmp&.4h,  #5  /* B is ready in top bits */
+    ushr        &out1&.4h, &in&.4h,   #8  /* R is in place */
+    sri         &out0&.4h, &tmp&.4h,  #8  /* G & B is in place */
+    zip1        &tmp&.4h,  &out0&.4h, &out1&.4h  /* everything is in place */
+    zip2        &out1&.4h, &out0&.4h, &out1&.4h
+    mov         &out0&.d[0], &tmp&.d[0]
+.endm

Modified: trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-bits-image.c
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-bits-image.c	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-bits-image.c	2022-10-18 21:13:29 UTC (rev 64747)
@@ -209,15 +209,15 @@
 {
     uint32_t *ret = p;
 
-    satot = (satot + 0x8000) >> 16;
-    srtot = (srtot + 0x8000) >> 16;
-    sgtot = (sgtot + 0x8000) >> 16;
-    sbtot = (sbtot + 0x8000) >> 16;
+    satot = (int32_t)(satot + 0x8000) / 65536;
+    srtot = (int32_t)(srtot + 0x8000) / 65536;
+    sgtot = (int32_t)(sgtot + 0x8000) / 65536;
+    sbtot = (int32_t)(sbtot + 0x8000) / 65536;
 
-    satot = CLIP (satot, 0, 0xff);
-    srtot = CLIP (srtot, 0, 0xff);
-    sgtot = CLIP (sgtot, 0, 0xff);
-    sbtot = CLIP (sbtot, 0, 0xff);
+    satot = CLIP ((int32_t)satot, 0, 0xff);
+    srtot = CLIP ((int32_t)srtot, 0, 0xff);
+    sgtot = CLIP ((int32_t)sgtot, 0, 0xff);
+    sbtot = CLIP ((int32_t)sbtot, 0, 0xff);
 
     *ret = ((satot << 24) | (srtot << 16) | (sgtot <<  8) | (sbtot));
 }
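
For context on this hunk: right-shifting a negative signed value is
implementation-defined in C, and even where it is an arithmetic shift it
rounds toward negative infinity, whereas signed division rounds toward
zero. A tiny standalone illustration (not from pixman):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int32_t v = -0x18000;          /* -1.5 in 16.16 fixed point      */
        printf("%d\n", v >> 16);       /* typically -2 (rounds to floor) */
        printf("%d\n", v / 65536);     /* -1 (rounds toward zero)        */
        return 0;
    }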
@@ -240,10 +240,10 @@
 {
     argb_t *ret = p;
 
-    ret->a = CLIP (satot / 65536.f, 0.f, 1.f);
-    ret->r = CLIP (srtot / 65536.f, 0.f, 1.f);
-    ret->g = CLIP (sgtot / 65536.f, 0.f, 1.f);
-    ret->b = CLIP (sbtot / 65536.f, 0.f, 1.f);
+    ret->a = CLIP ((int32_t)satot / 65536.f, 0.f, 1.f);
+    ret->r = CLIP ((int32_t)srtot / 65536.f, 0.f, 1.f);
+    ret->g = CLIP ((int32_t)sgtot / 65536.f, 0.f, 1.f);
+    ret->b = CLIP ((int32_t)sbtot / 65536.f, 0.f, 1.f);
 }
 
 typedef void (* accumulate_pixel_t) (unsigned int *satot, unsigned int *srtot,
@@ -482,6 +482,7 @@
     int             width  = iter->width;
     uint32_t *      buffer = iter->buffer;
 
+    const uint32_t wide_zero[4] = {0};
     pixman_fixed_t x, y;
     pixman_fixed_t ux, uy;
     pixman_vector_t v;
@@ -513,7 +514,8 @@
 
     for (i = 0; i < width; ++i)
     {
-	if (!mask || mask[i])
+	if (!mask || (!wide && mask[i]) ||
+	    (wide && memcmp(&mask[4 * i], wide_zero, 16) != 0))
 	{
 	    bits_image_fetch_pixel_filtered (
 		&image->bits, wide, x, y, get_pixel, buffer);
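
The new test treats each wide mask entry as a 128-bit (4 x 32-bit) value
and only skips the pixel when all four words are zero. The predicate in
isolation (sketch):

    #include <stdint.h>
    #include <string.h>

    /* Nonzero when the i-th wide mask entry (4 words) is nonzero. */
    static int wide_mask_set(const uint32_t *mask, int i)
    {
        static const uint32_t wide_zero[4] = { 0 };
        return memcmp(&mask[4 * i], wide_zero, sizeof wide_zero) != 0;
    }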
@@ -636,6 +638,7 @@
     get_pixel_t     get_pixel =
 	wide ? fetch_pixel_general_float : fetch_pixel_general_32;
 
+    const uint32_t wide_zero[4] = {0};
     pixman_fixed_t x, y, w;
     pixman_fixed_t ux, uy, uw;
     pixman_vector_t v;
@@ -670,7 +673,8 @@
     {
 	pixman_fixed_t x0, y0;
 
-	if (!mask || mask[i])
+	if (!mask || (!wide && mask[i]) ||
+	    (wide && memcmp(&mask[4 * i], wide_zero, 16) != 0))
 	{
 	    if (w != 0)
 	    {
@@ -1051,7 +1055,7 @@
     iter->y++;
 }
 
-static const float
+static float
 dither_factor_blue_noise_64 (int x, int y)
 {
     float m = dither_blue_noise_64x64[((y & 0x3f) << 6) | (x & 0x3f)];
@@ -1058,7 +1062,7 @@
     return m * (1. / 4096.f) + (1. / 8192.f);
 }
 
-static const float
+static float
 dither_factor_bayer_8 (int x, int y)
 {
     uint32_t m;

Modified: trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-compiler.h
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-compiler.h	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-compiler.h	2022-10-18 21:13:29 UTC (rev 64747)
@@ -109,7 +109,7 @@
 #if defined(PIXMAN_NO_TLS)
 
 #   define PIXMAN_DEFINE_THREAD_LOCAL(type, name)			\
-    static type name
+    static type name;
 #   define PIXMAN_GET_THREAD_LOCAL(name)				\
     (&name)
 
@@ -116,7 +116,7 @@
 #elif defined(TLS)
 
 #   define PIXMAN_DEFINE_THREAD_LOCAL(type, name)			\
-    static TLS type name
+    static TLS type name;
 #   define PIXMAN_GET_THREAD_LOCAL(name)				\
     (&name)
 
@@ -176,7 +176,7 @@
 #elif defined(_MSC_VER)
 
 #   define PIXMAN_DEFINE_THREAD_LOCAL(type, name)			\
-    static __declspec(thread) type name
+    static __declspec(thread) type name;
 #   define PIXMAN_GET_THREAD_LOCAL(name)				\
     (&name)
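
With the semicolon moved inside the macro, uses of
PIXMAN_DEFINE_THREAD_LOCAL no longer need their own trailing ';' (see the
pixman-implementation.c hunk below), avoiding a stray empty declaration at
file scope. A minimal illustration of the pattern (hypothetical names):

    /* The macro supplies its own semicolon. */
    #define DEFINE_COUNTER(name) static int name;

    DEFINE_COUNTER(hits)   /* writing 'DEFINE_COUNTER(hits);' would expand
                              to 'static int hits;;' - an empty declaration
                              some compilers warn about */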
 

Modified: trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-fast-path.c
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-fast-path.c	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-fast-path.c	2022-10-18 21:13:29 UTC (rev 64747)
@@ -2836,7 +2836,11 @@
 	sgtot = CLIP (sgtot, 0, 0xff);
 	sbtot = CLIP (sbtot, 0, 0xff);
 
+#ifdef WORDS_BIGENDIAN
+	buffer[k] = (satot << 0) | (srtot << 8) | (sgtot << 16) | (sbtot << 24);
+#else
 	buffer[k] = (satot << 24) | (srtot << 16) | (sgtot << 8) | (sbtot << 0);
+#endif
 
     next:
 	vx += ux;

Modified: trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-filter.c
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-filter.c	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-filter.c	2022-10-18 21:13:29 UTC (rev 64747)
@@ -237,11 +237,14 @@
 		  pixman_kernel_t  sample,
 		  double           scale,
 		  int              n_phases,
-		  pixman_fixed_t *p)
+		  pixman_fixed_t *pstart,
+		  pixman_fixed_t *pend
+		  )
 {
+    pixman_fixed_t *p = pstart;
     double step;
     int i;
-
+    if(width <= 0) return;
     step = 1.0 / n_phases;
 
     for (i = 0; i < n_phases; ++i)
@@ -258,7 +261,7 @@
 
 	x1 = ceil (frac - width / 2.0 - 0.5);
 	x2 = x1 + width;
-
+    assert( p >= pstart && p + (x2 - x1) <= pend ); /* assert validity of the following loop */
 	total = 0;
         for (x = x1; x < x2; ++x)
         {
@@ -287,8 +290,10 @@
 
 	/* Normalize, with error diffusion */
 	p -= width;
-        total = 65536.0 / total;
-        new_total = 0;
+	assert(p >= pstart && p + (x2 - x1) <= pend); /* assert validity of the following loop */
+
+    total = 65536.0 / total;
+    new_total = 0;
 	e = 0.0;
 	for (x = x1; x < x2; ++x)
 	{
@@ -304,6 +309,8 @@
 	 * at the first sample, since that is the only one that
 	 * hasn't had any error diffused into it.
 	 */
+
+	assert(p - width >= pstart && p - width < pend); /* assert... */
 	*(p - width) += pixman_fixed_1 - new_total;
     }
 }
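
The asserts added here guard a normalization loop that scales one filter
phase so its fixed-point taps sum exactly to pixman_fixed_1, diffusing the
rounding error forward from tap to tap. The technique, reduced to a
standalone sketch:

    #include <stdint.h>

    #define FIXED_1 65536

    /* Scale n double-precision taps so the rounded fixed-point values
     * sum exactly to FIXED_1, carrying the rounding error into the next
     * tap; any residue goes back into the first tap. */
    static void normalize_taps(const double *taps, int32_t *out, int n,
                               double total)
    {
        double scale = (double)FIXED_1 / total;
        double e = 0.0;
        int32_t new_total = 0;

        for (int i = 0; i < n; ++i) {
            double t = taps[i] * scale + e;
            out[i] = (int32_t)(t + 0.5);   /* round to fixed point */
            e = t - out[i];                /* diffuse the error    */
            new_total += out[i];
        }
        out[0] += FIXED_1 - new_total;     /* make the sum exact   */
    }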
@@ -465,10 +472,16 @@
     params[2] = pixman_int_to_fixed (subsample_bits_x);
     params[3] = pixman_int_to_fixed (subsample_bits_y);
 
-    create_1d_filter (width, reconstruct_x, sample_x, sx, subsample_x,
-		      params + 4);
-    create_1d_filter (height, reconstruct_y, sample_y, sy, subsample_y,
-		      params + 4 + width * subsample_x);
+    {
+        pixman_fixed_t
+            *xparams = params+4,
+            *yparams = xparams + width*subsample_x,
+            *endparams = params + *n_values;
+        create_1d_filter(width, reconstruct_x, sample_x, sx, subsample_x,
+                         xparams, yparams);
+        create_1d_filter(height, reconstruct_y, sample_y, sy, subsample_y,
+                         yparams, endparams);
+    }
 
 #ifdef PIXMAN_GNUPLOT
     gnuplot_filter(width, subsample_x, params + 4);

Modified: trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-implementation.c
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-implementation.c	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-implementation.c	2022-10-18 21:13:29 UTC (rev 64747)
@@ -63,7 +63,7 @@
     } cache [N_CACHED_FAST_PATHS];
 } cache_t;
 
-PIXMAN_DEFINE_THREAD_LOCAL (cache_t, fast_path_cache);
+PIXMAN_DEFINE_THREAD_LOCAL (cache_t, fast_path_cache)
 
 static void
 dummy_composite_rect (pixman_implementation_t *imp,

Modified: trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-ppc.c
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-ppc.c	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-ppc.c	2022-10-18 21:13:29 UTC (rev 64747)
@@ -68,6 +68,24 @@
     return have_vmx;
 }
 
+#elif defined (__FreeBSD__)
+#include <machine/cpu.h>
+#include <sys/auxv.h>
+
+static pixman_bool_t
+pixman_have_vmx (void)
+{
+
+    unsigned long cpufeatures;
+    int have_vmx;
+
+    if (elf_aux_info(AT_HWCAP, &cpufeatures, sizeof(cpufeatures)))
+    return FALSE;
+
+    have_vmx = cpufeatures & PPC_FEATURE_HAS_ALTIVEC;
+    return have_vmx;
+}
+
 #elif defined (__linux__)
 
 #include <sys/types.h>

Modified: trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-private.h
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-private.h	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-private.h	2022-10-18 21:13:29 UTC (rev 64747)
@@ -640,6 +640,11 @@
 _pixman_implementation_create_arm_neon (pixman_implementation_t *fallback);
 #endif
 
+#ifdef USE_ARM_A64_NEON
+pixman_implementation_t *
+_pixman_implementation_create_arm_neon (pixman_implementation_t *fallback);
+#endif
+
 #ifdef USE_MIPS_DSPR2
 pixman_implementation_t *
 _pixman_implementation_create_mips_dspr2 (pixman_implementation_t *fallback);

Modified: trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-region.c
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-region.c	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-region.c	2022-10-18 21:13:29 UTC (rev 64747)
@@ -76,7 +76,7 @@
 #define PIXREGION_SIZE(reg) ((reg)->data ? (reg)->data->size : 0)
 #define PIXREGION_RECTS(reg) \
     ((reg)->data ? (box_type_t *)((reg)->data + 1) \
-     : &(reg)->extents)
+     : (box_type_t *)&(reg)->extents)
 #define PIXREGION_BOXPTR(reg) ((box_type_t *)((reg)->data + 1))
 #define PIXREGION_BOX(reg, i) (&PIXREGION_BOXPTR (reg)[i])
 #define PIXREGION_TOP(reg) PIXREGION_BOX (reg, (reg)->data->numRects)
@@ -292,7 +292,7 @@
     } while (0)
 
 PIXMAN_EXPORT pixman_bool_t
-PREFIX (_equal) (region_type_t *reg1, region_type_t *reg2)
+PREFIX (_equal) (const region_type_t *reg1, const region_type_t *reg2)
 {
     int i;
     box_type_t *rects1;
@@ -395,7 +395,7 @@
 }
 
 PIXMAN_EXPORT void
-PREFIX (_init_with_extents) (region_type_t *region, box_type_t *extents)
+PREFIX (_init_with_extents) (region_type_t *region, const box_type_t *extents)
 {
     if (!GOOD_RECT (extents))
     {
@@ -417,13 +417,13 @@
 }
 
 PIXMAN_EXPORT int
-PREFIX (_n_rects) (region_type_t *region)
+PREFIX (_n_rects) (const region_type_t *region)
 {
     return PIXREGION_NUMRECTS (region);
 }
 
 PIXMAN_EXPORT box_type_t *
-PREFIX (_rectangles) (region_type_t *region,
+PREFIX (_rectangles) (const region_type_t *region,
                       int               *n_rects)
 {
     if (n_rects)
@@ -505,7 +505,7 @@
 }
 
 PIXMAN_EXPORT pixman_bool_t
-PREFIX (_copy) (region_type_t *dst, region_type_t *src)
+PREFIX (_copy) (region_type_t *dst, const region_type_t *src)
 {
     GOOD (dst);
     GOOD (src);
@@ -746,8 +746,8 @@
 
 static pixman_bool_t
 pixman_op (region_type_t *  new_reg,               /* Place to store result	    */
-	   region_type_t *  reg1,                  /* First region in operation     */
-	   region_type_t *  reg2,                  /* 2d region in operation        */
+	   const region_type_t *  reg1,                  /* First region in operation     */
+	   const region_type_t *  reg2,                  /* 2d region in operation        */
 	   overlap_proc_ptr overlap_func,          /* Function to call for over-
 						    * lapping bands		    */
 	   int              append_non1,           /* Append non-overlapping bands  
@@ -1155,8 +1155,8 @@
 
 PIXMAN_EXPORT pixman_bool_t
 PREFIX (_intersect) (region_type_t *     new_reg,
-                     region_type_t *        reg1,
-                     region_type_t *        reg2)
+                     const region_type_t *        reg1,
+                     const region_type_t *        reg2)
 {
     GOOD (reg1);
     GOOD (reg2);
@@ -1321,7 +1321,7 @@
 
 PIXMAN_EXPORT pixman_bool_t
 PREFIX(_intersect_rect) (region_type_t *dest,
-			 region_type_t *source,
+			 const region_type_t *source,
 			 int x, int y,
 			 unsigned int width,
 			 unsigned int height)
@@ -1342,7 +1342,7 @@
  */
 PIXMAN_EXPORT pixman_bool_t
 PREFIX (_union_rect) (region_type_t *dest,
-                      region_type_t *source,
+                      const region_type_t *source,
                       int            x,
 		      int            y,
                       unsigned int   width,
@@ -1368,9 +1368,9 @@
 }
 
 PIXMAN_EXPORT pixman_bool_t
-PREFIX (_union) (region_type_t *new_reg,
-                 region_type_t *reg1,
-                 region_type_t *reg2)
+PREFIX (_union) (region_type_t *      new_reg,
+                 const region_type_t *reg1,
+                 const region_type_t *reg2)
 {
     /* Return TRUE if some overlap
      * between reg1, reg2
@@ -1954,9 +1954,9 @@
  *-----------------------------------------------------------------------
  */
 PIXMAN_EXPORT pixman_bool_t
-PREFIX (_subtract) (region_type_t *reg_d,
-                    region_type_t *reg_m,
-                    region_type_t *reg_s)
+PREFIX (_subtract) (region_type_t *      reg_d,
+                    const region_type_t *reg_m,
+                    const region_type_t *reg_s)
 {
     GOOD (reg_m);
     GOOD (reg_s);
@@ -2019,9 +2019,9 @@
  *-----------------------------------------------------------------------
  */
 PIXMAN_EXPORT pixman_bool_t
-PREFIX (_inverse) (region_type_t *new_reg,  /* Destination region */
-		   region_type_t *reg1,     /* Region to invert */
-		   box_type_t *   inv_rect) /* Bounding box for inversion */
+PREFIX (_inverse) (region_type_t *      new_reg,  /* Destination region */
+		   const region_type_t *reg1,     /* Region to invert */
+		   const box_type_t *   inv_rect) /* Bounding box for inversion */
 {
     region_type_t inv_reg; /* Quick and dirty region made from the
 			    * bounding box */
@@ -2113,8 +2113,8 @@
  *   that doesn't overlap the box at all and part_in is false)
  */
 PIXMAN_EXPORT pixman_region_overlap_t
-PREFIX (_contains_rectangle) (region_type_t *  region,
-			      box_type_t *     prect)
+PREFIX (_contains_rectangle) (const region_type_t *  region,
+			      const box_type_t *     prect)
 {
     box_type_t *     pbox;
     box_type_t *     pbox_end;
@@ -2318,7 +2318,7 @@
 }
 
 PIXMAN_EXPORT void
-PREFIX (_reset) (region_type_t *region, box_type_t *box)
+PREFIX (_reset) (region_type_t *region, const box_type_t *box)
 {
     GOOD (region);
 
@@ -2343,7 +2343,7 @@
 
 /* box is "return" value */
 PIXMAN_EXPORT int
-PREFIX (_contains_point) (region_type_t * region,
+PREFIX (_contains_point) (const region_type_t * region,
                           int x, int y,
                           box_type_t * box)
 {
@@ -2387,7 +2387,7 @@
 }
 
 PIXMAN_EXPORT int
-PREFIX (_not_empty) (region_type_t * region)
+PREFIX (_not_empty) (const region_type_t * region)
 {
     GOOD (region);
 
@@ -2395,11 +2395,11 @@
 }
 
 PIXMAN_EXPORT box_type_t *
-PREFIX (_extents) (region_type_t * region)
+PREFIX (_extents) (const region_type_t * region)
 {
     GOOD (region);
 
-    return(&region->extents);
+    return(box_type_t *)(&region->extents);
 }
 
 /*

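The pixman-region.c hunks above const-qualify every region and box
parameter that the code only reads.  The one subtle case is _extents:
its argument becomes const, but its return type stays a mutable box
pointer for API compatibility, so the new body casts the const away.
A minimal sketch of what the PREFIX macro expands to for the 32-bit
build (pixman-region.c is compiled once per region width; the GOOD()
self-check is elided here):

    PIXMAN_EXPORT pixman_box32_t *
    pixman_region32_extents (const pixman_region32_t *region)
    {
        /* Cast away const: the public prototype has always returned
         * a mutable box, and changing it would break callers.  */
        return (pixman_box32_t *) (&region->extents);
    }
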
Modified: trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-sse2.c
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-sse2.c	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/pixman/pixman-sse2.c	2022-10-18 21:13:29 UTC (rev 64747)
@@ -3202,7 +3202,7 @@
     uint8_t *mask_line, *mask;
     int dst_stride, mask_stride;
     int32_t w;
-    uint32_t m, d;
+    uint32_t d;
 
     __m128i xmm_src, xmm_alpha, xmm_def;
     __m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
@@ -3257,6 +3257,7 @@
 
 	while (w >= 4)
 	{
+            uint32_t m;
             memcpy(&m, mask, sizeof(uint32_t));
 
 	    if (srca == 0xff && m == 0xffffffff)
@@ -3477,7 +3478,6 @@
     uint8_t     *mask_line, *mask;
     int dst_stride, mask_stride;
     int32_t w;
-    uint32_t m;
 
     __m128i xmm_src, xmm_def;
     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
@@ -3529,6 +3529,7 @@
 
 	while (w >= 4)
 	{
+            uint32_t m;
             memcpy(&m, mask, sizeof(uint32_t));
 
 	    if (srca == 0xff && m == 0xffffffff)
@@ -3595,7 +3596,6 @@
     uint8_t     *mask_line, *mask;
     int dst_stride, mask_stride;
     int32_t w;
-    uint32_t m;
     __m128i mmx_src, mmx_alpha, mmx_mask, mmx_dest;
 
     __m128i xmm_src, xmm_alpha;
@@ -3627,7 +3627,7 @@
 
 	while (w && (uintptr_t)dst & 15)
 	{
-	    m = *mask++;
+	    uint8_t m = *mask++;
 
 	    if (m)
 	    {
@@ -3647,6 +3647,8 @@
 
 	while (w >= 8)
 	{
+            uint32_t m;
+
 	    xmm_dst = load_128_aligned ((__m128i*) dst);
 	    unpack_565_128_4x128 (xmm_dst,
 				  &xmm_dst0, &xmm_dst1, &xmm_dst2, &xmm_dst3);
@@ -3700,7 +3702,7 @@
 
 	while (w)
 	{
-	    m = *mask++;
+	    uint8_t m = *mask++;
 
 	    if (m)
 	    {
@@ -4062,7 +4064,7 @@
     uint8_t     *dst_line, *dst;
     uint8_t     *mask_line, *mask;
     int dst_stride, mask_stride;
-    uint32_t d, m;
+    uint32_t d;
     uint32_t src;
     int32_t w;
 
@@ -4089,7 +4091,7 @@
 
 	while (w && ((uintptr_t)dst & 15))
 	{
-	    m = (uint32_t) *mask++;
+	    uint8_t m = *mask++;
 	    d = (uint32_t) *dst;
 
 	    *dst++ = (uint8_t) pack_1x128_32 (
@@ -4126,7 +4128,7 @@
 
 	while (w)
 	{
-	    m = (uint32_t) *mask++;
+	    uint8_t m = *mask++;
 	    d = (uint32_t) *dst;
 
 	    *dst++ = (uint8_t) pack_1x128_32 (
@@ -4303,7 +4305,7 @@
     int dst_stride, mask_stride;
     int32_t w;
     uint32_t src;
-    uint32_t m, d;
+    uint32_t d;
 
     __m128i xmm_alpha;
     __m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
@@ -4328,7 +4330,7 @@
 
 	while (w && ((uintptr_t)dst & 15))
 	{
-	    m = (uint32_t) *mask++;
+	    uint8_t m = *mask++;
 	    d = (uint32_t) *dst;
 
 	    *dst++ = (uint8_t) pack_1x128_32 (
@@ -4364,7 +4366,7 @@
 
 	while (w)
 	{
-	    m = (uint32_t) *mask++;
+	    uint8_t m = (uint32_t) *mask++;
 	    d = (uint32_t) *dst;
 
 	    *dst++ = (uint8_t) pack_1x128_32 (
@@ -4832,7 +4834,6 @@
     uint32_t    *src, *src_line, s;
     uint32_t    *dst, *dst_line, d;
     uint8_t         *mask, *mask_line;
-    uint32_t m;
     int src_stride, mask_stride, dst_stride;
     int32_t w;
     __m128i ms;
@@ -4861,8 +4862,8 @@
 
         while (w && (uintptr_t)dst & 15)
         {
+            uint8_t m = *mask++;
             s = 0xff000000 | *src++;
-            memcpy(&m, mask++, sizeof(uint32_t));
             d = *dst;
             ms = unpack_32_1x128 (s);
 
@@ -4880,6 +4881,7 @@
 
         while (w >= 4)
         {
+            uint32_t m;
             memcpy(&m, mask, sizeof(uint32_t));
             xmm_src = _mm_or_si128 (
 		load_128_unaligned ((__m128i*)src), mask_ff000000);
@@ -4916,7 +4918,7 @@
 
         while (w)
         {
-            memcpy(&m, mask++, sizeof(uint32_t));
+            uint8_t m = *mask++;
 
             if (m)
             {
@@ -4957,7 +4959,6 @@
     uint32_t    *src, *src_line, s;
     uint32_t    *dst, *dst_line, d;
     uint8_t         *mask, *mask_line;
-    uint32_t m;
     int src_stride, mask_stride, dst_stride;
     int32_t w;
 
@@ -4986,9 +4987,9 @@
         while (w && (uintptr_t)dst & 15)
         {
 	    uint32_t sa;
+            uint8_t m = *mask++;
 
             s = *src++;
-            m = (uint32_t) *mask++;
             d = *dst;
 
 	    sa = s >> 24;
@@ -5019,6 +5020,7 @@
 
         while (w >= 4)
         {
+            uint32_t m;
             memcpy(&m, mask, sizeof(uint32_t));
 
 	    if (m)
@@ -5058,9 +5060,9 @@
         while (w)
         {
 	    uint32_t sa;
+            uint8_t m = *mask++;
 
             s = *src++;
-            m = (uint32_t) *mask++;
             d = *dst;
 
 	    sa = s >> 24;
@@ -5927,14 +5929,12 @@
     intptr_t unit_x = unit_x_;
     BILINEAR_DECLARE_VARIABLES;
     uint32_t pix1, pix2;
-    uint32_t m;
 
     while (w && ((uintptr_t)dst & 15))
     {
 	uint32_t sa;
+	uint8_t m = *mask++;
 
-	m = (uint32_t) *mask++;
-
 	if (m)
 	{
 	    BILINEAR_INTERPOLATE_ONE_PIXEL (pix1);
@@ -5969,6 +5969,8 @@
 
     while (w >= 4)
     {
+        uint32_t m;
+
 	__m128i xmm_src, xmm_src_lo, xmm_src_hi, xmm_srca_lo, xmm_srca_hi;
 	__m128i xmm_dst, xmm_dst_lo, xmm_dst_hi;
 	__m128i xmm_mask, xmm_mask_lo, xmm_mask_hi;
@@ -6015,9 +6017,8 @@
     while (w)
     {
 	uint32_t sa;
+	uint8_t m = *mask++;
 
-	m = (uint32_t) *mask++;
-
 	if (m)
 	{
 	    BILINEAR_INTERPOLATE_ONE_PIXEL (pix1);

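The pixman-sse2.c hunks narrow the mask temporary m to the loop that
uses it and size it to match the load actually performed: one uint8_t
per pixel in the scalar head/tail loops, and a uint32_t filled with
memcpy (a safe unaligned load) in the four-pixel SSE2 loops.  In one
fast path's scalar head and tail loops this also fixes an over-read:
the old memcpy(&m, mask++, sizeof(uint32_t)) fetched four mask bytes
while advancing the pointer by only one.  A self-contained sketch of
the resulting pattern (process_a8_row and its per-pixel operation are
hypothetical placeholders, not pixman API):

    #include <stdint.h>
    #include <string.h>

    static void
    process_a8_row (const uint8_t *mask, uint32_t *dst, int w)
    {
        while (w && ((uintptr_t) dst & 15))   /* align head, one pixel */
        {
            uint8_t m = *mask++;              /* exactly one mask byte */
            if (m)
                *dst = 0xff000000u | m;       /* placeholder pixel op */
            dst++;
            w--;
        }

        while (w >= 4)                        /* aligned vector body */
        {
            uint32_t m;
            memcpy (&m, mask, sizeof (uint32_t)); /* 4-byte mask load */
            (void) m;  /* real code branches on m == 0 / m == 0xffffffff */
            mask += 4;
            dst += 4;
            w -= 4;
        }

        while (w--)                           /* tail, one pixel */
        {
            uint8_t m = *mask++;
            if (m)
                *dst = 0xff000000u | m;
            dst++;
        }
    }
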
Modified: trunk/Build/source/libs/pixman/pixman-src/pixman/pixman.h
===================================================================
--- trunk/Build/source/libs/pixman/pixman-src/pixman/pixman.h	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/pixman-src/pixman/pixman.h	2022-10-18 21:13:29 UTC (rev 64747)
@@ -514,8 +514,8 @@
 							  int                count);
 
 PIXMAN_API
-void                    pixman_region_init_with_extents  (pixman_region16_t *region,
-							  pixman_box16_t    *extents);
+void                    pixman_region_init_with_extents  (pixman_region16_t    *region,
+							  const pixman_box16_t *extents);
 
 PIXMAN_API
 void                    pixman_region_init_from_image    (pixman_region16_t *region,
@@ -532,78 +532,78 @@
 							  int                y);
 
 PIXMAN_API
-pixman_bool_t           pixman_region_copy               (pixman_region16_t *dest,
-							  pixman_region16_t *source);
+pixman_bool_t           pixman_region_copy               (pixman_region16_t       *dest,
+							  const pixman_region16_t *source);
 
 PIXMAN_API
-pixman_bool_t           pixman_region_intersect          (pixman_region16_t *new_reg,
-							  pixman_region16_t *reg1,
-							  pixman_region16_t *reg2);
+pixman_bool_t           pixman_region_intersect          (pixman_region16_t       *new_reg,
+							  const pixman_region16_t *reg1,
+							  const pixman_region16_t *reg2);
 
 PIXMAN_API
-pixman_bool_t           pixman_region_union              (pixman_region16_t *new_reg,
-							  pixman_region16_t *reg1,
-							  pixman_region16_t *reg2);
+pixman_bool_t           pixman_region_union              (pixman_region16_t       *new_reg,
+							  const pixman_region16_t *reg1,
+							  const pixman_region16_t *reg2);
 
 PIXMAN_API
-pixman_bool_t           pixman_region_union_rect         (pixman_region16_t *dest,
-							  pixman_region16_t *source,
-							  int                x,
-							  int                y,
-							  unsigned int       width,
-							  unsigned int       height);
+pixman_bool_t           pixman_region_union_rect         (pixman_region16_t       *dest,
+							  const pixman_region16_t *source,
+							  int                     x,
+							  int                     y,
+							  unsigned int            width,
+							  unsigned int            height);
 
 PIXMAN_API
-pixman_bool_t		pixman_region_intersect_rect     (pixman_region16_t *dest,
-							  pixman_region16_t *source,
-							  int                x,
-							  int                y,
-							  unsigned int       width,
-							  unsigned int       height);
+pixman_bool_t		pixman_region_intersect_rect     (pixman_region16_t       *dest,
+							  const pixman_region16_t *source,
+							  int                      x,
+							  int                      y,
+							  unsigned int             width,
+							  unsigned int             height);
 
 PIXMAN_API
-pixman_bool_t           pixman_region_subtract           (pixman_region16_t *reg_d,
-							  pixman_region16_t *reg_m,
-							  pixman_region16_t *reg_s);
+pixman_bool_t           pixman_region_subtract           (pixman_region16_t       *reg_d,
+							  const pixman_region16_t *reg_m,
+							  const pixman_region16_t *reg_s);
 
 PIXMAN_API
-pixman_bool_t           pixman_region_inverse            (pixman_region16_t *new_reg,
-							  pixman_region16_t *reg1,
-							  pixman_box16_t    *inv_rect);
+pixman_bool_t           pixman_region_inverse            (pixman_region16_t       *new_reg,
+							  const pixman_region16_t *reg1,
+							  const pixman_box16_t    *inv_rect);
 
 PIXMAN_API
-pixman_bool_t           pixman_region_contains_point     (pixman_region16_t *region,
-							  int                x,
-							  int                y,
-							  pixman_box16_t    *box);
+pixman_bool_t           pixman_region_contains_point     (const pixman_region16_t *region,
+							  int                      x,
+							  int                      y,
+							  pixman_box16_t          *box);
 
 PIXMAN_API
-pixman_region_overlap_t pixman_region_contains_rectangle (pixman_region16_t *region,
-							  pixman_box16_t    *prect);
+pixman_region_overlap_t pixman_region_contains_rectangle (const pixman_region16_t *region,
+							  const pixman_box16_t    *prect);
 
 PIXMAN_API
-pixman_bool_t           pixman_region_not_empty          (pixman_region16_t *region);
+pixman_bool_t           pixman_region_not_empty          (const pixman_region16_t *region);
 
 PIXMAN_API
-pixman_box16_t *        pixman_region_extents            (pixman_region16_t *region);
+pixman_box16_t *        pixman_region_extents            (const pixman_region16_t *region);
 
 PIXMAN_API
-int                     pixman_region_n_rects            (pixman_region16_t *region);
+int                     pixman_region_n_rects            (const pixman_region16_t *region);
 
 PIXMAN_API
-pixman_box16_t *        pixman_region_rectangles         (pixman_region16_t *region,
-							  int               *n_rects);
+pixman_box16_t *        pixman_region_rectangles         (const pixman_region16_t *region,
+							  int                     *n_rects);
 
 PIXMAN_API
-pixman_bool_t           pixman_region_equal              (pixman_region16_t *region1,
-							  pixman_region16_t *region2);
+pixman_bool_t           pixman_region_equal              (const pixman_region16_t *region1,
+							  const pixman_region16_t *region2);
 
 PIXMAN_API
 pixman_bool_t           pixman_region_selfcheck          (pixman_region16_t *region);
 
 PIXMAN_API
-void                    pixman_region_reset              (pixman_region16_t *region,
-							  pixman_box16_t    *box);
+void                    pixman_region_reset              (pixman_region16_t       *region,
+							  const pixman_box16_t    *box);
 
 PIXMAN_API
 void			pixman_region_clear		 (pixman_region16_t *region);
@@ -655,8 +655,8 @@
 							    int                count);
 
 PIXMAN_API
-void                    pixman_region32_init_with_extents  (pixman_region32_t *region,
-							    pixman_box32_t    *extents);
+void                    pixman_region32_init_with_extents  (pixman_region32_t    *region,
+							    const pixman_box32_t *extents);
 
 PIXMAN_API
 void                    pixman_region32_init_from_image    (pixman_region32_t *region,
@@ -673,78 +673,78 @@
 							    int                y);
 
 PIXMAN_API
-pixman_bool_t           pixman_region32_copy               (pixman_region32_t *dest,
-							    pixman_region32_t *source);
+pixman_bool_t           pixman_region32_copy               (pixman_region32_t       *dest,
+							    const pixman_region32_t *source);
 
 PIXMAN_API
-pixman_bool_t           pixman_region32_intersect          (pixman_region32_t *new_reg,
-							    pixman_region32_t *reg1,
-							    pixman_region32_t *reg2);
+pixman_bool_t           pixman_region32_intersect          (pixman_region32_t       *new_reg,
+							    const pixman_region32_t *reg1,
+							    const pixman_region32_t *reg2);
 
 PIXMAN_API
-pixman_bool_t           pixman_region32_union              (pixman_region32_t *new_reg,
-							    pixman_region32_t *reg1,
-							    pixman_region32_t *reg2);
+pixman_bool_t           pixman_region32_union              (pixman_region32_t       *new_reg,
+							    const pixman_region32_t *reg1,
+							    const pixman_region32_t *reg2);
 
 PIXMAN_API
-pixman_bool_t		pixman_region32_intersect_rect     (pixman_region32_t *dest,
-							    pixman_region32_t *source,
-							    int                x,
-							    int                y,
-							    unsigned int       width,
-							    unsigned int       height);
+pixman_bool_t		pixman_region32_intersect_rect     (pixman_region32_t       *dest,
+							    const pixman_region32_t *source,
+							    int                      x,
+							    int                      y,
+							    unsigned int             width,
+							    unsigned int             height);
 
 PIXMAN_API
-pixman_bool_t           pixman_region32_union_rect         (pixman_region32_t *dest,
-							    pixman_region32_t *source,
-							    int                x,
-							    int                y,
-							    unsigned int       width,
-							    unsigned int       height);
+pixman_bool_t           pixman_region32_union_rect         (pixman_region32_t       *dest,
+							    const pixman_region32_t *source,
+							    int                      x,
+							    int                      y,
+							    unsigned int             width,
+							    unsigned int             height);
 
 PIXMAN_API
-pixman_bool_t           pixman_region32_subtract           (pixman_region32_t *reg_d,
-							    pixman_region32_t *reg_m,
-							    pixman_region32_t *reg_s);
+pixman_bool_t           pixman_region32_subtract           (pixman_region32_t       *reg_d,
+							    const pixman_region32_t *reg_m,
+							    const pixman_region32_t *reg_s);
 
 PIXMAN_API
-pixman_bool_t           pixman_region32_inverse            (pixman_region32_t *new_reg,
-							    pixman_region32_t *reg1,
-							    pixman_box32_t    *inv_rect);
+pixman_bool_t           pixman_region32_inverse            (pixman_region32_t       *new_reg,
+							    const pixman_region32_t *reg1,
+							    const pixman_box32_t    *inv_rect);
 
 PIXMAN_API
-pixman_bool_t           pixman_region32_contains_point     (pixman_region32_t *region,
-							    int                x,
-							    int                y,
-							    pixman_box32_t    *box);
+pixman_bool_t           pixman_region32_contains_point     (const pixman_region32_t *region,
+							    int                      x,
+							    int                      y,
+							    pixman_box32_t          *box);
 
 PIXMAN_API
-pixman_region_overlap_t pixman_region32_contains_rectangle (pixman_region32_t *region,
-							    pixman_box32_t    *prect);
+pixman_region_overlap_t pixman_region32_contains_rectangle (const pixman_region32_t *region,
+							    const pixman_box32_t    *prect);
 
 PIXMAN_API
-pixman_bool_t           pixman_region32_not_empty          (pixman_region32_t *region);
+pixman_bool_t           pixman_region32_not_empty          (const pixman_region32_t *region);
 
 PIXMAN_API
-pixman_box32_t *        pixman_region32_extents            (pixman_region32_t *region);
+pixman_box32_t *        pixman_region32_extents            (const pixman_region32_t *region);
 
 PIXMAN_API
-int                     pixman_region32_n_rects            (pixman_region32_t *region);
+int                     pixman_region32_n_rects            (const pixman_region32_t *region);
 
 PIXMAN_API
-pixman_box32_t *        pixman_region32_rectangles         (pixman_region32_t *region,
-							    int               *n_rects);
+pixman_box32_t *        pixman_region32_rectangles         (const pixman_region32_t *region,
+							    int                     *n_rects);
 
 PIXMAN_API
-pixman_bool_t           pixman_region32_equal              (pixman_region32_t *region1,
-							    pixman_region32_t *region2);
+pixman_bool_t           pixman_region32_equal              (const pixman_region32_t *region1,
+							    const pixman_region32_t *region2);
 
 PIXMAN_API
 pixman_bool_t           pixman_region32_selfcheck          (pixman_region32_t *region);
 
 PIXMAN_API
-void                    pixman_region32_reset              (pixman_region32_t *region,
-							    pixman_box32_t    *box);
+void                    pixman_region32_reset              (pixman_region32_t    *region,
+							    const pixman_box32_t *box);
 
 PIXMAN_API
 void			pixman_region32_clear		   (pixman_region32_t *region);

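The pixman.h hunks publish the const-correctness: every purely-read
region or box argument in the region16 and region32 APIs is now
const-qualified, so read-only code can hold regions through const
pointers without casting.  A small caller-side sketch (a hypothetical
function, not part of the patch; assumes the usual pixman include):

    #include <pixman.h>

    static long
    region_extents_area (const pixman_region32_t *region)
    {
        const pixman_box32_t *e;

        if (!pixman_region32_not_empty (region))
            return 0;

        /* extents still returns a mutable box pointer, but now
         * accepts a const region.  */
        e = pixman_region32_extents (region);
        return (long) (e->x2 - e->x1) * (long) (e->y2 - e->y1);
    }
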
Modified: trunk/Build/source/libs/pixman/version.ac
===================================================================
--- trunk/Build/source/libs/pixman/version.ac	2022-10-18 20:56:06 UTC (rev 64746)
+++ trunk/Build/source/libs/pixman/version.ac	2022-10-18 21:13:29 UTC (rev 64747)
@@ -8,4 +8,4 @@
 dnl --------------------------------------------------------
 dnl
 dnl  m4-include this file to define the current pixman version
-m4_define([pixman_version], [0.40.0])
+m4_define([pixman_version], [0.42.0])
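
version.ac isolates the imported pixman version, so the update to
0.42.0 is this one-line change.  A sketch of how a configure.ac
consumes the file (the AC_INIT arguments are illustrative, not quoted
from the TeX Live build files):

    m4_include([version.ac])
    AC_INIT([pixman (TeX Live)], pixman_version, [tex-k@tug.org])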


