Diffstat (limited to 'arm/neon_scale3x.Sinc')
-rw-r--r--  arm/neon_scale3x.Sinc  976
1 file changed, 976 insertions, 0 deletions
diff --git a/arm/neon_scale3x.Sinc b/arm/neon_scale3x.Sinc
new file mode 100644
index 0000000..fc72d31
--- /dev/null
+++ b/arm/neon_scale3x.Sinc
@@ -0,0 +1,976 @@
+@@
+@@ Copyright (C) 2012 Roman Pauer
+@@
+@@ Permission is hereby granted, free of charge, to any person obtaining a copy of
+@@ this software and associated documentation files (the "Software"), to deal in
+@@ the Software without restriction, including without limitation the rights to
+@@ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+@@ of the Software, and to permit persons to whom the Software is furnished to do
+@@ so, subject to the following conditions:
+@@
+@@ The above copyright notice and this permission notice shall be included in all
+@@ copies or substantial portions of the Software.
+@@
+@@ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+@@ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+@@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+@@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+@@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+@@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+@@ SOFTWARE.
+@@
+
+
+@ A B C --\ E0 E1 E2
+@ D E F --/ E3 E4 E5
+@ G H I     E6 E7 E8
+
+@ q0 = S1sl < A >
+@ q1 = S2sl < D >
+@ q2 = S3sl < G >
+@ q3 = S1sr < C >
+@ q4 = S2sr < F >
+@ q5 = S3sr < I >
+@ q6 =
+@ q7 =
+@ q8 = S1
+@ q9 = S2
+@ q10 = S3
+@ q11 =
+@ q12 = S2prev < E >
+@ q13 =
+@ q14 = S1prev < B >
+@ q15 = S3prev < H >
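+
+@ For orientation, the per-pixel scale3x rules the code below implements,
+@ written as a scalar C sketch (illustrative only; the variable names are
+@ ours, and the NEON code evaluates the same logic 16 pixels at a time):
+@
+@   if (B != H && D != F) {
+@       E0 = (D == B) ? D : E;
+@       E1 = ((D == B && E != C) || (B == F && E != A)) ? B : E;
+@       E2 = (B == F) ? F : E;
+@       E3 = ((D == B && E != G) || (D == H && E != A)) ? D : E;
+@       E4 = E;
+@       E5 = ((B == F && E != I) || (H == F && E != C)) ? F : E;
+@       E6 = (D == H) ? D : E;
+@       E7 = ((D == H && E != I) || (H == F && E != G)) ? H : E;
+@       E8 = (H == F) ? F : E;
+@   } else {
+@       E0 = E1 = E2 = E3 = E4 = E5 = E6 = E7 = E8 = E;
+@   }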
+
+
+.macro ___neon_scale3x_8_8_slice counter, dst1, dst2, dst3, reg1, dB0, dB1, dH0, dH1, first15, last16
+
+@ d12 = E0[0]
+@ d13 = E1[0]
+@ d14 = E2[0]
+@ d15 = tmp0[0]
+@ d22 = E3[0]
+@ d23 = tmp1[0]
+@ d24 = E4[0]
+@ d25 = E4[1]
+@ d26 = E5[0]
+@ d27 = C0[0]
+
+@ q0 = tmp2
+@ q3 = tmp3
+@ q2 = tmp4
+@ q5 = tmp5
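+
+@ A note on the boolean encoding used below: vceq writes all-ones (true)
+@ or all-zeros (false) into each lane, so
+@   vorn d, a, b   computes  a || !b   (the "x || !(y)" terms),
+@   vand d, a, b   computes  a && b,
+@   vbsl m, e, n   keeps E in lanes where the mask m is true and takes the
+@                  neighbour value n elsewhere (m is overwritten).
+@ C0 = (B == H || D == F) is the negation of the scale3x outer condition,
+@ so every "C0 || ..." mask selects the unchanged centre pixel E.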
+
+ vceq.i8 d15, \dB0, \dH0 @ tmp0[0] = < B == H >
+
+ vceq.i8 d23, d2, d8 @ tmp1[0] = < D == F >
+
+ .if \first15
+ cmp \reg1, #8
+ .endif
+
+ vceq.i8 q0, q12, q0 @ tmp2 = < E == A >
+
+ vceq.i8 q3, q12, q3 @ tmp3 = < E == C >
+
+ .if \first15
+ add \reg1, \reg1, \reg1, lsl #1 @ reg1 = 3 * (counter & 15)
+ .endif
+
+ vorr d27, d15, d23 @ C0[0] = < B == H || D == F >
+
+ vceq.i8 d22, d2, \dB0 @ E3[0] = < D == B >
+
+ vceq.i8 d26, \dB0, d8 @ E5[0] = < B == F >
+
+ vceq.i8 q2, q12, q2 @ tmp4 = < E == G >
+
+ vceq.i8 q5, q12, q5 @ tmp5 = < E == I >
+
+ .if \first15
+ sub \reg1, \reg1, #(3*8) @ reg1 -= 3*8
+ .endif
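+
+@ reg1 is now 3*(counter & 15) - 3*8: each vst3.8 below writes a full
+@ 8-pixel group, overshooting when fewer pixels remain. For 1-8 leading
+@ pixels the addls below pulls dst back to 3*(counter & 15) bytes and the
+@ second half is skipped (bls 0f); for 9-15 pixels reg1 is applied as the
+@ post-index of the second store. The aligned iterations that follow
+@ overwrite the surplus bytes.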
+
+ vorn d15, d6, d22 @ tmp0[0] = < (E == C) || !(D == B) >
+
+ vorn d23, d0, d26 @ tmp1[0] = < (E == A) || !(B == F) >
+
+ vorn d12, d27, d22 @ E0[0] = < C0 || !(D == B) >
+
+ vand d13, d15, d23 @ E1[0] = < ((E == C) || !(D == B)) && ((E == A) || !(B == F)) >
+
+ vorn d14, d27, d26 @ E2[0] = < C0 || !(B == F) >
+
+ vorr d13, d27, d13 @ E1[0] = < C0 || (((E == C) || !(D == B)) && ((E == A) || !(B == F))) >
+
+ vbsl d12, d24, d2 @ E0[0] = < (C0 || !(D == B)) ? E : D >
+
+ vbsl d14, d24, d8 @ E2[0] = < (C0 || !(B == F)) ? E : F >
+
+ vbsl d13, d24, \dB0 @ E1[0] = < (C0 || (((E == C) || !(D == B)) && ((E == A) || !(B == F)))) ? E : B >
+
+ vceq.i8 d15, d2, \dH0 @ tmp0[0] = < D == H >
+
+ vceq.i8 d23, \dH0, d8 @ tmp1[0] = < H == F >
+
+ vorn d22, d4, d22 @ E3[0] = < (E == G) || !(D == B) >
+ vst3.8 {d12-d14}, [\dst1]! @ [dst] = E0,E1,E2; dst1 += 3*8
+
+ .if \first15
+ addls \dst1, \dst1, \reg1 @ dst1 += reg1
+ .endif
+
+ vorn d26, d10, d26 @ E5[0] = < (E == I) || !(B == F) >
+
+@ d12 = tmp6[0]
+@ d13 = tmp7[0]
+
+ vorn d12, d0, d15 @ tmp6[0] = < (E == A) || !(D == H) >
+
+ vorn d13, d6, d23 @ tmp7[0] = < (E == C) || !(H == F) >
+
+ vand d22, d22, d12 @ E3[0] = < ((E == G) || !(D == B)) && ((E == A) || !(D == H)) >
+
+ vand d26, d26, d13 @ E5[0] = < ((E == I) || !(B == F)) && ((E == C) || !(H == F)) >
+
+@ d12 = E6[0]
+@ d13 = E7[0]
+@ d14 = E8[0]
+
+ vorr d22, d27, d22 @ E3[0] = < C0 || (((E == G) || !(D == B)) && ((E == A) || !(D == H))) >
+
+ vorr d26, d27, d26 @ E5[0] = < C0 || (((E == I) || !(B == F)) && ((E == C) || !(H == F))) >
+
+ vbsl d22, d24, d2 @ E3[0] = < (C0 || (((E == G) || !(D == B)) && ((E == A) || !(D == H)))) ? E : D >
+
+ vbsl d26, d24, d8 @ E5[0] = < (C0 || (((E == I) || !(B == F)) && ((E == C) || !(H == F)))) ? E : F >
+
+ vorn d13, d10, d15 @ E7[0] = < (E == I) || !(D == H) >
+
+ vorn d12, d27, d15 @ E6[0] = < C0 || !(D == H) >
+ vst3.8 {d22,d24,d26}, [\dst2]! @ [dst + dststride] = E3,E4,E5; dst2 += 3*8
+
+ .if \first15
+ addls \dst2, \dst2, \reg1 @ dst2 += reg1
+ .endif
+
+ vorn d15, d4, d23 @ tmp0[0] = < (E == G) || !(H == F) >
+
+ vorn d14, d27, d23 @ E8[0] = < C0 || !(H == F) >
+
+ vand d13, d13, d15 @ E7[0] = < ((E == I) || !(D == H)) && ((E == G) || !(H == F)) >
+
+ vbsl d12, d24, d2 @ E6[0] = < (C0 || !(D == H)) ? E : D >
+
+ vorr d13, d27, d13 @ E7[0] = < C0 || (((E == I) || !(D == H)) && ((E == G) || !(H == F))) >
+
+ vbsl d14, d24, d8 @ E8[0] = < (C0 || !(H == F)) ? E : F >
+
+ vbsl d13, d24, \dH0 @ E7[0] = < (C0 || (((E == I) || !(D == H)) && ((E == G) || !(H == F)))) ? E : H >
+
+@ d15 = tmp0[1]
+@ d22 = tmp1[1]
+@ d23 = E3[1]
+@ d24 = E4[0]
+@ d25 = E4[1]
+@ d26 = C0[1]
+@ d27 = E5[1]
+
+ vceq.i8 d15, \dB1, \dH1 @ tmp0[1] = < B == H >
+
+ vceq.i8 d22, d3, d9 @ tmp1[1] = < D == F >
+
+ vceq.i8 d23, d3, \dB1 @ E3[1] = < D == B >
+ vst3.8 {d12-d14}, [\dst3]! @ [dst + 2 * dststride] = E6,E7,E8; dst3 += 3*8
+
+ .if \first15
+ addls \dst3, \dst3, \reg1 @ dst3 += reg1
+ bls 0f
+ .endif
+
+@ d12 = E0[1]
+@ d13 = E1[1]
+@ d14 = E2[1]
+
+ vorr d26, d15, d22 @ C0[1] = < B == H || D == F >
+
+ vceq.i8 d27, \dB1, d9 @ E5[1] = < B == F >
+
+ vorn d15, d7, d23 @ tmp0[1] = < (E == C) || !(D == B) >
+
+ vorn d22, d1, d27 @ tmp1[1] = < (E == A) || !(B == F) >
+
+ vorn d12, d26, d23 @ E0[1] = < C0 || !(D == B) >
+
+ vand d13, d15, d22 @ E1[1] = < ((E == C) || !(D == B)) && ((E == A) || !(B == F)) >
+
+ vorn d14, d26, d27 @ E2[1] = < C0 || !(B == F) >
+
+ vorr d13, d26, d13 @ E1[1] = < C0 || (((E == C) || !(D == B)) && ((E == A) || !(B == F))) >
+
+ vbsl d12, d25, d3 @ E0[1] = < (C0 || !(D == B)) ? E : D >
+
+ vbsl d14, d25, d9 @ E2[1] = < (C0 || !(B == F)) ? E : F >
+
+ vbsl d13, d25, \dB1 @ E1[1] = < (C0 || (((E == C) || !(D == B)) && ((E == A) || !(B == F)))) ? E : B >
+
+ vceq.i8 d15, d3, \dH1 @ tmp0[1] = < D == H >
+
+ vceq.i8 d22, \dH1, d9 @ tmp1[1] = < H == F >
+
+ vorn d23, d5, d23 @ E3[1] = < (E == G) || !(D == B) >
+ .ifeq \first15
+ vst3.8 {d12-d14}, [\dst1]! @ [dst] = E0,E1,E2; dst1 += 3*8
+ .else
+ vst3.8 {d12-d14}, [\dst1],\reg1 @ [dst] = E0,E1,E2; dst1 += reg1
+ .endif
+
+ vorn d27, d11, d27 @ E5[1] = < (E == I) || !(B == F) >
+
+@ d12 = tmp6[1]
+@ d13 = tmp7[1]
+
+ vorn d12, d1, d15 @ tmp6[1] = < (E == A) || !(D == H) >
+
+ vorn d13, d7, d22 @ tmp7[1] = < (E == C) || !(H == F) >
+
+ vand d23, d23, d12 @ E3[1] = < ((E == G) || !(D == B)) && ((E == A) || !(D == H)) >
+
+ vand d27, d27, d13 @ E5[1] = < ((E == I) || !(B == F)) && ((E == C) || !(H == F)) >
+
+@ d12 = E6[1]
+@ d13 = E7[1]
+@ d14 = E8[1]
+
+ vorr d23, d26, d23 @ E3[1] = < C0 || (((E == G) || !(D == B)) && ((E == A) || !(D == H))) >
+
+ vorr d27, d26, d27 @ E5[1] = < C0 || (((E == I) || !(B == F)) && ((E == C) || !(H == F))) >
+
+ vbsl d23, d25, d3 @ E3[1] = < (C0 || (((E == G) || !(D == B)) && ((E == A) || !(D == H)))) ? E : D >
+
+ vbsl d27, d25, d9 @ E5[1] = < (C0 || (((E == I) || !(B == F)) && ((E == C) || !(H == F)))) ? E : F >
+
+ vorn d13, d11, d15 @ E7[1] = < (E == I) || !(D == H) >
+
+ vorn d12, d26, d15 @ E6[1] = < C0 || !(D == H) >
+ .ifeq \first15
+ vst3.8 {d23,d25,d27}, [\dst2]! @ [dst + dststride] = E3,E4,E5; dst2 += 3*8
+ .else
+ vst3.8 {d23,d25,d27}, [\dst2],\reg1 @ [dst + dststride] = E3,E4,E5; dst2 += reg1
+ .endif
+
+ vorn d15, d5, d22 @ tmp0[1] = < (E == G) || !(H == F) >
+
+ vorn d14, d26, d22 @ E8[1] = < C0 || !(H == F) >
+
+ vand d13, d13, d15 @ E7[1] = < ((E == I) || !(D == H)) && ((E == G) || !(H == F)) >
+
+ vbsl d12, d25, d3 @ E6[1] = < (C0 || !(D == H)) ? E : D >
+
+ vorr d13, d26, d13 @ E7[1] = < C0 || (((E == I) || !(D == H)) && ((E == G) || !(H == F))) >
+
+ vbsl d14, d25, d9 @ E8[1] = < (C0 || !(H == F)) ? E : F >
+
+ vbsl d13, d25, \dH1 @ E7[1] = < (C0 || (((E == I) || !(D == H)) && ((E == G) || !(H == F)))) ? E : H >
+
+ .ifeq \first15
+
+ .ifeq \last16
+ sub \counter, \counter, #16 @ counter -= 16
+ cmp \counter, #16
+ .endif
+
+ vst3.8 {d12-d14}, [\dst3]! @ [dst + 2 * dststride] = E6,E7,E8; dst3 += 3*8
+ .else
+ vst3.8 {d12-d14}, [\dst3],\reg1 @ [dst + 2 * dststride] = E6,E7,E8; dst3 += reg1
+
+ 0:
+ .endif
+
+.endm
+
+.macro __neon_scale3x_8_8_line src1, src2, src3, counter, dst1, dst2, dst3, reg1, qB, qH, dB0, dB1, dH0, dH1, alsrc1, alsrc2, alsrc3, aldst1, aldst2, aldst3
+
+ .ifeqs "\qB", "q14"
+ vld1.8 {d29[7]}, [\src1] @ S1prev[15] = src[-srcstride]
+ .endif
+ vld1.8 {d25[7]}, [\src2] @ S2prev[15] = src[0]
+ .ifeqs "\qH", "q15"
+ vld1.8 {d31[7]}, [\src3] @ S3prev[15] = src[srcstride]
+ .endif
+ andS \reg1, \counter, #15 @ reg1 = counter & 15
+
+ .ifnes "\qB", "q14"
+ add \src1, \src1, \counter @ src1 += counter
+ .endif
+ .ifnes "\qH", "q15"
+ add \src3, \src3, \counter @ src3 += counter
+ .endif
+ beq 1f
+
+ @ first 1-15 pixels - align counter to 16 bytes
+
+ sub \reg1, \reg1, #1 @ reg1 = (counter & 15) - 1
+
+ .ifeqs "\qB", "q14"
+ vld1.8 {q8}, [\src1] @ S1 = [src - srcstride]
+ add \src1, \src1, \reg1 @ src1 += (counter & 15) - 1
+ .endif
+
+ vld1.8 {q9}, [\src2] @ S2 = [src ]
+ add \src2, \src2, \reg1 @ src2 += (counter & 15) - 1
+
+ .ifeqs "\qH", "q15"
+ vld1.8 {q10}, [\src3] @ S3 = [src + srcstride]
+ add \src3, \src3, \reg1 @ src3 += (counter & 15) - 1
+ .endif
+ .ifeqs "\qB", "q14"
+ vext.8 q0, \qB, q8, #15 @ S1sl = S1prev[15] | (S1 << 8) < S >
+
+ vmov \qB, q8 @ S1prev = S1 < T >
+ .endif
+ vext.8 q1, q12, q9, #15 @ S2sl = S2prev[15] | (S2 << 8) < V >
+
+ vmov q12, q9 @ S2prev = S2 < C >
+ .ifeqs "\qH", "q15"
+ vext.8 q2, \qH, q10, #15 @ S3sl = S3prev[15] | (S3 << 8) < X >
+
+ vmov \qH, q10 @ S3prev = S3 < Y >
+ .endif
+ .ifeqs "\qB", "q14"
+ vext.8 q3, \qB, q8, #1 @ S1sr = (S1prev >> 8) | ... < U >
+ .endif
+
+ vext.8 q4, q12, q9, #1 @ S2sr = (S2prev >> 8) | ... < W >
+
+ .ifeqs "\qH", "q15"
+ vext.8 q5, \qH, q10, #1 @ S3sr = (S3prev >> 8) | ... < Z >
+ .else
+ vmov q2, q1 @ S3sl = S2sl < X >
+
+ vmov q5, q4 @ S3sr = S2sr < Z >
+ .endif
+
+ .ifnes "\qB", "q14"
+ vmov q0, q1 @ S1sl = S2sl < S >
+
+ vmov q3, q4 @ S1sr = S2sr < U >
+ .else
+ vld1.8 {d29[7]}, [\src1]! @ S1prev[15] = src[counter & 15 - 1 - srcstride]; src1++
+ .endif
+
+ add \reg1, \reg1, #1 @ reg1 = counter & 15
+
+ vld1.8 {d25[7]}, [\src2]! @ S2prev[15] = src[counter & 15 - 1]; src2++
+
+ bic \counter, \counter, #15
+
+ .ifeqs "\qH", "q15"
+ vld1.8 {d31[7]}, [\src3]! @ S3prev[15] = src[counter & 15 - 1 + srcstride]; src3++
+ .endif
+
+ ___neon_scale3x_8_8_slice \counter, \dst1, \dst2, \dst3, \reg1, \dB0, \dB1, \dH0, \dH1, 1, 0
+
+
+ @ counter is aligned to 16 bytes
+
+ 1:
+ .ifeqs "\qB", "q14"
+ vld1.8 {q8}, [\alsrc1]! @ S1 = [src - srcstride]; src1 += 16
+ .endif
+ vld1.8 {q9}, [\alsrc2]! @ S2 = [src ]; src2 += 16
+ .ifeqs "\qH", "q15"
+ vld1.8 {q10}, [\alsrc3]! @ S3 = [src + srcstride]; src3 += 16
+ .endif
+
+ @ inner loop (16 pixels per iteration)
+ 2:
+
+ .ifeqs "\qB", "q14"
+ vext.8 q0, \qB, q8, #15 @ S1sl = S1prev[15] | (S1 << 8) < A >
+ vmov \qB, q8 @ S1prev = S1 < B >
+ .endif
+
+ vext.8 q1, q12, q9, #15 @ S2sl = S2prev[15] | (S2 << 8) < D >
+ vmov q12, q9 @ S2prev = S2 < E >
+
+ .ifeqs "\qH", "q15"
+ vext.8 q2, \qH, q10, #15 @ S3sl = S3prev[15] | (S3 << 8) < G >
+ vmov \qH, q10 @ S3prev = S3 < H >
+ .endif
+
+ .ifeqs "\qB", "q14"
+ vld1.8 {q8}, [\alsrc1]! @ S1 = [src - srcstride]; src1 += 16
+ vext.8 q3, \qB, q8, #1 @ S1sr = (S1prev >> 8) | S1[0] < C >
+ .endif
+
+ vld1.8 {q9}, [\alsrc2]! @ S2 = [src ]; src2 += 16
+ vext.8 q4, q12, q9, #1 @ S2sr = (S2prev >> 8) | S2[0] < F >
+
+ .ifeqs "\qH", "q15"
+ vld1.8 {q10}, [\alsrc3]! @ S3 = [src + srcstride]; src3 += 16
+ vext.8 q5, \qH, q10, #1 @ S3sr = (S3prev >> 8) | S3[0] < I >
+ .else
+ vmov q2, q1 @ S3sl = S2sl < G >
+
+ vmov q5, q4 @ S3sr = S2sr < I >
+ .endif
+
+ .ifnes "\qB", "q14"
+ vmov q0, q1 @ S1sl = S2sl < A >
+
+ vmov q3, q4 @ S1sr = S2sr < C >
+ .endif
+
+ ___neon_scale3x_8_8_slice \counter, \aldst1, \aldst2, \aldst3, \reg1, \dB0, \dB1, \dH0, \dH1, 0, 0
+
+ bhi 2b
+
+ @ last 16 pixels
+
+ .ifeqs "\qB", "q14"
+ vext.8 q0, \qB, q8, #15 @ S1sl = S1prev[15] | (S1 << 8) < A >
+ vmov \qB, q8 @ S1prev = S1 < B >
+ .endif
+
+ vext.8 q1, q12, q9, #15 @ S2sl = S2prev[15] | (S2 << 8) < D >
+ vmov q12, q9 @ S2prev = S2 < E >
+
+ .ifeqs "\qH", "q15"
+ vext.8 q2, \qH, q10, #15 @ S3sl = S3prev[15] | (S3 << 8) < G >
+ vmov \qH, q10 @ S3prev = S3 < H >
+ .endif
+
+ .ifeqs "\qB", "q14"
+ vshr.u64 d16, d17, #(64-8) @ S1[0] = S1[15] | ...
+ .endif
+
+ vshr.u64 d18, d19, #(64-8) @ S2[0] = S2[15] | ...
+
+ .ifeqs "\qH", "q15"
+ vshr.u64 d20, d21, #(64-8) @ S3[0] = S3[15] | ...
+ .endif
+ .ifeqs "\qB", "q14"
+ vext.8 q3, \qB, q8, #1 @ S1sr = (S1prev >> 8) | S1[0] < C >
+ .endif
+
+ vext.8 q4, q12, q9, #1 @ S2sr = (S2prev >> 8) | S2[0] < F >
+
+ .ifeqs "\qH", "q15"
+ vext.8 q5, \qH, q10, #1 @ S3sr = (S3prev >> 8) | S3[0] < I >
+ .else
+ vmov q2, q1 @ S3sl = S2sl < G >
+
+ vmov q5, q4 @ S3sr = S2sr < I >
+ .endif
+
+ .ifnes "\qB", "q14"
+ vmov q0, q1 @ S1sl = S2sl < A >
+
+ vmov q3, q4 @ S1sr = S2sr < C >
+ .endif
+
+ ___neon_scale3x_8_8_slice \counter, \aldst1, \aldst2, \aldst3, \reg1, \dB0, \dB1, \dH0, \dH1, 0, 1
+
+.endm
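+
+@ Note: throughout the line macro the previous 16-pixel block stays in
+@ S1prev/S2prev/S3prev (q14/q12/q15), and vext.8 with #15 / #1 splices it
+@ with the freshly loaded block to form the left- and right-shifted
+@ neighbourhoods (A,D,G and C,F,I) without unaligned loads.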
+
+.macro _neon_scale3x_8_8_line_first src1, src2, src3, counter, dst1, dst2, dst3, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2, aldst3
+ __neon_scale3x_8_8_line \src1, \src2, \src3, \counter, \dst1, \dst2, \dst3, \reg1, q12, q15, d24, d25, d30, d31, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2, \aldst3
+.endm
+
+.macro _neon_scale3x_8_8_line_middle src1, src2, src3, counter, dst1, dst2, dst3, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2, aldst3
+ __neon_scale3x_8_8_line \src1, \src2, \src3, \counter, \dst1, \dst2, \dst3, \reg1, q14, q15, d28, d29, d30, d31, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2, \aldst3
+.endm
+
+.macro _neon_scale3x_8_8_line_last src1, src2, src3, counter, dst1, dst2, dst3, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2, aldst3
+ __neon_scale3x_8_8_line \src1, \src2, \src3, \counter, \dst1, \dst2, \dst3, \reg1, q14, q12, d28, d29, d24, d25, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2, \aldst3
+.endm
+
+.macro neon_scale3x_8_8_line part, src1, src2, src3, counter, dst1, dst2, dst3, reg1, srcalign16, dstalign8
+ .ifeq \srcalign16
+
+ .ifeq \dstalign8
+ _neon_scale3x_8_8_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \dst3, \reg1, \src1, \src2, \src3, \dst1, \dst2, \dst3
+ .else
+ _neon_scale3x_8_8_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \dst3, \reg1, \src1, \src2, \src3, \dst1:64, \dst2:64, \dst3:64
+ .endif
+
+ .else
+
+ .ifeq \dstalign8
+ _neon_scale3x_8_8_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \dst3, \reg1, \src1:128, \src2:128, \src3:128, \dst1, \dst2, \dst3
+ .else
+ _neon_scale3x_8_8_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \dst3, \reg1, \src1:128, \src2:128, \src3:128, \dst1:64, \dst2:64, \dst3:64
+ .endif
+
+ .endif
+.endm
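+
+@ A hypothetical caller (the real entry points live in the sources that
+@ include this file; the registers below are placeholders) would run the
+@ line macro once per source row, e.g. for an unaligned 8bpp frame:
+@
+@   neon_scale3x_8_8_line first,  r1, r2, r3, r4, r5, r6, r7, r8, 0, 0
+@   @ advance src pointers by srcstride and dst pointers by 3*dststride
+@   neon_scale3x_8_8_line middle, r1, r2, r3, r4, r5, r6, r7, r8, 0, 0
+@   @ ... repeat "middle" for all interior rows ...
+@   neon_scale3x_8_8_line last,   r1, r2, r3, r4, r5, r6, r7, r8, 0, 0
+@
+@ "first" substitutes the middle row for the missing row above (qB = q12),
+@ and "last" for the missing row below (qH = q12), so edge rows read no
+@ out-of-bounds memory.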
+
+
+.macro ___neon_scale3x_16_16_slice counter, dst1, dst2, dst3, reg1, dB0, dB1, dH0, dH1, first7, last8
+
+@ d12 = E0[0]
+@ d13 = E1[0]
+@ d14 = E2[0]
+@ d15 = tmp0[0]
+@ d22 = E3[0]
+@ d23 = tmp1[0]
+@ d24 = E4[0]
+@ d25 = E4[1]
+@ d26 = E5[0]
+@ d27 = C0[0]
+
+@ q0 = tmp2
+@ q3 = tmp3
+@ q2 = tmp4
+@ q5 = tmp5
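+
+@ The 16bpp slice mirrors the 8bpp slice above: identical vorn/vand/vbsl
+@ mask logic, but with vceq.i16 compares (4 pixels per d-register instead
+@ of 8) and address arithmetic scaled by 2 bytes per pixel.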
+
+ vceq.i16 d15, \dB0, \dH0 @ tmp0[0] = < B == H >
+
+ vceq.i16 d23, d2, d8 @ tmp1[0] = < D == F >
+
+ .if \first7
+ cmp \reg1, #4
+ .endif
+
+ vceq.i16 q0, q12, q0 @ tmp2 = < E == A >
+
+ vceq.i16 q3, q12, q3 @ tmp3 = < E == C >
+
+ .if \first7
+ lsl \reg1, #1 @ reg1 = 2 * (counter & 7)
+ .endif
+
+ vorr d27, d15, d23 @ C0[0] = < B == H || D == F >
+
+ vceq.i16 d22, d2, \dB0 @ E3[0] = < D == B >
+
+ vceq.i16 d26, \dB0, d8 @ E5[0] = < B == F >
+
+ vceq.i16 q2, q12, q2 @ tmp4 = < E == G >
+
+ vceq.i16 q5, q12, q5 @ tmp5 = < E == I >
+
+ .if \first7
+ add \reg1, \reg1, \reg1, lsl #1 @ reg1 = 2 * 3 * (counter & 7)
+ .endif
+
+ vorn d15, d6, d22 @ tmp0[0] = < (E == C) || !(D == B) >
+
+ vorn d23, d0, d26 @ tmp1[0] = < (E == A) || !(B == F) >
+
+ vorn d12, d27, d22 @ E0[0] = < C0 || !(D == B) >
+
+ .if \first7
+ sub \reg1, \reg1, #(3*2*4) @ reg1 -= 3*2*4
+ .endif
+
+ vand d13, d15, d23 @ E1[0] = < ((E == C) || !(D == B)) && ((E == A) || !(B == F)) >
+
+ vorn d14, d27, d26 @ E2[0] = < C0 || !(B == F) >
+
+ vorr d13, d27, d13 @ E1[0] = < C0 || (((E == C) || !(D == B)) && ((E == A) || !(B == F))) >
+
+ vbsl d12, d24, d2 @ E0[0] = < (C0 || !(D == B)) ? E : D >
+
+ vbsl d14, d24, d8 @ E2[0] = < (C0 || !(B == F)) ? E : F >
+
+ vbsl d13, d24, \dB0 @ E1[0] = < (C0 || (((E == C) || !(D == B)) && ((E == A) || !(B == F)))) ? E : B >
+
+ vceq.i16 d15, d2, \dH0 @ tmp0[0] = < D == H >
+
+ vceq.i16 d23, \dH0, d8 @ tmp1[0] = < H == F >
+
+ vorn d22, d4, d22 @ E3[0] = < (E == G) || !(D == B) >
+ vst3.16 {d12-d14}, [\dst1]! @ [dst] = E0,E1,E2; dst1 += 3*2*4
+
+ .if \first7
+ addls \dst1, \dst1, \reg1 @ dst1 += reg1
+ .endif
+
+ vorn d26, d10, d26 @ E5[0] = < (E == I) || !(B == F) >
+
+@ d12 = tmp6[0]
+@ d13 = tmp7[0]
+
+ vorn d12, d0, d15 @ tmp6[0] = < (E == A) || !(D == H) >
+
+ vorn d13, d6, d23 @ tmp7[0] = < (E == C) || !(H == F) >
+
+ vand d22, d22, d12 @ E3[0] = < ((E == G) || !(D == B)) && ((E == A) || !(D == H)) >
+
+ vand d26, d26, d13 @ E5[0] = < ((E == I) || !(B == F)) && ((E == C) || !(H == F)) >
+
+@ d12 = E6[0]
+@ d13 = E7[0]
+@ d14 = E8[0]
+
+ vorr d22, d27, d22 @ E3[0] = < C0 || (((E == G) || !(D == B)) && ((E == A) || !(D == H))) >
+
+ vorr d26, d27, d26 @ E5[0] = < C0 || (((E == I) || !(B == F)) && ((E == C) || !(H == F))) >
+
+ vbsl d22, d24, d2 @ E3[0] = < (C0 || (((E == G) || !(D == B)) && ((E == A) || !(D == H)))) ? E : D >
+
+ vbsl d26, d24, d8 @ E5[0] = < (C0 || (((E == I) || !(B == F)) && ((E == C) || !(H == F)))) ? E : F >
+
+ vorn d13, d10, d15 @ E7[0] = < (E == I) || !(D == H) >
+
+ vorn d12, d27, d15 @ E6[0] = < C0 || !(D == H) >
+ vst3.16 {d22,d24,d26}, [\dst2]! @ [dst + dststride] = E3,E4,E5; dst2 += 3*2*4
+
+ .if \first7
+ addls \dst2, \dst2, \reg1 @ dst2 += reg1
+ .endif
+
+ vorn d15, d4, d23 @ tmp0[0] = < (E == G) || !(H == F) >
+
+ vorn d14, d27, d23 @ E8[0] = < C0 || !(H == F) >
+
+ vand d13, d13, d15 @ E7[0] = < ((E == I) || !(D == H)) && ((E == G) || !(H == F)) >
+
+ vbsl d12, d24, d2 @ E6[0] = < (C0 || !(D == H)) ? E : D >
+
+ vorr d13, d27, d13 @ E7[0] = < C0 || (((E == I) || !(D == H)) && ((E == G) || !(H == F))) >
+
+ vbsl d14, d24, d8 @ E8[0] = < (C0 || !(H == F)) ? E : F >
+
+ vbsl d13, d24, \dH0 @ E7[0] = < (C0 || (((E == I) || !(D == H)) && ((E == G) || !(H == F)))) ? E : H >
+
+@ d15 = tmp0[1]
+@ d22 = tmp1[1]
+@ d23 = E3[1]
+@ d24 = E4[0]
+@ d25 = E4[1]
+@ d26 = C0[1]
+@ d27 = E5[1]
+
+ vceq.i16 d15, \dB1, \dH1 @ tmp0[1] = < B == H >
+
+ vceq.i16 d22, d3, d9 @ tmp1[1] = < D == F >
+
+ vceq.i16 d23, d3, \dB1 @ E3[1] = < D == B >
+ vst3.16 {d12-d14}, [\dst3]! @ [dst + 2 * dststride] = E6,E7,E8; dst3 += 3*2*4
+
+ .if \first7
+ addls \dst3, \dst3, \reg1 @ dst3 += reg1
+ bls 0f
+ .endif
+
+@ d12 = E0[1]
+@ d13 = E1[1]
+@ d14 = E2[1]
+
+ vorr d26, d15, d22 @ C0[1] = < B == H || D == F >
+
+ vceq.i16 d27, \dB1, d9 @ E5[1] = < B == F >
+
+ vorn d15, d7, d23 @ tmp0[1] = < (E == C) || !(D == B) >
+
+ vorn d22, d1, d27 @ tmp1[1] = < (E == A) || !(B == F) >
+
+ vorn d12, d26, d23 @ E0[1] = < C0 || !(D == B) >
+
+ vand d13, d15, d22 @ E1[1] = < ((E == C) || !(D == B)) && ((E == A) || !(B == F)) >
+
+ vorn d14, d26, d27 @ E2[1] = < C0 || !(B == F) >
+
+ vorr d13, d26, d13 @ E1[1] = < C0 || (((E == C) || !(D == B)) && ((E == A) || !(B == F))) >
+
+ vbsl d12, d25, d3 @ E0[1] = < (C0 || !(D == B)) ? E : D >
+
+ vbsl d14, d25, d9 @ E2[1] = < (C0 || !(B == F)) ? E : F >
+
+ vbsl d13, d25, \dB1 @ E1[1] = < (C0 || (((E == C) || !(D == B)) && ((E == A) || !(B == F)))) ? E : B >
+
+ vceq.i16 d15, d3, \dH1 @ tmp0[1] = < D == H >
+
+ vceq.i16 d22, \dH1, d9 @ tmp1[1] = < H == F >
+
+ vorn d23, d5, d23 @ E3[1] = < (E == G) || !(D == B) >
+ .ifeq \first7
+ vst3.16 {d12-d14}, [\dst1]! @ [dst] = E0,E1,E2; dst1 += 3*2*4
+ .else
+ vst3.16 {d12-d14}, [\dst1], \reg1 @ [dst] = E0,E1,E2; dst1 += reg1
+ .endif
+
+ vorn d27, d11, d27 @ E5[1] = < (E == I) || !(B == F) >
+
+@ d12 = tmp6[1]
+@ d13 = tmp7[1]
+
+ vorn d12, d1, d15 @ tmp6[1] = < (E == A) || !(D == H) >
+
+ vorn d13, d7, d22 @ tmp7[1] = < (E == C) || !(H == F) >
+
+ vand d23, d23, d12 @ E3[1] = < ((E == G) || !(D == B)) && ((E == A) || !(D == H)) >
+
+ vand d27, d27, d13 @ E5[1] = < ((E == I) || !(B == F)) && ((E == C) || !(H == F)) >
+
+@ d12 = E6[1]
+@ d13 = E7[1]
+@ d14 = E8[1]
+
+ vorr d23, d26, d23 @ E3[1] = < C0 || (((E == G) || !(D == B)) && ((E == A) || !(D == H))) >
+
+ vorr d27, d26, d27 @ E5[1] = < C0 || (((E == I) || !(B == F)) && ((E == C) || !(H == F))) >
+
+ vbsl d23, d25, d3 @ E3[1] = < (C0 || (((E == G) || !(D == B)) && ((E == A) || !(D == H)))) ? E : D >
+
+ vbsl d27, d25, d9 @ E5[1] = < (C0 || (((E == I) || !(B == F)) && ((E == C) || !(H == F)))) ? E : F >
+
+ vorn d13, d11, d15 @ E7[1] = < (E == I) || !(D == H) >
+
+ vorn d12, d26, d15 @ E6[1] = < C0 || !(D == H) >
+ .ifeq \first7
+ vst3.16 {d23,d25,d27}, [\dst2]! @ [dst + dststride] = E3,E4,E5; dst2 += 3*2*4
+ .else
+ vst3.16 {d23,d25,d27}, [\dst2], \reg1 @ [dst + dststride] = E3,E4,E5; dst2 += reg1
+ .endif
+
+ vorn d15, d5, d22 @ tmp0[1] = < (E == G) || !(H == F) >
+
+ vorn d14, d26, d22 @ E8[1] = < C0 || !(H == F) >
+
+ vand d13, d13, d15 @ E7[1] = < ((E == I) || !(D == H)) && ((E == G) || !(H == F)) >
+
+ vbsl d12, d25, d3 @ E6[1] = < (C0 || !(D == H)) ? E : D >
+
+ vorr d13, d26, d13 @ E7[1] = < C0 || (((E == I) || !(D == H)) && ((E == G) || !(H == F))) >
+
+ vbsl d14, d25, d9 @ E8[1] = < (C0 || !(H == F)) ? E : F >
+
+ vbsl d13, d25, \dH1 @ E7[1] = < (C0 || (((E == I) || !(D == H)) && ((E == G) || !(H == F)))) ? E : H >
+
+ .ifeq \first7
+
+ .ifeq \last8
+ sub \counter, \counter, #8 @ counter -= 8
+ cmp \counter, #8
+ .endif
+
+ vst3.16 {d12-d14}, [\dst3]! @ [dst + 2 * dststride] = E6,E7,E8; dst3 += 3*2*4
+ .else
+ vst3.16 {d12-d14}, [\dst3], \reg1 @ [dst + 2 * dststride] = E6,E7,E8; dst3 += reg1
+
+ 0:
+ .endif
+
+.endm
+
+.macro __neon_scale3x_16_16_line src1, src2, src3, counter, dst1, dst2, dst3, reg1, qB, qH, dB0, dB1, dH0, dH1, alsrc1, alsrc2, alsrc3, aldst1, aldst2, aldst3
+
+ .ifeqs "\qB", "q14"
+ vld1.16 {d29[3]}, [\src1] @ S1prev[7] = src[-srcstride]
+ .endif
+ vld1.16 {d25[3]}, [\src2] @ S2prev[7] = src[0]
+ .ifeqs "\qH", "q15"
+ vld1.16 {d31[3]}, [\src3] @ S3prev[7] = src[srcstride]
+ .endif
+ andS \reg1, \counter, #7 @ reg1 = counter & 7
+
+ .ifnes "\qB", "q14"
+ add \src1, \src1, \counter, lsl #1 @ src1 += 2 * counter
+ .endif
+ .ifnes "\qH", "q15"
+ add \src3, \src3, \counter, lsl #1 @ src3 += 2 * counter
+ .endif
+ beq 1f
+
+ @ first 1-7 pixels - align counter to 16 bytes
+
+ sub \reg1, \reg1, #1 @ reg1 = (counter & 7) - 1
+
+ .ifeqs "\qB", "q14"
+ vld1.16 {q8}, [\src1] @ S1 = [src - srcstride]
+ add \src1, \src1, \reg1, lsl #1 @ src1 += 2 * ((counter & 7) - 1)
+ .endif
+
+ vld1.16 {q9}, [\src2] @ S2 = [src ]
+ add \src2, \src2, \reg1, lsl #1 @ src2 += 2 * ((counter & 7) - 1)
+
+ .ifeqs "\qH", "q15"
+ vld1.16 {q10}, [\src3] @ S3 = [src + srcstride]
+ add \src3, \src3, \reg1, lsl #1 @ src3 += 2 * ((counter & 7) - 1)
+ .endif
+ .ifeqs "\qB", "q14"
+ vext.8 q0, \qB, q8, #14 @ S1sl = S1prev[7] | (S1 << 16) < S >
+
+ vmov \qB, q8 @ S1prev = S1 < T >
+ .endif
+ vext.8 q1, q12, q9, #14 @ S2sl = S2prev[7] | (S2 << 16) < V >
+
+ vmov q12, q9 @ S2prev = S2 < C >
+ .ifeqs "\qH", "q15"
+ vext.8 q2, \qH, q10, #14 @ S3sl = S3prev[7] | (S3 << 16) < X >
+
+ vmov \qH, q10 @ S3prev = S3 < Y >
+ .endif
+ .ifeqs "\qB", "q14"
+ vext.8 q3, \qB, q8, #2 @ S1sr = (S1prev >> 16) | ... < U >
+ .endif
+
+ vext.8 q4, q12, q9, #2 @ S2sr = (S2prev >> 16) | ... < W >
+
+ .ifeqs "\qH", "q15"
+ vext.8 q5, \qH, q10, #2 @ S3sr = (S3prev >> 16) | ... < Z >
+ .else
+ vmov q2, q1 @ S3sl = S2sl < X >
+
+ vmov q5, q4 @ S3sr = S2sr < Z >
+ .endif
+
+ .ifnes "\qB", "q14"
+ vmov q0, q1 @ S1sl = S2sl < S >
+
+ vmov q3, q4 @ S1sr = S2sr < U >
+ .else
+ vld1.16 {d29[3]}, [\src1]! @ S1prev[7] = src[counter & 7 - 1 - srcstride]; src1 += 2
+ .endif
+
+ add \reg1, \reg1, #1 @ reg1 = counter & 7
+
+ vld1.16 {d25[3]}, [\src2]! @ S2prev[7] = src[counter & 7 - 1]; src2 += 2
+
+ bic \counter, \counter, #7
+
+ .ifeqs "\qH", "q15"
+ vld1.16 {d31[3]}, [\src3]! @ S3prev[7] = src[counter & 7 - 1 + srcstride]; src3 += 2
+ .endif
+
+ ___neon_scale3x_16_16_slice \counter, \dst1, \dst2, \dst3, \reg1, \dB0, \dB1, \dH0, \dH1, 1, 0
+
+
+ @ counter is aligned to 16 bytes
+
+ 1:
+ .ifeqs "\qB", "q14"
+ vld1.16 {q8}, [\alsrc1]! @ S1 = [src - srcstride]; src1 += 2*8
+ .endif
+ vld1.16 {q9}, [\alsrc2]! @ S2 = [src ]; src2 += 2*8
+ .ifeqs "\qH", "q15"
+ vld1.16 {q10}, [\alsrc3]! @ S3 = [src + srcstride]; src3 += 2*8
+ .endif
+
+ @ inner loop (8 pixels per iteration)
+ 2:
+
+ .ifeqs "\qB", "q14"
+ vext.8 q0, \qB, q8, #14 @ S1sl = S1prev[7] | (S1 << 16) < A >
+ vmov \qB, q8 @ S1prev = S1 < B >
+ .endif
+
+ vext.8 q1, q12, q9, #14 @ S2sl = S2prev[7] | (S2 << 16) < D >
+ vmov q12, q9 @ S2prev = S2 < E >
+
+ .ifeqs "\qH", "q15"
+ vext.8 q2, \qH, q10, #14 @ S3sl = S3prev[7] | (S3 << 16) < G >
+ vmov \qH, q10 @ S3prev = S3 < H >
+ .endif
+
+ .ifeqs "\qB", "q14"
+ vld1.16 {q8}, [\alsrc1]! @ S1 = [src - srcstride]; src1 += 2*8
+ vext.8 q3, \qB, q8, #2 @ S1sr = (S1prev >> 16) | S1[0] < C >
+ .endif
+
+ vld1.16 {q9}, [\alsrc2]! @ S2 = [src ]; src2 += 2*8
+ vext.8 q4, q12, q9, #2 @ S2sr = (S2prev >> 16) | S2[0] < F >
+
+ .ifeqs "\qH", "q15"
+ vld1.16 {q10}, [\alsrc3]! @ S3 = [src + srcstride]; src3 += 2*8
+ vext.8 q5, \qH, q10, #2 @ S3sr = (S3prev >> 16) | S3[0] < I >
+ .else
+ vmov q2, q1 @ S3sl = S2sl < G >
+
+ vmov q5, q4 @ S3sr = S2sr < I >
+ .endif
+
+ .ifnes "\qB", "q14"
+ vmov q0, q1 @ S1sl = S2sl < A >
+
+ vmov q3, q4 @ S1sr = S2sr < C >
+ .endif
+
+ ___neon_scale3x_16_16_slice \counter, \aldst1, \aldst2, \aldst3, \reg1, \dB0, \dB1, \dH0, \dH1, 0, 0
+
+ bhi 2b
+
+ @ last 8 pixels
+
+ .ifeqs "\qB", "q14"
+ vext.8 q0, \qB, q8, #14 @ S1sl = S1prev[7] | (S1 << 16) < A >
+ vmov \qB, q8 @ S1prev = S1 < B >
+ .endif
+
+ vext.8 q1, q12, q9, #14 @ S2sl = S2prev[7] | (S2 << 16) < D >
+ vmov q12, q9 @ S2prev = S2 < E >
+
+ .ifeqs "\qH", "q15"
+ vext.8 q2, \qH, q10, #14 @ S3sl = S3prev[7] | (S3 << 16) < G >
+ vmov \qH, q10 @ S3prev = S3 < H >
+ .endif
+
+ .ifeqs "\qB", "q14"
+ vshr.u64 d16, d17, #(64-16) @ S1[0] = S1[7] | ...
+ .endif
+
+ vshr.u64 d18, d19, #(64-16) @ S2[0] = S2[7] | ...
+
+ .ifeqs "\qH", "q15"
+ vshr.u64 d20, d21, #(64-16) @ S3[0] = S3[7] | ...
+ .endif
+ .ifeqs "\qB", "q14"
+ vext.8 q3, \qB, q8, #2 @ S1sr = (S1prev >> 16) | S1[0] < C >
+ .endif
+
+ vext.8 q4, q12, q9, #2 @ S2sr = (S2prev >> 16) | S2[0] < F >
+
+ .ifeqs "\qH", "q15"
+ vext.8 q5, \qH, q10, #2 @ S3sr = (S3prev >> 16) | S3[0] < I >
+ .else
+ vmov q2, q1 @ S3sl = S2sl < G >
+
+ vmov q5, q4 @ S3sr = S2sr < I >
+ .endif
+
+ .ifnes "\qB", "q14"
+ vmov q0, q1 @ S1sl = S2sl < A >
+
+ vmov q3, q4 @ S1sr = S2sr < C >
+ .endif
+
+ ___neon_scale3x_16_16_slice \counter, \aldst1, \aldst2, \aldst3, \reg1, \dB0, \dB1, \dH0, \dH1, 0, 1
+
+.endm
+
+.macro _neon_scale3x_16_16_line_first src1, src2, src3, counter, dst1, dst2, dst3, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2, aldst3
+ __neon_scale3x_16_16_line \src1, \src2, \src3, \counter, \dst1, \dst2, \dst3, \reg1, q12, q15, d24, d25, d30, d31, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2, \aldst3
+.endm
+
+.macro _neon_scale3x_16_16_line_middle src1, src2, src3, counter, dst1, dst2, dst3, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2, aldst3
+ __neon_scale3x_16_16_line \src1, \src2, \src3, \counter, \dst1, \dst2, \dst3, \reg1, q14, q15, d28, d29, d30, d31, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2, \aldst3
+.endm
+
+.macro _neon_scale3x_16_16_line_last src1, src2, src3, counter, dst1, dst2, dst3, reg1, alsrc1, alsrc2, alsrc3, aldst1, aldst2, aldst3
+ __neon_scale3x_16_16_line \src1, \src2, \src3, \counter, \dst1, \dst2, \dst3, \reg1, q14, q12, d28, d29, d24, d25, \alsrc1, \alsrc2, \alsrc3, \aldst1, \aldst2, \aldst3
+.endm
+
+.macro neon_scale3x_16_16_line part, src1, src2, src3, counter, dst1, dst2, dst3, reg1, srcalign16, dstalign8
+ .ifeq \srcalign16
+
+ .ifeq \dstalign8
+ _neon_scale3x_16_16_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \dst3, \reg1, \src1, \src2, \src3, \dst1, \dst2, \dst3
+ .else
+ _neon_scale3x_16_16_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \dst3, \reg1, \src1, \src2, \src3, \dst1:64, \dst2:64, \dst3:64
+ .endif
+
+ .else
+
+ .ifeq \dstalign8
+ _neon_scale3x_16_16_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \dst3, \reg1, \src1:128, \src2:128, \src3:128, \dst1, \dst2, \dst3
+ .else
+ _neon_scale3x_16_16_line_\part \src1, \src2, \src3, \counter, \dst1, \dst2, \dst3, \reg1, \src1:128, \src2:128, \src3:128, \dst1:64, \dst2:64, \dst3:64
+ .endif
+
+ .endif
+.endm
+