// Copyright ©2015 The Gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// Some of the loop unrolling code is copied from:
// http://golang.org/src/math/big/arith_amd64.s
// which is distributed under these terms:
//
// Copyright (c) 2012 The Go Authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// +build !noasm,!appengine

#include "textflag.h"
#define X_PTR SI
#define Y_PTR DI
#define X_PTR_INC4 R11 // aliases INCx3_X; used only in the gather path
#define Y_PTR_INC4 R12 // aliases INCx3_Y; used only in the gather path
#define DST_PTR DX
#define IDX AX
#define LEN CX
#define TAIL BX
#define INC_X R8
#define INCx3_X R11
#define INC_Y R9
#define INCx3_Y R12
#define INC_DST R10
#define INCx3_DST R13
#define ALPHA X0
#define ALPHA_Y Y0
#define X_IDX Y1
#define X_IDX2 X1
#define Y_IDX Y2
#define Y_IDX2 X2
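// AxpyIncToAVX computes dst[idst+i*incDst] = alpha*x[ix+i*incX] + y[iy+i*incY]
// for 0 <= i < n. It uses FMA3 throughout; the gather fast path additionally
// uses AVX2. A reference sketch in Go of the same operation (illustrative
// only, with a hypothetical name; the assembly below is the implementation):
//
//	func axpyIncToRef(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr) {
//		for i := uintptr(0); i < n; i++ {
//			dst[idst+i*incDst] = alpha*x[ix+i*incX] + y[iy+i*incY]
//		}
//	}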
// func AxpyIncToAVX(dst []float64, incDst, idst uintptr, alpha float64, x, y []float64, n, incX, incY, ix, iy uintptr)
TEXT ·AxpyIncToAVX(SB), NOSPLIT, $0
MOVQ dst_base+0(FP), DST_PTR // DST_PTR := &dst
MOVQ x_base+48(FP), X_PTR // X_PTR := &x
MOVQ y_base+72(FP), Y_PTR // Y_PTR := &y
MOVQ n+96(FP), LEN // LEN := n
CMPQ LEN, $0 // if LEN == 0 { return }
JE end
MOVQ ix+120(FP), INC_X
LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(x[ix])
MOVQ iy+128(FP), INC_Y
LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(y[iy])
MOVQ idst+32(FP), INC_DST
LEAQ (DST_PTR)(INC_DST*8), DST_PTR // DST_PTR = &(dst[idst])
MOVQ incX+104(FP), INC_X // INC_X = incX * sizeof(float64)
SHLQ $3, INC_X
MOVQ incY+112(FP), INC_Y // INC_Y = incY * sizeof(float64)
SHLQ $3, INC_Y
MOVQ incDst+24(FP), INC_DST // INC_DST = incDst * sizeof(float64)
SHLQ $3, INC_DST
MOVSD alpha+40(FP), ALPHA
MOVQ LEN, TAIL // TAIL = LEN
SHRQ $2, LEN // LEN = floor( n / 4 )
JZ tail_start // if LEN == 0 { goto tail_start }
LEAQ (INC_X)(INC_X*2), INCx3_X // INCx3_X = INC_X * 3
LEAQ (INC_Y)(INC_Y*2), INCx3_Y // INCx3_Y = INC_Y * 3
LEAQ (INC_DST)(INC_DST*2), INCx3_DST // INCx3_DST = INC_DST * 3
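// Choose a code path: when dst is contiguous (incDst == 1, so INC_DST == 8
// bytes) the AVX2 gather path below can load the strided x and y elements
// with VGATHERQPD and store dst with plain vector moves; otherwise fall
// through to the 4x-unrolled scalar FMA loop.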
CMPQ INC_DST, $8 // if incDst != 1 { goto loop }
JNE loop
CMPQ LEN, $0 // if LEN <= 0 { goto loop }
JLE loop
JMP axpy_gather
loop: // do { // dst[i] = alpha * x[i] + y[i] unrolled 4x.
VMOVSD (X_PTR), X2 // X_i = x[i]
VMOVSD (X_PTR)(INC_X*1), X3
VMOVSD (X_PTR)(INC_X*2), X4
VMOVSD (X_PTR)(INCx3_X*1), X5
VFMADD213SD (Y_PTR), ALPHA, X2 // X_i = X_i * a + y[i]
VFMADD213SD (Y_PTR)(INC_Y*1), ALPHA, X3
VFMADD213SD (Y_PTR)(INC_Y*2), ALPHA, X4
VFMADD213SD (Y_PTR)(INCx3_Y*1), ALPHA, X5
VMOVSD X2, (DST_PTR) // dst[i] = X_i
VMOVSD X3, (DST_PTR)(INC_DST*1)
VMOVSD X4, (DST_PTR)(INC_DST*2)
VMOVSD X5, (DST_PTR)(INCx3_DST*1)
LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4])
LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4])
LEAQ (DST_PTR)(INC_DST*4), DST_PTR // DST_PTR = &(DST_PTR[incDst*4])
DECQ LEN
JNZ loop // } while --LEN > 0
CMPQ TAIL, $0 // if TAIL == 0 { return }
JE end
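// The tail handles the remaining n % 4 elements: a 2-element step if bit 1
// of n is set, then a scalar step if bit 0 is set.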
tail_start:
TESTQ $2, TAIL // if TAIL & 2 == 0 { goto tail_one }
JZ tail_one
VMOVSD (X_PTR), X2 // X_i = x[i]
VMOVSD (X_PTR)(INC_X*1), X3
VFMADD213SD (Y_PTR), ALPHA, X2 // X_i = X_i * a + y[i]
VFMADD213SD (Y_PTR)(INC_Y*1), ALPHA, X3
VMOVSD X2, (DST_PTR) // dst[i] = X_i
VMOVSD X3, (DST_PTR)(INC_DST*1)
LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2])
LEAQ (Y_PTR)(INC_Y*2), Y_PTR // Y_PTR = &(Y_PTR[incY*2])
LEAQ (DST_PTR)(INC_DST*2), DST_PTR // DST_PTR = &(DST_PTR[incDst*2])
tail_one:
TESTQ $1, TAIL
JZ end // if TAIL & 1 == 0 { goto end }
VMOVSD (X_PTR), X2 // X2 = x[i]
VFMADD213SD (Y_PTR), ALPHA, X2 // X2 = X2 * alpha + y[i]
VMOVSD X2, (DST_PTR) // dst[i] = X2
end:
RET
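// axpy_gather is the contiguous-dst fast path. It packs the byte offsets of
// four consecutive strided elements into index vectors and processes eight
// elements per iteration with VGATHERQPD. An equivalent Go sketch of the main
// gather loop (variable names are illustrative; the assembly tracks byte
// offsets rather than element indices):
//
//	for ; blocks > 0; blocks-- { // blocks = n / 8
//		for k := uintptr(0); k < 8; k++ { // four gathers + two FMAs in the assembly
//			dst[di+k] = alpha*x[xi+k*incX] + y[yi+k*incY]
//		}
//		xi += 8 * incX
//		yi += 8 * incY
//		di += 8 // dst is contiguous on this path
//	}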
axpy_gather:
XORQ IDX, IDX // IDX = 0
VPXOR X1, X1, X1 // X1 = { 0, 0 }
VPXOR X2, X2, X2 // X2 = { 0, 0 }
VMOVQ INC_X, X1 // X1 = { INC_X, 0 }
VMOVQ INC_Y, X2 // X2 = { INC_Y, 0 }
VMOVQ INCx3_X, X3 // X3 = { 3 * INC_X, 0 }
VMOVQ INCx3_Y, X4 // X4 = { 3 * INC_Y, 0 }
VPADDQ X1, X1, X5 // X5 = { 2 * INC_X, 0 }
VPADDQ X2, X2, X6 // X6 = { 2 * INC_Y, 0 }
VSHUFPD $1, X1, X1, X1 // X1 = { 0, INC_X }
VSHUFPD $1, X2, X2, X2 // X2 = { 0, INC_Y }
VSHUFPD $0, X3, X5, X3 // X3 = { 2 * INC_X, 3 * INC_X }
VSHUFPD $0, X4, X6, X4 // X4 = { 2 * INC_Y, 3 * INC_Y }
VINSERTI128 $1, X3, X_IDX, X_IDX // X_IDX = { 0, INC_X, 2 * INC_X, 3 * INC_X }
VINSERTI128 $1, X4, Y_IDX, Y_IDX // Y_IDX = { 0, INC_Y, 2 * INC_Y, 3 * INC_Y }
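// X_IDX and Y_IDX now hold the byte offsets { 0, INC, 2*INC, 3*INC } of four
// consecutive strided elements; they are used with scale 1 as gather indices.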
VPCMPEQD Y12, Y12, Y12 // Y12 = all 1's (master gather mask; VGATHERQPD clears the mask it uses)
VBROADCASTSD ALPHA, ALPHA_Y // ALPHA_Y = { alpha, alpha, alpha, alpha }
SHRQ $1, LEN // LEN = floor( n / 8 )
JZ g_tail4 // if LEN == 0 { goto g_tail4 }
LEAQ (X_PTR)(INC_X*4), X_PTR_INC4 // X_PTR_INC4 = &x[i+4] (reuses INCx3_X's register; the x3 strides now live in the index vectors)
LEAQ (Y_PTR)(INC_Y*4), Y_PTR_INC4 // Y_PTR_INC4 = &y[i+4]
g_loop:
VMOVUPS Y12, Y10 // reset the gather masks to all 1's
VMOVUPS Y12, Y9
VMOVUPS Y12, Y8
VMOVUPS Y12, Y7
VGATHERQPD Y10, (X_PTR)(X_IDX * 1), Y3 // Y3 = { x[i], x[i+1], x[i+2], x[i+3] }
VGATHERQPD Y9, (X_PTR_INC4)(X_IDX * 1), Y4 // Y4 = { x[i+4], x[i+5], x[i+6], x[i+7] }
VGATHERQPD Y8, (Y_PTR)(Y_IDX * 1), Y5 // Y5 = { y[i], y[i+1], y[i+2], y[i+3] }
VGATHERQPD Y7, (Y_PTR_INC4)(Y_IDX * 1), Y6 // Y6 = { y[i+4], y[i+5], y[i+6], y[i+7] }
VFMADD213PD Y5, ALPHA_Y, Y3 // Y3 = Y3 * alpha + Y5
VFMADD213PD Y6, ALPHA_Y, Y4 // Y4 = Y4 * alpha + Y6
VMOVUPS Y3, (DST_PTR)(IDX*8) // dst[i:i+3] = Y3
VMOVUPS Y4, 32(DST_PTR)(IDX*8) // dst[i+4:i+7] = Y4
LEAQ (X_PTR)(INC_X*8), X_PTR // X_PTR = &(X_PTR[incX*8])
LEAQ (Y_PTR)(INC_Y*8), Y_PTR // Y_PTR = &(Y_PTR[incY*8])
LEAQ (X_PTR_INC4)(INC_X*8), X_PTR_INC4
LEAQ (Y_PTR_INC4)(INC_Y*8), Y_PTR_INC4
ADDQ $8, IDX // i += 8
DECQ LEN
JNZ g_loop
ANDQ $7, TAIL // TAIL = n % 8
JE g_end // if TAIL == 0 { goto g_end }
g_tail4:
TESTQ $4, TAIL // if TAIL & 4 == 0 { goto g_tail2 }
JZ g_tail2
VMOVUPS Y12, Y10 // reset the gather masks to all 1's
VMOVUPS Y12, Y8
VGATHERQPD Y10, (X_PTR)(X_IDX * 1), Y3 // Y3 = { x[i], x[i+1], x[i+2], x[i+3] }
VGATHERQPD Y8, (Y_PTR)(Y_IDX * 1), Y5 // Y5 = { y[i], y[i+1], y[i+2], y[i+3] }
VFMADD213PD Y5, ALPHA_Y, Y3 // Y3 = Y3 * alpha + Y5
VMOVUPS Y3, (DST_PTR)(IDX*8) // dst[i:i+3] = Y3
LEAQ (X_PTR)(INC_X*4), X_PTR // X_PTR = &(X_PTR[incX*4])
LEAQ (Y_PTR)(INC_Y*4), Y_PTR // Y_PTR = &(Y_PTR[incY*4])
ADDQ $4, IDX // i += 4
g_tail2:
TESTQ $2, TAIL // if TAIL & 2 == 0 { goto g_tail1 }
JZ g_tail1
VMOVUPS X12, X10 // reset the gather masks to all 1's
VMOVUPS X12, X8
VGATHERQPD X10, (X_PTR)(X_IDX2 * 1), X3 // X3 = { x[i], x[i+1] }
VGATHERQPD X8, (Y_PTR)(Y_IDX2 * 1), X5 // X5 = { y[i], y[i+1] }
VFMADD213PD X5, ALPHA, X3 // X3 = X3 * alpha + X5
VMOVUPS X3, (DST_PTR)(IDX*8) // dst[i:i+1] = X3
LEAQ (X_PTR)(INC_X*2), X_PTR // X_PTR = &(X_PTR[incX*2])
LEAQ (Y_PTR)(INC_Y*2), Y_PTR // Y_PTR = &(Y_PTR[incY*2])
ADDQ $2, IDX // i += 2
g_tail1:
TESTQ $1, TAIL // if TAIL & 1 == 0 { goto g_end }
JZ g_end
VMOVSD (X_PTR), X2 // X2 = x[i]
VFMADD213SD (Y_PTR), ALPHA, X2 // X2 = X2 * alpha + y[i]
VMOVSD X2, (DST_PTR)(IDX*8) // dst[i] = X2
g_end:
VZEROALL // clear the YMM registers to avoid AVX-SSE transition penalties
RET