
Use same vendored x/crypto packages as go 1.8 tls

Rod Hynes 9 years ago
parent
commit
ddef54c69e
29 changed files with 8726 additions and 2 deletions
  1. psiphon/common/tls/cipher_suites.go (+1 -1)
  2. psiphon/common/tls/crypto/chacha20poly1305/chacha20poly1305.go (+83 -0)
  3. psiphon/common/tls/crypto/chacha20poly1305/chacha20poly1305_amd64.go (+80 -0)
  4. psiphon/common/tls/crypto/chacha20poly1305/chacha20poly1305_amd64.s (+2721 -0)
  5. psiphon/common/tls/crypto/chacha20poly1305/chacha20poly1305_generic.go (+70 -0)
  6. psiphon/common/tls/crypto/chacha20poly1305/chacha20poly1305_noasm.go (+15 -0)
  7. psiphon/common/tls/crypto/chacha20poly1305/chacha20poly1305_test.go (+182 -0)
  8. psiphon/common/tls/crypto/chacha20poly1305/chacha20poly1305_test_vectors.go (+94 -0)
  9. psiphon/common/tls/crypto/chacha20poly1305/internal/chacha20/chacha_generic.go (+199 -0)
  10. psiphon/common/tls/crypto/chacha20poly1305/internal/chacha20/chacha_test.go (+29 -0)
  11. psiphon/common/tls/crypto/curve25519/const_amd64.h (+8 -0)
  12. psiphon/common/tls/crypto/curve25519/const_amd64.s (+20 -0)
  13. psiphon/common/tls/crypto/curve25519/cswap_amd64.s (+88 -0)
  14. psiphon/common/tls/crypto/curve25519/curve25519.go (+841 -0)
  15. psiphon/common/tls/crypto/curve25519/curve25519_test.go (+29 -0)
  16. psiphon/common/tls/crypto/curve25519/doc.go (+23 -0)
  17. psiphon/common/tls/crypto/curve25519/freeze_amd64.s (+73 -0)
  18. psiphon/common/tls/crypto/curve25519/ladderstep_amd64.s (+1377 -0)
  19. psiphon/common/tls/crypto/curve25519/mont25519_amd64.go (+240 -0)
  20. psiphon/common/tls/crypto/curve25519/mul_amd64.s (+169 -0)
  21. psiphon/common/tls/crypto/curve25519/square_amd64.s (+132 -0)
  22. psiphon/common/tls/crypto/poly1305/poly1305.go (+32 -0)
  23. psiphon/common/tls/crypto/poly1305/poly1305_test.go (+92 -0)
  24. psiphon/common/tls/crypto/poly1305/sum_amd64.go (+22 -0)
  25. psiphon/common/tls/crypto/poly1305/sum_amd64.s (+125 -0)
  26. psiphon/common/tls/crypto/poly1305/sum_arm.go (+22 -0)
  27. psiphon/common/tls/crypto/poly1305/sum_arm.s (+427 -0)
  28. psiphon/common/tls/crypto/poly1305/sum_ref.go (+1531 -0)
  29. psiphon/common/tls/key_agreement.go (+1 -1)

+ 1 - 1
psiphon/common/tls/cipher_suites.go

@@ -15,7 +15,7 @@ import (
 	"crypto/x509"
 	"hash"
 
-	"github.com/Psiphon-Inc/crypto/chacha20poly1305"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tls/crypto/chacha20poly1305"
 )
 
 // a keyAgreement implements the client and server side of a TLS key agreement

+ 83 - 0
psiphon/common/tls/crypto/chacha20poly1305/chacha20poly1305.go

@@ -0,0 +1,83 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package chacha20poly1305 implements the ChaCha20-Poly1305 AEAD as specified in RFC 7539.
+package chacha20poly1305
+
+import (
+	"crypto/cipher"
+	"errors"
+)
+
+const (
+	// KeySize is the size of the key used by this AEAD, in bytes.
+	KeySize = 32
+	// NonceSize is the size of the nonce used with this AEAD, in bytes.
+	NonceSize = 12
+)
+
+type chacha20poly1305 struct {
+	key [32]byte
+}
+
+// New returns a ChaCha20-Poly1305 AEAD that uses the given, 256-bit key.
+func New(key []byte) (cipher.AEAD, error) {
+	if len(key) != KeySize {
+		return nil, errors.New("chacha20poly1305: bad key length")
+	}
+	ret := new(chacha20poly1305)
+	copy(ret.key[:], key)
+	return ret, nil
+}
+
+func (c *chacha20poly1305) NonceSize() int {
+	return NonceSize
+}
+
+func (c *chacha20poly1305) Overhead() int {
+	return 16
+}
+
+func (c *chacha20poly1305) Seal(dst, nonce, plaintext, additionalData []byte) []byte {
+	if len(nonce) != NonceSize {
+		panic("chacha20poly1305: bad nonce length passed to Seal")
+	}
+
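+	// The ChaCha20 block counter is 32 bits and each block yields 64 bytes of
+	// keystream, for at most 1<<38 bytes; the first block is consumed deriving
+	// the Poly1305 key, which accounts for the -64 below.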
+	if uint64(len(plaintext)) > (1<<38)-64 {
+		panic("chacha20poly1305: plaintext too large")
+	}
+
+	return c.seal(dst, nonce, plaintext, additionalData)
+}
+
+var errOpen = errors.New("chacha20poly1305: message authentication failed")
+
+func (c *chacha20poly1305) Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
+	if len(nonce) != NonceSize {
+		panic("chacha20poly1305: bad nonce length passed to Open")
+	}
+	if len(ciphertext) < 16 {
+		return nil, errOpen
+	}
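+	// The ciphertext carries a 16-byte tag, so the bound is 16 bytes above
+	// the Seal plaintext limit: (1<<38)-64+16 = (1<<38)-48.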
+	if uint64(len(ciphertext)) > (1<<38)-48 {
+		panic("chacha20poly1305: ciphertext too large")
+	}
+
+	return c.open(dst, nonce, ciphertext, additionalData)
+}
+
+// sliceForAppend takes a slice and a requested number of bytes. It returns a
+// slice with the contents of the given slice followed by that many bytes and a
+// second slice that aliases into it and contains only the extra bytes. If the
+// original slice has sufficient capacity then no allocation is performed.
+func sliceForAppend(in []byte, n int) (head, tail []byte) {
+	if total := len(in) + n; cap(in) >= total {
+		head = in[:total]
+	} else {
+		head = make([]byte, total)
+		copy(head, in)
+	}
+	tail = head[len(in):]
+	return
+}
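
The package exposes the standard crypto/cipher.AEAD surface. As an illustrative sketch of how a caller would use it (not part of this commit; the import path is the vendored one added here):

	package main

	import (
		"crypto/rand"
		"fmt"

		"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tls/crypto/chacha20poly1305"
	)

	func main() {
		key := make([]byte, chacha20poly1305.KeySize)     // 32 bytes
		nonce := make([]byte, chacha20poly1305.NonceSize) // 12 bytes; must never repeat for a key
		rand.Read(key)
		rand.Read(nonce)

		aead, err := chacha20poly1305.New(key)
		if err != nil {
			panic(err)
		}

		// Seal appends the ciphertext plus a 16-byte tag (aead.Overhead()) to dst.
		sealed := aead.Seal(nil, nonce, []byte("hello"), nil)

		// Open verifies the tag and recovers the plaintext.
		plaintext, err := aead.Open(nil, nonce, sealed, nil)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s\n", plaintext)
	}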

+ 80 - 0
psiphon/common/tls/crypto/chacha20poly1305/chacha20poly1305_amd64.go

@@ -0,0 +1,80 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7,amd64,!gccgo,!appengine
+
+package chacha20poly1305
+
+import "encoding/binary"
+
+//go:noescape
+func chacha20Poly1305Open(dst []byte, key []uint32, src, ad []byte) bool
+
+//go:noescape
+func chacha20Poly1305Seal(dst []byte, key []uint32, src, ad []byte)
+
+//go:noescape
+func haveSSSE3() bool
+
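+// canUseASM gates the assembly implementation: the ChaCha20 byte rotations
+// (rol8/rol16) are implemented with PSHUFB, an SSSE3 instruction, so CPUs
+// without SSSE3 fall back to the generic Go code.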
+var canUseASM bool
+
+func init() {
+	canUseASM = haveSSSE3()
+}
+
+// setupState writes a ChaCha20 input matrix to state. See
+// https://tools.ietf.org/html/rfc7539#section-2.3.
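+//
+// The resulting 4x4 matrix of little-endian uint32 words is laid out as:
+//
+//	constant constant constant constant
+//	key      key      key      key
+//	key      key      key      key
+//	counter  nonce    nonce    nonce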
+func setupState(state *[16]uint32, key *[32]byte, nonce []byte) {
+	state[0] = 0x61707865
+	state[1] = 0x3320646e
+	state[2] = 0x79622d32
+	state[3] = 0x6b206574
+
+	state[4] = binary.LittleEndian.Uint32(key[:4])
+	state[5] = binary.LittleEndian.Uint32(key[4:8])
+	state[6] = binary.LittleEndian.Uint32(key[8:12])
+	state[7] = binary.LittleEndian.Uint32(key[12:16])
+	state[8] = binary.LittleEndian.Uint32(key[16:20])
+	state[9] = binary.LittleEndian.Uint32(key[20:24])
+	state[10] = binary.LittleEndian.Uint32(key[24:28])
+	state[11] = binary.LittleEndian.Uint32(key[28:32])
+
+	state[12] = 0
+	state[13] = binary.LittleEndian.Uint32(nonce[:4])
+	state[14] = binary.LittleEndian.Uint32(nonce[4:8])
+	state[15] = binary.LittleEndian.Uint32(nonce[8:12])
+}
+
+func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte {
+	if !canUseASM {
+		return c.sealGeneric(dst, nonce, plaintext, additionalData)
+	}
+
+	var state [16]uint32
+	setupState(&state, &c.key, nonce)
+
+	ret, out := sliceForAppend(dst, len(plaintext)+16)
+	chacha20Poly1305Seal(out[:], state[:], plaintext, additionalData)
+	return ret
+}
+
+func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
+	if !canUseASM {
+		return c.openGeneric(dst, nonce, ciphertext, additionalData)
+	}
+
+	var state [16]uint32
+	setupState(&state, &c.key, nonce)
+
+	ciphertext = ciphertext[:len(ciphertext)-16]
+	ret, out := sliceForAppend(dst, len(ciphertext))
+	if !chacha20Poly1305Open(out, state[:], ciphertext, additionalData) {
+		for i := range out {
+			out[i] = 0
+		}
+		return nil, errOpen
+	}
+
+	return ret, nil
+}
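
Both paths write through sliceForAppend, so a caller that passes a dst with enough spare capacity gets in-place, allocation-free sealing. A small sketch of that pattern (buffer and variable names are illustrative):

	// Reserve room for the plaintext plus the 16-byte tag up front.
	buf := make([]byte, 0, len(msg)+16)

	// sealed aliases buf's backing array; no new allocation is needed.
	sealed := aead.Seal(buf, nonce, msg, nil)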

+ 2721 - 0
psiphon/common/tls/crypto/chacha20poly1305/chacha20poly1305_amd64.s

@@ -0,0 +1,2721 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file was originally from https://golang.org/cl/24717 by Vlad Krasnov of CloudFlare.
+
+// +build go1.7,amd64,!gccgo,!appengine
+
+#include "textflag.h"
+// General register allocation
+#define oup DI
+#define inp SI
+#define inl BX
+#define adp CX // free to reuse, after we hash the additional data
+#define keyp R8 // free to reuse, when we copy the key to stack
+#define itr2 R9 // general iterator
+#define itr1 CX // general iterator
+#define acc0 R10
+#define acc1 R11
+#define acc2 R12
+#define t0 R13
+#define t1 R14
+#define t2 R15
+#define t3 R8
+// Register and stack allocation for the SSE code
+#define rStore (0*16)(BP)
+#define sStore (1*16)(BP)
+#define state1Store (2*16)(BP)
+#define state2Store (3*16)(BP)
+#define tmpStore (4*16)(BP)
+#define ctr0Store (5*16)(BP)
+#define ctr1Store (6*16)(BP)
+#define ctr2Store (7*16)(BP)
+#define ctr3Store (8*16)(BP)
+#define A0 X0
+#define A1 X1
+#define A2 X2
+#define B0 X3
+#define B1 X4
+#define B2 X5
+#define C0 X6
+#define C1 X7
+#define C2 X8
+#define D0 X9
+#define D1 X10
+#define D2 X11
+#define T0 X12
+#define T1 X13
+#define T2 X14
+#define T3 X15
+#define A3 T0
+#define B3 T1
+#define C3 T2
+#define D3 T3
+// Register and stack allocation for the AVX2 code
+#define rsStoreAVX2 (0*32)(BP)
+#define state1StoreAVX2 (1*32)(BP)
+#define state2StoreAVX2 (2*32)(BP)
+#define ctr0StoreAVX2 (3*32)(BP)
+#define ctr1StoreAVX2 (4*32)(BP)
+#define ctr2StoreAVX2 (5*32)(BP)
+#define ctr3StoreAVX2 (6*32)(BP)
+#define tmpStoreAVX2 (7*32)(BP) // 256 bytes on stack
+#define AA0 Y0
+#define AA1 Y5
+#define AA2 Y6
+#define AA3 Y7
+#define BB0 Y14
+#define BB1 Y9
+#define BB2 Y10
+#define BB3 Y11
+#define CC0 Y12
+#define CC1 Y13
+#define CC2 Y8
+#define CC3 Y15
+#define DD0 Y4
+#define DD1 Y1
+#define DD2 Y2
+#define DD3 Y3
+#define TT0 DD3
+#define TT1 AA3
+#define TT2 BB3
+#define TT3 CC3
+// ChaCha20 constants
+DATA ·chacha20Constants<>+0x00(SB)/4, $0x61707865
+DATA ·chacha20Constants<>+0x04(SB)/4, $0x3320646e
+DATA ·chacha20Constants<>+0x08(SB)/4, $0x79622d32
+DATA ·chacha20Constants<>+0x0c(SB)/4, $0x6b206574
+DATA ·chacha20Constants<>+0x10(SB)/4, $0x61707865
+DATA ·chacha20Constants<>+0x14(SB)/4, $0x3320646e
+DATA ·chacha20Constants<>+0x18(SB)/4, $0x79622d32
+DATA ·chacha20Constants<>+0x1c(SB)/4, $0x6b206574
+// <<< 16 with PSHUFB
+DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302
+DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A
+DATA ·rol16<>+0x10(SB)/8, $0x0504070601000302
+DATA ·rol16<>+0x18(SB)/8, $0x0D0C0F0E09080B0A
+// <<< 8 with PSHUFB
+DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003
+DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B
+DATA ·rol8<>+0x10(SB)/8, $0x0605040702010003
+DATA ·rol8<>+0x18(SB)/8, $0x0E0D0C0F0A09080B
+
+DATA ·avx2InitMask<>+0x00(SB)/8, $0x0
+DATA ·avx2InitMask<>+0x08(SB)/8, $0x0
+DATA ·avx2InitMask<>+0x10(SB)/8, $0x1
+DATA ·avx2InitMask<>+0x18(SB)/8, $0x0
+
+DATA ·avx2IncMask<>+0x00(SB)/8, $0x2
+DATA ·avx2IncMask<>+0x08(SB)/8, $0x0
+DATA ·avx2IncMask<>+0x10(SB)/8, $0x2
+DATA ·avx2IncMask<>+0x18(SB)/8, $0x0
+// Poly1305 key clamp
+DATA ·polyClampMask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF
+DATA ·polyClampMask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
+DATA ·polyClampMask<>+0x10(SB)/8, $0xFFFFFFFFFFFFFFFF
+DATA ·polyClampMask<>+0x18(SB)/8, $0xFFFFFFFFFFFFFFFF
+
+DATA ·sseIncMask<>+0x00(SB)/8, $0x1
+DATA ·sseIncMask<>+0x08(SB)/8, $0x0
+// To load/store the last < 16 bytes in a buffer
+DATA ·andMask<>+0x00(SB)/8, $0x00000000000000ff
+DATA ·andMask<>+0x08(SB)/8, $0x0000000000000000
+DATA ·andMask<>+0x10(SB)/8, $0x000000000000ffff
+DATA ·andMask<>+0x18(SB)/8, $0x0000000000000000
+DATA ·andMask<>+0x20(SB)/8, $0x0000000000ffffff
+DATA ·andMask<>+0x28(SB)/8, $0x0000000000000000
+DATA ·andMask<>+0x30(SB)/8, $0x00000000ffffffff
+DATA ·andMask<>+0x38(SB)/8, $0x0000000000000000
+DATA ·andMask<>+0x40(SB)/8, $0x000000ffffffffff
+DATA ·andMask<>+0x48(SB)/8, $0x0000000000000000
+DATA ·andMask<>+0x50(SB)/8, $0x0000ffffffffffff
+DATA ·andMask<>+0x58(SB)/8, $0x0000000000000000
+DATA ·andMask<>+0x60(SB)/8, $0x00ffffffffffffff
+DATA ·andMask<>+0x68(SB)/8, $0x0000000000000000
+DATA ·andMask<>+0x70(SB)/8, $0xffffffffffffffff
+DATA ·andMask<>+0x78(SB)/8, $0x0000000000000000
+DATA ·andMask<>+0x80(SB)/8, $0xffffffffffffffff
+DATA ·andMask<>+0x88(SB)/8, $0x00000000000000ff
+DATA ·andMask<>+0x90(SB)/8, $0xffffffffffffffff
+DATA ·andMask<>+0x98(SB)/8, $0x000000000000ffff
+DATA ·andMask<>+0xa0(SB)/8, $0xffffffffffffffff
+DATA ·andMask<>+0xa8(SB)/8, $0x0000000000ffffff
+DATA ·andMask<>+0xb0(SB)/8, $0xffffffffffffffff
+DATA ·andMask<>+0xb8(SB)/8, $0x00000000ffffffff
+DATA ·andMask<>+0xc0(SB)/8, $0xffffffffffffffff
+DATA ·andMask<>+0xc8(SB)/8, $0x000000ffffffffff
+DATA ·andMask<>+0xd0(SB)/8, $0xffffffffffffffff
+DATA ·andMask<>+0xd8(SB)/8, $0x0000ffffffffffff
+DATA ·andMask<>+0xe0(SB)/8, $0xffffffffffffffff
+DATA ·andMask<>+0xe8(SB)/8, $0x00ffffffffffffff
+
+GLOBL ·chacha20Constants<>(SB), (NOPTR+RODATA), $32
+GLOBL ·rol16<>(SB), (NOPTR+RODATA), $32
+GLOBL ·rol8<>(SB), (NOPTR+RODATA), $32
+GLOBL ·sseIncMask<>(SB), (NOPTR+RODATA), $16
+GLOBL ·avx2IncMask<>(SB), (NOPTR+RODATA), $32
+GLOBL ·avx2InitMask<>(SB), (NOPTR+RODATA), $32
+GLOBL ·polyClampMask<>(SB), (NOPTR+RODATA), $32
+GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240
+// No PALIGNR in Go ASM yet (but VPALIGNR is present).
+#define shiftB0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X3, X3
+#define shiftB1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x04 // PALIGNR $4, X4, X4
+#define shiftB2Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X5, X5
+#define shiftB3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x04 // PALIGNR $4, X13, X13
+#define shiftC0Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X6, X6
+#define shiftC1Left BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x08 // PALIGNR $8, X7, X7
+#define shiftC2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc0; BYTE $0x08 // PALIGNR $8, X8, X8
+#define shiftC3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xf6; BYTE $0x08 // PALIGNR $8, X14, X14
+#define shiftD0Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x0c // PALIGNR $12, X9, X9
+#define shiftD1Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x0c // PALIGNR $12, X10, X10
+#define shiftD2Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X11, X11
+#define shiftD3Left BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x0c // PALIGNR $12, X15, X15
+#define shiftB0Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x0c // PALIGNR $12, X3, X3
+#define shiftB1Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xe4; BYTE $0x0c // PALIGNR $12, X4, X4
+#define shiftB2Right BYTE $0x66; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X5, X5
+#define shiftB3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xed; BYTE $0x0c // PALIGNR $12, X13, X13
+#define shiftC0Right shiftC0Left
+#define shiftC1Right shiftC1Left
+#define shiftC2Right shiftC2Left
+#define shiftC3Right shiftC3Left
+#define shiftD0Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xc9; BYTE $0x04 // PALIGNR $4, X9, X9
+#define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10
+#define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11
+#define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15
+// Some macros
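+// chachaQR is one ChaCha20 quarter round (RFC 7539, section 2.1):
+//	a += b; d ^= a; d <<<= 16
+//	c += d; b ^= c; b <<<= 12
+//	a += b; d ^= a; d <<<= 8
+//	c += d; b ^= c; b <<<= 7
+// The 16- and 8-bit rotates use PSHUFB; 12 and 7 use shift/xor pairs with T
+// as scratch.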
+#define chachaQR(A, B, C, D, T) \
+	PADDD B, A; PXOR A, D; PSHUFB ·rol16<>(SB), D                            \
+	PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \
+	PADDD B, A; PXOR A, D; PSHUFB ·rol8<>(SB), D                             \
+	PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B
+
+#define chachaQR_AVX2(A, B, C, D, T) \
+	VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol16<>(SB), D, D                         \
+	VPADDD D, C, C; VPXOR C, B, B; VPSLLD $12, B, T; VPSRLD $20, B, B; VPXOR T, B, B \
+	VPADDD B, A, A; VPXOR A, D, D; VPSHUFB ·rol8<>(SB), D, D                          \
+	VPADDD D, C, C; VPXOR C, B, B; VPSLLD $7, B, T; VPSRLD $25, B, B; VPXOR T, B, B
+
+#define polyAdd(S) ADDQ S, acc0; ADCQ 8+S, acc1; ADCQ $1, acc2
+#define polyMulStage1 MOVQ (0*8)(BP), AX; MOVQ AX, t2; MULQ acc0; MOVQ AX, t0; MOVQ DX, t1; MOVQ (0*8)(BP), AX; MULQ acc1; IMULQ acc2, t2; ADDQ AX, t1; ADCQ DX, t2
+#define polyMulStage2 MOVQ (1*8)(BP), AX; MOVQ AX, t3; MULQ acc0; ADDQ AX, t1; ADCQ $0, DX; MOVQ DX, acc0; MOVQ (1*8)(BP), AX; MULQ acc1; ADDQ AX, t2; ADCQ $0, DX
+#define polyMulStage3 IMULQ acc2, t3; ADDQ acc0, t2; ADCQ DX, t3
+#define polyMulReduceStage MOVQ t0, acc0; MOVQ t1, acc1; MOVQ t2, acc2; ANDQ $3, acc2; MOVQ t2, t0; ANDQ $-4, t0; MOVQ t3, t1; SHRQ $2, t2:t3; SHRQ $2, t3; ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $0, acc2; ADDQ t2, acc0; ADCQ t3, acc1; ADCQ $0, acc2
+
+#define polyMulStage1_AVX2 MOVQ (0*8)(BP), DX; MOVQ DX, t2; MULXQ acc0, t0, t1; IMULQ acc2, t2; MULXQ acc1, AX, DX; ADDQ AX, t1; ADCQ DX, t2
+#define polyMulStage2_AVX2 MOVQ (1*8)(BP), DX; MULXQ acc0, acc0, AX; ADDQ acc0, t1; MULXQ acc1, acc1, t3; ADCQ acc1, t2; ADCQ $0, t3
+#define polyMulStage3_AVX2 IMULQ acc2, DX; ADDQ AX, t2; ADCQ DX, t3
+
+#define polyMul polyMulStage1; polyMulStage2; polyMulStage3; polyMulReduceStage
+#define polyMulAVX2 polyMulStage1_AVX2; polyMulStage2_AVX2; polyMulStage3_AVX2; polyMulReduceStage
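+// Together these implement one Poly1305 step: polyAdd folds a 16-byte block S
+// (plus a high bit) into the 130-bit accumulator acc0..acc2, and polyMul
+// multiplies the accumulator by r (the clamped half of the key at 0(BP)),
+// reducing modulo 2^130 - 5 by folding the bits c above 2^130 back in as
+// 5*c = 4*c + c.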
+// ----------------------------------------------------------------------------
+TEXT polyHashADInternal<>(SB), NOSPLIT, $0
+	// adp points to beginning of additional data
+	// itr2 holds ad length
+	XORQ acc0, acc0
+	XORQ acc1, acc1
+	XORQ acc2, acc2
+	CMPQ itr2, $13
+	JNE  hashADLoop
+
+openFastTLSAD:
+	// Special treatment for the TLS case of 13 bytes
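+	// (TLS AEAD additional data is always 13 bytes: an 8-byte sequence
+	// number, then the record's 1-byte type, 2-byte version and 2-byte
+	// length.)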
+	MOVQ (adp), acc0
+	MOVQ 5(adp), acc1
+	SHRQ $24, acc1
+	MOVQ $1, acc2
+	polyMul
+	RET
+
+hashADLoop:
+	// Hash in 16 byte chunks
+	CMPQ itr2, $16
+	JB   hashADTail
+	polyAdd(0(adp))
+	LEAQ (1*16)(adp), adp
+	SUBQ $16, itr2
+	polyMul
+	JMP  hashADLoop
+
+hashADTail:
+	CMPQ itr2, $0
+	JE   hashADDone
+
+	// Hash last < 16 byte tail
+	XORQ t0, t0
+	XORQ t1, t1
+	XORQ t2, t2
+	ADDQ itr2, adp
+
+hashADTailLoop:
+	SHLQ $8, t1:t0
+	SHLQ $8, t0
+	MOVB -1(adp), t2
+	XORQ t2, t0
+	DECQ adp
+	DECQ itr2
+	JNE  hashADTailLoop
+
+hashADTailFinish:
+	ADDQ t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2
+	polyMul
+
+	// Finished AD
+hashADDone:
+	RET
+
+// ----------------------------------------------------------------------------
+// func chacha20Poly1305Open(dst, key, src, ad []byte) bool
+TEXT ·chacha20Poly1305Open(SB), 0, $288-97
+	// For aligned stack access
+	MOVQ SP, BP
+	ADDQ $32, BP
+	ANDQ $-32, BP
+	MOVQ dst+0(FP), oup
+	MOVQ key+24(FP), keyp
+	MOVQ src+48(FP), inp
+	MOVQ src_len+56(FP), inl
+	MOVQ ad+72(FP), adp
+
+	// Check for AVX2 support
+	CMPB runtime·support_avx2(SB), $0
+	JE   noavx2bmi2Open
+
+	// Check BMI2 bit for MULXQ.
+	// runtime·cpuid_ebx7 is always available here
+	// because it passed avx2 check
+	TESTL $(1<<8), runtime·cpuid_ebx7(SB)
+	JNE   chacha20Poly1305Open_AVX2
+noavx2bmi2Open:
+
+	// Special optimization, for very short buffers
+	CMPQ inl, $128
+	JBE  openSSE128 // About 16% faster
+
+	// For long buffers, prepare the poly key first
+	MOVOU ·chacha20Constants<>(SB), A0
+	MOVOU (1*16)(keyp), B0
+	MOVOU (2*16)(keyp), C0
+	MOVOU (3*16)(keyp), D0
+	MOVO  D0, T1
+
+	// Store state on stack for future use
+	MOVO B0, state1Store
+	MOVO C0, state2Store
+	MOVO D0, ctr3Store
+	MOVQ $10, itr2
+
+openSSEPreparePolyKey:
+	chachaQR(A0, B0, C0, D0, T0)
+	shiftB0Left;  shiftC0Left; shiftD0Left
+	chachaQR(A0, B0, C0, D0, T0)
+	shiftB0Right; shiftC0Right; shiftD0Right
+	DECQ          itr2
+	JNE           openSSEPreparePolyKey
+
+	// A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded
+	PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0
+
+	// Clamp and store the key
+	PAND ·polyClampMask<>(SB), A0
+	MOVO A0, rStore; MOVO B0, sStore
+
+	// Hash AAD
+	MOVQ ad_len+80(FP), itr2
+	CALL polyHashADInternal<>(SB)
+
+openSSEMainLoop:
+	CMPQ inl, $256
+	JB   openSSEMainLoopDone
+
+	// Load state, increment counter blocks
+	MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0
+	MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1
+	MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2
+	MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3
+
+	// Store counters
+	MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store
+
+	// There are 10 ChaCha20 iterations of 2QR each, so for 6 iterations we hash 2 blocks, and for the remaining 4 only 1 block - for a total of 16
+	MOVQ $4, itr1
+	MOVQ inp, itr2
+
+openSSEInternalLoop:
+	MOVO          C3, tmpStore
+	chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
+	MOVO          tmpStore, C3
+	MOVO          C1, tmpStore
+	chachaQR(A3, B3, C3, D3, C1)
+	MOVO          tmpStore, C1
+	polyAdd(0(itr2))
+	shiftB0Left;  shiftB1Left; shiftB2Left; shiftB3Left
+	shiftC0Left;  shiftC1Left; shiftC2Left; shiftC3Left
+	shiftD0Left;  shiftD1Left; shiftD2Left; shiftD3Left
+	polyMulStage1
+	polyMulStage2
+	LEAQ          (2*8)(itr2), itr2
+	MOVO          C3, tmpStore
+	chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
+	MOVO          tmpStore, C3
+	MOVO          C1, tmpStore
+	polyMulStage3
+	chachaQR(A3, B3, C3, D3, C1)
+	MOVO          tmpStore, C1
+	polyMulReduceStage
+	shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right
+	shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right
+	shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right
+	DECQ          itr1
+	JGE           openSSEInternalLoop
+
+	polyAdd(0(itr2))
+	polyMul
+	LEAQ (2*8)(itr2), itr2
+
+	CMPQ itr1, $-6
+	JG   openSSEInternalLoop
+
+	// Add in the state
+	PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3
+	PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3
+	PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3
+	PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3
+
+	// Load - xor - store
+	MOVO  D3, tmpStore
+	MOVOU (0*16)(inp), D3; PXOR D3, A0; MOVOU A0, (0*16)(oup)
+	MOVOU (1*16)(inp), D3; PXOR D3, B0; MOVOU B0, (1*16)(oup)
+	MOVOU (2*16)(inp), D3; PXOR D3, C0; MOVOU C0, (2*16)(oup)
+	MOVOU (3*16)(inp), D3; PXOR D3, D0; MOVOU D0, (3*16)(oup)
+	MOVOU (4*16)(inp), D0; PXOR D0, A1; MOVOU A1, (4*16)(oup)
+	MOVOU (5*16)(inp), D0; PXOR D0, B1; MOVOU B1, (5*16)(oup)
+	MOVOU (6*16)(inp), D0; PXOR D0, C1; MOVOU C1, (6*16)(oup)
+	MOVOU (7*16)(inp), D0; PXOR D0, D1; MOVOU D1, (7*16)(oup)
+	MOVOU (8*16)(inp), D0; PXOR D0, A2; MOVOU A2, (8*16)(oup)
+	MOVOU (9*16)(inp), D0; PXOR D0, B2; MOVOU B2, (9*16)(oup)
+	MOVOU (10*16)(inp), D0; PXOR D0, C2; MOVOU C2, (10*16)(oup)
+	MOVOU (11*16)(inp), D0; PXOR D0, D2; MOVOU D2, (11*16)(oup)
+	MOVOU (12*16)(inp), D0; PXOR D0, A3; MOVOU A3, (12*16)(oup)
+	MOVOU (13*16)(inp), D0; PXOR D0, B3; MOVOU B3, (13*16)(oup)
+	MOVOU (14*16)(inp), D0; PXOR D0, C3; MOVOU C3, (14*16)(oup)
+	MOVOU (15*16)(inp), D0; PXOR tmpStore, D0; MOVOU D0, (15*16)(oup)
+	LEAQ  256(inp), inp
+	LEAQ  256(oup), oup
+	SUBQ  $256, inl
+	JMP   openSSEMainLoop
+
+openSSEMainLoopDone:
+	// Handle the various tail sizes efficiently
+	TESTQ inl, inl
+	JE    openSSEFinalize
+	CMPQ  inl, $64
+	JBE   openSSETail64
+	CMPQ  inl, $128
+	JBE   openSSETail128
+	CMPQ  inl, $192
+	JBE   openSSETail192
+	JMP   openSSETail256
+
+openSSEFinalize:
+	// Hash in the PT, AAD lengths
+	ADDQ ad_len+80(FP), acc0; ADCQ src_len+56(FP), acc1; ADCQ $1, acc2
+	polyMul
+
+	// Final reduce
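+	// The three-limb subtract below computes h - (2^130 - 5): the constant's
+	// 64-bit limbs are -5, -1 and 3. If the subtraction borrows, h was
+	// already fully reduced and the CMOVs restore the saved value.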
+	MOVQ    acc0, t0
+	MOVQ    acc1, t1
+	MOVQ    acc2, t2
+	SUBQ    $-5, acc0
+	SBBQ    $-1, acc1
+	SBBQ    $3, acc2
+	CMOVQCS t0, acc0
+	CMOVQCS t1, acc1
+	CMOVQCS t2, acc2
+
+	// Add in the "s" part of the key
+	ADDQ 0+sStore, acc0
+	ADCQ 8+sStore, acc1
+
+	// Finally, constant time compare to the tag at the end of the message
+	XORQ    AX, AX
+	MOVQ    $1, DX
+	XORQ    (0*8)(inp), acc0
+	XORQ    (1*8)(inp), acc1
+	ORQ     acc1, acc0
+	CMOVQEQ DX, AX
+
+	// Return true iff tags are equal
+	MOVB AX, ret+96(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+// Special optimization for buffers smaller than 129 bytes
+openSSE128:
+	// For up to 128 bytes of ciphertext and 64 bytes for the poly key, we need to process three blocks
+	MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0
+	MOVO  A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1
+	MOVO  A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2
+	MOVO  B0, T1; MOVO C0, T2; MOVO D1, T3
+	MOVQ  $10, itr2
+
+openSSE128InnerCipherLoop:
+	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
+	shiftB0Left;  shiftB1Left; shiftB2Left
+	shiftC0Left;  shiftC1Left; shiftC2Left
+	shiftD0Left;  shiftD1Left; shiftD2Left
+	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
+	shiftB0Right; shiftB1Right; shiftB2Right
+	shiftC0Right; shiftC1Right; shiftC2Right
+	shiftD0Right; shiftD1Right; shiftD2Right
+	DECQ          itr2
+	JNE           openSSE128InnerCipherLoop
+
+	// A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded
+	PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2
+	PADDL T1, B0; PADDL T1, B1; PADDL T1, B2
+	PADDL T2, C1; PADDL T2, C2
+	PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2
+
+	// Clamp and store the key
+	PAND  ·polyClampMask<>(SB), A0
+	MOVOU A0, rStore; MOVOU B0, sStore
+
+	// Hash
+	MOVQ ad_len+80(FP), itr2
+	CALL polyHashADInternal<>(SB)
+
+openSSE128Open:
+	CMPQ inl, $16
+	JB   openSSETail16
+	SUBQ $16, inl
+
+	// Load for hashing
+	polyAdd(0(inp))
+
+	// Load for decryption
+	MOVOU (inp), T0; PXOR T0, A1; MOVOU A1, (oup)
+	LEAQ  (1*16)(inp), inp
+	LEAQ  (1*16)(oup), oup
+	polyMul
+
+	// Shift the stream "left"
+	MOVO B1, A1
+	MOVO C1, B1
+	MOVO D1, C1
+	MOVO A2, D1
+	MOVO B2, A2
+	MOVO C2, B2
+	MOVO D2, C2
+	JMP  openSSE128Open
+
+openSSETail16:
+	TESTQ inl, inl
+	JE    openSSEFinalize
+
+	// We can safely load the CT from the end, because it is padded with the MAC
+	MOVQ   inl, itr2
+	SHLQ   $4, itr2
+	LEAQ   ·andMask<>(SB), t0
+	MOVOU  (inp), T0
+	ADDQ   inl, inp
+	PAND   -16(t0)(itr2*1), T0
+	MOVO   T0, 0+tmpStore
+	MOVQ   T0, t0
+	MOVQ   8+tmpStore, t1
+	PXOR   A1, T0
+
+	// We can only store one byte at a time, since plaintext can be shorter than 16 bytes
+openSSETail16Store:
+	MOVQ T0, t3
+	MOVB t3, (oup)
+	PSRLDQ $1, T0
+	INCQ   oup
+	DECQ   inl
+	JNE    openSSETail16Store
+	ADDQ   t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2
+	polyMul
+	JMP    openSSEFinalize
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 64 bytes of ciphertext
+openSSETail64:
+	// Need to decrypt up to 64 bytes - prepare single block
+	MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store
+	XORQ itr2, itr2
+	MOVQ inl, itr1
+	CMPQ itr1, $16
+	JB   openSSETail64LoopB
+
+openSSETail64LoopA:
+	// Perform ChaCha rounds, while hashing the remaining input
+	polyAdd(0(inp)(itr2*1))
+	polyMul
+	SUBQ $16, itr1
+
+openSSETail64LoopB:
+	ADDQ          $16, itr2
+	chachaQR(A0, B0, C0, D0, T0)
+	shiftB0Left;  shiftC0Left; shiftD0Left
+	chachaQR(A0, B0, C0, D0, T0)
+	shiftB0Right; shiftC0Right; shiftD0Right
+
+	CMPQ itr1, $16
+	JAE  openSSETail64LoopA
+
+	CMPQ itr2, $160
+	JNE  openSSETail64LoopB
+
+	PADDL ·chacha20Constants<>(SB), A0; PADDL state1Store, B0; PADDL state2Store, C0; PADDL ctr0Store, D0
+
+openSSETail64DecLoop:
+	CMPQ  inl, $16
+	JB    openSSETail64DecLoopDone
+	SUBQ  $16, inl
+	MOVOU (inp), T0
+	PXOR  T0, A0
+	MOVOU A0, (oup)
+	LEAQ  16(inp), inp
+	LEAQ  16(oup), oup
+	MOVO  B0, A0
+	MOVO  C0, B0
+	MOVO  D0, C0
+	JMP   openSSETail64DecLoop
+
+openSSETail64DecLoopDone:
+	MOVO A0, A1
+	JMP  openSSETail16
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 128 bytes of ciphertext
+openSSETail128:
+	// Need to decrypt up to 128 bytes - prepare two blocks
+	MOVO ·chacha20Constants<>(SB), A1; MOVO state1Store, B1; MOVO state2Store, C1; MOVO ctr3Store, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr0Store
+	MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr1Store
+	XORQ itr2, itr2
+	MOVQ inl, itr1
+	ANDQ $-16, itr1
+
+openSSETail128LoopA:
+	// Perform ChaCha rounds, while hashing the remaining input
+	polyAdd(0(inp)(itr2*1))
+	polyMul
+
+openSSETail128LoopB:
+	ADDQ          $16, itr2
+	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0)
+	shiftB0Left;  shiftC0Left; shiftD0Left
+	shiftB1Left;  shiftC1Left; shiftD1Left
+	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0)
+	shiftB0Right; shiftC0Right; shiftD0Right
+	shiftB1Right; shiftC1Right; shiftD1Right
+
+	CMPQ itr2, itr1
+	JB   openSSETail128LoopA
+
+	CMPQ itr2, $160
+	JNE  openSSETail128LoopB
+
+	PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1
+	PADDL state1Store, B0; PADDL state1Store, B1
+	PADDL state2Store, C0; PADDL state2Store, C1
+	PADDL ctr1Store, D0; PADDL ctr0Store, D1
+
+	MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3
+	PXOR  T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1
+	MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup)
+
+	SUBQ $64, inl
+	LEAQ 64(inp), inp
+	LEAQ 64(oup), oup
+	JMP  openSSETail64DecLoop
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 192 bytes of ciphertext
+openSSETail192:
+	// Need to decrypt up to 192 bytes - prepare three blocks
+	MOVO ·chacha20Constants<>(SB), A2; MOVO state1Store, B2; MOVO state2Store, C2; MOVO ctr3Store, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr0Store
+	MOVO A2, A1; MOVO B2, B1; MOVO C2, C1; MOVO D2, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store
+	MOVO A1, A0; MOVO B1, B0; MOVO C1, C0; MOVO D1, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr2Store
+
+	MOVQ    inl, itr1
+	MOVQ    $160, itr2
+	CMPQ    itr1, $160
+	CMOVQGT itr2, itr1
+	ANDQ    $-16, itr1
+	XORQ    itr2, itr2
+
+openSSLTail192LoopA:
+	// Perform ChaCha rounds, while hashing the remaining input
+	polyAdd(0(inp)(itr2*1))
+	polyMul
+
+openSSLTail192LoopB:
+	ADDQ         $16, itr2
+	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
+	shiftB0Left; shiftC0Left; shiftD0Left
+	shiftB1Left; shiftC1Left; shiftD1Left
+	shiftB2Left; shiftC2Left; shiftD2Left
+
+	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
+	shiftB0Right; shiftC0Right; shiftD0Right
+	shiftB1Right; shiftC1Right; shiftD1Right
+	shiftB2Right; shiftC2Right; shiftD2Right
+
+	CMPQ itr2, itr1
+	JB   openSSLTail192LoopA
+
+	CMPQ itr2, $160
+	JNE  openSSLTail192LoopB
+
+	CMPQ inl, $176
+	JB   openSSLTail192Store
+
+	polyAdd(160(inp))
+	polyMul
+
+	CMPQ inl, $192
+	JB   openSSLTail192Store
+
+	polyAdd(176(inp))
+	polyMul
+
+openSSLTail192Store:
+	PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2
+	PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2
+	PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2
+	PADDL ctr2Store, D0; PADDL ctr1Store, D1; PADDL ctr0Store, D2
+
+	MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3
+	PXOR  T0, A2; PXOR T1, B2; PXOR T2, C2; PXOR T3, D2
+	MOVOU A2, (0*16)(oup); MOVOU B2, (1*16)(oup); MOVOU C2, (2*16)(oup); MOVOU D2, (3*16)(oup)
+
+	MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3
+	PXOR  T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1
+	MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup)
+
+	SUBQ $128, inl
+	LEAQ 128(inp), inp
+	LEAQ 128(oup), oup
+	JMP  openSSETail64DecLoop
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 256 bytes of ciphertext
+openSSETail256:
+	// Need to decrypt up to 256 bytes - prepare four blocks
+	MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0
+	MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1
+	MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2
+	MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3
+
+	// Store counters
+	MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store
+	XORQ itr2, itr2
+
+openSSETail256Loop:
+	// This loop interleaves 8 ChaCha quarter rounds with 1 poly multiplication
+	polyAdd(0(inp)(itr2*1))
+	MOVO          C3, tmpStore
+	chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
+	MOVO          tmpStore, C3
+	MOVO          C1, tmpStore
+	chachaQR(A3, B3, C3, D3, C1)
+	MOVO          tmpStore, C1
+	shiftB0Left;  shiftB1Left; shiftB2Left; shiftB3Left
+	shiftC0Left;  shiftC1Left; shiftC2Left; shiftC3Left
+	shiftD0Left;  shiftD1Left; shiftD2Left; shiftD3Left
+	polyMulStage1
+	polyMulStage2
+	MOVO          C3, tmpStore
+	chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
+	MOVO          tmpStore, C3
+	MOVO          C1, tmpStore
+	chachaQR(A3, B3, C3, D3, C1)
+	MOVO          tmpStore, C1
+	polyMulStage3
+	polyMulReduceStage
+	shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right
+	shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right
+	shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right
+	ADDQ          $2*8, itr2
+	CMPQ          itr2, $160
+	JB            openSSETail256Loop
+	MOVQ          inl, itr1
+	ANDQ          $-16, itr1
+
+openSSETail256HashLoop:
+	polyAdd(0(inp)(itr2*1))
+	polyMul
+	ADDQ $2*8, itr2
+	CMPQ itr2, itr1
+	JB   openSSETail256HashLoop
+
+	// Add in the state
+	PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3
+	PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3
+	PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3
+	PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3
+	MOVO  D3, tmpStore
+
+	// Load - xor - store
+	MOVOU (0*16)(inp), D3; PXOR D3, A0
+	MOVOU (1*16)(inp), D3; PXOR D3, B0
+	MOVOU (2*16)(inp), D3; PXOR D3, C0
+	MOVOU (3*16)(inp), D3; PXOR D3, D0
+	MOVOU A0, (0*16)(oup)
+	MOVOU B0, (1*16)(oup)
+	MOVOU C0, (2*16)(oup)
+	MOVOU D0, (3*16)(oup)
+	MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0
+	PXOR  A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1
+	MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup)
+	MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0
+	PXOR  A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2
+	MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup)
+	LEAQ  192(inp), inp
+	LEAQ  192(oup), oup
+	SUBQ  $192, inl
+	MOVO  A3, A0
+	MOVO  B3, B0
+	MOVO  C3, C0
+	MOVO  tmpStore, D0
+
+	JMP openSSETail64DecLoop
+
+// ----------------------------------------------------------------------------
+// ------------------------- AVX2 Code ----------------------------------------
+chacha20Poly1305Open_AVX2:
+	VZEROUPPER
+	VMOVDQU ·chacha20Constants<>(SB), AA0
+	BYTE    $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14
+	BYTE    $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12
+	BYTE    $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4
+	VPADDD  ·avx2InitMask<>(SB), DD0, DD0
+
+	// Special optimization, for very short buffers
+	CMPQ inl, $192
+	JBE  openAVX2192
+	CMPQ inl, $320
+	JBE  openAVX2320
+
+	// For the general key prepare the key first - as a byproduct we have 64 bytes of cipher stream
+	VMOVDQA BB0, state1StoreAVX2
+	VMOVDQA CC0, state2StoreAVX2
+	VMOVDQA DD0, ctr3StoreAVX2
+	MOVQ    $10, itr2
+
+openAVX2PreparePolyKey:
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0)
+	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0)
+	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0
+	DECQ     itr2
+	JNE      openAVX2PreparePolyKey
+
+	VPADDD ·chacha20Constants<>(SB), AA0, AA0
+	VPADDD state1StoreAVX2, BB0, BB0
+	VPADDD state2StoreAVX2, CC0, CC0
+	VPADDD ctr3StoreAVX2, DD0, DD0
+
+	VPERM2I128 $0x02, AA0, BB0, TT0
+
+	// Clamp and store poly key
+	VPAND   ·polyClampMask<>(SB), TT0, TT0
+	VMOVDQA TT0, rsStoreAVX2
+
+	// Stream for the first 64 bytes
+	VPERM2I128 $0x13, AA0, BB0, AA0
+	VPERM2I128 $0x13, CC0, DD0, BB0
+
+	// Hash AD + first 64 bytes
+	MOVQ ad_len+80(FP), itr2
+	CALL polyHashADInternal<>(SB)
+	XORQ itr1, itr1
+
+openAVX2InitialHash64:
+	polyAdd(0(inp)(itr1*1))
+	polyMulAVX2
+	ADDQ $16, itr1
+	CMPQ itr1, $64
+	JNE  openAVX2InitialHash64
+
+	// Decrypt the first 64 bytes
+	VPXOR   (0*32)(inp), AA0, AA0
+	VPXOR   (1*32)(inp), BB0, BB0
+	VMOVDQU AA0, (0*32)(oup)
+	VMOVDQU BB0, (1*32)(oup)
+	LEAQ    (2*32)(inp), inp
+	LEAQ    (2*32)(oup), oup
+	SUBQ    $64, inl
+
+openAVX2MainLoop:
+	CMPQ inl, $512
+	JB   openAVX2MainLoopDone
+
+	// Load state, increment counter blocks, store the incremented counters
+	VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3
+	VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3
+	VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3
+	VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3
+	VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2
+	XORQ    itr1, itr1
+
+openAVX2InternalLoop:
+	// Let's just say this spaghetti loop interleaves 2 quarter rounds with 3 poly multiplications
+	// Effectively per 512 bytes of stream we hash 480 bytes of ciphertext
+	polyAdd(0*8(inp)(itr1*1))
+	VPADDD   BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+	polyMulStage1_AVX2
+	VPXOR    AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+	VPSHUFB  ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+	polyMulStage2_AVX2
+	VPADDD   DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+	VPXOR    CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+	polyMulStage3_AVX2
+	VMOVDQA  CC3, tmpStoreAVX2
+	VPSLLD   $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+	VPSLLD   $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+	VPSLLD   $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+	VPSLLD   $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+	VMOVDQA  tmpStoreAVX2, CC3
+	polyMulReduceStage
+	VPADDD   BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+	VPXOR    AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+	VPSHUFB  ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
+	polyAdd(2*8(inp)(itr1*1))
+	VPADDD   DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+	polyMulStage1_AVX2
+	VPXOR    CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+	VMOVDQA  CC3, tmpStoreAVX2
+	VPSLLD   $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
+	VPSLLD   $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
+	VPSLLD   $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
+	VPSLLD   $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
+	VMOVDQA  tmpStoreAVX2, CC3
+	polyMulStage2_AVX2
+	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
+	VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3
+	VPADDD   BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+	polyMulStage3_AVX2
+	VPXOR    AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+	VPSHUFB  ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+	polyMulReduceStage
+	VPADDD   DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+	VPXOR    CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+	polyAdd(4*8(inp)(itr1*1))
+	LEAQ     (6*8)(itr1), itr1
+	VMOVDQA  CC3, tmpStoreAVX2
+	VPSLLD   $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+	VPSLLD   $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+	VPSLLD   $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+	VPSLLD   $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+	VMOVDQA  tmpStoreAVX2, CC3
+	polyMulStage1_AVX2
+	VPADDD   BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+	VPXOR    AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+	polyMulStage2_AVX2
+	VPSHUFB  ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
+	VPADDD   DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+	polyMulStage3_AVX2
+	VPXOR    CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+	VMOVDQA  CC3, tmpStoreAVX2
+	VPSLLD   $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
+	VPSLLD   $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
+	VPSLLD   $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
+	VPSLLD   $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
+	VMOVDQA  tmpStoreAVX2, CC3
+	polyMulReduceStage
+	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
+	VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3
+	CMPQ     itr1, $480
+	JNE      openAVX2InternalLoop
+
+	VPADDD  ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3
+	VPADDD  state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3
+	VPADDD  state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3
+	VPADDD  ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3
+	VMOVDQA CC3, tmpStoreAVX2
+
+	// We only hashed 480 of the 512 bytes available - hash the remaining 32 here
+	polyAdd(480(inp))
+	polyMulAVX2
+	VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0
+	VPXOR      (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0
+	VMOVDQU    CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup)
+	VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
+	VPXOR      (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0
+	VMOVDQU    AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup)
+
+	// and here
+	polyAdd(496(inp))
+	polyMulAVX2
+	VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0
+	VPXOR      (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0
+	VMOVDQU    AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup)
+	VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0
+	VPXOR      (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0
+	VMOVDQU    AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup)
+	LEAQ       (32*16)(inp), inp
+	LEAQ       (32*16)(oup), oup
+	SUBQ       $(32*16), inl
+	JMP        openAVX2MainLoop
+
+openAVX2MainLoopDone:
+	// Handle the various tail sizes efficiently
+	TESTQ inl, inl
+	JE    openSSEFinalize
+	CMPQ  inl, $128
+	JBE   openAVX2Tail128
+	CMPQ  inl, $256
+	JBE   openAVX2Tail256
+	CMPQ  inl, $384
+	JBE   openAVX2Tail384
+	JMP   openAVX2Tail512
+
+// ----------------------------------------------------------------------------
+// Special optimization for buffers smaller than 193 bytes
+openAVX2192:
+	// For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks
+	VMOVDQA AA0, AA1
+	VMOVDQA BB0, BB1
+	VMOVDQA CC0, CC1
+	VPADDD  ·avx2IncMask<>(SB), DD0, DD1
+	VMOVDQA AA0, AA2
+	VMOVDQA BB0, BB2
+	VMOVDQA CC0, CC2
+	VMOVDQA DD0, DD2
+	VMOVDQA DD1, TT3
+	MOVQ    $10, itr2
+
+openAVX2192InnerCipherLoop:
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+	VPALIGNR   $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1
+	VPALIGNR   $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
+	VPALIGNR   $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+	VPALIGNR   $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1
+	VPALIGNR   $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
+	VPALIGNR   $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1
+	DECQ       itr2
+	JNE        openAVX2192InnerCipherLoop
+	VPADDD     AA2, AA0, AA0; VPADDD AA2, AA1, AA1
+	VPADDD     BB2, BB0, BB0; VPADDD BB2, BB1, BB1
+	VPADDD     CC2, CC0, CC0; VPADDD CC2, CC1, CC1
+	VPADDD     DD2, DD0, DD0; VPADDD TT3, DD1, DD1
+	VPERM2I128 $0x02, AA0, BB0, TT0
+
+	// Clamp and store poly key
+	VPAND   ·polyClampMask<>(SB), TT0, TT0
+	VMOVDQA TT0, rsStoreAVX2
+
+	// Stream for up to 192 bytes
+	VPERM2I128 $0x13, AA0, BB0, AA0
+	VPERM2I128 $0x13, CC0, DD0, BB0
+	VPERM2I128 $0x02, AA1, BB1, CC0
+	VPERM2I128 $0x02, CC1, DD1, DD0
+	VPERM2I128 $0x13, AA1, BB1, AA1
+	VPERM2I128 $0x13, CC1, DD1, BB1
+
+openAVX2ShortOpen:
+	// Hash
+	MOVQ ad_len+80(FP), itr2
+	CALL polyHashADInternal<>(SB)
+
+openAVX2ShortOpenLoop:
+	CMPQ inl, $32
+	JB   openAVX2ShortTail32
+	SUBQ $32, inl
+
+	// Load for hashing
+	polyAdd(0*8(inp))
+	polyMulAVX2
+	polyAdd(2*8(inp))
+	polyMulAVX2
+
+	// Load for decryption
+	VPXOR   (inp), AA0, AA0
+	VMOVDQU AA0, (oup)
+	LEAQ    (1*32)(inp), inp
+	LEAQ    (1*32)(oup), oup
+
+	// Shift stream left
+	VMOVDQA BB0, AA0
+	VMOVDQA CC0, BB0
+	VMOVDQA DD0, CC0
+	VMOVDQA AA1, DD0
+	VMOVDQA BB1, AA1
+	VMOVDQA CC1, BB1
+	VMOVDQA DD1, CC1
+	VMOVDQA AA2, DD1
+	VMOVDQA BB2, AA2
+	JMP     openAVX2ShortOpenLoop
+
+openAVX2ShortTail32:
+	CMPQ    inl, $16
+	VMOVDQA A0, A1
+	JB      openAVX2ShortDone
+
+	SUBQ $16, inl
+
+	// Load for hashing
+	polyAdd(0*8(inp))
+	polyMulAVX2
+
+	// Load for decryption
+	VPXOR      (inp), A0, T0
+	VMOVDQU    T0, (oup)
+	LEAQ       (1*16)(inp), inp
+	LEAQ       (1*16)(oup), oup
+	VPERM2I128 $0x11, AA0, AA0, AA0
+	VMOVDQA    A0, A1
+
+openAVX2ShortDone:
+	VZEROUPPER
+	JMP openSSETail16
+
+// ----------------------------------------------------------------------------
+// Special optimization for buffers smaller than 321 bytes
+openAVX2320:
+	// For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks
+	VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1
+	VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2
+	VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3
+	MOVQ    $10, itr2
+
+openAVX2320InnerCipherLoop:
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
+	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
+	VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
+	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
+	VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2
+	DECQ     itr2
+	JNE      openAVX2320InnerCipherLoop
+
+	VMOVDQA ·chacha20Constants<>(SB), TT0
+	VPADDD  TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2
+	VPADDD  TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2
+	VPADDD  TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2
+	VMOVDQA ·avx2IncMask<>(SB), TT0
+	VPADDD  TT3, DD0, DD0; VPADDD TT0, TT3, TT3
+	VPADDD  TT3, DD1, DD1; VPADDD TT0, TT3, TT3
+	VPADDD  TT3, DD2, DD2
+
+	// Clamp and store poly key
+	VPERM2I128 $0x02, AA0, BB0, TT0
+	VPAND      ·polyClampMask<>(SB), TT0, TT0
+	VMOVDQA    TT0, rsStoreAVX2
+
+	// Stream for up to 320 bytes
+	VPERM2I128 $0x13, AA0, BB0, AA0
+	VPERM2I128 $0x13, CC0, DD0, BB0
+	VPERM2I128 $0x02, AA1, BB1, CC0
+	VPERM2I128 $0x02, CC1, DD1, DD0
+	VPERM2I128 $0x13, AA1, BB1, AA1
+	VPERM2I128 $0x13, CC1, DD1, BB1
+	VPERM2I128 $0x02, AA2, BB2, CC1
+	VPERM2I128 $0x02, CC2, DD2, DD1
+	VPERM2I128 $0x13, AA2, BB2, AA2
+	VPERM2I128 $0x13, CC2, DD2, BB2
+	JMP        openAVX2ShortOpen
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 128 bytes of ciphertext
+openAVX2Tail128:
+	// Need to decrypt up to 128 bytes - prepare two blocks
+	VMOVDQA ·chacha20Constants<>(SB), AA1
+	VMOVDQA state1StoreAVX2, BB1
+	VMOVDQA state2StoreAVX2, CC1
+	VMOVDQA ctr3StoreAVX2, DD1
+	VPADDD  ·avx2IncMask<>(SB), DD1, DD1
+	VMOVDQA DD1, DD0
+
+	XORQ  itr2, itr2
+	MOVQ  inl, itr1
+	ANDQ  $-16, itr1
+	TESTQ itr1, itr1
+	JE    openAVX2Tail128LoopB
+
+openAVX2Tail128LoopA:
+	// Perform ChaCha rounds, while hashing the remaining input
+	polyAdd(0(inp)(itr2*1))
+	polyMulAVX2
+
+openAVX2Tail128LoopB:
+	ADDQ     $16, itr2
+	chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+	VPALIGNR $4, BB1, BB1, BB1
+	VPALIGNR $8, CC1, CC1, CC1
+	VPALIGNR $12, DD1, DD1, DD1
+	chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+	VPALIGNR $12, BB1, BB1, BB1
+	VPALIGNR $8, CC1, CC1, CC1
+	VPALIGNR $4, DD1, DD1, DD1
+	CMPQ     itr2, itr1
+	JB       openAVX2Tail128LoopA
+	CMPQ     itr2, $160
+	JNE      openAVX2Tail128LoopB
+
+	VPADDD     ·chacha20Constants<>(SB), AA1, AA1
+	VPADDD     state1StoreAVX2, BB1, BB1
+	VPADDD     state2StoreAVX2, CC1, CC1
+	VPADDD     DD0, DD1, DD1
+	VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
+
+openAVX2TailLoop:
+	CMPQ inl, $32
+	JB   openAVX2Tail
+	SUBQ $32, inl
+
+	// Load for decryption
+	VPXOR   (inp), AA0, AA0
+	VMOVDQU AA0, (oup)
+	LEAQ    (1*32)(inp), inp
+	LEAQ    (1*32)(oup), oup
+	VMOVDQA BB0, AA0
+	VMOVDQA CC0, BB0
+	VMOVDQA DD0, CC0
+	JMP     openAVX2TailLoop
+
+openAVX2Tail:
+	CMPQ    inl, $16
+	VMOVDQA A0, A1
+	JB      openAVX2TailDone
+	SUBQ    $16, inl
+
+	// Load for decryption
+	VPXOR      (inp), A0, T0
+	VMOVDQU    T0, (oup)
+	LEAQ       (1*16)(inp), inp
+	LEAQ       (1*16)(oup), oup
+	VPERM2I128 $0x11, AA0, AA0, AA0
+	VMOVDQA    A0, A1
+
+openAVX2TailDone:
+	VZEROUPPER
+	JMP openSSETail16
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 256 bytes of ciphertext
+openAVX2Tail256:
+	// Need to decrypt up to 256 bytes - prepare four blocks
+	VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1
+	VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1
+	VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1
+	VMOVDQA ctr3StoreAVX2, DD0
+	VPADDD  ·avx2IncMask<>(SB), DD0, DD0
+	VPADDD  ·avx2IncMask<>(SB), DD0, DD1
+	VMOVDQA DD0, TT1
+	VMOVDQA DD1, TT2
+
+	// Compute the number of iterations that will hash data
+	MOVQ    inl, tmpStoreAVX2
+	MOVQ    inl, itr1
+	SUBQ    $128, itr1
+	SHRQ    $4, itr1
+	MOVQ    $10, itr2
+	CMPQ    itr1, $10
+	CMOVQGT itr2, itr1
+	MOVQ    inp, inl
+	XORQ    itr2, itr2
+
+openAVX2Tail256LoopA:
+	polyAdd(0(inl))
+	polyMulAVX2
+	LEAQ 16(inl), inl
+
+	// Perform ChaCha rounds, while hashing the remaining input
+openAVX2Tail256LoopB:
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
+	VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1
+	INCQ     itr2
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
+	VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1
+	CMPQ     itr2, itr1
+	JB       openAVX2Tail256LoopA
+
+	CMPQ itr2, $10
+	JNE  openAVX2Tail256LoopB
+
+	MOVQ inl, itr2
+	SUBQ inp, inl
+	MOVQ inl, itr1
+	MOVQ tmpStoreAVX2, inl
+
+	// Hash the remainder of data (if any)
+openAVX2Tail256Hash:
+	ADDQ $16, itr1
+	CMPQ itr1, inl
+	JGT  openAVX2Tail256HashEnd
+	polyAdd(0(itr2))
+	polyMulAVX2
+	LEAQ 16(itr2), itr2
+	JMP  openAVX2Tail256Hash
+
+// Store 128 bytes safely, then go to store loop
+openAVX2Tail256HashEnd:
+	VPADDD     ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1
+	VPADDD     state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1
+	VPADDD     state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1
+	VPADDD     TT1, DD0, DD0; VPADDD TT2, DD1, DD1
+	VPERM2I128 $0x02, AA0, BB0, AA2; VPERM2I128 $0x02, CC0, DD0, BB2; VPERM2I128 $0x13, AA0, BB0, CC2; VPERM2I128 $0x13, CC0, DD0, DD2
+	VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
+
+	VPXOR   (0*32)(inp), AA2, AA2; VPXOR (1*32)(inp), BB2, BB2; VPXOR (2*32)(inp), CC2, CC2; VPXOR (3*32)(inp), DD2, DD2
+	VMOVDQU AA2, (0*32)(oup); VMOVDQU BB2, (1*32)(oup); VMOVDQU CC2, (2*32)(oup); VMOVDQU DD2, (3*32)(oup)
+	LEAQ    (4*32)(inp), inp
+	LEAQ    (4*32)(oup), oup
+	SUBQ    $4*32, inl
+
+	JMP openAVX2TailLoop
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 384 bytes of ciphertext
+openAVX2Tail384:
+	// Need to decrypt up to 384 bytes - prepare six blocks
+	VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2
+	VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2
+	VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2
+	VMOVDQA ctr3StoreAVX2, DD0
+	VPADDD  ·avx2IncMask<>(SB), DD0, DD0
+	VPADDD  ·avx2IncMask<>(SB), DD0, DD1
+	VPADDD  ·avx2IncMask<>(SB), DD1, DD2
+	VMOVDQA DD0, ctr0StoreAVX2
+	VMOVDQA DD1, ctr1StoreAVX2
+	VMOVDQA DD2, ctr2StoreAVX2
+
+	// Compute the number of iterations that will hash two blocks of data
+	MOVQ    inl, tmpStoreAVX2
+	MOVQ    inl, itr1
+	SUBQ    $256, itr1
+	SHRQ    $4, itr1
+	ADDQ    $6, itr1
+	MOVQ    $10, itr2
+	CMPQ    itr1, $10
+	CMOVQGT itr2, itr1
+	MOVQ    inp, inl
+	XORQ    itr2, itr2
+
+	// Perform ChaCha rounds, while hashing the remaining input
+openAVX2Tail384LoopB:
+	polyAdd(0(inl))
+	polyMulAVX2
+	LEAQ 16(inl), inl
+
+openAVX2Tail384LoopA:
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
+	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
+	VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2
+	polyAdd(0(inl))
+	polyMulAVX2
+	LEAQ     16(inl), inl
+	INCQ     itr2
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
+	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
+	VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2
+
+	CMPQ itr2, itr1
+	JB   openAVX2Tail384LoopB
+
+	CMPQ itr2, $10
+	JNE  openAVX2Tail384LoopA
+
+	MOVQ inl, itr2
+	SUBQ inp, inl
+	MOVQ inl, itr1
+	MOVQ tmpStoreAVX2, inl
+
+openAVX2Tail384Hash:
+	ADDQ $16, itr1
+	CMPQ itr1, inl
+	JGT  openAVX2Tail384HashEnd
+	polyAdd(0(itr2))
+	polyMulAVX2
+	LEAQ 16(itr2), itr2
+	JMP  openAVX2Tail384Hash
+
+// Store 256 bytes safely, then go to store loop
+openAVX2Tail384HashEnd:
+	VPADDD     ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2
+	VPADDD     state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2
+	VPADDD     state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2
+	VPADDD     ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2
+	VPERM2I128 $0x02, AA0, BB0, TT0; VPERM2I128 $0x02, CC0, DD0, TT1; VPERM2I128 $0x13, AA0, BB0, TT2; VPERM2I128 $0x13, CC0, DD0, TT3
+	VPXOR      (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3
+	VMOVDQU    TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup)
+	VPERM2I128 $0x02, AA1, BB1, TT0; VPERM2I128 $0x02, CC1, DD1, TT1; VPERM2I128 $0x13, AA1, BB1, TT2; VPERM2I128 $0x13, CC1, DD1, TT3
+	VPXOR      (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3
+	VMOVDQU    TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup)
+	VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0
+	LEAQ       (8*32)(inp), inp
+	LEAQ       (8*32)(oup), oup
+	SUBQ       $8*32, inl
+	JMP        openAVX2TailLoop
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 512 bytes of ciphertext
+openAVX2Tail512:
+	VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3
+	VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3
+	VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3
+	VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3
+	VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2
+	XORQ    itr1, itr1
+	MOVQ    inp, itr2
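+	// itr1 counts double rounds: the first four hash 48 bytes each and the
+	// remaining six hash 32 bytes each, 384 bytes in total, with itr2
+	// walking the ciphertext being hashed.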
+
+openAVX2Tail512LoopB:
+	polyAdd(0(itr2))
+	polyMulAVX2
+	LEAQ (2*8)(itr2), itr2
+
+openAVX2Tail512LoopA:
+	VPADDD   BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+	VPXOR    AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+	VPSHUFB  ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+	VPADDD   DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+	VPXOR    CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+	VMOVDQA  CC3, tmpStoreAVX2
+	VPSLLD   $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+	VPSLLD   $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+	VPSLLD   $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+	VPSLLD   $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+	VMOVDQA  tmpStoreAVX2, CC3
+	polyAdd(0*8(itr2))
+	polyMulAVX2
+	VPADDD   BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+	VPXOR    AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+	VPSHUFB  ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
+	VPADDD   DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+	VPXOR    CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+	VMOVDQA  CC3, tmpStoreAVX2
+	VPSLLD   $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
+	VPSLLD   $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
+	VPSLLD   $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
+	VPSLLD   $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
+	VMOVDQA  tmpStoreAVX2, CC3
+	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
+	VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3
+	VPADDD   BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+	VPXOR    AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+	VPSHUFB  ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+	VPADDD   DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+	VPXOR    CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+	polyAdd(2*8(itr2))
+	polyMulAVX2
+	LEAQ     (4*8)(itr2), itr2
+	VMOVDQA  CC3, tmpStoreAVX2
+	VPSLLD   $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+	VPSLLD   $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+	VPSLLD   $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+	VPSLLD   $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+	VMOVDQA  tmpStoreAVX2, CC3
+	VPADDD   BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+	VPXOR    AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+	VPSHUFB  ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
+	VPADDD   DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+	VPXOR    CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+	VMOVDQA  CC3, tmpStoreAVX2
+	VPSLLD   $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
+	VPSLLD   $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
+	VPSLLD   $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
+	VPSLLD   $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
+	VMOVDQA  tmpStoreAVX2, CC3
+	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
+	VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3
+	INCQ     itr1
+	CMPQ     itr1, $4
+	JLT      openAVX2Tail512LoopB
+
+	CMPQ itr1, $10
+	JNE  openAVX2Tail512LoopA
+
+	MOVQ inl, itr1
+	SUBQ $384, itr1
+	ANDQ $-16, itr1
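+	// itr1 = the bytes beyond the 384 already hashed, rounded down to a
+	// multiple of 16.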
+
+openAVX2Tail512HashLoop:
+	TESTQ itr1, itr1
+	JE    openAVX2Tail512HashEnd
+	polyAdd(0(itr2))
+	polyMulAVX2
+	LEAQ  16(itr2), itr2
+	SUBQ  $16, itr1
+	JMP   openAVX2Tail512HashLoop
+
+openAVX2Tail512HashEnd:
+	VPADDD     ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3
+	VPADDD     state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3
+	VPADDD     state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3
+	VPADDD     ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3
+	VMOVDQA    CC3, tmpStoreAVX2
+	VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0
+	VPXOR      (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0
+	VMOVDQU    CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup)
+	VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
+	VPXOR      (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0
+	VMOVDQU    AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup)
+	VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0
+	VPXOR      (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0
+	VMOVDQU    AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup)
+	VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0
+
+	LEAQ (12*32)(inp), inp
+	LEAQ (12*32)(oup), oup
+	SUBQ $12*32, inl
+
+	JMP openAVX2TailLoop
+
+// ----------------------------------------------------------------------------
+// ----------------------------------------------------------------------------
+// func chacha20Poly1305Seal(dst, key, src, ad []byte)
+TEXT ·chacha20Poly1305Seal(SB), 0, $288-96
+	// For aligned stack access
+	MOVQ SP, BP
+	ADDQ $32, BP
+	ANDQ $-32, BP
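+	// BP is now 32-byte aligned, so the xmm/ymm state spills below land on
+	// aligned stack slots.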
+	MOVQ dst+0(FP), oup
+	MOVQ key+24(FP), keyp
+	MOVQ src+48(FP), inp
+	MOVQ src_len+56(FP), inl
+	MOVQ ad+72(FP), adp
+
+	// Check for AVX2 support
+	CMPB runtime·support_avx2(SB), $0
+	JE   noavx2bmi2Seal
+
+	// Check BMI2 bit for MULXQ.
+	// runtime·cpuid_ebx7 is always available here
+	// because it passed avx2 check
+	TESTL $(1<<8), runtime·cpuid_ebx7(SB)
+	JNE   chacha20Poly1305Seal_AVX2
+noavx2bmi2Seal:
+
+	// Special optimization for very short buffers
+	CMPQ inl, $128
+	JBE  sealSSE128 // About 15% faster
+
+	// In the seal case - prepare the poly key + 3 blocks of stream in the first iteration
+	MOVOU ·chacha20Constants<>(SB), A0
+	MOVOU (1*16)(keyp), B0
+	MOVOU (2*16)(keyp), C0
+	MOVOU (3*16)(keyp), D0
+
+	// Store state on stack for future use
+	MOVO B0, state1Store
+	MOVO C0, state2Store
+
+	// Load state, increment counter blocks
+	MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1
+	MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2
+	MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3
+
+	// Store counters
+	MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store
+	MOVQ $10, itr2
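+	// 10 iterations of the double quarter-round below give the full 20
+	// ChaCha rounds.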
+
+sealSSEIntroLoop:
+	MOVO         C3, tmpStore
+	chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
+	MOVO         tmpStore, C3
+	MOVO         C1, tmpStore
+	chachaQR(A3, B3, C3, D3, C1)
+	MOVO         tmpStore, C1
+	shiftB0Left; shiftB1Left; shiftB2Left; shiftB3Left
+	shiftC0Left; shiftC1Left; shiftC2Left; shiftC3Left
+	shiftD0Left; shiftD1Left; shiftD2Left; shiftD3Left
+
+	MOVO          C3, tmpStore
+	chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
+	MOVO          tmpStore, C3
+	MOVO          C1, tmpStore
+	chachaQR(A3, B3, C3, D3, C1)
+	MOVO          tmpStore, C1
+	shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right
+	shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right
+	shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right
+	DECQ          itr2
+	JNE           sealSSEIntroLoop
+
+	// Add in the state
+	PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3
+	PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3
+	PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3
+	PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3
+
+	// Clamp and store the key
+	PAND ·polyClampMask<>(SB), A0
+	MOVO A0, rStore
+	MOVO B0, sStore
+
+	// Hash AAD
+	MOVQ ad_len+80(FP), itr2
+	CALL polyHashADInternal<>(SB)
+
+	MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0
+	PXOR  A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1
+	MOVOU A1, (0*16)(oup); MOVOU B1, (1*16)(oup); MOVOU C1, (2*16)(oup); MOVOU D1, (3*16)(oup)
+	MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0
+	PXOR  A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2
+	MOVOU A2, (4*16)(oup); MOVOU B2, (5*16)(oup); MOVOU C2, (6*16)(oup); MOVOU D2, (7*16)(oup)
+
+	MOVQ $128, itr1
+	SUBQ $128, inl
+	LEAQ 128(inp), inp
+
+	MOVO A3, A1; MOVO B3, B1; MOVO C3, C1; MOVO D3, D1
+
+	CMPQ inl, $64
+	JBE  sealSSE128SealHash
+
+	MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0
+	PXOR  A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3
+	MOVOU A3, (8*16)(oup); MOVOU B3, (9*16)(oup); MOVOU C3, (10*16)(oup); MOVOU D3, (11*16)(oup)
+
+	ADDQ $64, itr1
+	SUBQ $64, inl
+	LEAQ 64(inp), inp
+
+	MOVQ $2, itr1
+	MOVQ $8, itr2
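+	// With itr1 = 2 and itr2 = 8 the inner loop below performs 10 double
+	// rounds in total while hashing 192 bytes (12 Poly1305 blocks) of the
+	// previously written ciphertext.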
+
+	CMPQ inl, $64
+	JBE  sealSSETail64
+	CMPQ inl, $128
+	JBE  sealSSETail128
+	CMPQ inl, $192
+	JBE  sealSSETail192
+
+sealSSEMainLoop:
+	// Load state, increment counter blocks
+	MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0
+	MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1
+	MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2
+	MOVO A2, A3; MOVO B2, B3; MOVO C2, C3; MOVO D2, D3; PADDL ·sseIncMask<>(SB), D3
+
+	// Store counters
+	MOVO D0, ctr0Store; MOVO D1, ctr1Store; MOVO D2, ctr2Store; MOVO D3, ctr3Store
+
+sealSSEInnerLoop:
+	MOVO          C3, tmpStore
+	chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
+	MOVO          tmpStore, C3
+	MOVO          C1, tmpStore
+	chachaQR(A3, B3, C3, D3, C1)
+	MOVO          tmpStore, C1
+	polyAdd(0(oup))
+	shiftB0Left;  shiftB1Left; shiftB2Left; shiftB3Left
+	shiftC0Left;  shiftC1Left; shiftC2Left; shiftC3Left
+	shiftD0Left;  shiftD1Left; shiftD2Left; shiftD3Left
+	polyMulStage1
+	polyMulStage2
+	LEAQ          (2*8)(oup), oup
+	MOVO          C3, tmpStore
+	chachaQR(A0, B0, C0, D0, C3); chachaQR(A1, B1, C1, D1, C3); chachaQR(A2, B2, C2, D2, C3)
+	MOVO          tmpStore, C3
+	MOVO          C1, tmpStore
+	polyMulStage3
+	chachaQR(A3, B3, C3, D3, C1)
+	MOVO          tmpStore, C1
+	polyMulReduceStage
+	shiftB0Right; shiftB1Right; shiftB2Right; shiftB3Right
+	shiftC0Right; shiftC1Right; shiftC2Right; shiftC3Right
+	shiftD0Right; shiftD1Right; shiftD2Right; shiftD3Right
+	DECQ          itr2
+	JGE           sealSSEInnerLoop
+	polyAdd(0(oup))
+	polyMul
+	LEAQ          (2*8)(oup), oup
+	DECQ          itr1
+	JG            sealSSEInnerLoop
+
+	// Add in the state
+	PADDD ·chacha20Constants<>(SB), A0; PADDD ·chacha20Constants<>(SB), A1; PADDD ·chacha20Constants<>(SB), A2; PADDD ·chacha20Constants<>(SB), A3
+	PADDD state1Store, B0; PADDD state1Store, B1; PADDD state1Store, B2; PADDD state1Store, B3
+	PADDD state2Store, C0; PADDD state2Store, C1; PADDD state2Store, C2; PADDD state2Store, C3
+	PADDD ctr0Store, D0; PADDD ctr1Store, D1; PADDD ctr2Store, D2; PADDD ctr3Store, D3
+	MOVO  D3, tmpStore
+
+	// Load - xor - store
+	MOVOU (0*16)(inp), D3; PXOR D3, A0
+	MOVOU (1*16)(inp), D3; PXOR D3, B0
+	MOVOU (2*16)(inp), D3; PXOR D3, C0
+	MOVOU (3*16)(inp), D3; PXOR D3, D0
+	MOVOU A0, (0*16)(oup)
+	MOVOU B0, (1*16)(oup)
+	MOVOU C0, (2*16)(oup)
+	MOVOU D0, (3*16)(oup)
+	MOVO  tmpStore, D3
+
+	MOVOU (4*16)(inp), A0; MOVOU (5*16)(inp), B0; MOVOU (6*16)(inp), C0; MOVOU (7*16)(inp), D0
+	PXOR  A0, A1; PXOR B0, B1; PXOR C0, C1; PXOR D0, D1
+	MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup)
+	MOVOU (8*16)(inp), A0; MOVOU (9*16)(inp), B0; MOVOU (10*16)(inp), C0; MOVOU (11*16)(inp), D0
+	PXOR  A0, A2; PXOR B0, B2; PXOR C0, C2; PXOR D0, D2
+	MOVOU A2, (8*16)(oup); MOVOU B2, (9*16)(oup); MOVOU C2, (10*16)(oup); MOVOU D2, (11*16)(oup)
+	ADDQ  $192, inp
+	MOVQ  $192, itr1
+	SUBQ  $192, inl
+	MOVO  A3, A1
+	MOVO  B3, B1
+	MOVO  C3, C1
+	MOVO  D3, D1
+	CMPQ  inl, $64
+	JBE   sealSSE128SealHash
+	MOVOU (0*16)(inp), A0; MOVOU (1*16)(inp), B0; MOVOU (2*16)(inp), C0; MOVOU (3*16)(inp), D0
+	PXOR  A0, A3; PXOR B0, B3; PXOR C0, C3; PXOR D0, D3
+	MOVOU A3, (12*16)(oup); MOVOU B3, (13*16)(oup); MOVOU C3, (14*16)(oup); MOVOU D3, (15*16)(oup)
+	LEAQ  64(inp), inp
+	SUBQ  $64, inl
+	MOVQ  $6, itr1
+	MOVQ  $4, itr2
+	CMPQ  inl, $192
+	JG    sealSSEMainLoop
+
+	MOVQ  inl, itr1
+	TESTQ inl, inl
+	JE    sealSSE128SealHash
+	MOVQ  $6, itr1
+	CMPQ  inl, $64
+	JBE   sealSSETail64
+	CMPQ  inl, $128
+	JBE   sealSSETail128
+	JMP   sealSSETail192
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 64 bytes of plaintext
+sealSSETail64:
+	// Need to encrypt up to 64 bytes - prepare a single block, hash 192 or 256 bytes
+	MOVO  ·chacha20Constants<>(SB), A1
+	MOVO  state1Store, B1
+	MOVO  state2Store, C1
+	MOVO  ctr3Store, D1
+	PADDL ·sseIncMask<>(SB), D1
+	MOVO  D1, ctr0Store
+
+sealSSETail64LoopA:
+	// Perform ChaCha rounds, while hashing the previously encrypted ciphertext
+	polyAdd(0(oup))
+	polyMul
+	LEAQ 16(oup), oup
+
+sealSSETail64LoopB:
+	chachaQR(A1, B1, C1, D1, T1)
+	shiftB1Left;  shiftC1Left; shiftD1Left
+	chachaQR(A1, B1, C1, D1, T1)
+	shiftB1Right; shiftC1Right; shiftD1Right
+	polyAdd(0(oup))
+	polyMul
+	LEAQ          16(oup), oup
+
+	DECQ itr1
+	JG   sealSSETail64LoopA
+
+	DECQ  itr2
+	JGE   sealSSETail64LoopB
+	PADDL ·chacha20Constants<>(SB), A1
+	PADDL state1Store, B1
+	PADDL state2Store, C1
+	PADDL ctr0Store, D1
+
+	JMP sealSSE128Seal
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 128 bytes of plaintext
+sealSSETail128:
+	// Need to encrypt up to 128 bytes - prepare two blocks, hash 192 or 256 bytes
+	MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store
+	MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store
+
+sealSSETail128LoopA:
+	// Perform ChaCha rounds, while hashing the previously encrypted ciphertext
+	polyAdd(0(oup))
+	polyMul
+	LEAQ 16(oup), oup
+
+sealSSETail128LoopB:
+	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0)
+	shiftB0Left;  shiftC0Left; shiftD0Left
+	shiftB1Left;  shiftC1Left; shiftD1Left
+	polyAdd(0(oup))
+	polyMul
+	LEAQ          16(oup), oup
+	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0)
+	shiftB0Right; shiftC0Right; shiftD0Right
+	shiftB1Right; shiftC1Right; shiftD1Right
+
+	DECQ itr1
+	JG   sealSSETail128LoopA
+
+	DECQ itr2
+	JGE  sealSSETail128LoopB
+
+	PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1
+	PADDL state1Store, B0; PADDL state1Store, B1
+	PADDL state2Store, C0; PADDL state2Store, C1
+	PADDL ctr0Store, D0; PADDL ctr1Store, D1
+
+	MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3
+	PXOR  T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0
+	MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup)
+
+	MOVQ $64, itr1
+	LEAQ 64(inp), inp
+	SUBQ $64, inl
+
+	JMP sealSSE128SealHash
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 192 bytes of plaintext
+sealSSETail192:
+	// Need to encrypt up to 192 bytes - prepare three blocks, hash 192 or 256 bytes
+	MOVO ·chacha20Constants<>(SB), A0; MOVO state1Store, B0; MOVO state2Store, C0; MOVO ctr3Store, D0; PADDL ·sseIncMask<>(SB), D0; MOVO D0, ctr0Store
+	MOVO A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1; MOVO D1, ctr1Store
+	MOVO A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2; MOVO D2, ctr2Store
+
+sealSSETail192LoopA:
+	// Perform ChaCha rounds, while hashing the previously encrypted ciphertext
+	polyAdd(0(oup))
+	polyMul
+	LEAQ 16(oup), oup
+
+sealSSETail192LoopB:
+	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
+	shiftB0Left; shiftC0Left; shiftD0Left
+	shiftB1Left; shiftC1Left; shiftD1Left
+	shiftB2Left; shiftC2Left; shiftD2Left
+
+	polyAdd(0(oup))
+	polyMul
+	LEAQ 16(oup), oup
+
+	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
+	shiftB0Right; shiftC0Right; shiftD0Right
+	shiftB1Right; shiftC1Right; shiftD1Right
+	shiftB2Right; shiftC2Right; shiftD2Right
+
+	DECQ itr1
+	JG   sealSSETail192LoopA
+
+	DECQ itr2
+	JGE  sealSSETail192LoopB
+
+	PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2
+	PADDL state1Store, B0; PADDL state1Store, B1; PADDL state1Store, B2
+	PADDL state2Store, C0; PADDL state2Store, C1; PADDL state2Store, C2
+	PADDL ctr0Store, D0; PADDL ctr1Store, D1; PADDL ctr2Store, D2
+
+	MOVOU (0*16)(inp), T0; MOVOU (1*16)(inp), T1; MOVOU (2*16)(inp), T2; MOVOU (3*16)(inp), T3
+	PXOR  T0, A0; PXOR T1, B0; PXOR T2, C0; PXOR T3, D0
+	MOVOU A0, (0*16)(oup); MOVOU B0, (1*16)(oup); MOVOU C0, (2*16)(oup); MOVOU D0, (3*16)(oup)
+	MOVOU (4*16)(inp), T0; MOVOU (5*16)(inp), T1; MOVOU (6*16)(inp), T2; MOVOU (7*16)(inp), T3
+	PXOR  T0, A1; PXOR T1, B1; PXOR T2, C1; PXOR T3, D1
+	MOVOU A1, (4*16)(oup); MOVOU B1, (5*16)(oup); MOVOU C1, (6*16)(oup); MOVOU D1, (7*16)(oup)
+
+	MOVO A2, A1
+	MOVO B2, B1
+	MOVO C2, C1
+	MOVO D2, D1
+	MOVQ $128, itr1
+	LEAQ 128(inp), inp
+	SUBQ $128, inl
+
+	JMP sealSSE128SealHash
+
+// ----------------------------------------------------------------------------
+// Special seal optimization for buffers smaller than 129 bytes
+sealSSE128:
+	// For up to 128 bytes of ciphertext and 64 bytes for the poly key, we need to process three blocks
+	MOVOU ·chacha20Constants<>(SB), A0; MOVOU (1*16)(keyp), B0; MOVOU (2*16)(keyp), C0; MOVOU (3*16)(keyp), D0
+	MOVO  A0, A1; MOVO B0, B1; MOVO C0, C1; MOVO D0, D1; PADDL ·sseIncMask<>(SB), D1
+	MOVO  A1, A2; MOVO B1, B2; MOVO C1, C2; MOVO D1, D2; PADDL ·sseIncMask<>(SB), D2
+	MOVO  B0, T1; MOVO C0, T2; MOVO D1, T3
+	MOVQ  $10, itr2
+
+sealSSE128InnerCipherLoop:
+	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
+	shiftB0Left;  shiftB1Left; shiftB2Left
+	shiftC0Left;  shiftC1Left; shiftC2Left
+	shiftD0Left;  shiftD1Left; shiftD2Left
+	chachaQR(A0, B0, C0, D0, T0); chachaQR(A1, B1, C1, D1, T0); chachaQR(A2, B2, C2, D2, T0)
+	shiftB0Right; shiftB1Right; shiftB2Right
+	shiftC0Right; shiftC1Right; shiftC2Right
+	shiftD0Right; shiftD1Right; shiftD2Right
+	DECQ          itr2
+	JNE           sealSSE128InnerCipherLoop
+
+	// A0|B0 hold the Poly1305 32-byte key, C0,D0 can be discarded
+	PADDL ·chacha20Constants<>(SB), A0; PADDL ·chacha20Constants<>(SB), A1; PADDL ·chacha20Constants<>(SB), A2
+	PADDL T1, B0; PADDL T1, B1; PADDL T1, B2
+	PADDL T2, C1; PADDL T2, C2
+	PADDL T3, D1; PADDL ·sseIncMask<>(SB), T3; PADDL T3, D2
+	PAND  ·polyClampMask<>(SB), A0
+	MOVOU A0, rStore
+	MOVOU B0, sStore
+
+	// Hash
+	MOVQ ad_len+80(FP), itr2
+	CALL polyHashADInternal<>(SB)
+	XORQ itr1, itr1
+
+sealSSE128SealHash:
+	// itr1 holds the number of bytes encrypted but not yet hashed
+	CMPQ itr1, $16
+	JB   sealSSE128Seal
+	polyAdd(0(oup))
+	polyMul
+
+	SUBQ $16, itr1
+	ADDQ $16, oup
+
+	JMP sealSSE128SealHash
+
+sealSSE128Seal:
+	CMPQ inl, $16
+	JB   sealSSETail
+	SUBQ $16, inl
+
+	// Load for encryption
+	MOVOU (inp), T0
+	PXOR  T0, A1
+	MOVOU A1, (oup)
+	LEAQ  (1*16)(inp), inp
+	LEAQ  (1*16)(oup), oup
+
+	// Extract for hashing
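+	// The 16 ciphertext bytes enter the accumulator with the Poly1305
+	// padding bit - the ADCQ $1 into acc2 is the 2^128 term.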
+	MOVQ   A1, t0
+	PSRLDQ $8, A1
+	MOVQ   A1, t1
+	ADDQ   t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2
+	polyMul
+
+	// Shift the stream "left"
+	MOVO B1, A1
+	MOVO C1, B1
+	MOVO D1, C1
+	MOVO A2, D1
+	MOVO B2, A2
+	MOVO C2, B2
+	MOVO D2, C2
+	JMP  sealSSE128Seal
+
+sealSSETail:
+	TESTQ inl, inl
+	JE    sealSSEFinalize
+
+	// We can only load the plaintext one byte at a time, to avoid reading past the end of the buffer
+	MOVQ inl, itr2
+	SHLQ $4, itr2
+	LEAQ ·andMask<>(SB), t0
+	MOVQ inl, itr1
+	LEAQ -1(inp)(inl*1), inp
+	XORQ t2, t2
+	XORQ t3, t3
+	XORQ AX, AX
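+	// Walk the plaintext backwards, one byte at a time, shifting it into
+	// the t3:t2 register pair to build the final partial block in
+	// little-endian order.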
+
+sealSSETailLoadLoop:
+	SHLQ  $8, t2, t3
+	SHLQ  $8, t2
+	MOVB  (inp), AX
+	XORQ  AX, t2
+	LEAQ  -1(inp), inp
+	DECQ  itr1
+	JNE   sealSSETailLoadLoop
+
+	MOVQ  t2, 0+tmpStore
+	MOVQ  t3, 8+tmpStore
+	PXOR  0+tmpStore, A1
+	MOVOU A1, (oup)
+	MOVOU  -16(t0)(itr2*1), T0
+	PAND   T0, A1
+	MOVQ   A1, t0
+	PSRLDQ $8, A1
+	MOVQ   A1, t1
+	ADDQ   t0, acc0; ADCQ t1, acc1; ADCQ $1, acc2
+	polyMul
+
+	ADDQ inl, oup
+
+sealSSEFinalize:
+	// Hash in the buffer lengths
+	ADDQ ad_len+80(FP), acc0
+	ADCQ src_len+56(FP), acc1
+	ADCQ $1, acc2
+	polyMul
+
+	// Final reduce
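+	// Subtract 2^130 - 5 (limbs -5, -1, 3): if the subtraction borrows,
+	// the accumulator was already fully reduced and the CMOVQCS
+	// instructions below restore the saved copy.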
+	MOVQ    acc0, t0
+	MOVQ    acc1, t1
+	MOVQ    acc2, t2
+	SUBQ    $-5, acc0
+	SBBQ    $-1, acc1
+	SBBQ    $3, acc2
+	CMOVQCS t0, acc0
+	CMOVQCS t1, acc1
+	CMOVQCS t2, acc2
+
+	// Add in the "s" part of the key
+	ADDQ 0+sStore, acc0
+	ADCQ 8+sStore, acc1
+
+	// Finally store the tag at the end of the message
+	MOVQ acc0, (0*8)(oup)
+	MOVQ acc1, (1*8)(oup)
+	RET
+
+// ----------------------------------------------------------------------------
+// ------------------------- AVX2 Code ----------------------------------------
+chacha20Poly1305Seal_AVX2:
+	VZEROUPPER
+	VMOVDQU ·chacha20Constants<>(SB), AA0
+	BYTE    $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x70; BYTE $0x10 // broadcasti128 16(r8), ymm14
+	BYTE    $0xc4; BYTE $0x42; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x20 // broadcasti128 32(r8), ymm12
+	BYTE    $0xc4; BYTE $0xc2; BYTE $0x7d; BYTE $0x5a; BYTE $0x60; BYTE $0x30 // broadcasti128 48(r8), ymm4
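+	// The BYTE sequences above hand-assemble VBROADCASTI128 loads of the
+	// key, counter and nonce halves of the state - presumably emitted as
+	// raw bytes because the assembler lacks the mnemonic.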
+	VPADDD  ·avx2InitMask<>(SB), DD0, DD0
+
+	// Special optimizations for very short buffers
+	CMPQ inl, $192
+	JBE  seal192AVX2 // 33% faster
+	CMPQ inl, $320
+	JBE  seal320AVX2 // 17% faster
+
+	// For the general case, prepare the poly key first - as a byproduct we get 64 bytes of cipher stream
+	VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3
+	VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3; VMOVDQA BB0, state1StoreAVX2
+	VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3; VMOVDQA CC0, state2StoreAVX2
+	VPADDD  ·avx2IncMask<>(SB), DD0, DD1; VMOVDQA DD0, ctr0StoreAVX2
+	VPADDD  ·avx2IncMask<>(SB), DD1, DD2; VMOVDQA DD1, ctr1StoreAVX2
+	VPADDD  ·avx2IncMask<>(SB), DD2, DD3; VMOVDQA DD2, ctr2StoreAVX2
+	VMOVDQA DD3, ctr3StoreAVX2
+	MOVQ    $10, itr2
+
+sealAVX2IntroLoop:
+	VMOVDQA CC3, tmpStoreAVX2
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3)
+	VMOVDQA tmpStoreAVX2, CC3
+	VMOVDQA CC1, tmpStoreAVX2
+	chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1)
+	VMOVDQA tmpStoreAVX2, CC1
+
+	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0
+	VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1
+	VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2
+	VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3
+
+	VMOVDQA CC3, tmpStoreAVX2
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3)
+	VMOVDQA tmpStoreAVX2, CC3
+	VMOVDQA CC1, tmpStoreAVX2
+	chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1)
+	VMOVDQA tmpStoreAVX2, CC1
+
+	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0
+	VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1
+	VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2
+	VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3
+	DECQ     itr2
+	JNE      sealAVX2IntroLoop
+
+	VPADDD ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3
+	VPADDD state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3
+	VPADDD state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3
+	VPADDD ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3
+
+	VPERM2I128 $0x13, CC0, DD0, CC0 // Stream bytes 96 - 127
+	VPERM2I128 $0x02, AA0, BB0, DD0 // The Poly1305 key
+	VPERM2I128 $0x13, AA0, BB0, AA0 // Stream bytes 64 - 95
+
+	// Clamp and store poly key
+	VPAND   ·polyClampMask<>(SB), DD0, DD0
+	VMOVDQA DD0, rsStoreAVX2
+
+	// Hash AD
+	MOVQ ad_len+80(FP), itr2
+	CALL polyHashADInternal<>(SB)
+
+	// Can store at least 320 bytes
+	VPXOR   (0*32)(inp), AA0, AA0
+	VPXOR   (1*32)(inp), CC0, CC0
+	VMOVDQU AA0, (0*32)(oup)
+	VMOVDQU CC0, (1*32)(oup)
+
+	VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
+	VPXOR      (2*32)(inp), AA0, AA0; VPXOR (3*32)(inp), BB0, BB0; VPXOR (4*32)(inp), CC0, CC0; VPXOR (5*32)(inp), DD0, DD0
+	VMOVDQU    AA0, (2*32)(oup); VMOVDQU BB0, (3*32)(oup); VMOVDQU CC0, (4*32)(oup); VMOVDQU DD0, (5*32)(oup)
+	VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0
+	VPXOR      (6*32)(inp), AA0, AA0; VPXOR (7*32)(inp), BB0, BB0; VPXOR (8*32)(inp), CC0, CC0; VPXOR (9*32)(inp), DD0, DD0
+	VMOVDQU    AA0, (6*32)(oup); VMOVDQU BB0, (7*32)(oup); VMOVDQU CC0, (8*32)(oup); VMOVDQU DD0, (9*32)(oup)
+
+	MOVQ $320, itr1
+	SUBQ $320, inl
+	LEAQ 320(inp), inp
+
+	VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, CC3, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, CC3, DD3, DD0
+	CMPQ       inl, $128
+	JBE        sealAVX2SealHash
+
+	VPXOR   (0*32)(inp), AA0, AA0; VPXOR (1*32)(inp), BB0, BB0; VPXOR (2*32)(inp), CC0, CC0; VPXOR (3*32)(inp), DD0, DD0
+	VMOVDQU AA0, (10*32)(oup); VMOVDQU BB0, (11*32)(oup); VMOVDQU CC0, (12*32)(oup); VMOVDQU DD0, (13*32)(oup)
+	SUBQ    $128, inl
+	LEAQ    128(inp), inp
+
+	MOVQ $8, itr1
+	MOVQ $2, itr2
+
+	CMPQ inl, $128
+	JBE  sealAVX2Tail128
+	CMPQ inl, $256
+	JBE  sealAVX2Tail256
+	CMPQ inl, $384
+	JBE  sealAVX2Tail384
+	CMPQ inl, $512
+	JBE  sealAVX2Tail512
+
+	// We have 448 bytes to hash, but the main loop hashes 512 bytes at a time - perform some rounds before entering the main loop
+	VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3
+	VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3
+	VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3
+	VMOVDQA ctr3StoreAVX2, DD0
+	VPADDD  ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3
+	VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2
+
+	VMOVDQA CC3, tmpStoreAVX2
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3)
+	VMOVDQA tmpStoreAVX2, CC3
+	VMOVDQA CC1, tmpStoreAVX2
+	chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1)
+	VMOVDQA tmpStoreAVX2, CC1
+
+	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $12, DD0, DD0, DD0
+	VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $12, DD1, DD1, DD1
+	VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $12, DD2, DD2, DD2
+	VPALIGNR $4, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $12, DD3, DD3, DD3
+
+	VMOVDQA CC3, tmpStoreAVX2
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, CC3); chachaQR_AVX2(AA1, BB1, CC1, DD1, CC3); chachaQR_AVX2(AA2, BB2, CC2, DD2, CC3)
+	VMOVDQA tmpStoreAVX2, CC3
+	VMOVDQA CC1, tmpStoreAVX2
+	chachaQR_AVX2(AA3, BB3, CC3, DD3, CC1)
+	VMOVDQA tmpStoreAVX2, CC1
+
+	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $4, DD0, DD0, DD0
+	VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $4, DD1, DD1, DD1
+	VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $4, DD2, DD2, DD2
+	VPALIGNR $12, BB3, BB3, BB3; VPALIGNR $8, CC3, CC3, CC3; VPALIGNR $4, DD3, DD3, DD3
+	VPADDD   BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+	VPXOR    AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+	VPSHUFB  ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+	VPADDD   DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+	VPXOR    CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+	VMOVDQA  CC3, tmpStoreAVX2
+	VPSLLD   $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+	VPSLLD   $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+	VPSLLD   $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+	VPSLLD   $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+	VMOVDQA  tmpStoreAVX2, CC3
+
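+	// The rounds above perform the first half of the first double round,
+	// so enter the internal loop at the matching point with itr1 = 9; oup
+	// is moved back 16 bytes so the polyAdd(2*8(oup)) at the entry point
+	// hashes from offset 0.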
+	SUBQ $16, oup                  // Adjust the pointer
+	MOVQ $9, itr1
+	JMP  sealAVX2InternalLoopStart
+
+sealAVX2MainLoop:
+	// Load state, increment counter blocks, store the incremented counters
+	VMOVDQU ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3
+	VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3
+	VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3
+	VMOVDQA ctr3StoreAVX2, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3
+	VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2
+	MOVQ    $10, itr1
+
+sealAVX2InternalLoop:
+	polyAdd(0*8(oup))
+	VPADDD  BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+	polyMulStage1_AVX2
+	VPXOR   AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+	VPSHUFB ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+	polyMulStage2_AVX2
+	VPADDD  DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+	VPXOR   CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+	polyMulStage3_AVX2
+	VMOVDQA CC3, tmpStoreAVX2
+	VPSLLD  $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+	VPSLLD  $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+	VPSLLD  $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+	VPSLLD  $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+	VMOVDQA tmpStoreAVX2, CC3
+	polyMulReduceStage
+
+sealAVX2InternalLoopStart:
+	VPADDD   BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+	VPXOR    AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+	VPSHUFB  ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
+	polyAdd(2*8(oup))
+	VPADDD   DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+	polyMulStage1_AVX2
+	VPXOR    CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+	VMOVDQA  CC3, tmpStoreAVX2
+	VPSLLD   $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
+	VPSLLD   $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
+	VPSLLD   $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
+	VPSLLD   $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
+	VMOVDQA  tmpStoreAVX2, CC3
+	polyMulStage2_AVX2
+	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
+	VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3
+	VPADDD   BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+	polyMulStage3_AVX2
+	VPXOR    AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+	VPSHUFB  ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+	polyMulReduceStage
+	VPADDD   DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+	VPXOR    CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+	polyAdd(4*8(oup))
+	LEAQ     (6*8)(oup), oup
+	VMOVDQA  CC3, tmpStoreAVX2
+	VPSLLD   $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+	VPSLLD   $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+	VPSLLD   $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+	VPSLLD   $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+	VMOVDQA  tmpStoreAVX2, CC3
+	polyMulStage1_AVX2
+	VPADDD   BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+	VPXOR    AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+	polyMulStage2_AVX2
+	VPSHUFB  ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
+	VPADDD   DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+	polyMulStage3_AVX2
+	VPXOR    CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+	VMOVDQA  CC3, tmpStoreAVX2
+	VPSLLD   $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
+	VPSLLD   $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
+	VPSLLD   $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
+	VPSLLD   $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
+	VMOVDQA  tmpStoreAVX2, CC3
+	polyMulReduceStage
+	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
+	VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3
+	DECQ     itr1
+	JNE      sealAVX2InternalLoop
+
+	VPADDD  ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3
+	VPADDD  state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3
+	VPADDD  state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3
+	VPADDD  ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3
+	VMOVDQA CC3, tmpStoreAVX2
+
+	// We only hashed 480 of the 512 bytes available - hash the remaining 32 here
+	polyAdd(0*8(oup))
+	polyMulAVX2
+	LEAQ       (4*8)(oup), oup
+	VPERM2I128 $0x02, AA0, BB0, CC3; VPERM2I128 $0x13, AA0, BB0, BB0; VPERM2I128 $0x02, CC0, DD0, AA0; VPERM2I128 $0x13, CC0, DD0, CC0
+	VPXOR      (0*32)(inp), CC3, CC3; VPXOR (1*32)(inp), AA0, AA0; VPXOR (2*32)(inp), BB0, BB0; VPXOR (3*32)(inp), CC0, CC0
+	VMOVDQU    CC3, (0*32)(oup); VMOVDQU AA0, (1*32)(oup); VMOVDQU BB0, (2*32)(oup); VMOVDQU CC0, (3*32)(oup)
+	VPERM2I128 $0x02, AA1, BB1, AA0; VPERM2I128 $0x02, CC1, DD1, BB0; VPERM2I128 $0x13, AA1, BB1, CC0; VPERM2I128 $0x13, CC1, DD1, DD0
+	VPXOR      (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0
+	VMOVDQU    AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup)
+
+	// and here
+	polyAdd(-2*8(oup))
+	polyMulAVX2
+	VPERM2I128 $0x02, AA2, BB2, AA0; VPERM2I128 $0x02, CC2, DD2, BB0; VPERM2I128 $0x13, AA2, BB2, CC0; VPERM2I128 $0x13, CC2, DD2, DD0
+	VPXOR      (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0
+	VMOVDQU    AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup)
+	VPERM2I128 $0x02, AA3, BB3, AA0; VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0; VPERM2I128 $0x13, AA3, BB3, CC0; VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0
+	VPXOR      (12*32)(inp), AA0, AA0; VPXOR (13*32)(inp), BB0, BB0; VPXOR (14*32)(inp), CC0, CC0; VPXOR (15*32)(inp), DD0, DD0
+	VMOVDQU    AA0, (12*32)(oup); VMOVDQU BB0, (13*32)(oup); VMOVDQU CC0, (14*32)(oup); VMOVDQU DD0, (15*32)(oup)
+	LEAQ       (32*16)(inp), inp
+	SUBQ       $(32*16), inl
+	CMPQ       inl, $512
+	JG         sealAVX2MainLoop
+
+	// The tail loops can hash at most 480 bytes - hash the remaining 32 of the 512 here
+	polyAdd(0*8(oup))
+	polyMulAVX2
+	polyAdd(2*8(oup))
+	polyMulAVX2
+	LEAQ 32(oup), oup
+
+	MOVQ $10, itr1
+	MOVQ $0, itr2
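+	// Coming from the main loop there are 512 ciphertext bytes pending: 32
+	// were hashed just above, and with itr1 = 10, itr2 = 0 the tail loops
+	// hash the remaining 480. The direct entries use itr1 = 8, itr2 = 2
+	// for their 448 pending bytes.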
+	CMPQ inl, $128
+	JBE  sealAVX2Tail128
+	CMPQ inl, $256
+	JBE  sealAVX2Tail256
+	CMPQ inl, $384
+	JBE  sealAVX2Tail384
+	JMP  sealAVX2Tail512
+
+// ----------------------------------------------------------------------------
+// Special optimization for buffers smaller than 193 bytes
+seal192AVX2:
+	// For up to 192 bytes of ciphertext and 64 bytes for the poly key, we process four blocks
+	VMOVDQA AA0, AA1
+	VMOVDQA BB0, BB1
+	VMOVDQA CC0, CC1
+	VPADDD  ·avx2IncMask<>(SB), DD0, DD1
+	VMOVDQA AA0, AA2
+	VMOVDQA BB0, BB2
+	VMOVDQA CC0, CC2
+	VMOVDQA DD0, DD2
+	VMOVDQA DD1, TT3
+	MOVQ    $10, itr2
+
+sealAVX2192InnerCipherLoop:
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+	VPALIGNR   $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1
+	VPALIGNR   $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
+	VPALIGNR   $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+	VPALIGNR   $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1
+	VPALIGNR   $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
+	VPALIGNR   $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1
+	DECQ       itr2
+	JNE        sealAVX2192InnerCipherLoop
+	VPADDD     AA2, AA0, AA0; VPADDD AA2, AA1, AA1
+	VPADDD     BB2, BB0, BB0; VPADDD BB2, BB1, BB1
+	VPADDD     CC2, CC0, CC0; VPADDD CC2, CC1, CC1
+	VPADDD     DD2, DD0, DD0; VPADDD TT3, DD1, DD1
+	VPERM2I128 $0x02, AA0, BB0, TT0
+
+	// Clamp and store poly key
+	VPAND   ·polyClampMask<>(SB), TT0, TT0
+	VMOVDQA TT0, rsStoreAVX2
+
+	// Stream for up to 192 bytes
+	VPERM2I128 $0x13, AA0, BB0, AA0
+	VPERM2I128 $0x13, CC0, DD0, BB0
+	VPERM2I128 $0x02, AA1, BB1, CC0
+	VPERM2I128 $0x02, CC1, DD1, DD0
+	VPERM2I128 $0x13, AA1, BB1, AA1
+	VPERM2I128 $0x13, CC1, DD1, BB1
+
+sealAVX2ShortSeal:
+	// Hash AAD
+	MOVQ ad_len+80(FP), itr2
+	CALL polyHashADInternal<>(SB)
+	XORQ itr1, itr1
+
+sealAVX2SealHash:
+	// itr1 holds the number of bytes encrypted but not yet hashed
+	CMPQ itr1, $16
+	JB   sealAVX2ShortSealLoop
+	polyAdd(0(oup))
+	polyMul
+	SUBQ $16, itr1
+	ADDQ $16, oup
+	JMP  sealAVX2SealHash
+
+sealAVX2ShortSealLoop:
+	CMPQ inl, $32
+	JB   sealAVX2ShortTail32
+	SUBQ $32, inl
+
+	// Load for encryption
+	VPXOR   (inp), AA0, AA0
+	VMOVDQU AA0, (oup)
+	LEAQ    (1*32)(inp), inp
+
+	// Now we can hash
+	polyAdd(0*8(oup))
+	polyMulAVX2
+	polyAdd(2*8(oup))
+	polyMulAVX2
+	LEAQ (1*32)(oup), oup
+
+	// Shift stream left
+	VMOVDQA BB0, AA0
+	VMOVDQA CC0, BB0
+	VMOVDQA DD0, CC0
+	VMOVDQA AA1, DD0
+	VMOVDQA BB1, AA1
+	VMOVDQA CC1, BB1
+	VMOVDQA DD1, CC1
+	VMOVDQA AA2, DD1
+	VMOVDQA BB2, AA2
+	JMP     sealAVX2ShortSealLoop
+
+sealAVX2ShortTail32:
+	CMPQ    inl, $16
+	VMOVDQA A0, A1
+	JB      sealAVX2ShortDone
+
+	SUBQ $16, inl
+
+	// Load for encryption
+	VPXOR   (inp), A0, T0
+	VMOVDQU T0, (oup)
+	LEAQ    (1*16)(inp), inp
+
+	// Hash
+	polyAdd(0*8(oup))
+	polyMulAVX2
+	LEAQ       (1*16)(oup), oup
+	VPERM2I128 $0x11, AA0, AA0, AA0
+	VMOVDQA    A0, A1
+
+sealAVX2ShortDone:
+	VZEROUPPER
+	JMP sealSSETail
+
+// ----------------------------------------------------------------------------
+// Special optimization for buffers smaller than 321 bytes
+seal320AVX2:
+	// For up to 320 bytes of ciphertext and 64 bytes for the poly key, we process six blocks
+	VMOVDQA AA0, AA1; VMOVDQA BB0, BB1; VMOVDQA CC0, CC1; VPADDD ·avx2IncMask<>(SB), DD0, DD1
+	VMOVDQA AA0, AA2; VMOVDQA BB0, BB2; VMOVDQA CC0, CC2; VPADDD ·avx2IncMask<>(SB), DD1, DD2
+	VMOVDQA BB0, TT1; VMOVDQA CC0, TT2; VMOVDQA DD0, TT3
+	MOVQ    $10, itr2
+
+sealAVX2320InnerCipherLoop:
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
+	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
+	VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
+	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
+	VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2
+	DECQ     itr2
+	JNE      sealAVX2320InnerCipherLoop
+
+	VMOVDQA ·chacha20Constants<>(SB), TT0
+	VPADDD  TT0, AA0, AA0; VPADDD TT0, AA1, AA1; VPADDD TT0, AA2, AA2
+	VPADDD  TT1, BB0, BB0; VPADDD TT1, BB1, BB1; VPADDD TT1, BB2, BB2
+	VPADDD  TT2, CC0, CC0; VPADDD TT2, CC1, CC1; VPADDD TT2, CC2, CC2
+	VMOVDQA ·avx2IncMask<>(SB), TT0
+	VPADDD  TT3, DD0, DD0; VPADDD TT0, TT3, TT3
+	VPADDD  TT3, DD1, DD1; VPADDD TT0, TT3, TT3
+	VPADDD  TT3, DD2, DD2
+
+	// Clamp and store poly key
+	VPERM2I128 $0x02, AA0, BB0, TT0
+	VPAND      ·polyClampMask<>(SB), TT0, TT0
+	VMOVDQA    TT0, rsStoreAVX2
+
+	// Stream for up to 320 bytes
+	VPERM2I128 $0x13, AA0, BB0, AA0
+	VPERM2I128 $0x13, CC0, DD0, BB0
+	VPERM2I128 $0x02, AA1, BB1, CC0
+	VPERM2I128 $0x02, CC1, DD1, DD0
+	VPERM2I128 $0x13, AA1, BB1, AA1
+	VPERM2I128 $0x13, CC1, DD1, BB1
+	VPERM2I128 $0x02, AA2, BB2, CC1
+	VPERM2I128 $0x02, CC2, DD2, DD1
+	VPERM2I128 $0x13, AA2, BB2, AA2
+	VPERM2I128 $0x13, CC2, DD2, BB2
+	JMP        sealAVX2ShortSeal
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 128 bytes of plaintext
+sealAVX2Tail128:
+	// Need to encrypt up to 128 bytes - prepare two blocks
+	// If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed
+	// If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed
+	VMOVDQA ·chacha20Constants<>(SB), AA0
+	VMOVDQA state1StoreAVX2, BB0
+	VMOVDQA state2StoreAVX2, CC0
+	VMOVDQA ctr3StoreAVX2, DD0
+	VPADDD  ·avx2IncMask<>(SB), DD0, DD0
+	VMOVDQA DD0, DD1
+
+sealAVX2Tail128LoopA:
+	polyAdd(0(oup))
+	polyMul
+	LEAQ 16(oup), oup
+
+sealAVX2Tail128LoopB:
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0)
+	polyAdd(0(oup))
+	polyMul
+	VPALIGNR $4, BB0, BB0, BB0
+	VPALIGNR $8, CC0, CC0, CC0
+	VPALIGNR $12, DD0, DD0, DD0
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0)
+	polyAdd(16(oup))
+	polyMul
+	LEAQ     32(oup), oup
+	VPALIGNR $12, BB0, BB0, BB0
+	VPALIGNR $8, CC0, CC0, CC0
+	VPALIGNR $4, DD0, DD0, DD0
+	DECQ     itr1
+	JG       sealAVX2Tail128LoopA
+	DECQ     itr2
+	JGE      sealAVX2Tail128LoopB
+
+	VPADDD ·chacha20Constants<>(SB), AA0, AA1
+	VPADDD state1StoreAVX2, BB0, BB1
+	VPADDD state2StoreAVX2, CC0, CC1
+	VPADDD DD1, DD0, DD1
+
+	VPERM2I128 $0x02, AA1, BB1, AA0
+	VPERM2I128 $0x02, CC1, DD1, BB0
+	VPERM2I128 $0x13, AA1, BB1, CC0
+	VPERM2I128 $0x13, CC1, DD1, DD0
+	JMP        sealAVX2ShortSealLoop
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 256 bytes of plaintext
+sealAVX2Tail256:
+	// Need to encrypt up to 256 bytes - prepare four blocks
+	// If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed
+	// If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed
+	VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA ·chacha20Constants<>(SB), AA1
+	VMOVDQA state1StoreAVX2, BB0; VMOVDQA state1StoreAVX2, BB1
+	VMOVDQA state2StoreAVX2, CC0; VMOVDQA state2StoreAVX2, CC1
+	VMOVDQA ctr3StoreAVX2, DD0
+	VPADDD  ·avx2IncMask<>(SB), DD0, DD0
+	VPADDD  ·avx2IncMask<>(SB), DD0, DD1
+	VMOVDQA DD0, TT1
+	VMOVDQA DD1, TT2
+
+sealAVX2Tail256LoopA:
+	polyAdd(0(oup))
+	polyMul
+	LEAQ 16(oup), oup
+
+sealAVX2Tail256LoopB:
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+	polyAdd(0(oup))
+	polyMul
+	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
+	VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0)
+	polyAdd(16(oup))
+	polyMul
+	LEAQ     32(oup), oup
+	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1
+	VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1
+	DECQ     itr1
+	JG       sealAVX2Tail256LoopA
+	DECQ     itr2
+	JGE      sealAVX2Tail256LoopB
+
+	VPADDD     ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1
+	VPADDD     state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1
+	VPADDD     state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1
+	VPADDD     TT1, DD0, DD0; VPADDD TT2, DD1, DD1
+	VPERM2I128 $0x02, AA0, BB0, TT0
+	VPERM2I128 $0x02, CC0, DD0, TT1
+	VPERM2I128 $0x13, AA0, BB0, TT2
+	VPERM2I128 $0x13, CC0, DD0, TT3
+	VPXOR      (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3
+	VMOVDQU    TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup)
+	MOVQ       $128, itr1
+	LEAQ       128(inp), inp
+	SUBQ       $128, inl
+	VPERM2I128 $0x02, AA1, BB1, AA0
+	VPERM2I128 $0x02, CC1, DD1, BB0
+	VPERM2I128 $0x13, AA1, BB1, CC0
+	VPERM2I128 $0x13, CC1, DD1, DD0
+
+	JMP sealAVX2SealHash
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 384 bytes of plaintext
+sealAVX2Tail384:
+	// Need to encrypt up to 384 bytes - prepare six blocks
+	// If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed
+	// If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed
+	VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2
+	VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2
+	VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2
+	VMOVDQA ctr3StoreAVX2, DD0
+	VPADDD  ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2
+	VMOVDQA DD0, TT1; VMOVDQA DD1, TT2; VMOVDQA DD2, TT3
+
+sealAVX2Tail384LoopA:
+	polyAdd(0(oup))
+	polyMul
+	LEAQ 16(oup), oup
+
+sealAVX2Tail384LoopB:
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
+	polyAdd(0(oup))
+	polyMul
+	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
+	VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2
+	chachaQR_AVX2(AA0, BB0, CC0, DD0, TT0); chachaQR_AVX2(AA1, BB1, CC1, DD1, TT0); chachaQR_AVX2(AA2, BB2, CC2, DD2, TT0)
+	polyAdd(16(oup))
+	polyMul
+	LEAQ     32(oup), oup
+	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2
+	VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2
+	DECQ     itr1
+	JG       sealAVX2Tail384LoopA
+	DECQ     itr2
+	JGE      sealAVX2Tail384LoopB
+
+	VPADDD     ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2
+	VPADDD     state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2
+	VPADDD     state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2
+	VPADDD     TT1, DD0, DD0; VPADDD TT2, DD1, DD1; VPADDD TT3, DD2, DD2
+	VPERM2I128 $0x02, AA0, BB0, TT0
+	VPERM2I128 $0x02, CC0, DD0, TT1
+	VPERM2I128 $0x13, AA0, BB0, TT2
+	VPERM2I128 $0x13, CC0, DD0, TT3
+	VPXOR      (0*32)(inp), TT0, TT0; VPXOR (1*32)(inp), TT1, TT1; VPXOR (2*32)(inp), TT2, TT2; VPXOR (3*32)(inp), TT3, TT3
+	VMOVDQU    TT0, (0*32)(oup); VMOVDQU TT1, (1*32)(oup); VMOVDQU TT2, (2*32)(oup); VMOVDQU TT3, (3*32)(oup)
+	VPERM2I128 $0x02, AA1, BB1, TT0
+	VPERM2I128 $0x02, CC1, DD1, TT1
+	VPERM2I128 $0x13, AA1, BB1, TT2
+	VPERM2I128 $0x13, CC1, DD1, TT3
+	VPXOR      (4*32)(inp), TT0, TT0; VPXOR (5*32)(inp), TT1, TT1; VPXOR (6*32)(inp), TT2, TT2; VPXOR (7*32)(inp), TT3, TT3
+	VMOVDQU    TT0, (4*32)(oup); VMOVDQU TT1, (5*32)(oup); VMOVDQU TT2, (6*32)(oup); VMOVDQU TT3, (7*32)(oup)
+	MOVQ       $256, itr1
+	LEAQ       256(inp), inp
+	SUBQ       $256, inl
+	VPERM2I128 $0x02, AA2, BB2, AA0
+	VPERM2I128 $0x02, CC2, DD2, BB0
+	VPERM2I128 $0x13, AA2, BB2, CC0
+	VPERM2I128 $0x13, CC2, DD2, DD0
+
+	JMP sealAVX2SealHash
+
+// ----------------------------------------------------------------------------
+// Special optimization for the last 512 bytes of plaintext
+sealAVX2Tail512:
+	// Need to encrypt up to 512 bytes - prepare eight blocks
+	// If we got here after the main loop - there are 512 encrypted bytes waiting to be hashed
+	// If we got here before the main loop - there are 448 encrypted bytes waiting to be hashed
+	VMOVDQA ·chacha20Constants<>(SB), AA0; VMOVDQA AA0, AA1; VMOVDQA AA0, AA2; VMOVDQA AA0, AA3
+	VMOVDQA state1StoreAVX2, BB0; VMOVDQA BB0, BB1; VMOVDQA BB0, BB2; VMOVDQA BB0, BB3
+	VMOVDQA state2StoreAVX2, CC0; VMOVDQA CC0, CC1; VMOVDQA CC0, CC2; VMOVDQA CC0, CC3
+	VMOVDQA ctr3StoreAVX2, DD0
+	VPADDD  ·avx2IncMask<>(SB), DD0, DD0; VPADDD ·avx2IncMask<>(SB), DD0, DD1; VPADDD ·avx2IncMask<>(SB), DD1, DD2; VPADDD ·avx2IncMask<>(SB), DD2, DD3
+	VMOVDQA DD0, ctr0StoreAVX2; VMOVDQA DD1, ctr1StoreAVX2; VMOVDQA DD2, ctr2StoreAVX2; VMOVDQA DD3, ctr3StoreAVX2
+
+sealAVX2Tail512LoopA:
+	polyAdd(0(oup))
+	polyMul
+	LEAQ 16(oup), oup
+
+sealAVX2Tail512LoopB:
+	VPADDD   BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+	VPXOR    AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+	VPSHUFB  ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+	VPADDD   DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+	VPXOR    CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+	VMOVDQA  CC3, tmpStoreAVX2
+	VPSLLD   $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+	VPSLLD   $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+	VPSLLD   $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+	VPSLLD   $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+	VMOVDQA  tmpStoreAVX2, CC3
+	polyAdd(0*8(oup))
+	polyMulAVX2
+	VPADDD   BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+	VPXOR    AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+	VPSHUFB  ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
+	VPADDD   DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+	VPXOR    CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+	VMOVDQA  CC3, tmpStoreAVX2
+	VPSLLD   $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
+	VPSLLD   $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
+	VPSLLD   $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
+	VPSLLD   $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
+	VMOVDQA  tmpStoreAVX2, CC3
+	VPALIGNR $4, BB0, BB0, BB0; VPALIGNR $4, BB1, BB1, BB1; VPALIGNR $4, BB2, BB2, BB2; VPALIGNR $4, BB3, BB3, BB3
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
+	VPALIGNR $12, DD0, DD0, DD0; VPALIGNR $12, DD1, DD1, DD1; VPALIGNR $12, DD2, DD2, DD2; VPALIGNR $12, DD3, DD3, DD3
+	VPADDD   BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+	VPXOR    AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+	VPSHUFB  ·rol16<>(SB), DD0, DD0; VPSHUFB ·rol16<>(SB), DD1, DD1; VPSHUFB ·rol16<>(SB), DD2, DD2; VPSHUFB ·rol16<>(SB), DD3, DD3
+	VPADDD   DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+	VPXOR    CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+	polyAdd(2*8(oup))
+	polyMulAVX2
+	LEAQ     (4*8)(oup), oup
+	VMOVDQA  CC3, tmpStoreAVX2
+	VPSLLD   $12, BB0, CC3; VPSRLD $20, BB0, BB0; VPXOR CC3, BB0, BB0
+	VPSLLD   $12, BB1, CC3; VPSRLD $20, BB1, BB1; VPXOR CC3, BB1, BB1
+	VPSLLD   $12, BB2, CC3; VPSRLD $20, BB2, BB2; VPXOR CC3, BB2, BB2
+	VPSLLD   $12, BB3, CC3; VPSRLD $20, BB3, BB3; VPXOR CC3, BB3, BB3
+	VMOVDQA  tmpStoreAVX2, CC3
+	VPADDD   BB0, AA0, AA0; VPADDD BB1, AA1, AA1; VPADDD BB2, AA2, AA2; VPADDD BB3, AA3, AA3
+	VPXOR    AA0, DD0, DD0; VPXOR AA1, DD1, DD1; VPXOR AA2, DD2, DD2; VPXOR AA3, DD3, DD3
+	VPSHUFB  ·rol8<>(SB), DD0, DD0; VPSHUFB ·rol8<>(SB), DD1, DD1; VPSHUFB ·rol8<>(SB), DD2, DD2; VPSHUFB ·rol8<>(SB), DD3, DD3
+	VPADDD   DD0, CC0, CC0; VPADDD DD1, CC1, CC1; VPADDD DD2, CC2, CC2; VPADDD DD3, CC3, CC3
+	VPXOR    CC0, BB0, BB0; VPXOR CC1, BB1, BB1; VPXOR CC2, BB2, BB2; VPXOR CC3, BB3, BB3
+	VMOVDQA  CC3, tmpStoreAVX2
+	VPSLLD   $7, BB0, CC3; VPSRLD $25, BB0, BB0; VPXOR CC3, BB0, BB0
+	VPSLLD   $7, BB1, CC3; VPSRLD $25, BB1, BB1; VPXOR CC3, BB1, BB1
+	VPSLLD   $7, BB2, CC3; VPSRLD $25, BB2, BB2; VPXOR CC3, BB2, BB2
+	VPSLLD   $7, BB3, CC3; VPSRLD $25, BB3, BB3; VPXOR CC3, BB3, BB3
+	VMOVDQA  tmpStoreAVX2, CC3
+	VPALIGNR $12, BB0, BB0, BB0; VPALIGNR $12, BB1, BB1, BB1; VPALIGNR $12, BB2, BB2, BB2; VPALIGNR $12, BB3, BB3, BB3
+	VPALIGNR $8, CC0, CC0, CC0; VPALIGNR $8, CC1, CC1, CC1; VPALIGNR $8, CC2, CC2, CC2; VPALIGNR $8, CC3, CC3, CC3
+	VPALIGNR $4, DD0, DD0, DD0; VPALIGNR $4, DD1, DD1, DD1; VPALIGNR $4, DD2, DD2, DD2; VPALIGNR $4, DD3, DD3, DD3
+
+	DECQ itr1
+	JG   sealAVX2Tail512LoopA
+	DECQ itr2
+	JGE  sealAVX2Tail512LoopB
+
+	VPADDD     ·chacha20Constants<>(SB), AA0, AA0; VPADDD ·chacha20Constants<>(SB), AA1, AA1; VPADDD ·chacha20Constants<>(SB), AA2, AA2; VPADDD ·chacha20Constants<>(SB), AA3, AA3
+	VPADDD     state1StoreAVX2, BB0, BB0; VPADDD state1StoreAVX2, BB1, BB1; VPADDD state1StoreAVX2, BB2, BB2; VPADDD state1StoreAVX2, BB3, BB3
+	VPADDD     state2StoreAVX2, CC0, CC0; VPADDD state2StoreAVX2, CC1, CC1; VPADDD state2StoreAVX2, CC2, CC2; VPADDD state2StoreAVX2, CC3, CC3
+	VPADDD     ctr0StoreAVX2, DD0, DD0; VPADDD ctr1StoreAVX2, DD1, DD1; VPADDD ctr2StoreAVX2, DD2, DD2; VPADDD ctr3StoreAVX2, DD3, DD3
+	VMOVDQA    CC3, tmpStoreAVX2
+	VPERM2I128 $0x02, AA0, BB0, CC3
+	VPXOR      (0*32)(inp), CC3, CC3
+	VMOVDQU    CC3, (0*32)(oup)
+	VPERM2I128 $0x02, CC0, DD0, CC3
+	VPXOR      (1*32)(inp), CC3, CC3
+	VMOVDQU    CC3, (1*32)(oup)
+	VPERM2I128 $0x13, AA0, BB0, CC3
+	VPXOR      (2*32)(inp), CC3, CC3
+	VMOVDQU    CC3, (2*32)(oup)
+	VPERM2I128 $0x13, CC0, DD0, CC3
+	VPXOR      (3*32)(inp), CC3, CC3
+	VMOVDQU    CC3, (3*32)(oup)
+
+	VPERM2I128 $0x02, AA1, BB1, AA0
+	VPERM2I128 $0x02, CC1, DD1, BB0
+	VPERM2I128 $0x13, AA1, BB1, CC0
+	VPERM2I128 $0x13, CC1, DD1, DD0
+	VPXOR      (4*32)(inp), AA0, AA0; VPXOR (5*32)(inp), BB0, BB0; VPXOR (6*32)(inp), CC0, CC0; VPXOR (7*32)(inp), DD0, DD0
+	VMOVDQU    AA0, (4*32)(oup); VMOVDQU BB0, (5*32)(oup); VMOVDQU CC0, (6*32)(oup); VMOVDQU DD0, (7*32)(oup)
+
+	VPERM2I128 $0x02, AA2, BB2, AA0
+	VPERM2I128 $0x02, CC2, DD2, BB0
+	VPERM2I128 $0x13, AA2, BB2, CC0
+	VPERM2I128 $0x13, CC2, DD2, DD0
+	VPXOR      (8*32)(inp), AA0, AA0; VPXOR (9*32)(inp), BB0, BB0; VPXOR (10*32)(inp), CC0, CC0; VPXOR (11*32)(inp), DD0, DD0
+	VMOVDQU    AA0, (8*32)(oup); VMOVDQU BB0, (9*32)(oup); VMOVDQU CC0, (10*32)(oup); VMOVDQU DD0, (11*32)(oup)
+
+	MOVQ       $384, itr1
+	LEAQ       384(inp), inp
+	SUBQ       $384, inl
+	VPERM2I128 $0x02, AA3, BB3, AA0
+	VPERM2I128 $0x02, tmpStoreAVX2, DD3, BB0
+	VPERM2I128 $0x13, AA3, BB3, CC0
+	VPERM2I128 $0x13, tmpStoreAVX2, DD3, DD0
+
+	JMP sealAVX2SealHash
+
+// func haveSSSE3() bool
+TEXT ·haveSSSE3(SB), NOSPLIT, $0
+	XORQ AX, AX
+	INCL AX            // CPUID leaf 1: processor info and feature bits
+	CPUID
+	SHRQ $9, CX        // SSSE3 support is reported in bit 9 of ECX
+	ANDQ $1, CX
+	MOVB CX, ret+0(FP)
+	RET
+

+ 70 - 0
psiphon/common/tls/crypto/chacha20poly1305/chacha20poly1305_generic.go

@@ -0,0 +1,70 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package chacha20poly1305
+
+import (
+	"encoding/binary"
+
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tls/crypto/chacha20poly1305/internal/chacha20"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tls/crypto/poly1305"
+)
+
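+// roundTo16 rounds n up to the next multiple of 16, the Poly1305 block size
+// to which the additional data and ciphertext are zero-padded in the MAC input.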
+func roundTo16(n int) int {
+	return 16 * ((n + 15) / 16)
+}
+
+func (c *chacha20poly1305) sealGeneric(dst, nonce, plaintext, additionalData []byte) []byte {
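+	// The 16-byte ChaCha20 counter is a 4-byte little-endian block counter
+	// followed by the 12-byte nonce; block 0 produces the Poly1305 key.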
+	var counter [16]byte
+	copy(counter[4:], nonce)
+
+	var polyKey [32]byte
+	chacha20.XORKeyStream(polyKey[:], polyKey[:], &counter, &c.key)
+
+	ret, out := sliceForAppend(dst, len(plaintext)+poly1305.TagSize)
+	counter[0] = 1
+	chacha20.XORKeyStream(out, plaintext, &counter, &c.key)
+
+	polyInput := make([]byte, roundTo16(len(additionalData))+roundTo16(len(plaintext))+8+8)
+	copy(polyInput, additionalData)
+	copy(polyInput[roundTo16(len(additionalData)):], out[:len(plaintext)])
+	binary.LittleEndian.PutUint64(polyInput[len(polyInput)-16:], uint64(len(additionalData)))
+	binary.LittleEndian.PutUint64(polyInput[len(polyInput)-8:], uint64(len(plaintext)))
+
+	var tag [poly1305.TagSize]byte
+	poly1305.Sum(&tag, polyInput, &polyKey)
+	copy(out[len(plaintext):], tag[:])
+
+	return ret
+}
+
+func (c *chacha20poly1305) openGeneric(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
+	var tag [poly1305.TagSize]byte
+	copy(tag[:], ciphertext[len(ciphertext)-16:])
+	ciphertext = ciphertext[:len(ciphertext)-16]
+
+	var counter [16]byte
+	copy(counter[4:], nonce)
+
+	var polyKey [32]byte
+	chacha20.XORKeyStream(polyKey[:], polyKey[:], &counter, &c.key)
+
+	polyInput := make([]byte, roundTo16(len(additionalData))+roundTo16(len(ciphertext))+8+8)
+	copy(polyInput, additionalData)
+	copy(polyInput[roundTo16(len(additionalData)):], ciphertext)
+	binary.LittleEndian.PutUint64(polyInput[len(polyInput)-16:], uint64(len(additionalData)))
+	binary.LittleEndian.PutUint64(polyInput[len(polyInput)-8:], uint64(len(ciphertext)))
+
+	ret, out := sliceForAppend(dst, len(ciphertext))
+	if !poly1305.Verify(&tag, polyInput, &polyKey) {
+		for i := range out {
+			out[i] = 0
+		}
+		return nil, errOpen
+	}
+
+	counter[0] = 1
+	chacha20.XORKeyStream(out, ciphertext, &counter, &c.key)
+	return ret, nil
+}
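
These two functions spell out the RFC 7539 AEAD construction: the Poly1305 key is taken from the first ChaCha20 block, payload encryption starts at block counter 1, and the tag covers the additional data and the ciphertext, each zero-padded to a 16-byte boundary, followed by their lengths as little-endian 64-bit integers. A minimal usage sketch of the package, assuming the vendored import path from this commit (error handling elided):

	package main

	import (
		"crypto/rand"
		"fmt"

		"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tls/crypto/chacha20poly1305"
	)

	func main() {
		key := make([]byte, chacha20poly1305.KeySize)     // 32 bytes
		nonce := make([]byte, chacha20poly1305.NonceSize) // 12 bytes
		rand.Read(key)
		rand.Read(nonce)

		aead, _ := chacha20poly1305.New(key)

		// Seal appends the ciphertext and a 16-byte Poly1305 tag to its first argument.
		ct := aead.Seal(nil, nonce, []byte("attack at dawn"), []byte("header"))

		// Open verifies the tag before returning any plaintext.
		pt, err := aead.Open(nil, nonce, ct, []byte("header"))
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s\n", pt)
	}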

+ 15 - 0
psiphon/common/tls/crypto/chacha20poly1305/chacha20poly1305_noasm.go

@@ -0,0 +1,15 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 !go1.7 gccgo appengine
+
+package chacha20poly1305
+
+func (c *chacha20poly1305) seal(dst, nonce, plaintext, additionalData []byte) []byte {
+	return c.sealGeneric(dst, nonce, plaintext, additionalData)
+}
+
+func (c *chacha20poly1305) open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error) {
+	return c.openGeneric(dst, nonce, ciphertext, additionalData)
+}

+ 182 - 0
psiphon/common/tls/crypto/chacha20poly1305/chacha20poly1305_test.go

@@ -0,0 +1,182 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package chacha20poly1305
+
+import (
+	"bytes"
+	cr "crypto/rand"
+	"encoding/hex"
+	mr "math/rand"
+	"testing"
+)
+
+func TestVectors(t *testing.T) {
+	for i, test := range chacha20Poly1305Tests {
+		key, _ := hex.DecodeString(test.key)
+		nonce, _ := hex.DecodeString(test.nonce)
+		ad, _ := hex.DecodeString(test.aad)
+		plaintext, _ := hex.DecodeString(test.plaintext)
+
+		aead, err := New(key)
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		ct := aead.Seal(nil, nonce, plaintext, ad)
+		if ctHex := hex.EncodeToString(ct); ctHex != test.out {
+			t.Errorf("#%d: got %s, want %s", i, ctHex, test.out)
+			continue
+		}
+
+		plaintext2, err := aead.Open(nil, nonce, ct, ad)
+		if err != nil {
+			t.Errorf("#%d: Open failed", i)
+			continue
+		}
+
+		if !bytes.Equal(plaintext, plaintext2) {
+			t.Errorf("#%d: plaintext's don't match: got %x vs %x", i, plaintext2, plaintext)
+			continue
+		}
+
+		if len(ad) > 0 {
+			alterAdIdx := mr.Intn(len(ad))
+			ad[alterAdIdx] ^= 0x80
+			if _, err := aead.Open(nil, nonce, ct, ad); err == nil {
+				t.Errorf("#%d: Open was successful after altering additional data", i)
+			}
+			ad[alterAdIdx] ^= 0x80
+		}
+
+		alterNonceIdx := mr.Intn(aead.NonceSize())
+		nonce[alterNonceIdx] ^= 0x80
+		if _, err := aead.Open(nil, nonce, ct, ad); err == nil {
+			t.Errorf("#%d: Open was successful after altering nonce", i)
+		}
+		nonce[alterNonceIdx] ^= 0x80
+
+		alterCtIdx := mr.Intn(len(ct))
+		ct[alterCtIdx] ^= 0x80
+		if _, err := aead.Open(nil, nonce, ct, ad); err == nil {
+			t.Errorf("#%d: Open was successful after altering ciphertext", i)
+		}
+		ct[alterCtIdx] ^= 0x80
+	}
+}
+
+func TestRandom(t *testing.T) {
+	// Some random tests to verify Open(Seal) == Plaintext
+	for i := 0; i < 256; i++ {
+		var nonce [12]byte
+		var key [32]byte
+
+		al := mr.Intn(128)
+		pl := mr.Intn(16384)
+		ad := make([]byte, al)
+		plaintext := make([]byte, pl)
+		cr.Read(key[:])
+		cr.Read(nonce[:])
+		cr.Read(ad)
+		cr.Read(plaintext)
+
+		aead, err := New(key[:])
+		if err != nil {
+			t.Fatal(err)
+		}
+
+		ct := aead.Seal(nil, nonce[:], plaintext, ad)
+
+		plaintext2, err := aead.Open(nil, nonce[:], ct, ad)
+		if err != nil {
+			t.Errorf("Random #%d: Open failed", i)
+			continue
+		}
+
+		if !bytes.Equal(plaintext, plaintext2) {
+			t.Errorf("Random #%d: plaintext's don't match: got %x vs %x", i, plaintext2, plaintext)
+			continue
+		}
+
+		if len(ad) > 0 {
+			alterAdIdx := mr.Intn(len(ad))
+			ad[alterAdIdx] ^= 0x80
+			if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil {
+				t.Errorf("Random #%d: Open was successful after altering additional data", i)
+			}
+			ad[alterAdIdx] ^= 0x80
+		}
+
+		alterNonceIdx := mr.Intn(aead.NonceSize())
+		nonce[alterNonceIdx] ^= 0x80
+		if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil {
+			t.Errorf("Random #%d: Open was successful after altering nonce", i)
+		}
+		nonce[alterNonceIdx] ^= 0x80
+
+		alterCtIdx := mr.Intn(len(ct))
+		ct[alterCtIdx] ^= 0x80
+		if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil {
+			t.Errorf("Random #%d: Open was successful after altering ciphertext", i)
+		}
+		ct[alterCtIdx] ^= 0x80
+	}
+}
+
+func benchmarkChaCha20Poly1305Seal(b *testing.B, buf []byte) {
+	b.SetBytes(int64(len(buf)))
+
+	var key [32]byte
+	var nonce [12]byte
+	var ad [13]byte
+	var out []byte
+
+	aead, _ := New(key[:])
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		out = aead.Seal(out[:0], nonce[:], buf[:], ad[:])
+	}
+}
+
+func benchmarkChaCha20Poly1305Open(b *testing.B, buf []byte) {
+	b.SetBytes(int64(len(buf)))
+
+	var key [32]byte
+	var nonce [12]byte
+	var ad [13]byte
+	var ct []byte
+	var out []byte
+
+	aead, _ := New(key[:])
+	ct = aead.Seal(ct[:0], nonce[:], buf[:], ad[:])
+
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		out, _ = aead.Open(out[:0], nonce[:], ct[:], ad[:])
+	}
+}
+
+func BenchmarkChacha20Poly1305Open_64(b *testing.B) {
+	benchmarkChaCha20Poly1305Open(b, make([]byte, 64))
+}
+
+func BenchmarkChacha20Poly1305Seal_64(b *testing.B) {
+	benchmarkChaCha20Poly1305Seal(b, make([]byte, 64))
+}
+
+func BenchmarkChacha20Poly1305Open_1350(b *testing.B) {
+	benchmarkChaCha20Poly1305Open(b, make([]byte, 1350))
+}
+
+func BenchmarkChacha20Poly1305Seal_1350(b *testing.B) {
+	benchmarkChaCha20Poly1305Seal(b, make([]byte, 1350))
+}
+
+func BenchmarkChacha20Poly1305Open_8K(b *testing.B) {
+	benchmarkChaCha20Poly1305Open(b, make([]byte, 8*1024))
+}
+
+func BenchmarkChacha20Poly1305Seal_8K(b *testing.B) {
+	benchmarkChaCha20Poly1305Seal(b, make([]byte, 8*1024))
+}

File diff suppressed because it is too large
+ 94 - 0
psiphon/common/tls/crypto/chacha20poly1305/chacha20poly1305_test_vectors.go


+ 199 - 0
psiphon/common/tls/crypto/chacha20poly1305/internal/chacha20/chacha_generic.go

@@ -0,0 +1,199 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package chacha20 implements the core ChaCha20 function as specified in https://tools.ietf.org/html/rfc7539#section-2.3.
+package chacha20
+
+import "encoding/binary"
+
+const rounds = 20
+
+// core applies the ChaCha20 core function to the 16-byte input in (block
+// counter and nonce) and the 32-byte key k, and puts the result into the
+// 64-byte array out. The 16-byte ChaCha20 constant is fixed inline below.
+func core(out *[64]byte, in *[16]byte, k *[32]byte) {
+	j0 := uint32(0x61707865)
+	j1 := uint32(0x3320646e)
+	j2 := uint32(0x79622d32)
+	j3 := uint32(0x6b206574)
+	j4 := binary.LittleEndian.Uint32(k[0:4])
+	j5 := binary.LittleEndian.Uint32(k[4:8])
+	j6 := binary.LittleEndian.Uint32(k[8:12])
+	j7 := binary.LittleEndian.Uint32(k[12:16])
+	j8 := binary.LittleEndian.Uint32(k[16:20])
+	j9 := binary.LittleEndian.Uint32(k[20:24])
+	j10 := binary.LittleEndian.Uint32(k[24:28])
+	j11 := binary.LittleEndian.Uint32(k[28:32])
+	j12 := binary.LittleEndian.Uint32(in[0:4])
+	j13 := binary.LittleEndian.Uint32(in[4:8])
+	j14 := binary.LittleEndian.Uint32(in[8:12])
+	j15 := binary.LittleEndian.Uint32(in[12:16])
+
+	x0, x1, x2, x3, x4, x5, x6, x7 := j0, j1, j2, j3, j4, j5, j6, j7
+	x8, x9, x10, x11, x12, x13, x14, x15 := j8, j9, j10, j11, j12, j13, j14, j15
+
+	for i := 0; i < rounds; i += 2 {
+		x0 += x4
+		x12 ^= x0
+		x12 = (x12 << 16) | (x12 >> (16))
+		x8 += x12
+		x4 ^= x8
+		x4 = (x4 << 12) | (x4 >> (20))
+		x0 += x4
+		x12 ^= x0
+		x12 = (x12 << 8) | (x12 >> (24))
+		x8 += x12
+		x4 ^= x8
+		x4 = (x4 << 7) | (x4 >> (25))
+		x1 += x5
+		x13 ^= x1
+		x13 = (x13 << 16) | (x13 >> 16)
+		x9 += x13
+		x5 ^= x9
+		x5 = (x5 << 12) | (x5 >> 20)
+		x1 += x5
+		x13 ^= x1
+		x13 = (x13 << 8) | (x13 >> 24)
+		x9 += x13
+		x5 ^= x9
+		x5 = (x5 << 7) | (x5 >> 25)
+		x2 += x6
+		x14 ^= x2
+		x14 = (x14 << 16) | (x14 >> 16)
+		x10 += x14
+		x6 ^= x10
+		x6 = (x6 << 12) | (x6 >> 20)
+		x2 += x6
+		x14 ^= x2
+		x14 = (x14 << 8) | (x14 >> 24)
+		x10 += x14
+		x6 ^= x10
+		x6 = (x6 << 7) | (x6 >> 25)
+		x3 += x7
+		x15 ^= x3
+		x15 = (x15 << 16) | (x15 >> 16)
+		x11 += x15
+		x7 ^= x11
+		x7 = (x7 << 12) | (x7 >> 20)
+		x3 += x7
+		x15 ^= x3
+		x15 = (x15 << 8) | (x15 >> 24)
+		x11 += x15
+		x7 ^= x11
+		x7 = (x7 << 7) | (x7 >> 25)
+		x0 += x5
+		x15 ^= x0
+		x15 = (x15 << 16) | (x15 >> 16)
+		x10 += x15
+		x5 ^= x10
+		x5 = (x5 << 12) | (x5 >> 20)
+		x0 += x5
+		x15 ^= x0
+		x15 = (x15 << 8) | (x15 >> 24)
+		x10 += x15
+		x5 ^= x10
+		x5 = (x5 << 7) | (x5 >> 25)
+		x1 += x6
+		x12 ^= x1
+		x12 = (x12 << 16) | (x12 >> 16)
+		x11 += x12
+		x6 ^= x11
+		x6 = (x6 << 12) | (x6 >> 20)
+		x1 += x6
+		x12 ^= x1
+		x12 = (x12 << 8) | (x12 >> 24)
+		x11 += x12
+		x6 ^= x11
+		x6 = (x6 << 7) | (x6 >> 25)
+		x2 += x7
+		x13 ^= x2
+		x13 = (x13 << 16) | (x13 >> 16)
+		x8 += x13
+		x7 ^= x8
+		x7 = (x7 << 12) | (x7 >> 20)
+		x2 += x7
+		x13 ^= x2
+		x13 = (x13 << 8) | (x13 >> 24)
+		x8 += x13
+		x7 ^= x8
+		x7 = (x7 << 7) | (x7 >> 25)
+		x3 += x4
+		x14 ^= x3
+		x14 = (x14 << 16) | (x14 >> 16)
+		x9 += x14
+		x4 ^= x9
+		x4 = (x4 << 12) | (x4 >> 20)
+		x3 += x4
+		x14 ^= x3
+		x14 = (x14 << 8) | (x14 >> 24)
+		x9 += x14
+		x4 ^= x9
+		x4 = (x4 << 7) | (x4 >> 25)
+	}
+
+	x0 += j0
+	x1 += j1
+	x2 += j2
+	x3 += j3
+	x4 += j4
+	x5 += j5
+	x6 += j6
+	x7 += j7
+	x8 += j8
+	x9 += j9
+	x10 += j10
+	x11 += j11
+	x12 += j12
+	x13 += j13
+	x14 += j14
+	x15 += j15
+
+	binary.LittleEndian.PutUint32(out[0:4], x0)
+	binary.LittleEndian.PutUint32(out[4:8], x1)
+	binary.LittleEndian.PutUint32(out[8:12], x2)
+	binary.LittleEndian.PutUint32(out[12:16], x3)
+	binary.LittleEndian.PutUint32(out[16:20], x4)
+	binary.LittleEndian.PutUint32(out[20:24], x5)
+	binary.LittleEndian.PutUint32(out[24:28], x6)
+	binary.LittleEndian.PutUint32(out[28:32], x7)
+	binary.LittleEndian.PutUint32(out[32:36], x8)
+	binary.LittleEndian.PutUint32(out[36:40], x9)
+	binary.LittleEndian.PutUint32(out[40:44], x10)
+	binary.LittleEndian.PutUint32(out[44:48], x11)
+	binary.LittleEndian.PutUint32(out[48:52], x12)
+	binary.LittleEndian.PutUint32(out[52:56], x13)
+	binary.LittleEndian.PutUint32(out[56:60], x14)
+	binary.LittleEndian.PutUint32(out[60:64], x15)
+}
+
+// XORKeyStream XORs bytes from in with the ChaCha20 key stream and writes
+// them to out, using the given key and counter. In and out may be the same
+// slice but otherwise should not overlap. Counter contains the raw ChaCha20
+// counter bytes (i.e. a 4-byte little-endian block counter followed by the
+// 12-byte nonce).
+func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) {
+	var block [64]byte
+	var counterCopy [16]byte
+	copy(counterCopy[:], counter[:])
+
+	for len(in) >= 64 {
+		core(&block, &counterCopy, key)
+		for i, x := range block {
+			out[i] = in[i] ^ x
+		}
+		u := uint32(1)
+		for i := 0; i < 4; i++ {
+			u += uint32(counterCopy[i])
+			counterCopy[i] = byte(u)
+			u >>= 8
+		}
+		in = in[64:]
+		out = out[64:]
+	}
+
+	if len(in) > 0 {
+		core(&block, &counterCopy, key)
+		for i, v := range in {
+			out[i] = v ^ block[i]
+		}
+	}
+}
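
The counter argument is the raw 16-byte ChaCha20 block header: a 4-byte little-endian block counter followed by the 12-byte nonce. As a sketch of how the chacha20poly1305 package above drives this function, the hypothetical helper below (not part of this package) mirrors sealGeneric:

	// exampleKeystream derives the one-time Poly1305 key from block 0, then
	// encrypts the payload starting at block 1.
	func exampleKeystream(key *[32]byte, nonce, plaintext []byte) ([32]byte, []byte) {
		var counter [16]byte
		copy(counter[4:], nonce) // bytes 0-3: block counter, bytes 4-15: nonce

		var polyKey [32]byte
		XORKeyStream(polyKey[:], polyKey[:], &counter, key)

		counter[0] = 1 // keystream for the payload starts at block 1
		ciphertext := make([]byte, len(plaintext))
		XORKeyStream(ciphertext, plaintext, &counter, key)
		return polyKey, ciphertext
	}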

+ 29 - 0
psiphon/common/tls/crypto/chacha20poly1305/internal/chacha20/chacha_test.go

@@ -0,0 +1,29 @@
+package chacha20
+
+import (
+	"encoding/hex"
+	"testing"
+)
+
+func TestCore(t *testing.T) {
+	// This is just a smoke test that checks the example from
+	// https://tools.ietf.org/html/rfc7539#section-2.3.2. The
+	// chacha20poly1305 package contains much more extensive tests of this
+	// code.
+	var key [32]byte
+	for i := range key {
+		key[i] = byte(i)
+	}
+
+	var input [16]byte
+	input[0] = 1
+	input[7] = 9
+	input[11] = 0x4a
+
+	var out [64]byte
+	XORKeyStream(out[:], out[:], &input, &key)
+	const expected = "10f1e7e4d13b5915500fdd1fa32071c4c7d1f4c733c068030422aa9ac3d46c4ed2826446079faa0914c2d705d98b02a2b5129cd1de164eb9cbd083e8a2503c4e"
+	if result := hex.EncodeToString(out[:]); result != expected {
+		t.Errorf("wanted %x but got %x", expected, result)
+	}
+}

+ 8 - 0
psiphon/common/tls/crypto/curve25519/const_amd64.h

@@ -0,0 +1,8 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+#define REDMASK51     0x0007FFFFFFFFFFFF

+ 20 - 0
psiphon/common/tls/crypto/curve25519/const_amd64.s

@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+// These constants cannot be encoded in non-MOVQ immediates.
+// We access them directly from memory instead.
+
+DATA ·_121666_213(SB)/8, $996687872
+GLOBL ·_121666_213(SB), 8, $8
+
+DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
+GLOBL ·_2P0(SB), 8, $8
+
+DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
+GLOBL ·_2P1234(SB), 8, $8

+ 88 - 0
psiphon/common/tls/crypto/curve25519/cswap_amd64.s

@@ -0,0 +1,88 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+// func cswap(inout *[5]uint64, v uint64)
+TEXT ·cswap(SB),7,$0
+	MOVQ inout+0(FP),DI
+	MOVQ v+8(FP),SI
+
+	CMPQ SI,$1
+	MOVQ 0(DI),SI
+	MOVQ 80(DI),DX
+	MOVQ 8(DI),CX
+	MOVQ 88(DI),R8
+	MOVQ SI,R9
+	CMOVQEQ DX,SI
+	CMOVQEQ R9,DX
+	MOVQ CX,R9
+	CMOVQEQ R8,CX
+	CMOVQEQ R9,R8
+	MOVQ SI,0(DI)
+	MOVQ DX,80(DI)
+	MOVQ CX,8(DI)
+	MOVQ R8,88(DI)
+	MOVQ 16(DI),SI
+	MOVQ 96(DI),DX
+	MOVQ 24(DI),CX
+	MOVQ 104(DI),R8
+	MOVQ SI,R9
+	CMOVQEQ DX,SI
+	CMOVQEQ R9,DX
+	MOVQ CX,R9
+	CMOVQEQ R8,CX
+	CMOVQEQ R9,R8
+	MOVQ SI,16(DI)
+	MOVQ DX,96(DI)
+	MOVQ CX,24(DI)
+	MOVQ R8,104(DI)
+	MOVQ 32(DI),SI
+	MOVQ 112(DI),DX
+	MOVQ 40(DI),CX
+	MOVQ 120(DI),R8
+	MOVQ SI,R9
+	CMOVQEQ DX,SI
+	CMOVQEQ R9,DX
+	MOVQ CX,R9
+	CMOVQEQ R8,CX
+	CMOVQEQ R9,R8
+	MOVQ SI,32(DI)
+	MOVQ DX,112(DI)
+	MOVQ CX,40(DI)
+	MOVQ R8,120(DI)
+	MOVQ 48(DI),SI
+	MOVQ 128(DI),DX
+	MOVQ 56(DI),CX
+	MOVQ 136(DI),R8
+	MOVQ SI,R9
+	CMOVQEQ DX,SI
+	CMOVQEQ R9,DX
+	MOVQ CX,R9
+	CMOVQEQ R8,CX
+	CMOVQEQ R9,R8
+	MOVQ SI,48(DI)
+	MOVQ DX,128(DI)
+	MOVQ CX,56(DI)
+	MOVQ R8,136(DI)
+	MOVQ 64(DI),SI
+	MOVQ 144(DI),DX
+	MOVQ 72(DI),CX
+	MOVQ 152(DI),R8
+	MOVQ SI,R9
+	CMOVQEQ DX,SI
+	CMOVQEQ R9,DX
+	MOVQ CX,R9
+	CMOVQEQ R8,CX
+	CMOVQEQ R9,R8
+	MOVQ SI,64(DI)
+	MOVQ DX,144(DI)
+	MOVQ CX,72(DI)
+	MOVQ R8,152(DI)
+	MOVQ DI,AX
+	MOVQ SI,DX
+	RET

+ 841 - 0
psiphon/common/tls/crypto/curve25519/curve25519.go

@@ -0,0 +1,841 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// We have an implementation in amd64 assembly so this code is only run on
+// non-amd64 platforms. The amd64 assembly does not support gccgo.
+// +build !amd64 gccgo appengine
+
+package curve25519
+
+// This code is a port of the public domain, "ref10" implementation of
+// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
+
+// fieldElement represents an element of the field GF(2^255 - 19). An element
+// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
+// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
+// context.
+type fieldElement [10]int32
+
+func feZero(fe *fieldElement) {
+	for i := range fe {
+		fe[i] = 0
+	}
+}
+
+func feOne(fe *fieldElement) {
+	feZero(fe)
+	fe[0] = 1
+}
+
+func feAdd(dst, a, b *fieldElement) {
+	for i := range dst {
+		dst[i] = a[i] + b[i]
+	}
+}
+
+func feSub(dst, a, b *fieldElement) {
+	for i := range dst {
+		dst[i] = a[i] - b[i]
+	}
+}
+
+func feCopy(dst, src *fieldElement) {
+	for i := range dst {
+		dst[i] = src[i]
+	}
+}
+
+// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
+//
+// Preconditions: b in {0,1}.
+func feCSwap(f, g *fieldElement, b int32) {
+	var x fieldElement
+	b = -b
+	for i := range x {
+		x[i] = b & (f[i] ^ g[i])
+	}
+
+	for i := range f {
+		f[i] ^= x[i]
+	}
+	for i := range g {
+		g[i] ^= x[i]
+	}
+}
+
+// load3 reads a 24-bit, little-endian value from in.
+func load3(in []byte) int64 {
+	var r int64
+	r = int64(in[0])
+	r |= int64(in[1]) << 8
+	r |= int64(in[2]) << 16
+	return r
+}
+
+// load4 reads a 32-bit, little-endian value from in.
+func load4(in []byte) int64 {
+	var r int64
+	r = int64(in[0])
+	r |= int64(in[1]) << 8
+	r |= int64(in[2]) << 16
+	r |= int64(in[3]) << 24
+	return r
+}
+
+func feFromBytes(dst *fieldElement, src *[32]byte) {
+	h0 := load4(src[:])
+	h1 := load3(src[4:]) << 6
+	h2 := load3(src[7:]) << 5
+	h3 := load3(src[10:]) << 3
+	h4 := load3(src[13:]) << 2
+	h5 := load4(src[16:])
+	h6 := load3(src[20:]) << 7
+	h7 := load3(src[23:]) << 5
+	h8 := load3(src[26:]) << 4
+	h9 := load3(src[29:]) << 2
+
+	var carry [10]int64
+	carry[9] = (h9 + 1<<24) >> 25
+	h0 += carry[9] * 19
+	h9 -= carry[9] << 25
+	carry[1] = (h1 + 1<<24) >> 25
+	h2 += carry[1]
+	h1 -= carry[1] << 25
+	carry[3] = (h3 + 1<<24) >> 25
+	h4 += carry[3]
+	h3 -= carry[3] << 25
+	carry[5] = (h5 + 1<<24) >> 25
+	h6 += carry[5]
+	h5 -= carry[5] << 25
+	carry[7] = (h7 + 1<<24) >> 25
+	h8 += carry[7]
+	h7 -= carry[7] << 25
+
+	carry[0] = (h0 + 1<<25) >> 26
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+	carry[2] = (h2 + 1<<25) >> 26
+	h3 += carry[2]
+	h2 -= carry[2] << 26
+	carry[4] = (h4 + 1<<25) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+	carry[6] = (h6 + 1<<25) >> 26
+	h7 += carry[6]
+	h6 -= carry[6] << 26
+	carry[8] = (h8 + 1<<25) >> 26
+	h9 += carry[8]
+	h8 -= carry[8] << 26
+
+	dst[0] = int32(h0)
+	dst[1] = int32(h1)
+	dst[2] = int32(h2)
+	dst[3] = int32(h3)
+	dst[4] = int32(h4)
+	dst[5] = int32(h5)
+	dst[6] = int32(h6)
+	dst[7] = int32(h7)
+	dst[8] = int32(h8)
+	dst[9] = int32(h9)
+}
+
+// feToBytes marshals h to s.
+// Preconditions:
+//   |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Write p=2^255-19; q=floor(h/p).
+// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+//
+// Proof:
+//   Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+//   Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
+//
+//   Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+//   Then 0<y<1.
+//
+//   Write r=h-pq.
+//   Have 0<=r<=p-1=2^255-20.
+//   Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
+//
+//   Write x=r+19(2^-255)r+y.
+//   Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
+//
+//   Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
+//   so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
+func feToBytes(s *[32]byte, h *fieldElement) {
+	var carry [10]int32
+
+	q := (19*h[9] + (1 << 24)) >> 25
+	q = (h[0] + q) >> 26
+	q = (h[1] + q) >> 25
+	q = (h[2] + q) >> 26
+	q = (h[3] + q) >> 25
+	q = (h[4] + q) >> 26
+	q = (h[5] + q) >> 25
+	q = (h[6] + q) >> 26
+	q = (h[7] + q) >> 25
+	q = (h[8] + q) >> 26
+	q = (h[9] + q) >> 25
+
+	// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
+	h[0] += 19 * q
+	// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+
+	carry[0] = h[0] >> 26
+	h[1] += carry[0]
+	h[0] -= carry[0] << 26
+	carry[1] = h[1] >> 25
+	h[2] += carry[1]
+	h[1] -= carry[1] << 25
+	carry[2] = h[2] >> 26
+	h[3] += carry[2]
+	h[2] -= carry[2] << 26
+	carry[3] = h[3] >> 25
+	h[4] += carry[3]
+	h[3] -= carry[3] << 25
+	carry[4] = h[4] >> 26
+	h[5] += carry[4]
+	h[4] -= carry[4] << 26
+	carry[5] = h[5] >> 25
+	h[6] += carry[5]
+	h[5] -= carry[5] << 25
+	carry[6] = h[6] >> 26
+	h[7] += carry[6]
+	h[6] -= carry[6] << 26
+	carry[7] = h[7] >> 25
+	h[8] += carry[7]
+	h[7] -= carry[7] << 25
+	carry[8] = h[8] >> 26
+	h[9] += carry[8]
+	h[8] -= carry[8] << 26
+	carry[9] = h[9] >> 25
+	h[9] -= carry[9] << 25
+	// h10 = carry9
+
+	// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
+	// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
+	// evidently 2^255 h10-2^255 q = 0.
+	// Goal: Output h[0]+...+2^230 h[9].
+
+	s[0] = byte(h[0] >> 0)
+	s[1] = byte(h[0] >> 8)
+	s[2] = byte(h[0] >> 16)
+	s[3] = byte((h[0] >> 24) | (h[1] << 2))
+	s[4] = byte(h[1] >> 6)
+	s[5] = byte(h[1] >> 14)
+	s[6] = byte((h[1] >> 22) | (h[2] << 3))
+	s[7] = byte(h[2] >> 5)
+	s[8] = byte(h[2] >> 13)
+	s[9] = byte((h[2] >> 21) | (h[3] << 5))
+	s[10] = byte(h[3] >> 3)
+	s[11] = byte(h[3] >> 11)
+	s[12] = byte((h[3] >> 19) | (h[4] << 6))
+	s[13] = byte(h[4] >> 2)
+	s[14] = byte(h[4] >> 10)
+	s[15] = byte(h[4] >> 18)
+	s[16] = byte(h[5] >> 0)
+	s[17] = byte(h[5] >> 8)
+	s[18] = byte(h[5] >> 16)
+	s[19] = byte((h[5] >> 24) | (h[6] << 1))
+	s[20] = byte(h[6] >> 7)
+	s[21] = byte(h[6] >> 15)
+	s[22] = byte((h[6] >> 23) | (h[7] << 3))
+	s[23] = byte(h[7] >> 5)
+	s[24] = byte(h[7] >> 13)
+	s[25] = byte((h[7] >> 21) | (h[8] << 4))
+	s[26] = byte(h[8] >> 4)
+	s[27] = byte(h[8] >> 12)
+	s[28] = byte((h[8] >> 20) | (h[9] << 6))
+	s[29] = byte(h[9] >> 2)
+	s[30] = byte(h[9] >> 10)
+	s[31] = byte(h[9] >> 18)
+}
+
+// feMul calculates h = f * g
+// Can overlap h with f or g.
+//
+// Preconditions:
+//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//    |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//
+// Postconditions:
+//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Notes on implementation strategy:
+//
+// Using schoolbook multiplication.
+// Karatsuba would save a little in some cost models.
+//
+// Most multiplications by 2 and 19 are 32-bit precomputations;
+// cheaper than 64-bit postcomputations.
+//
+// There is one remaining multiplication by 19 in the carry chain;
+// one *19 precomputation can be merged into this,
+// but the resulting data flow is considerably less clean.
+//
+// There are 12 carries below.
+// 10 of them are 2-way parallelizable and vectorizable.
+// Can get away with 11 carries, but then data flow is much deeper.
+//
+// With tighter constraints on inputs can squeeze carries into int32.
+func feMul(h, f, g *fieldElement) {
+	f0 := f[0]
+	f1 := f[1]
+	f2 := f[2]
+	f3 := f[3]
+	f4 := f[4]
+	f5 := f[5]
+	f6 := f[6]
+	f7 := f[7]
+	f8 := f[8]
+	f9 := f[9]
+	g0 := g[0]
+	g1 := g[1]
+	g2 := g[2]
+	g3 := g[3]
+	g4 := g[4]
+	g5 := g[5]
+	g6 := g[6]
+	g7 := g[7]
+	g8 := g[8]
+	g9 := g[9]
+	g1_19 := 19 * g1 // 1.4*2^29
+	g2_19 := 19 * g2 // 1.4*2^30; still ok
+	g3_19 := 19 * g3
+	g4_19 := 19 * g4
+	g5_19 := 19 * g5
+	g6_19 := 19 * g6
+	g7_19 := 19 * g7
+	g8_19 := 19 * g8
+	g9_19 := 19 * g9
+	f1_2 := 2 * f1
+	f3_2 := 2 * f3
+	f5_2 := 2 * f5
+	f7_2 := 2 * f7
+	f9_2 := 2 * f9
+	f0g0 := int64(f0) * int64(g0)
+	f0g1 := int64(f0) * int64(g1)
+	f0g2 := int64(f0) * int64(g2)
+	f0g3 := int64(f0) * int64(g3)
+	f0g4 := int64(f0) * int64(g4)
+	f0g5 := int64(f0) * int64(g5)
+	f0g6 := int64(f0) * int64(g6)
+	f0g7 := int64(f0) * int64(g7)
+	f0g8 := int64(f0) * int64(g8)
+	f0g9 := int64(f0) * int64(g9)
+	f1g0 := int64(f1) * int64(g0)
+	f1g1_2 := int64(f1_2) * int64(g1)
+	f1g2 := int64(f1) * int64(g2)
+	f1g3_2 := int64(f1_2) * int64(g3)
+	f1g4 := int64(f1) * int64(g4)
+	f1g5_2 := int64(f1_2) * int64(g5)
+	f1g6 := int64(f1) * int64(g6)
+	f1g7_2 := int64(f1_2) * int64(g7)
+	f1g8 := int64(f1) * int64(g8)
+	f1g9_38 := int64(f1_2) * int64(g9_19)
+	f2g0 := int64(f2) * int64(g0)
+	f2g1 := int64(f2) * int64(g1)
+	f2g2 := int64(f2) * int64(g2)
+	f2g3 := int64(f2) * int64(g3)
+	f2g4 := int64(f2) * int64(g4)
+	f2g5 := int64(f2) * int64(g5)
+	f2g6 := int64(f2) * int64(g6)
+	f2g7 := int64(f2) * int64(g7)
+	f2g8_19 := int64(f2) * int64(g8_19)
+	f2g9_19 := int64(f2) * int64(g9_19)
+	f3g0 := int64(f3) * int64(g0)
+	f3g1_2 := int64(f3_2) * int64(g1)
+	f3g2 := int64(f3) * int64(g2)
+	f3g3_2 := int64(f3_2) * int64(g3)
+	f3g4 := int64(f3) * int64(g4)
+	f3g5_2 := int64(f3_2) * int64(g5)
+	f3g6 := int64(f3) * int64(g6)
+	f3g7_38 := int64(f3_2) * int64(g7_19)
+	f3g8_19 := int64(f3) * int64(g8_19)
+	f3g9_38 := int64(f3_2) * int64(g9_19)
+	f4g0 := int64(f4) * int64(g0)
+	f4g1 := int64(f4) * int64(g1)
+	f4g2 := int64(f4) * int64(g2)
+	f4g3 := int64(f4) * int64(g3)
+	f4g4 := int64(f4) * int64(g4)
+	f4g5 := int64(f4) * int64(g5)
+	f4g6_19 := int64(f4) * int64(g6_19)
+	f4g7_19 := int64(f4) * int64(g7_19)
+	f4g8_19 := int64(f4) * int64(g8_19)
+	f4g9_19 := int64(f4) * int64(g9_19)
+	f5g0 := int64(f5) * int64(g0)
+	f5g1_2 := int64(f5_2) * int64(g1)
+	f5g2 := int64(f5) * int64(g2)
+	f5g3_2 := int64(f5_2) * int64(g3)
+	f5g4 := int64(f5) * int64(g4)
+	f5g5_38 := int64(f5_2) * int64(g5_19)
+	f5g6_19 := int64(f5) * int64(g6_19)
+	f5g7_38 := int64(f5_2) * int64(g7_19)
+	f5g8_19 := int64(f5) * int64(g8_19)
+	f5g9_38 := int64(f5_2) * int64(g9_19)
+	f6g0 := int64(f6) * int64(g0)
+	f6g1 := int64(f6) * int64(g1)
+	f6g2 := int64(f6) * int64(g2)
+	f6g3 := int64(f6) * int64(g3)
+	f6g4_19 := int64(f6) * int64(g4_19)
+	f6g5_19 := int64(f6) * int64(g5_19)
+	f6g6_19 := int64(f6) * int64(g6_19)
+	f6g7_19 := int64(f6) * int64(g7_19)
+	f6g8_19 := int64(f6) * int64(g8_19)
+	f6g9_19 := int64(f6) * int64(g9_19)
+	f7g0 := int64(f7) * int64(g0)
+	f7g1_2 := int64(f7_2) * int64(g1)
+	f7g2 := int64(f7) * int64(g2)
+	f7g3_38 := int64(f7_2) * int64(g3_19)
+	f7g4_19 := int64(f7) * int64(g4_19)
+	f7g5_38 := int64(f7_2) * int64(g5_19)
+	f7g6_19 := int64(f7) * int64(g6_19)
+	f7g7_38 := int64(f7_2) * int64(g7_19)
+	f7g8_19 := int64(f7) * int64(g8_19)
+	f7g9_38 := int64(f7_2) * int64(g9_19)
+	f8g0 := int64(f8) * int64(g0)
+	f8g1 := int64(f8) * int64(g1)
+	f8g2_19 := int64(f8) * int64(g2_19)
+	f8g3_19 := int64(f8) * int64(g3_19)
+	f8g4_19 := int64(f8) * int64(g4_19)
+	f8g5_19 := int64(f8) * int64(g5_19)
+	f8g6_19 := int64(f8) * int64(g6_19)
+	f8g7_19 := int64(f8) * int64(g7_19)
+	f8g8_19 := int64(f8) * int64(g8_19)
+	f8g9_19 := int64(f8) * int64(g9_19)
+	f9g0 := int64(f9) * int64(g0)
+	f9g1_38 := int64(f9_2) * int64(g1_19)
+	f9g2_19 := int64(f9) * int64(g2_19)
+	f9g3_38 := int64(f9_2) * int64(g3_19)
+	f9g4_19 := int64(f9) * int64(g4_19)
+	f9g5_38 := int64(f9_2) * int64(g5_19)
+	f9g6_19 := int64(f9) * int64(g6_19)
+	f9g7_38 := int64(f9_2) * int64(g7_19)
+	f9g8_19 := int64(f9) * int64(g8_19)
+	f9g9_38 := int64(f9_2) * int64(g9_19)
+	h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38
+	h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19
+	h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38
+	h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19
+	h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38
+	h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19
+	h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38
+	h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19
+	h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38
+	h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0
+	var carry [10]int64
+
+	// |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
+	//   i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
+	// |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
+	//   i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
+
+	carry[0] = (h0 + (1 << 25)) >> 26
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+	carry[4] = (h4 + (1 << 25)) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+	// |h0| <= 2^25
+	// |h4| <= 2^25
+	// |h1| <= 1.51*2^58
+	// |h5| <= 1.51*2^58
+
+	carry[1] = (h1 + (1 << 24)) >> 25
+	h2 += carry[1]
+	h1 -= carry[1] << 25
+	carry[5] = (h5 + (1 << 24)) >> 25
+	h6 += carry[5]
+	h5 -= carry[5] << 25
+	// |h1| <= 2^24; from now on fits into int32
+	// |h5| <= 2^24; from now on fits into int32
+	// |h2| <= 1.21*2^59
+	// |h6| <= 1.21*2^59
+
+	carry[2] = (h2 + (1 << 25)) >> 26
+	h3 += carry[2]
+	h2 -= carry[2] << 26
+	carry[6] = (h6 + (1 << 25)) >> 26
+	h7 += carry[6]
+	h6 -= carry[6] << 26
+	// |h2| <= 2^25; from now on fits into int32 unchanged
+	// |h6| <= 2^25; from now on fits into int32 unchanged
+	// |h3| <= 1.51*2^58
+	// |h7| <= 1.51*2^58
+
+	carry[3] = (h3 + (1 << 24)) >> 25
+	h4 += carry[3]
+	h3 -= carry[3] << 25
+	carry[7] = (h7 + (1 << 24)) >> 25
+	h8 += carry[7]
+	h7 -= carry[7] << 25
+	// |h3| <= 2^24; from now on fits into int32 unchanged
+	// |h7| <= 2^24; from now on fits into int32 unchanged
+	// |h4| <= 1.52*2^33
+	// |h8| <= 1.52*2^33
+
+	carry[4] = (h4 + (1 << 25)) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+	carry[8] = (h8 + (1 << 25)) >> 26
+	h9 += carry[8]
+	h8 -= carry[8] << 26
+	// |h4| <= 2^25; from now on fits into int32 unchanged
+	// |h8| <= 2^25; from now on fits into int32 unchanged
+	// |h5| <= 1.01*2^24
+	// |h9| <= 1.51*2^58
+
+	carry[9] = (h9 + (1 << 24)) >> 25
+	h0 += carry[9] * 19
+	h9 -= carry[9] << 25
+	// |h9| <= 2^24; from now on fits into int32 unchanged
+	// |h0| <= 1.8*2^37
+
+	carry[0] = (h0 + (1 << 25)) >> 26
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+	// |h0| <= 2^25; from now on fits into int32 unchanged
+	// |h1| <= 1.01*2^24
+
+	h[0] = int32(h0)
+	h[1] = int32(h1)
+	h[2] = int32(h2)
+	h[3] = int32(h3)
+	h[4] = int32(h4)
+	h[5] = int32(h5)
+	h[6] = int32(h6)
+	h[7] = int32(h7)
+	h[8] = int32(h8)
+	h[9] = int32(h9)
+}
+
+// feSquare calculates h = f*f. Can overlap h with f.
+//
+// Preconditions:
+//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//
+// Postconditions:
+//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+func feSquare(h, f *fieldElement) {
+	f0 := f[0]
+	f1 := f[1]
+	f2 := f[2]
+	f3 := f[3]
+	f4 := f[4]
+	f5 := f[5]
+	f6 := f[6]
+	f7 := f[7]
+	f8 := f[8]
+	f9 := f[9]
+	f0_2 := 2 * f0
+	f1_2 := 2 * f1
+	f2_2 := 2 * f2
+	f3_2 := 2 * f3
+	f4_2 := 2 * f4
+	f5_2 := 2 * f5
+	f6_2 := 2 * f6
+	f7_2 := 2 * f7
+	f5_38 := 38 * f5 // 1.31*2^30
+	f6_19 := 19 * f6 // 1.31*2^30
+	f7_38 := 38 * f7 // 1.31*2^30
+	f8_19 := 19 * f8 // 1.31*2^30
+	f9_38 := 38 * f9 // 1.31*2^30
+	f0f0 := int64(f0) * int64(f0)
+	f0f1_2 := int64(f0_2) * int64(f1)
+	f0f2_2 := int64(f0_2) * int64(f2)
+	f0f3_2 := int64(f0_2) * int64(f3)
+	f0f4_2 := int64(f0_2) * int64(f4)
+	f0f5_2 := int64(f0_2) * int64(f5)
+	f0f6_2 := int64(f0_2) * int64(f6)
+	f0f7_2 := int64(f0_2) * int64(f7)
+	f0f8_2 := int64(f0_2) * int64(f8)
+	f0f9_2 := int64(f0_2) * int64(f9)
+	f1f1_2 := int64(f1_2) * int64(f1)
+	f1f2_2 := int64(f1_2) * int64(f2)
+	f1f3_4 := int64(f1_2) * int64(f3_2)
+	f1f4_2 := int64(f1_2) * int64(f4)
+	f1f5_4 := int64(f1_2) * int64(f5_2)
+	f1f6_2 := int64(f1_2) * int64(f6)
+	f1f7_4 := int64(f1_2) * int64(f7_2)
+	f1f8_2 := int64(f1_2) * int64(f8)
+	f1f9_76 := int64(f1_2) * int64(f9_38)
+	f2f2 := int64(f2) * int64(f2)
+	f2f3_2 := int64(f2_2) * int64(f3)
+	f2f4_2 := int64(f2_2) * int64(f4)
+	f2f5_2 := int64(f2_2) * int64(f5)
+	f2f6_2 := int64(f2_2) * int64(f6)
+	f2f7_2 := int64(f2_2) * int64(f7)
+	f2f8_38 := int64(f2_2) * int64(f8_19)
+	f2f9_38 := int64(f2) * int64(f9_38)
+	f3f3_2 := int64(f3_2) * int64(f3)
+	f3f4_2 := int64(f3_2) * int64(f4)
+	f3f5_4 := int64(f3_2) * int64(f5_2)
+	f3f6_2 := int64(f3_2) * int64(f6)
+	f3f7_76 := int64(f3_2) * int64(f7_38)
+	f3f8_38 := int64(f3_2) * int64(f8_19)
+	f3f9_76 := int64(f3_2) * int64(f9_38)
+	f4f4 := int64(f4) * int64(f4)
+	f4f5_2 := int64(f4_2) * int64(f5)
+	f4f6_38 := int64(f4_2) * int64(f6_19)
+	f4f7_38 := int64(f4) * int64(f7_38)
+	f4f8_38 := int64(f4_2) * int64(f8_19)
+	f4f9_38 := int64(f4) * int64(f9_38)
+	f5f5_38 := int64(f5) * int64(f5_38)
+	f5f6_38 := int64(f5_2) * int64(f6_19)
+	f5f7_76 := int64(f5_2) * int64(f7_38)
+	f5f8_38 := int64(f5_2) * int64(f8_19)
+	f5f9_76 := int64(f5_2) * int64(f9_38)
+	f6f6_19 := int64(f6) * int64(f6_19)
+	f6f7_38 := int64(f6) * int64(f7_38)
+	f6f8_38 := int64(f6_2) * int64(f8_19)
+	f6f9_38 := int64(f6) * int64(f9_38)
+	f7f7_38 := int64(f7) * int64(f7_38)
+	f7f8_38 := int64(f7_2) * int64(f8_19)
+	f7f9_76 := int64(f7_2) * int64(f9_38)
+	f8f8_19 := int64(f8) * int64(f8_19)
+	f8f9_38 := int64(f8) * int64(f9_38)
+	f9f9_38 := int64(f9) * int64(f9_38)
+	h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
+	h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
+	h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
+	h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38
+	h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38
+	h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38
+	h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19
+	h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38
+	h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38
+	h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2
+	var carry [10]int64
+
+	carry[0] = (h0 + (1 << 25)) >> 26
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+	carry[4] = (h4 + (1 << 25)) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+
+	carry[1] = (h1 + (1 << 24)) >> 25
+	h2 += carry[1]
+	h1 -= carry[1] << 25
+	carry[5] = (h5 + (1 << 24)) >> 25
+	h6 += carry[5]
+	h5 -= carry[5] << 25
+
+	carry[2] = (h2 + (1 << 25)) >> 26
+	h3 += carry[2]
+	h2 -= carry[2] << 26
+	carry[6] = (h6 + (1 << 25)) >> 26
+	h7 += carry[6]
+	h6 -= carry[6] << 26
+
+	carry[3] = (h3 + (1 << 24)) >> 25
+	h4 += carry[3]
+	h3 -= carry[3] << 25
+	carry[7] = (h7 + (1 << 24)) >> 25
+	h8 += carry[7]
+	h7 -= carry[7] << 25
+
+	carry[4] = (h4 + (1 << 25)) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+	carry[8] = (h8 + (1 << 25)) >> 26
+	h9 += carry[8]
+	h8 -= carry[8] << 26
+
+	carry[9] = (h9 + (1 << 24)) >> 25
+	h0 += carry[9] * 19
+	h9 -= carry[9] << 25
+
+	carry[0] = (h0 + (1 << 25)) >> 26
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+
+	h[0] = int32(h0)
+	h[1] = int32(h1)
+	h[2] = int32(h2)
+	h[3] = int32(h3)
+	h[4] = int32(h4)
+	h[5] = int32(h5)
+	h[6] = int32(h6)
+	h[7] = int32(h7)
+	h[8] = int32(h8)
+	h[9] = int32(h9)
+}
+
+// feMul121666 calculates h = f * 121666. Can overlap h with f.
+//
+// Preconditions:
+//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//
+// Postconditions:
+//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+func feMul121666(h, f *fieldElement) {
+	h0 := int64(f[0]) * 121666
+	h1 := int64(f[1]) * 121666
+	h2 := int64(f[2]) * 121666
+	h3 := int64(f[3]) * 121666
+	h4 := int64(f[4]) * 121666
+	h5 := int64(f[5]) * 121666
+	h6 := int64(f[6]) * 121666
+	h7 := int64(f[7]) * 121666
+	h8 := int64(f[8]) * 121666
+	h9 := int64(f[9]) * 121666
+	var carry [10]int64
+
+	carry[9] = (h9 + (1 << 24)) >> 25
+	h0 += carry[9] * 19
+	h9 -= carry[9] << 25
+	carry[1] = (h1 + (1 << 24)) >> 25
+	h2 += carry[1]
+	h1 -= carry[1] << 25
+	carry[3] = (h3 + (1 << 24)) >> 25
+	h4 += carry[3]
+	h3 -= carry[3] << 25
+	carry[5] = (h5 + (1 << 24)) >> 25
+	h6 += carry[5]
+	h5 -= carry[5] << 25
+	carry[7] = (h7 + (1 << 24)) >> 25
+	h8 += carry[7]
+	h7 -= carry[7] << 25
+
+	carry[0] = (h0 + (1 << 25)) >> 26
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+	carry[2] = (h2 + (1 << 25)) >> 26
+	h3 += carry[2]
+	h2 -= carry[2] << 26
+	carry[4] = (h4 + (1 << 25)) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+	carry[6] = (h6 + (1 << 25)) >> 26
+	h7 += carry[6]
+	h6 -= carry[6] << 26
+	carry[8] = (h8 + (1 << 25)) >> 26
+	h9 += carry[8]
+	h8 -= carry[8] << 26
+
+	h[0] = int32(h0)
+	h[1] = int32(h1)
+	h[2] = int32(h2)
+	h[3] = int32(h3)
+	h[4] = int32(h4)
+	h[5] = int32(h5)
+	h[6] = int32(h6)
+	h[7] = int32(h7)
+	h[8] = int32(h8)
+	h[9] = int32(h9)
+}
+
+// feInvert sets out = z^-1.
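+// The inverse is computed as z^(p-2) for p = 2^255 - 19, using the fixed
+// square-and-multiply addition chain from the ref10 implementation.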
+func feInvert(out, z *fieldElement) {
+	var t0, t1, t2, t3 fieldElement
+	var i int
+
+	feSquare(&t0, z)
+	for i = 1; i < 1; i++ {
+		feSquare(&t0, &t0)
+	}
+	feSquare(&t1, &t0)
+	for i = 1; i < 2; i++ {
+		feSquare(&t1, &t1)
+	}
+	feMul(&t1, z, &t1)
+	feMul(&t0, &t0, &t1)
+	feSquare(&t2, &t0)
+	for i = 1; i < 1; i++ {
+		feSquare(&t2, &t2)
+	}
+	feMul(&t1, &t1, &t2)
+	feSquare(&t2, &t1)
+	for i = 1; i < 5; i++ {
+		feSquare(&t2, &t2)
+	}
+	feMul(&t1, &t2, &t1)
+	feSquare(&t2, &t1)
+	for i = 1; i < 10; i++ {
+		feSquare(&t2, &t2)
+	}
+	feMul(&t2, &t2, &t1)
+	feSquare(&t3, &t2)
+	for i = 1; i < 20; i++ {
+		feSquare(&t3, &t3)
+	}
+	feMul(&t2, &t3, &t2)
+	feSquare(&t2, &t2)
+	for i = 1; i < 10; i++ {
+		feSquare(&t2, &t2)
+	}
+	feMul(&t1, &t2, &t1)
+	feSquare(&t2, &t1)
+	for i = 1; i < 50; i++ {
+		feSquare(&t2, &t2)
+	}
+	feMul(&t2, &t2, &t1)
+	feSquare(&t3, &t2)
+	for i = 1; i < 100; i++ {
+		feSquare(&t3, &t3)
+	}
+	feMul(&t2, &t3, &t2)
+	feSquare(&t2, &t2)
+	for i = 1; i < 50; i++ {
+		feSquare(&t2, &t2)
+	}
+	feMul(&t1, &t2, &t1)
+	feSquare(&t1, &t1)
+	for i = 1; i < 5; i++ {
+		feSquare(&t1, &t1)
+	}
+	feMul(out, &t1, &t0)
+}
+
+func scalarMult(out, in, base *[32]byte) {
+	var e [32]byte
+
+	copy(e[:], in[:])
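+	// Clamp the scalar: clear the low 3 bits and the top bit, and set bit
+	// 254, as required for curve25519 scalars (see RFC 7748, section 5).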
+	e[0] &= 248
+	e[31] &= 127
+	e[31] |= 64
+
+	var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
+	feFromBytes(&x1, base)
+	feOne(&x2)
+	feCopy(&x3, &x1)
+	feOne(&z3)
+
+	swap := int32(0)
+	for pos := 254; pos >= 0; pos-- {
+		b := e[pos/8] >> uint(pos&7)
+		b &= 1
+		swap ^= int32(b)
+		feCSwap(&x2, &x3, swap)
+		feCSwap(&z2, &z3, swap)
+		swap = int32(b)
+
+		feSub(&tmp0, &x3, &z3)
+		feSub(&tmp1, &x2, &z2)
+		feAdd(&x2, &x2, &z2)
+		feAdd(&z2, &x3, &z3)
+		feMul(&z3, &tmp0, &x2)
+		feMul(&z2, &z2, &tmp1)
+		feSquare(&tmp0, &tmp1)
+		feSquare(&tmp1, &x2)
+		feAdd(&x3, &z3, &z2)
+		feSub(&z2, &z3, &z2)
+		feMul(&x2, &tmp1, &tmp0)
+		feSub(&tmp1, &tmp1, &tmp0)
+		feSquare(&z2, &z2)
+		feMul121666(&z3, &tmp1)
+		feSquare(&x3, &x3)
+		feAdd(&tmp0, &tmp0, &z3)
+		feMul(&z3, &x1, &z2)
+		feMul(&z2, &tmp1, &tmp0)
+	}
+
+	feCSwap(&x2, &x3, swap)
+	feCSwap(&z2, &z3, swap)
+
+	feInvert(&z2, &z2)
+	feMul(&x2, &x2, &z2)
+	feToBytes(out, &x2)
+}

+ 29 - 0
psiphon/common/tls/crypto/curve25519/curve25519_test.go

@@ -0,0 +1,29 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package curve25519
+
+import (
+	"fmt"
+	"testing"
+)
+
+const expectedHex = "89161fde887b2b53de549af483940106ecc114d6982daa98256de23bdf77661a"
+
+func TestBaseScalarMult(t *testing.T) {
+	var a, b [32]byte
+	in := &a
+	out := &b
+	a[0] = 1
+
+	for i := 0; i < 200; i++ {
+		ScalarBaseMult(out, in)
+		in, out = out, in
+	}
+
+	result := fmt.Sprintf("%x", in[:])
+	if result != expectedHex {
+		t.Errorf("incorrect result: got %s, want %s", result, expectedHex)
+	}
+}

+ 23 - 0
psiphon/common/tls/crypto/curve25519/doc.go

@@ -0,0 +1,23 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package curve25519 provides an implementation of scalar multiplication on
+// the elliptic curve known as curve25519. See http://cr.yp.to/ecdh.html
+package curve25519 // import "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tls/crypto/curve25519"
+
+// basePoint is the x coordinate of the generator of the curve.
+var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
+// ScalarMult sets dst to the product in*base where dst and base are the x
+// coordinates of group points and all values are in little-endian form.
+func ScalarMult(dst, in, base *[32]byte) {
+	scalarMult(dst, in, base)
+}
+
+// ScalarBaseMult sets dst to the product in*base where dst and base are the x
+// coordinates of group points, base is the standard generator and all values
+// are in little-endian form.
+func ScalarBaseMult(dst, in *[32]byte) {
+	ScalarMult(dst, in, &basePoint)
+}
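
A minimal sketch of the Diffie-Hellman exchange these functions support, assuming the vendored import path from this commit; scalarMult clamps the private scalar internally, so uniformly random private keys are acceptable (error handling elided):

	package main

	import (
		"crypto/rand"
		"fmt"

		"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tls/crypto/curve25519"
	)

	func main() {
		var alicePriv, alicePub, bobPriv, bobPub [32]byte
		rand.Read(alicePriv[:])
		rand.Read(bobPriv[:])

		// Each side derives its public key from the standard generator.
		curve25519.ScalarBaseMult(&alicePub, &alicePriv)
		curve25519.ScalarBaseMult(&bobPub, &bobPriv)

		// Both sides compute the same shared secret.
		var aliceShared, bobShared [32]byte
		curve25519.ScalarMult(&aliceShared, &alicePriv, &bobPub)
		curve25519.ScalarMult(&bobShared, &bobPriv, &alicePub)

		fmt.Println(aliceShared == bobShared) // true
	}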

+ 73 - 0
psiphon/common/tls/crypto/curve25519/freeze_amd64.s

@@ -0,0 +1,73 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+#include "const_amd64.h"
+
+// func freeze(inout *[5]uint64)
+TEXT ·freeze(SB),7,$0-8
+	MOVQ inout+0(FP), DI
+
+	MOVQ 0(DI),SI
+	MOVQ 8(DI),DX
+	MOVQ 16(DI),CX
+	MOVQ 24(DI),R8
+	MOVQ 32(DI),R9
+	MOVQ $REDMASK51,AX
+	MOVQ AX,R10
+	SUBQ $18,R10
+	MOVQ $3,R11
+REDUCELOOP:
+	MOVQ SI,R12
+	SHRQ $51,R12
+	ANDQ AX,SI
+	ADDQ R12,DX
+	MOVQ DX,R12
+	SHRQ $51,R12
+	ANDQ AX,DX
+	ADDQ R12,CX
+	MOVQ CX,R12
+	SHRQ $51,R12
+	ANDQ AX,CX
+	ADDQ R12,R8
+	MOVQ R8,R12
+	SHRQ $51,R12
+	ANDQ AX,R8
+	ADDQ R12,R9
+	MOVQ R9,R12
+	SHRQ $51,R12
+	ANDQ AX,R9
+	IMUL3Q $19,R12,R12
+	ADDQ R12,SI
+	SUBQ $1,R11
+	JA REDUCELOOP
+	MOVQ $1,R12
+	CMPQ R10,SI
+	CMOVQLT R11,R12
+	CMPQ AX,DX
+	CMOVQNE R11,R12
+	CMPQ AX,CX
+	CMOVQNE R11,R12
+	CMPQ AX,R8
+	CMOVQNE R11,R12
+	CMPQ AX,R9
+	CMOVQNE R11,R12
+	NEGQ R12
+	ANDQ R12,AX
+	ANDQ R12,R10
+	SUBQ R10,SI
+	SUBQ AX,DX
+	SUBQ AX,CX
+	SUBQ AX,R8
+	SUBQ AX,R9
+	MOVQ SI,0(DI)
+	MOVQ DX,8(DI)
+	MOVQ CX,16(DI)
+	MOVQ R8,24(DI)
+	MOVQ R9,32(DI)
+	RET

+ 1377 - 0
psiphon/common/tls/crypto/curve25519/ladderstep_amd64.s

@@ -0,0 +1,1377 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+#include "const_amd64.h"
+
+// func ladderstep(inout *[5][5]uint64)
+TEXT ·ladderstep(SB),0,$296-8
+	MOVQ inout+0(FP),DI
+
+	MOVQ 40(DI),SI
+	MOVQ 48(DI),DX
+	MOVQ 56(DI),CX
+	MOVQ 64(DI),R8
+	MOVQ 72(DI),R9
+	MOVQ SI,AX
+	MOVQ DX,R10
+	MOVQ CX,R11
+	MOVQ R8,R12
+	MOVQ R9,R13
+	ADDQ ·_2P0(SB),AX
+	ADDQ ·_2P1234(SB),R10
+	ADDQ ·_2P1234(SB),R11
+	ADDQ ·_2P1234(SB),R12
+	ADDQ ·_2P1234(SB),R13
+	ADDQ 80(DI),SI
+	ADDQ 88(DI),DX
+	ADDQ 96(DI),CX
+	ADDQ 104(DI),R8
+	ADDQ 112(DI),R9
+	SUBQ 80(DI),AX
+	SUBQ 88(DI),R10
+	SUBQ 96(DI),R11
+	SUBQ 104(DI),R12
+	SUBQ 112(DI),R13
+	MOVQ SI,0(SP)
+	MOVQ DX,8(SP)
+	MOVQ CX,16(SP)
+	MOVQ R8,24(SP)
+	MOVQ R9,32(SP)
+	MOVQ AX,40(SP)
+	MOVQ R10,48(SP)
+	MOVQ R11,56(SP)
+	MOVQ R12,64(SP)
+	MOVQ R13,72(SP)
+	MOVQ 40(SP),AX
+	MULQ 40(SP)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 40(SP),AX
+	SHLQ $1,AX
+	MULQ 48(SP)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 40(SP),AX
+	SHLQ $1,AX
+	MULQ 56(SP)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 40(SP),AX
+	SHLQ $1,AX
+	MULQ 64(SP)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 40(SP),AX
+	SHLQ $1,AX
+	MULQ 72(SP)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 48(SP),AX
+	MULQ 48(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 48(SP),AX
+	SHLQ $1,AX
+	MULQ 56(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 48(SP),AX
+	SHLQ $1,AX
+	MULQ 64(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 48(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 72(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 56(SP),AX
+	MULQ 56(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 56(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 64(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 56(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 72(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 64(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 64(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 64(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 72(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 72(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 72(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	ANDQ DX,SI
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ADDQ R10,CX
+	ANDQ DX,R8
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ADDQ R12,CX
+	ANDQ DX,R9
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ADDQ R14,CX
+	ANDQ DX,AX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,80(SP)
+	MOVQ R8,88(SP)
+	MOVQ R9,96(SP)
+	MOVQ AX,104(SP)
+	MOVQ R10,112(SP)
+	MOVQ 0(SP),AX
+	MULQ 0(SP)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 0(SP),AX
+	SHLQ $1,AX
+	MULQ 8(SP)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 0(SP),AX
+	SHLQ $1,AX
+	MULQ 16(SP)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 0(SP),AX
+	SHLQ $1,AX
+	MULQ 24(SP)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 0(SP),AX
+	SHLQ $1,AX
+	MULQ 32(SP)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 8(SP),AX
+	MULQ 8(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 8(SP),AX
+	SHLQ $1,AX
+	MULQ 16(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 8(SP),AX
+	SHLQ $1,AX
+	MULQ 24(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 8(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 32(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 16(SP),AX
+	MULQ 16(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 16(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 24(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 16(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 32(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 24(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 24(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 24(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 32(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 32(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 32(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	ANDQ DX,SI
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ADDQ R10,CX
+	ANDQ DX,R8
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ADDQ R12,CX
+	ANDQ DX,R9
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ADDQ R14,CX
+	ANDQ DX,AX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,120(SP)
+	MOVQ R8,128(SP)
+	MOVQ R9,136(SP)
+	MOVQ AX,144(SP)
+	MOVQ R10,152(SP)
+	MOVQ SI,SI
+	MOVQ R8,DX
+	MOVQ R9,CX
+	MOVQ AX,R8
+	MOVQ R10,R9
+	ADDQ ·_2P0(SB),SI
+	ADDQ ·_2P1234(SB),DX
+	ADDQ ·_2P1234(SB),CX
+	ADDQ ·_2P1234(SB),R8
+	ADDQ ·_2P1234(SB),R9
+	SUBQ 80(SP),SI
+	SUBQ 88(SP),DX
+	SUBQ 96(SP),CX
+	SUBQ 104(SP),R8
+	SUBQ 112(SP),R9
+	MOVQ SI,160(SP)
+	MOVQ DX,168(SP)
+	MOVQ CX,176(SP)
+	MOVQ R8,184(SP)
+	MOVQ R9,192(SP)
+	MOVQ 120(DI),SI
+	MOVQ 128(DI),DX
+	MOVQ 136(DI),CX
+	MOVQ 144(DI),R8
+	MOVQ 152(DI),R9
+	MOVQ SI,AX
+	MOVQ DX,R10
+	MOVQ CX,R11
+	MOVQ R8,R12
+	MOVQ R9,R13
+	ADDQ ·_2P0(SB),AX
+	ADDQ ·_2P1234(SB),R10
+	ADDQ ·_2P1234(SB),R11
+	ADDQ ·_2P1234(SB),R12
+	ADDQ ·_2P1234(SB),R13
+	ADDQ 160(DI),SI
+	ADDQ 168(DI),DX
+	ADDQ 176(DI),CX
+	ADDQ 184(DI),R8
+	ADDQ 192(DI),R9
+	SUBQ 160(DI),AX
+	SUBQ 168(DI),R10
+	SUBQ 176(DI),R11
+	SUBQ 184(DI),R12
+	SUBQ 192(DI),R13
+	MOVQ SI,200(SP)
+	MOVQ DX,208(SP)
+	MOVQ CX,216(SP)
+	MOVQ R8,224(SP)
+	MOVQ R9,232(SP)
+	MOVQ AX,240(SP)
+	MOVQ R10,248(SP)
+	MOVQ R11,256(SP)
+	MOVQ R12,264(SP)
+	MOVQ R13,272(SP)
+	MOVQ 224(SP),SI
+	IMUL3Q $19,SI,AX
+	MOVQ AX,280(SP)
+	MULQ 56(SP)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 232(SP),DX
+	IMUL3Q $19,DX,AX
+	MOVQ AX,288(SP)
+	MULQ 48(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 200(SP),AX
+	MULQ 40(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 200(SP),AX
+	MULQ 48(SP)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 200(SP),AX
+	MULQ 56(SP)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 200(SP),AX
+	MULQ 64(SP)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 200(SP),AX
+	MULQ 72(SP)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 208(SP),AX
+	MULQ 40(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 208(SP),AX
+	MULQ 48(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 208(SP),AX
+	MULQ 56(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 208(SP),AX
+	MULQ 64(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 208(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 72(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 216(SP),AX
+	MULQ 40(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 216(SP),AX
+	MULQ 48(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 216(SP),AX
+	MULQ 56(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 216(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 64(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 216(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 72(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 224(SP),AX
+	MULQ 40(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 224(SP),AX
+	MULQ 48(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 280(SP),AX
+	MULQ 64(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 280(SP),AX
+	MULQ 72(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 232(SP),AX
+	MULQ 40(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 288(SP),AX
+	MULQ 56(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 288(SP),AX
+	MULQ 64(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 288(SP),AX
+	MULQ 72(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ANDQ DX,SI
+	ADDQ R10,CX
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ANDQ DX,R8
+	ADDQ R12,CX
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ANDQ DX,R9
+	ADDQ R14,CX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	ANDQ DX,AX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,40(SP)
+	MOVQ R8,48(SP)
+	MOVQ R9,56(SP)
+	MOVQ AX,64(SP)
+	MOVQ R10,72(SP)
+	MOVQ 264(SP),SI
+	IMUL3Q $19,SI,AX
+	MOVQ AX,200(SP)
+	MULQ 16(SP)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 272(SP),DX
+	IMUL3Q $19,DX,AX
+	MOVQ AX,208(SP)
+	MULQ 8(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 240(SP),AX
+	MULQ 0(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 240(SP),AX
+	MULQ 8(SP)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 240(SP),AX
+	MULQ 16(SP)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 240(SP),AX
+	MULQ 24(SP)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 240(SP),AX
+	MULQ 32(SP)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 248(SP),AX
+	MULQ 0(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 248(SP),AX
+	MULQ 8(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 248(SP),AX
+	MULQ 16(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 248(SP),AX
+	MULQ 24(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 248(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 32(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 256(SP),AX
+	MULQ 0(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 256(SP),AX
+	MULQ 8(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 256(SP),AX
+	MULQ 16(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 256(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 24(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 256(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 32(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 264(SP),AX
+	MULQ 0(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 264(SP),AX
+	MULQ 8(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 200(SP),AX
+	MULQ 24(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 200(SP),AX
+	MULQ 32(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 272(SP),AX
+	MULQ 0(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 208(SP),AX
+	MULQ 16(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 208(SP),AX
+	MULQ 24(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 208(SP),AX
+	MULQ 32(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ANDQ DX,SI
+	ADDQ R10,CX
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ANDQ DX,R8
+	ADDQ R12,CX
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ANDQ DX,R9
+	ADDQ R14,CX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	ANDQ DX,AX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,DX
+	MOVQ R8,CX
+	MOVQ R9,R11
+	MOVQ AX,R12
+	MOVQ R10,R13
+	ADDQ ·_2P0(SB),DX
+	ADDQ ·_2P1234(SB),CX
+	ADDQ ·_2P1234(SB),R11
+	ADDQ ·_2P1234(SB),R12
+	ADDQ ·_2P1234(SB),R13
+	ADDQ 40(SP),SI
+	ADDQ 48(SP),R8
+	ADDQ 56(SP),R9
+	ADDQ 64(SP),AX
+	ADDQ 72(SP),R10
+	SUBQ 40(SP),DX
+	SUBQ 48(SP),CX
+	SUBQ 56(SP),R11
+	SUBQ 64(SP),R12
+	SUBQ 72(SP),R13
+	MOVQ SI,120(DI)
+	MOVQ R8,128(DI)
+	MOVQ R9,136(DI)
+	MOVQ AX,144(DI)
+	MOVQ R10,152(DI)
+	MOVQ DX,160(DI)
+	MOVQ CX,168(DI)
+	MOVQ R11,176(DI)
+	MOVQ R12,184(DI)
+	MOVQ R13,192(DI)
+	MOVQ 120(DI),AX
+	MULQ 120(DI)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 120(DI),AX
+	SHLQ $1,AX
+	MULQ 128(DI)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 120(DI),AX
+	SHLQ $1,AX
+	MULQ 136(DI)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 120(DI),AX
+	SHLQ $1,AX
+	MULQ 144(DI)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 120(DI),AX
+	SHLQ $1,AX
+	MULQ 152(DI)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 128(DI),AX
+	MULQ 128(DI)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 128(DI),AX
+	SHLQ $1,AX
+	MULQ 136(DI)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 128(DI),AX
+	SHLQ $1,AX
+	MULQ 144(DI)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 128(DI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 152(DI)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 136(DI),AX
+	MULQ 136(DI)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 136(DI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 144(DI)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 136(DI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 152(DI)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 144(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 144(DI)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 144(DI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 152(DI)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 152(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 152(DI)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	ANDQ DX,SI
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ADDQ R10,CX
+	ANDQ DX,R8
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ADDQ R12,CX
+	ANDQ DX,R9
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ADDQ R14,CX
+	ANDQ DX,AX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,120(DI)
+	MOVQ R8,128(DI)
+	MOVQ R9,136(DI)
+	MOVQ AX,144(DI)
+	MOVQ R10,152(DI)
+	MOVQ 160(DI),AX
+	MULQ 160(DI)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 160(DI),AX
+	SHLQ $1,AX
+	MULQ 168(DI)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 160(DI),AX
+	SHLQ $1,AX
+	MULQ 176(DI)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 160(DI),AX
+	SHLQ $1,AX
+	MULQ 184(DI)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 160(DI),AX
+	SHLQ $1,AX
+	MULQ 192(DI)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 168(DI),AX
+	MULQ 168(DI)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 168(DI),AX
+	SHLQ $1,AX
+	MULQ 176(DI)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 168(DI),AX
+	SHLQ $1,AX
+	MULQ 184(DI)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 168(DI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 192(DI)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 176(DI),AX
+	MULQ 176(DI)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 176(DI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 184(DI)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 176(DI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 192(DI)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 184(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 184(DI)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 184(DI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 192(DI)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 192(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 192(DI)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	ANDQ DX,SI
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ADDQ R10,CX
+	ANDQ DX,R8
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ADDQ R12,CX
+	ANDQ DX,R9
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ADDQ R14,CX
+	ANDQ DX,AX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,160(DI)
+	MOVQ R8,168(DI)
+	MOVQ R9,176(DI)
+	MOVQ AX,184(DI)
+	MOVQ R10,192(DI)
+	MOVQ 184(DI),SI
+	IMUL3Q $19,SI,AX
+	MOVQ AX,0(SP)
+	MULQ 16(DI)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 192(DI),DX
+	IMUL3Q $19,DX,AX
+	MOVQ AX,8(SP)
+	MULQ 8(DI)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 160(DI),AX
+	MULQ 0(DI)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 160(DI),AX
+	MULQ 8(DI)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 160(DI),AX
+	MULQ 16(DI)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 160(DI),AX
+	MULQ 24(DI)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 160(DI),AX
+	MULQ 32(DI)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 168(DI),AX
+	MULQ 0(DI)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 168(DI),AX
+	MULQ 8(DI)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 168(DI),AX
+	MULQ 16(DI)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 168(DI),AX
+	MULQ 24(DI)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 168(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 32(DI)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 176(DI),AX
+	MULQ 0(DI)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 176(DI),AX
+	MULQ 8(DI)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 176(DI),AX
+	MULQ 16(DI)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 176(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 24(DI)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 176(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 32(DI)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 184(DI),AX
+	MULQ 0(DI)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 184(DI),AX
+	MULQ 8(DI)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 0(SP),AX
+	MULQ 24(DI)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 0(SP),AX
+	MULQ 32(DI)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 192(DI),AX
+	MULQ 0(DI)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 8(SP),AX
+	MULQ 16(DI)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 8(SP),AX
+	MULQ 24(DI)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 8(SP),AX
+	MULQ 32(DI)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ANDQ DX,SI
+	ADDQ R10,CX
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ANDQ DX,R8
+	ADDQ R12,CX
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ANDQ DX,R9
+	ADDQ R14,CX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	ANDQ DX,AX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,160(DI)
+	MOVQ R8,168(DI)
+	MOVQ R9,176(DI)
+	MOVQ AX,184(DI)
+	MOVQ R10,192(DI)
+	MOVQ 144(SP),SI
+	IMUL3Q $19,SI,AX
+	MOVQ AX,0(SP)
+	MULQ 96(SP)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 152(SP),DX
+	IMUL3Q $19,DX,AX
+	MOVQ AX,8(SP)
+	MULQ 88(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 120(SP),AX
+	MULQ 80(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 120(SP),AX
+	MULQ 88(SP)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 120(SP),AX
+	MULQ 96(SP)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 120(SP),AX
+	MULQ 104(SP)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 120(SP),AX
+	MULQ 112(SP)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 128(SP),AX
+	MULQ 80(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 128(SP),AX
+	MULQ 88(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 128(SP),AX
+	MULQ 96(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 128(SP),AX
+	MULQ 104(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 128(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 112(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 136(SP),AX
+	MULQ 80(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 136(SP),AX
+	MULQ 88(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 136(SP),AX
+	MULQ 96(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 136(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 104(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 136(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 112(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 144(SP),AX
+	MULQ 80(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 144(SP),AX
+	MULQ 88(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 0(SP),AX
+	MULQ 104(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 0(SP),AX
+	MULQ 112(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 152(SP),AX
+	MULQ 80(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 8(SP),AX
+	MULQ 96(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 8(SP),AX
+	MULQ 104(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 8(SP),AX
+	MULQ 112(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ANDQ DX,SI
+	ADDQ R10,CX
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ANDQ DX,R8
+	ADDQ R12,CX
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ANDQ DX,R9
+	ADDQ R14,CX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	ANDQ DX,AX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,40(DI)
+	MOVQ R8,48(DI)
+	MOVQ R9,56(DI)
+	MOVQ AX,64(DI)
+	MOVQ R10,72(DI)
+	MOVQ 160(SP),AX
+	MULQ ·_121666_213(SB)
+	SHRQ $13,AX
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 168(SP),AX
+	MULQ ·_121666_213(SB)
+	SHRQ $13,AX
+	ADDQ AX,CX
+	MOVQ DX,R8
+	MOVQ 176(SP),AX
+	MULQ ·_121666_213(SB)
+	SHRQ $13,AX
+	ADDQ AX,R8
+	MOVQ DX,R9
+	MOVQ 184(SP),AX
+	MULQ ·_121666_213(SB)
+	SHRQ $13,AX
+	ADDQ AX,R9
+	MOVQ DX,R10
+	MOVQ 192(SP),AX
+	MULQ ·_121666_213(SB)
+	SHRQ $13,AX
+	ADDQ AX,R10
+	IMUL3Q $19,DX,DX
+	ADDQ DX,SI
+	ADDQ 80(SP),SI
+	ADDQ 88(SP),CX
+	ADDQ 96(SP),R8
+	ADDQ 104(SP),R9
+	ADDQ 112(SP),R10
+	MOVQ SI,80(DI)
+	MOVQ CX,88(DI)
+	MOVQ R8,96(DI)
+	MOVQ R9,104(DI)
+	MOVQ R10,112(DI)
+	MOVQ 104(DI),SI
+	IMUL3Q $19,SI,AX
+	MOVQ AX,0(SP)
+	MULQ 176(SP)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 112(DI),DX
+	IMUL3Q $19,DX,AX
+	MOVQ AX,8(SP)
+	MULQ 168(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 80(DI),AX
+	MULQ 160(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 80(DI),AX
+	MULQ 168(SP)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 80(DI),AX
+	MULQ 176(SP)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 80(DI),AX
+	MULQ 184(SP)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 80(DI),AX
+	MULQ 192(SP)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 88(DI),AX
+	MULQ 160(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 88(DI),AX
+	MULQ 168(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 88(DI),AX
+	MULQ 176(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 88(DI),AX
+	MULQ 184(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 88(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 192(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 96(DI),AX
+	MULQ 160(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 96(DI),AX
+	MULQ 168(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 96(DI),AX
+	MULQ 176(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 96(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 184(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 96(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 192(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 104(DI),AX
+	MULQ 160(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 104(DI),AX
+	MULQ 168(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 0(SP),AX
+	MULQ 184(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 0(SP),AX
+	MULQ 192(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 112(DI),AX
+	MULQ 160(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 8(SP),AX
+	MULQ 176(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 8(SP),AX
+	MULQ 184(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 8(SP),AX
+	MULQ 192(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ANDQ DX,SI
+	ADDQ R10,CX
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ANDQ DX,R8
+	ADDQ R12,CX
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ANDQ DX,R9
+	ADDQ R14,CX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	ANDQ DX,AX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,80(DI)
+	MOVQ R8,88(DI)
+	MOVQ R9,96(DI)
+	MOVQ AX,104(DI)
+	MOVQ R10,112(DI)
+	RET
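
The routine above repeats one reduction idiom throughout: field elements of GF(2^255 - 19) are held as five 51-bit limbs, REDMASK51 is 2^51 - 1, the SHLQ $13 double shifts split each 128-bit limb product into a 51-bit limb and a carry, and a carry off the top limb wraps around multiplied by 19 because 2^255 ≡ 19 (mod 2^255 - 19). A minimal Go sketch of the same carry chain, with carry as an illustrative helper name that is not part of this commit:

package main

import "fmt"

const mask51 = (1 << 51) - 1 // REDMASK51

// carry propagates limb overflow for a field element of GF(2^255 - 19)
// held as five 51-bit limbs. Bits that overflow the top limb wrap around
// into the bottom limb multiplied by 19, since 2^255 ≡ 19 (mod p).
func carry(h *[5]uint64) {
	for i := 0; i < 4; i++ {
		h[i+1] += h[i] >> 51
		h[i] &= mask51
	}
	// The final wrap can leave h[0] slightly above 51 bits; the assembly
	// folds that in with one more partial carry, omitted in this sketch.
	h[0] += 19 * (h[4] >> 51)
	h[4] &= mask51
}

func main() {
	h := [5]uint64{0, 0, 0, 0, 1 << 51} // the value 2^255
	carry(&h)
	fmt.Println(h) // [19 0 0 0 0]: 2^255 reduces to 19
}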

+ 240 - 0
psiphon/common/tls/crypto/curve25519/mont25519_amd64.go

@@ -0,0 +1,240 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+package curve25519
+
+// These functions are implemented in the .s files. The names of the functions
+// in the rest of the file are also taken from the SUPERCOP sources to help
+// people following along.
+
+//go:noescape
+func cswap(inout *[5]uint64, v uint64)
+
+//go:noescape
+func ladderstep(inout *[5][5]uint64)
+
+//go:noescape
+func freeze(inout *[5]uint64)
+
+//go:noescape
+func mul(dest, a, b *[5]uint64)
+
+//go:noescape
+func square(out, in *[5]uint64)
+
+// mladder uses a Montgomery ladder to calculate (xr/zr) *= s.
+func mladder(xr, zr *[5]uint64, s *[32]byte) {
+	var work [5][5]uint64
+
+	work[0] = *xr
+	setint(&work[1], 1)
+	setint(&work[2], 0)
+	work[3] = *xr
+	setint(&work[4], 1)
+
+	j := uint(6)
+	var prevbit byte
+
+	for i := 31; i >= 0; i-- {
+		for j < 8 {
+			bit := ((*s)[i] >> j) & 1
+			swap := bit ^ prevbit
+			prevbit = bit
+			cswap(&work[1], uint64(swap))
+			ladderstep(&work)
+			j--
+		}
+		j = 7
+	}
+
+	*xr = work[1]
+	*zr = work[2]
+}
+
+func scalarMult(out, in, base *[32]byte) {
+	var e [32]byte
+	copy(e[:], (*in)[:])
+	e[0] &= 248
+	e[31] &= 127
+	e[31] |= 64
+
+	var t, z [5]uint64
+	unpack(&t, base)
+	mladder(&t, &z, &e)
+	invert(&z, &z)
+	mul(&t, &t, &z)
+	pack(out, &t)
+}
+
+func setint(r *[5]uint64, v uint64) {
+	r[0] = v
+	r[1] = 0
+	r[2] = 0
+	r[3] = 0
+	r[4] = 0
+}
+
+// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian
+// order.
+func unpack(r *[5]uint64, x *[32]byte) {
+	r[0] = uint64(x[0]) |
+		uint64(x[1])<<8 |
+		uint64(x[2])<<16 |
+		uint64(x[3])<<24 |
+		uint64(x[4])<<32 |
+		uint64(x[5])<<40 |
+		uint64(x[6]&7)<<48
+
+	r[1] = uint64(x[6])>>3 |
+		uint64(x[7])<<5 |
+		uint64(x[8])<<13 |
+		uint64(x[9])<<21 |
+		uint64(x[10])<<29 |
+		uint64(x[11])<<37 |
+		uint64(x[12]&63)<<45
+
+	r[2] = uint64(x[12])>>6 |
+		uint64(x[13])<<2 |
+		uint64(x[14])<<10 |
+		uint64(x[15])<<18 |
+		uint64(x[16])<<26 |
+		uint64(x[17])<<34 |
+		uint64(x[18])<<42 |
+		uint64(x[19]&1)<<50
+
+	r[3] = uint64(x[19])>>1 |
+		uint64(x[20])<<7 |
+		uint64(x[21])<<15 |
+		uint64(x[22])<<23 |
+		uint64(x[23])<<31 |
+		uint64(x[24])<<39 |
+		uint64(x[25]&15)<<47
+
+	r[4] = uint64(x[25])>>4 |
+		uint64(x[26])<<4 |
+		uint64(x[27])<<12 |
+		uint64(x[28])<<20 |
+		uint64(x[29])<<28 |
+		uint64(x[30])<<36 |
+		uint64(x[31]&127)<<44
+}
+
+// pack sets out = x where out is the usual, little-endian form of the 5,
+// 51-bit limbs in x.
+func pack(out *[32]byte, x *[5]uint64) {
+	t := *x
+	freeze(&t)
+
+	out[0] = byte(t[0])
+	out[1] = byte(t[0] >> 8)
+	out[2] = byte(t[0] >> 16)
+	out[3] = byte(t[0] >> 24)
+	out[4] = byte(t[0] >> 32)
+	out[5] = byte(t[0] >> 40)
+	out[6] = byte(t[0] >> 48)
+
+	out[6] ^= byte(t[1]<<3) & 0xf8
+	out[7] = byte(t[1] >> 5)
+	out[8] = byte(t[1] >> 13)
+	out[9] = byte(t[1] >> 21)
+	out[10] = byte(t[1] >> 29)
+	out[11] = byte(t[1] >> 37)
+	out[12] = byte(t[1] >> 45)
+
+	out[12] ^= byte(t[2]<<6) & 0xc0
+	out[13] = byte(t[2] >> 2)
+	out[14] = byte(t[2] >> 10)
+	out[15] = byte(t[2] >> 18)
+	out[16] = byte(t[2] >> 26)
+	out[17] = byte(t[2] >> 34)
+	out[18] = byte(t[2] >> 42)
+	out[19] = byte(t[2] >> 50)
+
+	out[19] ^= byte(t[3]<<1) & 0xfe
+	out[20] = byte(t[3] >> 7)
+	out[21] = byte(t[3] >> 15)
+	out[22] = byte(t[3] >> 23)
+	out[23] = byte(t[3] >> 31)
+	out[24] = byte(t[3] >> 39)
+	out[25] = byte(t[3] >> 47)
+
+	out[25] ^= byte(t[4]<<4) & 0xf0
+	out[26] = byte(t[4] >> 4)
+	out[27] = byte(t[4] >> 12)
+	out[28] = byte(t[4] >> 20)
+	out[29] = byte(t[4] >> 28)
+	out[30] = byte(t[4] >> 36)
+	out[31] = byte(t[4] >> 44)
+}
+
+// invert calculates r = x^-1 mod p using Fermat's little theorem.
+func invert(r *[5]uint64, x *[5]uint64) {
+	var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64
+
+	square(&z2, x)        /* 2 */
+	square(&t, &z2)       /* 4 */
+	square(&t, &t)        /* 8 */
+	mul(&z9, &t, x)       /* 9 */
+	mul(&z11, &z9, &z2)   /* 11 */
+	square(&t, &z11)      /* 22 */
+	mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */
+
+	square(&t, &z2_5_0)      /* 2^6 - 2^1 */
+	square(&t, &t)           /* 2^10 - 2^5 */
+		square(&t, &t)
+	}
+	mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */
+
+	square(&t, &z2_10_0)      /* 2^11 - 2^1 */
+	for i := 1; i < 10; i++ { /* 2^20 - 2^10 */
+		square(&t, &t)
+	}
+	mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */
+
+	square(&t, &z2_20_0)      /* 2^21 - 2^1 */
+	for i := 1; i < 20; i++ { /* 2^40 - 2^20 */
+		square(&t, &t)
+	}
+	mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */
+
+	square(&t, &t)            /* 2^41 - 2^1 */
+	for i := 1; i < 10; i++ { /* 2^50 - 2^10 */
+		square(&t, &t)
+	}
+	mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */
+
+	square(&t, &z2_50_0)      /* 2^51 - 2^1 */
+	for i := 1; i < 50; i++ { /* 2^100 - 2^50 */
+		square(&t, &t)
+	}
+	mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */
+
+	square(&t, &z2_100_0)      /* 2^101 - 2^1 */
+	for i := 1; i < 100; i++ { /* 2^200 - 2^100 */
+		square(&t, &t)
+	}
+	mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */
+
+	square(&t, &t)            /* 2^201 - 2^1 */
+	for i := 1; i < 50; i++ { /* 2^250 - 2^50 */
+		square(&t, &t)
+	}
+	mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */
+
+	square(&t, &t) /* 2^251 - 2^1 */
+	square(&t, &t) /* 2^252 - 2^2 */
+	square(&t, &t) /* 2^253 - 2^3 */
+
+	square(&t, &t) /* 2^254 - 2^4 */
+
+	square(&t, &t)   /* 2^255 - 2^5 */
+	mul(r, &t, &z11) /* 2^255 - 21 */
+}
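
The addition chain in invert builds the exponent 2^255 - 21, which is p - 2 for p = 2^255 - 19, so the final mul produces x^(p-2) = x^-1 by Fermat's little theorem. A standalone math/big cross-check of that exponent, not part of the vendored file:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2^255 - 19, so p - 2 = 2^255 - 21, the exponent built by the
	// addition chain in invert.
	p := new(big.Int).Lsh(big.NewInt(1), 255)
	p.Sub(p, big.NewInt(19))
	e := new(big.Int).Sub(p, big.NewInt(2))

	x := big.NewInt(9) // any nonzero field element
	inv := new(big.Int).Exp(x, e, p)

	check := new(big.Int).Mul(x, inv)
	check.Mod(check, p)
	fmt.Println(check) // 1, so x^(p-2) is indeed x^-1 mod p
}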

+ 169 - 0
psiphon/common/tls/crypto/curve25519/mul_amd64.s

@@ -0,0 +1,169 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+#include "const_amd64.h"
+
+// func mul(dest, a, b *[5]uint64)
+TEXT ·mul(SB),0,$16-24
+	MOVQ dest+0(FP), DI
+	MOVQ a+8(FP), SI
+	MOVQ b+16(FP), DX
+
+	MOVQ DX,CX
+	MOVQ 24(SI),DX
+	IMUL3Q $19,DX,AX
+	MOVQ AX,0(SP)
+	MULQ 16(CX)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 32(SI),DX
+	IMUL3Q $19,DX,AX
+	MOVQ AX,8(SP)
+	MULQ 8(CX)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 0(SI),AX
+	MULQ 0(CX)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 0(SI),AX
+	MULQ 8(CX)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 0(SI),AX
+	MULQ 16(CX)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 0(SI),AX
+	MULQ 24(CX)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 0(SI),AX
+	MULQ 32(CX)
+	MOVQ AX,BX
+	MOVQ DX,BP
+	MOVQ 8(SI),AX
+	MULQ 0(CX)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 8(SI),AX
+	MULQ 8(CX)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 8(SI),AX
+	MULQ 16(CX)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 8(SI),AX
+	MULQ 24(CX)
+	ADDQ AX,BX
+	ADCQ DX,BP
+	MOVQ 8(SI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 32(CX)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 16(SI),AX
+	MULQ 0(CX)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 16(SI),AX
+	MULQ 8(CX)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 16(SI),AX
+	MULQ 16(CX)
+	ADDQ AX,BX
+	ADCQ DX,BP
+	MOVQ 16(SI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 24(CX)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 16(SI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 32(CX)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 24(SI),AX
+	MULQ 0(CX)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 24(SI),AX
+	MULQ 8(CX)
+	ADDQ AX,BX
+	ADCQ DX,BP
+	MOVQ 0(SP),AX
+	MULQ 24(CX)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 0(SP),AX
+	MULQ 32(CX)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 32(SI),AX
+	MULQ 0(CX)
+	ADDQ AX,BX
+	ADCQ DX,BP
+	MOVQ 8(SP),AX
+	MULQ 16(CX)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 8(SP),AX
+	MULQ 24(CX)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 8(SP),AX
+	MULQ 32(CX)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ $REDMASK51,SI
+	SHLQ $13,R9:R8
+	ANDQ SI,R8
+	SHLQ $13,R11:R10
+	ANDQ SI,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ SI,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ SI,R14
+	ADDQ R13,R14
+	SHLQ $13,BP:BX
+	ANDQ SI,BX
+	ADDQ R15,BX
+	IMUL3Q $19,BP,DX
+	ADDQ DX,R8
+	MOVQ R8,DX
+	SHRQ $51,DX
+	ADDQ R10,DX
+	MOVQ DX,CX
+	SHRQ $51,DX
+	ANDQ SI,R8
+	ADDQ R12,DX
+	MOVQ DX,R9
+	SHRQ $51,DX
+	ANDQ SI,CX
+	ADDQ R14,DX
+	MOVQ DX,AX
+	SHRQ $51,DX
+	ANDQ SI,R9
+	ADDQ BX,DX
+	MOVQ DX,R10
+	SHRQ $51,DX
+	ANDQ SI,AX
+	IMUL3Q $19,DX,DX
+	ADDQ DX,R8
+	ANDQ SI,R10
+	MOVQ R8,0(DI)
+	MOVQ CX,8(DI)
+	MOVQ R9,16(DI)
+	MOVQ AX,24(DI)
+	MOVQ R10,32(DI)
+	RET

+ 132 - 0
psiphon/common/tls/crypto/curve25519/square_amd64.s

@@ -0,0 +1,132 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+#include "const_amd64.h"
+
+// func square(out, in *[5]uint64)
+TEXT ·square(SB),7,$0-16
+	MOVQ out+0(FP), DI
+	MOVQ in+8(FP), SI
+
+	MOVQ 0(SI),AX
+	MULQ 0(SI)
+	MOVQ AX,CX
+	MOVQ DX,R8
+	MOVQ 0(SI),AX
+	SHLQ $1,AX
+	MULQ 8(SI)
+	MOVQ AX,R9
+	MOVQ DX,R10
+	MOVQ 0(SI),AX
+	SHLQ $1,AX
+	MULQ 16(SI)
+	MOVQ AX,R11
+	MOVQ DX,R12
+	MOVQ 0(SI),AX
+	SHLQ $1,AX
+	MULQ 24(SI)
+	MOVQ AX,R13
+	MOVQ DX,R14
+	MOVQ 0(SI),AX
+	SHLQ $1,AX
+	MULQ 32(SI)
+	MOVQ AX,R15
+	MOVQ DX,BX
+	MOVQ 8(SI),AX
+	MULQ 8(SI)
+	ADDQ AX,R11
+	ADCQ DX,R12
+	MOVQ 8(SI),AX
+	SHLQ $1,AX
+	MULQ 16(SI)
+	ADDQ AX,R13
+	ADCQ DX,R14
+	MOVQ 8(SI),AX
+	SHLQ $1,AX
+	MULQ 24(SI)
+	ADDQ AX,R15
+	ADCQ DX,BX
+	MOVQ 8(SI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 32(SI)
+	ADDQ AX,CX
+	ADCQ DX,R8
+	MOVQ 16(SI),AX
+	MULQ 16(SI)
+	ADDQ AX,R15
+	ADCQ DX,BX
+	MOVQ 16(SI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 24(SI)
+	ADDQ AX,CX
+	ADCQ DX,R8
+	MOVQ 16(SI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 32(SI)
+	ADDQ AX,R9
+	ADCQ DX,R10
+	MOVQ 24(SI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 24(SI)
+	ADDQ AX,R9
+	ADCQ DX,R10
+	MOVQ 24(SI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 32(SI)
+	ADDQ AX,R11
+	ADCQ DX,R12
+	MOVQ 32(SI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 32(SI)
+	ADDQ AX,R13
+	ADCQ DX,R14
+	MOVQ $REDMASK51,SI
+	SHLQ $13,R8:CX
+	ANDQ SI,CX
+	SHLQ $13,R10:R9
+	ANDQ SI,R9
+	ADDQ R8,R9
+	SHLQ $13,R12:R11
+	ANDQ SI,R11
+	ADDQ R10,R11
+	SHLQ $13,R14:R13
+	ANDQ SI,R13
+	ADDQ R12,R13
+	SHLQ $13,BX:R15
+	ANDQ SI,R15
+	ADDQ R14,R15
+	IMUL3Q $19,BX,DX
+	ADDQ DX,CX
+	MOVQ CX,DX
+	SHRQ $51,DX
+	ADDQ R9,DX
+	ANDQ SI,CX
+	MOVQ DX,R8
+	SHRQ $51,DX
+	ADDQ R11,DX
+	ANDQ SI,R8
+	MOVQ DX,R9
+	SHRQ $51,DX
+	ADDQ R13,DX
+	ANDQ SI,R9
+	MOVQ DX,AX
+	SHRQ $51,DX
+	ADDQ R15,DX
+	ANDQ SI,AX
+	MOVQ DX,R10
+	SHRQ $51,DX
+	IMUL3Q $19,DX,DX
+	ADDQ DX,CX
+	ANDQ SI,R10
+	MOVQ CX,0(DI)
+	MOVQ R8,8(DI)
+	MOVQ R9,16(DI)
+	MOVQ AX,24(DI)
+	MOVQ R10,32(DI)
+	RET
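
square saves multiplications relative to mul by computing each cross term once and doubling it (the SHLQ $1 on one operand), since a_i·B^i and a_j·B^j together contribute 2·a_i·a_j·B^(i+j) to the square. A two-limb Go sketch of the same trick; square2 is an illustrative name, not from this commit:

package main

import (
	"fmt"
	"math/bits"
)

// square2 squares a two-limb little-endian number a0 + a1*B, B = 2^64,
// doubling the single cross term instead of computing it twice. It
// assumes a0 < 2^63 so the shift cannot overflow; the assembly has the
// same headroom because its limbs stay near 51 bits.
func square2(a0, a1 uint64) [4]uint64 {
	var r [4]uint64
	hi, lo := bits.Mul64(a0, a0) // a0^2
	r[0], r[1] = lo, hi

	hi, lo = bits.Mul64(a0<<1, a1) // 2*a0*a1, the doubled cross term
	var c uint64
	r[1], c = bits.Add64(r[1], lo, 0)
	r[2], c = bits.Add64(r[2], hi, c)
	r[3] += c

	hi, lo = bits.Mul64(a1, a1) // a1^2
	r[2], c = bits.Add64(r[2], lo, 0)
	r[3], _ = bits.Add64(r[3], hi, c)
	return r
}

func main() {
	fmt.Println(square2(3, 2)) // (3+2B)^2 = 9 + 12B + 4B^2 -> [9 12 4 0]
}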

+ 32 - 0
psiphon/common/tls/crypto/poly1305/poly1305.go

@@ -0,0 +1,32 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package poly1305 implements Poly1305 one-time message authentication code as specified in http://cr.yp.to/mac/poly1305-20050329.pdf.
+
+Poly1305 is a fast, one-time authentication function. It is infeasible for an
+attacker to generate an authenticator for a message without the key. However, a
+key must only be used for a single message. Authenticating two different
+messages with the same key allows an attacker to forge authenticators for other
+messages with the same key.
+
+Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
+used with a fixed key in order to generate one-time keys from a nonce.
+However, in this package AES isn't used and the one-time key is specified
+directly.
+*/
+package poly1305 // import "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tls/crypto/poly1305"
+
+import "crypto/subtle"
+
+// TagSize is the size, in bytes, of a poly1305 authenticator.
+const TagSize = 16
+
+// Verify returns true if mac is a valid authenticator for m with the given
+// key.
+func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
+	var tmp [16]byte
+	Sum(&tmp, m, key)
+	return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1
+}
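
A short usage sketch of this package's Sum and Verify; the key string and expected tag come from the test vectors in the test file below, and the import path is the vendored one declared above:

package main

import (
	"fmt"

	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tls/crypto/poly1305"
)

func main() {
	// One-time key: never authenticate two messages with the same key.
	var key [32]byte
	copy(key[:], "this is 32-byte key for Poly1305")

	var tag [16]byte
	msg := []byte("Hello world!")
	poly1305.Sum(&tag, msg, &key)
	fmt.Printf("%x\n", tag[:]) // a6f745008f81c916a20dcc74eef2b2f0

	fmt.Println(poly1305.Verify(&tag, msg, &key)) // true
}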

+ 92 - 0
psiphon/common/tls/crypto/poly1305/poly1305_test.go

@@ -0,0 +1,92 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package poly1305
+
+import (
+	"bytes"
+	"testing"
+	"unsafe"
+)
+
+var testData = []struct {
+	in, k, correct []byte
+}{
+	{
+		[]byte("Hello world!"),
+		[]byte("this is 32-byte key for Poly1305"),
+		[]byte{0xa6, 0xf7, 0x45, 0x00, 0x8f, 0x81, 0xc9, 0x16, 0xa2, 0x0d, 0xcc, 0x74, 0xee, 0xf2, 0xb2, 0xf0},
+	},
+	{
+		make([]byte, 32),
+		[]byte("this is 32-byte key for Poly1305"),
+		[]byte{0x49, 0xec, 0x78, 0x09, 0x0e, 0x48, 0x1e, 0xc6, 0xc2, 0x6b, 0x33, 0xb9, 0x1c, 0xcc, 0x03, 0x07},
+	},
+	{
+		make([]byte, 2007),
+		[]byte("this is 32-byte key for Poly1305"),
+		[]byte{0xda, 0x84, 0xbc, 0xab, 0x02, 0x67, 0x6c, 0x38, 0xcd, 0xb0, 0x15, 0x60, 0x42, 0x74, 0xc2, 0xaa},
+	},
+	{
+		make([]byte, 2007),
+		make([]byte, 32),
+		make([]byte, 16),
+	},
+	{
+		// This test triggers an edge-case. See https://go-review.googlesource.com/#/c/30101/.
+		[]byte{0x81, 0xd8, 0xb2, 0xe4, 0x6a, 0x25, 0x21, 0x3b, 0x58, 0xfe, 0xe4, 0x21, 0x3a, 0x2a, 0x28, 0xe9, 0x21, 0xc1, 0x2a, 0x96, 0x32, 0x51, 0x6d, 0x3b, 0x73, 0x27, 0x27, 0x27, 0xbe, 0xcf, 0x21, 0x29},
+		[]byte{0x3b, 0x3a, 0x29, 0xe9, 0x3b, 0x21, 0x3a, 0x5c, 0x5c, 0x3b, 0x3b, 0x05, 0x3a, 0x3a, 0x8c, 0x0d},
+		[]byte{0x6d, 0xc1, 0x8b, 0x8c, 0x34, 0x4c, 0xd7, 0x99, 0x27, 0x11, 0x8b, 0xbe, 0x84, 0xb7, 0xf3, 0x14},
+	},
+}
+
+func testSum(t *testing.T, unaligned bool) {
+	var out [16]byte
+	var key [32]byte
+
+	for i, v := range testData {
+		in := v.in
+		if unaligned {
+			in = unalignBytes(in)
+		}
+		copy(key[:], v.k)
+		Sum(&out, in, &key)
+		if !bytes.Equal(out[:], v.correct) {
+			t.Errorf("%d: expected %x, got %x", i, v.correct, out[:])
+		}
+	}
+}
+
+func TestSum(t *testing.T)          { testSum(t, false) }
+func TestSumUnaligned(t *testing.T) { testSum(t, true) }
+
+func benchmark(b *testing.B, size int, unaligned bool) {
+	var out [16]byte
+	var key [32]byte
+	in := make([]byte, size)
+	if unaligned {
+		in = unalignBytes(in)
+	}
+	b.SetBytes(int64(len(in)))
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		Sum(&out, in, &key)
+	}
+}
+
+func Benchmark64(b *testing.B)          { benchmark(b, 64, false) }
+func Benchmark1K(b *testing.B)          { benchmark(b, 1024, false) }
+func Benchmark64Unaligned(b *testing.B) { benchmark(b, 64, true) }
+func Benchmark1KUnaligned(b *testing.B) { benchmark(b, 1024, true) }
+
+func unalignBytes(in []byte) []byte {
+	out := make([]byte, len(in)+1)
+	if uintptr(unsafe.Pointer(&out[0]))&(unsafe.Alignof(uint32(0))-1) == 0 {
+		out = out[1:]
+	} else {
+		out = out[:len(in)]
+	}
+	copy(out, in)
+	return out
+}

+ 22 - 0
psiphon/common/tls/crypto/poly1305/sum_amd64.go

@@ -0,0 +1,22 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+package poly1305
+
+// This function is implemented in sum_amd64.s
+//go:noescape
+func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
+
+// Sum generates an authenticator for m using a one-time key and puts the
+// 16-byte result into out. Authenticating two different messages with the same
+// key allows an attacker to forge messages at will.
+func Sum(out *[16]byte, m []byte, key *[32]byte) {
+	var mPtr *byte
+	if len(m) > 0 {
+		mPtr = &m[0]
+	}
+	poly1305(out, mPtr, uint64(len(m)), key)
+}

+ 125 - 0
psiphon/common/tls/crypto/poly1305/sum_amd64.s

@@ -0,0 +1,125 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+#include "textflag.h"
+
+#define POLY1305_ADD(msg, h0, h1, h2) \
+	ADDQ 0(msg), h0;  \
+	ADCQ 8(msg), h1;  \
+	ADCQ $1, h2;      \
+	LEAQ 16(msg), msg
+
+#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \
+	MOVQ  r0, AX;                  \
+	MULQ  h0;                      \
+	MOVQ  AX, t0;                  \
+	MOVQ  DX, t1;                  \
+	MOVQ  r0, AX;                  \
+	MULQ  h1;                      \
+	ADDQ  AX, t1;                  \
+	ADCQ  $0, DX;                  \
+	MOVQ  r0, t2;                  \
+	IMULQ h2, t2;                  \
+	ADDQ  DX, t2;                  \
+	                               \
+	MOVQ  r1, AX;                  \
+	MULQ  h0;                      \
+	ADDQ  AX, t1;                  \
+	ADCQ  $0, DX;                  \
+	MOVQ  DX, h0;                  \
+	MOVQ  r1, t3;                  \
+	IMULQ h2, t3;                  \
+	MOVQ  r1, AX;                  \
+	MULQ  h1;                      \
+	ADDQ  AX, t2;                  \
+	ADCQ  DX, t3;                  \
+	ADDQ  h0, t2;                  \
+	ADCQ  $0, t3;                  \
+	                               \
+	MOVQ  t0, h0;                  \
+	MOVQ  t1, h1;                  \
+	MOVQ  t2, h2;                  \
+	ANDQ  $3, h2;                  \
+	MOVQ  t2, t0;                  \
+	ANDQ  $0xFFFFFFFFFFFFFFFC, t0; \
+	ADDQ  t0, h0;                  \
+	ADCQ  t3, h1;                  \
+	ADCQ  $0, h2;                  \
+	SHRQ  $2, t3, t2;              \
+	SHRQ  $2, t3;                  \
+	ADDQ  t2, h0;                  \
+	ADCQ  t3, h1;                  \
+	ADCQ  $0, h2
+
+DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF
+DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
+GLOBL ·poly1305Mask<>(SB), RODATA, $16
+
+// func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
+TEXT ·poly1305(SB), $0-32
+	MOVQ out+0(FP), DI
+	MOVQ m+8(FP), SI
+	MOVQ mlen+16(FP), R15
+	MOVQ key+24(FP), AX
+
+	MOVQ 0(AX), R11
+	MOVQ 8(AX), R12
+	ANDQ ·poly1305Mask<>(SB), R11   // r0
+	ANDQ ·poly1305Mask<>+8(SB), R12 // r1
+	XORQ R8, R8                    // h0
+	XORQ R9, R9                    // h1
+	XORQ R10, R10                  // h2
+
+	CMPQ R15, $16
+	JB   bytes_between_0_and_15
+
+loop:
+	POLY1305_ADD(SI, R8, R9, R10)
+
+multiply:
+	POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14)
+	SUBQ $16, R15
+	CMPQ R15, $16
+	JAE  loop
+
+bytes_between_0_and_15:
+	TESTQ R15, R15
+	JZ    done
+	MOVQ  $1, BX
+	XORQ  CX, CX
+	XORQ  R13, R13
+	ADDQ  R15, SI
+
+flush_buffer:
+	SHLQ $8, BX, CX
+	SHLQ $8, BX
+	MOVB -1(SI), R13
+	XORQ R13, BX
+	DECQ SI
+	DECQ R15
+	JNZ  flush_buffer
+
+	ADDQ BX, R8
+	ADCQ CX, R9
+	ADCQ $0, R10
+	MOVQ $16, R15
+	JMP  multiply
+
+done:
+	MOVQ    R8, AX
+	MOVQ    R9, BX
+	SUBQ    $0xFFFFFFFFFFFFFFFB, AX
+	SBBQ    $0xFFFFFFFFFFFFFFFF, BX
+	SBBQ    $3, R10
+	CMOVQCS R8, AX
+	CMOVQCS R9, BX
+	MOVQ    key+24(FP), R8
+	ADDQ    16(R8), AX
+	ADCQ    24(R8), BX
+
+	MOVQ AX, 0(DI)
+	MOVQ BX, 8(DI)
+	RET
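
The done: block reduces the accumulator h modulo p = 2^130 - 5 with a trial subtraction: subtract p across the three words and keep the difference only if no borrow comes out, which CMOVQCS does without branching. A Go sketch of the same idea, with finalReduce as an illustrative name; note its if statement branches on the borrow and is therefore not constant-time, unlike the CMOVQCS above:

package main

import (
	"fmt"
	"math/bits"
)

// finalReduce computes h mod (2^130 - 5) for h < 2*(2^130 - 5), held in
// three 64-bit words. It tries h - p and keeps the difference only when
// no borrow occurs, i.e. when h >= p.
func finalReduce(h0, h1, h2 uint64) (uint64, uint64, uint64) {
	// 2^130 - 5 in little-endian 64-bit words:
	// 0xFFFFFFFFFFFFFFFB, 0xFFFFFFFFFFFFFFFF, 0x3.
	t0, b := bits.Sub64(h0, 0xFFFFFFFFFFFFFFFB, 0)
	t1, b := bits.Sub64(h1, 0xFFFFFFFFFFFFFFFF, b)
	t2, b := bits.Sub64(h2, 3, b)
	if b == 0 { // no borrow: h >= p, keep the reduced value
		return t0, t1, t2
	}
	return h0, h1, h2
}

func main() {
	fmt.Println(finalReduce(4, 0, 0)) // below p: unchanged
	fmt.Println(finalReduce(0xFFFFFFFFFFFFFFFB, 0xFFFFFFFFFFFFFFFF, 3)) // exactly p: 0 0 0
}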

+ 22 - 0
psiphon/common/tls/crypto/poly1305/sum_arm.go

@@ -0,0 +1,22 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm,!gccgo,!appengine,!nacl
+
+package poly1305
+
+// This function is implemented in sum_arm.s
+//go:noescape
+func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
+
+// Sum generates an authenticator for m using a one-time key and puts the
+// 16-byte result into out. Authenticating two different messages with the same
+// key allows an attacker to forge messages at will.
+func Sum(out *[16]byte, m []byte, key *[32]byte) {
+	var mPtr *byte
+	if len(m) > 0 {
+		mPtr = &m[0]
+	}
+	poly1305_auth_armv6(out, mPtr, uint32(len(m)), key)
+}

+ 427 - 0
psiphon/common/tls/crypto/poly1305/sum_arm.s

@@ -0,0 +1,427 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm,!gccgo,!appengine,!nacl
+
+#include "textflag.h"
+
+// This code was translated into a form compatible with 5a from the public
+// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305.
+
+DATA ·poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff
+DATA ·poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03
+DATA ·poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff
+DATA ·poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff
+DATA ·poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff
+GLOBL ·poly1305_init_constants_armv6<>(SB), 8, $20
+
+// Warning: the linker may use R11 to synthesize certain instructions. Please
+// take care and verify that no synthetic instructions use it.
+
+TEXT poly1305_init_ext_armv6<>(SB), NOSPLIT, $0
+	// Needs 16 bytes of stack and 64 bytes of space pointed to by R0.  (It
+	// might look like it's only 60 bytes of space but the final four bytes
+	// will be written by another function.) We need to skip over four
+	// bytes of stack because that's saving the value of 'g'.
+	ADD       $4, R13, R8
+	MOVM.IB   [R4-R7], (R8)
+	MOVM.IA.W (R1), [R2-R5]
+	MOVW      $·poly1305_init_constants_armv6<>(SB), R7
+	MOVW      R2, R8
+	MOVW      R2>>26, R9
+	MOVW      R3>>20, g
+	MOVW      R4>>14, R11
+	MOVW      R5>>8, R12
+	ORR       R3<<6, R9, R9
+	ORR       R4<<12, g, g
+	ORR       R5<<18, R11, R11
+	MOVM.IA   (R7), [R2-R6]
+	AND       R8, R2, R2
+	AND       R9, R3, R3
+	AND       g, R4, R4
+	AND       R11, R5, R5
+	AND       R12, R6, R6
+	MOVM.IA.W [R2-R6], (R0)
+	EOR       R2, R2, R2
+	EOR       R3, R3, R3
+	EOR       R4, R4, R4
+	EOR       R5, R5, R5
+	EOR       R6, R6, R6
+	MOVM.IA.W [R2-R6], (R0)
+	MOVM.IA.W (R1), [R2-R5]
+	MOVM.IA   [R2-R6], (R0)
+	ADD       $20, R13, R0
+	MOVM.DA   (R0), [R4-R7]
+	RET
+
+#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \
+	MOVBU (offset+0)(Rsrc), Rtmp; \
+	MOVBU Rtmp, (offset+0)(Rdst); \
+	MOVBU (offset+1)(Rsrc), Rtmp; \
+	MOVBU Rtmp, (offset+1)(Rdst); \
+	MOVBU (offset+2)(Rsrc), Rtmp; \
+	MOVBU Rtmp, (offset+2)(Rdst); \
+	MOVBU (offset+3)(Rsrc), Rtmp; \
+	MOVBU Rtmp, (offset+3)(Rdst)
+
+TEXT poly1305_blocks_armv6<>(SB), NOSPLIT, $0
+	// Needs 24 bytes of stack for saved registers and then 88 bytes of
+	// scratch space after that. We assume that 24 bytes at (R13) have
+	// already been used: four bytes for the link register saved in the
+	// prelude of poly1305_auth_armv6, four bytes for saving the value of g
+	// in that function and 16 bytes of scratch space used around
+	// poly1305_finish_ext_armv6_skip1.
+	ADD     $24, R13, R12
+	MOVM.IB [R4-R8, R14], (R12)
+	MOVW    R0, 88(R13)
+	MOVW    R1, 92(R13)
+	MOVW    R2, 96(R13)
+	MOVW    R1, R14
+	MOVW    R2, R12
+	MOVW    56(R0), R8
+	WORD    $0xe1180008                // TST R8, R8 not working see issue 5921
+	EOR     R6, R6, R6
+	MOVW.EQ $(1<<24), R6
+	MOVW    R6, 84(R13)
+	ADD     $116, R13, g
+	MOVM.IA (R0), [R0-R9]
+	MOVM.IA [R0-R4], (g)
+	CMP     $16, R12
+	BLO     poly1305_blocks_armv6_done
+
+poly1305_blocks_armv6_mainloop:
+	WORD    $0xe31e0003                            // TST R14, #3 not working see issue 5921
+	BEQ     poly1305_blocks_armv6_mainloop_aligned
+	ADD     $100, R13, g
+	MOVW_UNALIGNED(R14, g, R0, 0)
+	MOVW_UNALIGNED(R14, g, R0, 4)
+	MOVW_UNALIGNED(R14, g, R0, 8)
+	MOVW_UNALIGNED(R14, g, R0, 12)
+	MOVM.IA (g), [R0-R3]
+	ADD     $16, R14
+	B       poly1305_blocks_armv6_mainloop_loaded
+
+poly1305_blocks_armv6_mainloop_aligned:
+	MOVM.IA.W (R14), [R0-R3]
+
+poly1305_blocks_armv6_mainloop_loaded:
+	MOVW    R0>>26, g
+	MOVW    R1>>20, R11
+	MOVW    R2>>14, R12
+	MOVW    R14, 92(R13)
+	MOVW    R3>>8, R4
+	ORR     R1<<6, g, g
+	ORR     R2<<12, R11, R11
+	ORR     R3<<18, R12, R12
+	BIC     $0xfc000000, R0, R0
+	BIC     $0xfc000000, g, g
+	MOVW    84(R13), R3
+	BIC     $0xfc000000, R11, R11
+	BIC     $0xfc000000, R12, R12
+	ADD     R0, R5, R5
+	ADD     g, R6, R6
+	ORR     R3, R4, R4
+	ADD     R11, R7, R7
+	ADD     $116, R13, R14
+	ADD     R12, R8, R8
+	ADD     R4, R9, R9
+	MOVM.IA (R14), [R0-R4]
+	MULLU   R4, R5, (R11, g)
+	MULLU   R3, R5, (R14, R12)
+	MULALU  R3, R6, (R11, g)
+	MULALU  R2, R6, (R14, R12)
+	MULALU  R2, R7, (R11, g)
+	MULALU  R1, R7, (R14, R12)
+	ADD     R4<<2, R4, R4
+	ADD     R3<<2, R3, R3
+	MULALU  R1, R8, (R11, g)
+	MULALU  R0, R8, (R14, R12)
+	MULALU  R0, R9, (R11, g)
+	MULALU  R4, R9, (R14, R12)
+	MOVW    g, 76(R13)
+	MOVW    R11, 80(R13)
+	MOVW    R12, 68(R13)
+	MOVW    R14, 72(R13)
+	MULLU   R2, R5, (R11, g)
+	MULLU   R1, R5, (R14, R12)
+	MULALU  R1, R6, (R11, g)
+	MULALU  R0, R6, (R14, R12)
+	MULALU  R0, R7, (R11, g)
+	MULALU  R4, R7, (R14, R12)
+	ADD     R2<<2, R2, R2
+	ADD     R1<<2, R1, R1
+	MULALU  R4, R8, (R11, g)
+	MULALU  R3, R8, (R14, R12)
+	MULALU  R3, R9, (R11, g)
+	MULALU  R2, R9, (R14, R12)
+	MOVW    g, 60(R13)
+	MOVW    R11, 64(R13)
+	MOVW    R12, 52(R13)
+	MOVW    R14, 56(R13)
+	MULLU   R0, R5, (R11, g)
+	MULALU  R4, R6, (R11, g)
+	MULALU  R3, R7, (R11, g)
+	MULALU  R2, R8, (R11, g)
+	MULALU  R1, R9, (R11, g)
+	ADD     $52, R13, R0
+	MOVM.IA (R0), [R0-R7]
+	MOVW    g>>26, R12
+	MOVW    R4>>26, R14
+	ORR     R11<<6, R12, R12
+	ORR     R5<<6, R14, R14
+	BIC     $0xfc000000, g, g
+	BIC     $0xfc000000, R4, R4
+	ADD.S   R12, R0, R0
+	ADC     $0, R1, R1
+	ADD.S   R14, R6, R6
+	ADC     $0, R7, R7
+	MOVW    R0>>26, R12
+	MOVW    R6>>26, R14
+	ORR     R1<<6, R12, R12
+	ORR     R7<<6, R14, R14
+	BIC     $0xfc000000, R0, R0
+	BIC     $0xfc000000, R6, R6
+	ADD     R14<<2, R14, R14
+	ADD.S   R12, R2, R2
+	ADC     $0, R3, R3
+	ADD     R14, g, g
+	MOVW    R2>>26, R12
+	MOVW    g>>26, R14
+	ORR     R3<<6, R12, R12
+	BIC     $0xfc000000, g, R5
+	BIC     $0xfc000000, R2, R7
+	ADD     R12, R4, R4
+	ADD     R14, R0, R0
+	MOVW    R4>>26, R12
+	BIC     $0xfc000000, R4, R8
+	ADD     R12, R6, R9
+	MOVW    96(R13), R12
+	MOVW    92(R13), R14
+	MOVW    R0, R6
+	CMP     $32, R12
+	SUB     $16, R12, R12
+	MOVW    R12, 96(R13)
+	BHS     poly1305_blocks_armv6_mainloop
+
+poly1305_blocks_armv6_done:
+	MOVW    88(R13), R12
+	MOVW    R5, 20(R12)
+	MOVW    R6, 24(R12)
+	MOVW    R7, 28(R12)
+	MOVW    R8, 32(R12)
+	MOVW    R9, 36(R12)
+	ADD     $48, R13, R0
+	MOVM.DA (R0), [R4-R8, R14]
+	RET
+
+#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \
+	MOVBU.P 1(Rsrc), Rtmp; \
+	MOVBU.P Rtmp, 1(Rdst); \
+	MOVBU.P 1(Rsrc), Rtmp; \
+	MOVBU.P Rtmp, 1(Rdst)
+
+#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \
+	MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \
+	MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp)
+
+// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
+TEXT ·poly1305_auth_armv6(SB), $196-16
+	// The value 196, just above, is the sum of 64 (the size of the context
+	// structure) and 132 (the amount of stack needed).
+	//
+	// At this point, the stack pointer (R13) has been moved down. It
+	// points to the saved link register and there's 196 bytes of free
+	// space above it.
+	//
+	// The stack for this function looks like:
+	//
+	// +---------------------
+	// |
+	// | 64 bytes of context structure
+	// |
+	// +---------------------
+	// |
+	// | 112 bytes for poly1305_blocks_armv6
+	// |
+	// +---------------------
+	// | 16 bytes of final block, constructed at
+	// | poly1305_finish_ext_armv6_skip8
+	// +---------------------
+	// | four bytes of saved 'g'
+	// +---------------------
+	// | lr, saved by prelude    <- R13 points here
+	// +---------------------
+	MOVW g, 4(R13)
+
+	MOVW out+0(FP), R4
+	MOVW m+4(FP), R5
+	MOVW mlen+8(FP), R6
+	MOVW key+12(FP), R7
+
+	ADD  $136, R13, R0 // 136 = 4 + 4 + 16 + 112
+	MOVW R7, R1
+
+	// poly1305_init_ext_armv6 will write to the stack from R13+4, but
+	// that's ok because none of the other values have been written yet.
+	BL    poly1305_init_ext_armv6<>(SB)
+	BIC.S $15, R6, R2
+	BEQ   poly1305_auth_armv6_noblocks
+	ADD   $136, R13, R0
+	MOVW  R5, R1
+	ADD   R2, R5, R5
+	SUB   R2, R6, R6
+	BL    poly1305_blocks_armv6<>(SB)
+
+poly1305_auth_armv6_noblocks:
+	ADD  $136, R13, R0
+	MOVW R5, R1
+	MOVW R6, R2
+	MOVW R4, R3
+
+	MOVW  R0, R5
+	MOVW  R1, R6
+	MOVW  R2, R7
+	MOVW  R3, R8
+	AND.S R2, R2, R2
+	BEQ   poly1305_finish_ext_armv6_noremaining
+	EOR   R0, R0
+	ADD   $8, R13, R9                           // 8 = offset to 16 byte scratch space
+	MOVW  R0, (R9)
+	MOVW  R0, 4(R9)
+	MOVW  R0, 8(R9)
+	MOVW  R0, 12(R9)
+	WORD  $0xe3110003                           // TST R1, #3 not working see issue 5921
+	BEQ   poly1305_finish_ext_armv6_aligned
+	WORD  $0xe3120008                           // TST R2, #8 not working see issue 5921
+	BEQ   poly1305_finish_ext_armv6_skip8
+	MOVWP_UNALIGNED(R1, R9, g)
+	MOVWP_UNALIGNED(R1, R9, g)
+
+poly1305_finish_ext_armv6_skip8:
+	WORD $0xe3120004                     // TST $4, R2 not working see issue 5921
+	BEQ  poly1305_finish_ext_armv6_skip4
+	MOVWP_UNALIGNED(R1, R9, g)
+
+poly1305_finish_ext_armv6_skip4:
+	WORD $0xe3120002                     // TST $2, R2 not working see issue 5921
+	BEQ  poly1305_finish_ext_armv6_skip2
+	MOVHUP_UNALIGNED(R1, R9, g)
+	B    poly1305_finish_ext_armv6_skip2
+
+poly1305_finish_ext_armv6_aligned:
+	WORD      $0xe3120008                             // TST R2, #8 not working see issue 5921
+	BEQ       poly1305_finish_ext_armv6_skip8_aligned
+	MOVM.IA.W (R1), [g-R11]
+	MOVM.IA.W [g-R11], (R9)
+
+poly1305_finish_ext_armv6_skip8_aligned:
+	WORD   $0xe3120004                             // TST $4, R2 not working see issue 5921
+	BEQ    poly1305_finish_ext_armv6_skip4_aligned
+	MOVW.P 4(R1), g
+	MOVW.P g, 4(R9)
+
+poly1305_finish_ext_armv6_skip4_aligned:
+	WORD    $0xe3120002                     // TST $2, R2 not working see issue 5921
+	BEQ     poly1305_finish_ext_armv6_skip2
+	MOVHU.P 2(R1), g
+	MOVH.P  g, 2(R9)
+
+poly1305_finish_ext_armv6_skip2:
+	WORD    $0xe3120001                     // TST $1, R2 not working see issue 5921
+	BEQ     poly1305_finish_ext_armv6_skip1
+	MOVBU.P 1(R1), g
+	MOVBU.P g, 1(R9)
+
+poly1305_finish_ext_armv6_skip1:
+	MOVW  $1, R11
+	MOVBU R11, 0(R9)
+	MOVW  R11, 56(R5)
+	MOVW  R5, R0
+	ADD   $8, R13, R1
+	MOVW  $16, R2
+	BL    poly1305_blocks_armv6<>(SB)
+
+poly1305_finish_ext_armv6_noremaining:
+	MOVW      20(R5), R0
+	MOVW      24(R5), R1
+	MOVW      28(R5), R2
+	MOVW      32(R5), R3
+	MOVW      36(R5), R4
+	MOVW      R4>>26, R12
+	BIC       $0xfc000000, R4, R4
+	ADD       R12<<2, R12, R12
+	ADD       R12, R0, R0
+	MOVW      R0>>26, R12
+	BIC       $0xfc000000, R0, R0
+	ADD       R12, R1, R1
+	MOVW      R1>>26, R12
+	BIC       $0xfc000000, R1, R1
+	ADD       R12, R2, R2
+	MOVW      R2>>26, R12
+	BIC       $0xfc000000, R2, R2
+	ADD       R12, R3, R3
+	MOVW      R3>>26, R12
+	BIC       $0xfc000000, R3, R3
+	ADD       R12, R4, R4
+	ADD       $5, R0, R6
+	MOVW      R6>>26, R12
+	BIC       $0xfc000000, R6, R6
+	ADD       R12, R1, R7
+	MOVW      R7>>26, R12
+	BIC       $0xfc000000, R7, R7
+	ADD       R12, R2, g
+	MOVW      g>>26, R12
+	BIC       $0xfc000000, g, g
+	ADD       R12, R3, R11
+	MOVW      $-(1<<26), R12
+	ADD       R11>>26, R12, R12
+	BIC       $0xfc000000, R11, R11
+	ADD       R12, R4, R9
+	MOVW      R9>>31, R12
+	SUB       $1, R12
+	AND       R12, R6, R6
+	AND       R12, R7, R7
+	AND       R12, g, g
+	AND       R12, R11, R11
+	AND       R12, R9, R9
+	MVN       R12, R12
+	AND       R12, R0, R0
+	AND       R12, R1, R1
+	AND       R12, R2, R2
+	AND       R12, R3, R3
+	AND       R12, R4, R4
+	ORR       R6, R0, R0
+	ORR       R7, R1, R1
+	ORR       g, R2, R2
+	ORR       R11, R3, R3
+	ORR       R9, R4, R4
+	ORR       R1<<26, R0, R0
+	MOVW      R1>>6, R1
+	ORR       R2<<20, R1, R1
+	MOVW      R2>>12, R2
+	ORR       R3<<14, R2, R2
+	MOVW      R3>>18, R3
+	ORR       R4<<8, R3, R3
+	MOVW      40(R5), R6
+	MOVW      44(R5), R7
+	MOVW      48(R5), g
+	MOVW      52(R5), R11
+	ADD.S     R6, R0, R0
+	ADC.S     R7, R1, R1
+	ADC.S     g, R2, R2
+	ADC.S     R11, R3, R3
+	MOVM.IA   [R0-R3], (R8)
+	MOVW      R5, R12
+	EOR       R0, R0, R0
+	EOR       R1, R1, R1
+	EOR       R2, R2, R2
+	EOR       R3, R3, R3
+	EOR       R4, R4, R4
+	EOR       R5, R5, R5
+	EOR       R6, R6, R6
+	EOR       R7, R7, R7
+	MOVM.IA.W [R0-R7], (R12)
+	MOVM.IA   [R0-R7], (R12)
+	MOVW      4(R13), g
+	RET

+ 1531 - 0
psiphon/common/tls/crypto/poly1305/sum_ref.go

@@ -0,0 +1,1531 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64,!arm gccgo appengine nacl
+
+package poly1305
+
+// Based on original, public domain implementation from NaCl by D. J.
+// Bernstein.
+
+import "math"
+
+const (
+	alpham80 = 0.00000000558793544769287109375
+	alpham48 = 24.0
+	alpham16 = 103079215104.0
+	alpha0   = 6755399441055744.0
+	alpha18  = 1770887431076116955136.0
+	alpha32  = 29014219670751100192948224.0
+	alpha50  = 7605903601369376408980219232256.0
+	alpha64  = 124615124604835863084731911901282304.0
+	alpha82  = 32667107224410092492483962313449748299776.0
+	alpha96  = 535217884764734955396857238543560676143529984.0
+	alpha112 = 35076039295941670036888435985190792471742381031424.0
+	alpha130 = 9194973245195333150150082162901855101712434733101613056.0
+	scale    = 0.0000000000000000000000000000000000000036734198463196484624023016788195177431833298649127735047148490821200539357960224151611328125
+	offset0  = 6755408030990331.0
+	offset1  = 29014256564239239022116864.0
+	offset2  = 124615283061160854719918951570079744.0
+	offset3  = 535219245894202480694386063513315216128475136.0
+)
+
+// Sum generates an authenticator for m using a one-time key and puts the
+// 16-byte result into out. Authenticating two different messages with the same
+// key allows an attacker to forge messages at will.
+func Sum(out *[16]byte, m []byte, key *[32]byte) {
+	r := key
+	s := key[16:]
+	var (
+		y7        float64
+		y6        float64
+		y1        float64
+		y0        float64
+		y5        float64
+		y4        float64
+		x7        float64
+		x6        float64
+		x1        float64
+		x0        float64
+		y3        float64
+		y2        float64
+		x5        float64
+		r3lowx0   float64
+		x4        float64
+		r0lowx6   float64
+		x3        float64
+		r3highx0  float64
+		x2        float64
+		r0highx6  float64
+		r0lowx0   float64
+		sr1lowx6  float64
+		r0highx0  float64
+		sr1highx6 float64
+		sr3low    float64
+		r1lowx0   float64
+		sr2lowx6  float64
+		r1highx0  float64
+		sr2highx6 float64
+		r2lowx0   float64
+		sr3lowx6  float64
+		r2highx0  float64
+		sr3highx6 float64
+		r1highx4  float64
+		r1lowx4   float64
+		r0highx4  float64
+		r0lowx4   float64
+		sr3highx4 float64
+		sr3lowx4  float64
+		sr2highx4 float64
+		sr2lowx4  float64
+		r0lowx2   float64
+		r0highx2  float64
+		r1lowx2   float64
+		r1highx2  float64
+		r2lowx2   float64
+		r2highx2  float64
+		sr3lowx2  float64
+		sr3highx2 float64
+		z0        float64
+		z1        float64
+		z2        float64
+		z3        float64
+		m0        int64
+		m1        int64
+		m2        int64
+		m3        int64
+		m00       uint32
+		m01       uint32
+		m02       uint32
+		m03       uint32
+		m10       uint32
+		m11       uint32
+		m12       uint32
+		m13       uint32
+		m20       uint32
+		m21       uint32
+		m22       uint32
+		m23       uint32
+		m30       uint32
+		m31       uint32
+		m32       uint32
+		m33       uint64
+		lbelow2   int32
+		lbelow3   int32
+		lbelow4   int32
+		lbelow5   int32
+		lbelow6   int32
+		lbelow7   int32
+		lbelow8   int32
+		lbelow9   int32
+		lbelow10  int32
+		lbelow11  int32
+		lbelow12  int32
+		lbelow13  int32
+		lbelow14  int32
+		lbelow15  int32
+		s00       uint32
+		s01       uint32
+		s02       uint32
+		s03       uint32
+		s10       uint32
+		s11       uint32
+		s12       uint32
+		s13       uint32
+		s20       uint32
+		s21       uint32
+		s22       uint32
+		s23       uint32
+		s30       uint32
+		s31       uint32
+		s32       uint32
+		s33       uint32
+		bits32    uint64
+		f         uint64
+		f0        uint64
+		f1        uint64
+		f2        uint64
+		f3        uint64
+		f4        uint64
+		g         uint64
+		g0        uint64
+		g1        uint64
+		g2        uint64
+		g3        uint64
+		g4        uint64
+	)
+
+	var p int32
+
+	l := int32(len(m))
+
+	r00 := uint32(r[0])
+
+	r01 := uint32(r[1])
+
+	r02 := uint32(r[2])
+	r0 := int64(2151)
+
+	r03 := uint32(r[3])
+	r03 &= 15
+	r0 <<= 51
+
+	r10 := uint32(r[4])
+	r10 &= 252
+	r01 <<= 8
+	r0 += int64(r00)
+
+	r11 := uint32(r[5])
+	r02 <<= 16
+	r0 += int64(r01)
+
+	r12 := uint32(r[6])
+	r03 <<= 24
+	r0 += int64(r02)
+
+	r13 := uint32(r[7])
+	r13 &= 15
+	r1 := int64(2215)
+	r0 += int64(r03)
+
+	d0 := r0
+	r1 <<= 51
+	r2 := int64(2279)
+
+	r20 := uint32(r[8])
+	r20 &= 252
+	r11 <<= 8
+	r1 += int64(r10)
+
+	r21 := uint32(r[9])
+	r12 <<= 16
+	r1 += int64(r11)
+
+	r22 := uint32(r[10])
+	r13 <<= 24
+	r1 += int64(r12)
+
+	r23 := uint32(r[11])
+	r23 &= 15
+	r2 <<= 51
+	r1 += int64(r13)
+
+	d1 := r1
+	r21 <<= 8
+	r2 += int64(r20)
+
+	r30 := uint32(r[12])
+	r30 &= 252
+	r22 <<= 16
+	r2 += int64(r21)
+
+	r31 := uint32(r[13])
+	r23 <<= 24
+	r2 += int64(r22)
+
+	r32 := uint32(r[14])
+	r2 += int64(r23)
+	r3 := int64(2343)
+
+	d2 := r2
+	r3 <<= 51
+
+	r33 := uint32(r[15])
+	r33 &= 15
+	r31 <<= 8
+	r3 += int64(r30)
+
+	r32 <<= 16
+	r3 += int64(r31)
+
+	r33 <<= 24
+	r3 += int64(r32)
+
+	r3 += int64(r33)
+	h0 := alpha32 - alpha32
+
+	d3 := r3
+	h1 := alpha32 - alpha32
+
+	h2 := alpha32 - alpha32
+
+	h3 := alpha32 - alpha32
+
+	h4 := alpha32 - alpha32
+
+	r0low := math.Float64frombits(uint64(d0))
+	h5 := alpha32 - alpha32
+
+	r1low := math.Float64frombits(uint64(d1))
+	h6 := alpha32 - alpha32
+
+	r2low := math.Float64frombits(uint64(d2))
+	h7 := alpha32 - alpha32
+
+	r0low -= alpha0
+
+	r1low -= alpha32
+
+	r2low -= alpha64
+
+	r0high := r0low + alpha18
+
+	r3low := math.Float64frombits(uint64(d3))
+
+	r1high := r1low + alpha50
+	sr1low := scale * r1low
+
+	r2high := r2low + alpha82
+	sr2low := scale * r2low
+
+	r0high -= alpha18
+	r0high_stack := r0high
+
+	r3low -= alpha96
+
+	r1high -= alpha50
+	r1high_stack := r1high
+
+	sr1high := sr1low + alpham80
+
+	r0low -= r0high
+
+	r2high -= alpha82
+	sr3low = scale * r3low
+
+	sr2high := sr2low + alpham48
+
+	r1low -= r1high
+	r1low_stack := r1low
+
+	sr1high -= alpham80
+	sr1high_stack := sr1high
+
+	r2low -= r2high
+	r2low_stack := r2low
+
+	sr2high -= alpham48
+	sr2high_stack := sr2high
+
+	r3high := r3low + alpha112
+	r0low_stack := r0low
+
+	sr1low -= sr1high
+	sr1low_stack := sr1low
+
+	sr3high := sr3low + alpham16
+	r2high_stack := r2high
+
+	sr2low -= sr2high
+	sr2low_stack := sr2low
+
+	r3high -= alpha112
+	r3high_stack := r3high
+
+	sr3high -= alpham16
+	sr3high_stack := sr3high
+
+	r3low -= r3high
+	r3low_stack := r3low
+
+	sr3low -= sr3high
+	sr3low_stack := sr3low
+
+	if l < 16 {
+		goto addatmost15bytes
+	}
+
+	m00 = uint32(m[p+0])
+	m0 = 2151
+
+	m0 <<= 51
+	m1 = 2215
+	m01 = uint32(m[p+1])
+
+	m1 <<= 51
+	m2 = 2279
+	m02 = uint32(m[p+2])
+
+	m2 <<= 51
+	m3 = 2343
+	m03 = uint32(m[p+3])
+
+	m10 = uint32(m[p+4])
+	m01 <<= 8
+	m0 += int64(m00)
+
+	m11 = uint32(m[p+5])
+	m02 <<= 16
+	m0 += int64(m01)
+
+	m12 = uint32(m[p+6])
+	m03 <<= 24
+	m0 += int64(m02)
+
+	m13 = uint32(m[p+7])
+	m3 <<= 51
+	m0 += int64(m03)
+
+	m20 = uint32(m[p+8])
+	m11 <<= 8
+	m1 += int64(m10)
+
+	m21 = uint32(m[p+9])
+	m12 <<= 16
+	m1 += int64(m11)
+
+	m22 = uint32(m[p+10])
+	m13 <<= 24
+	m1 += int64(m12)
+
+	m23 = uint32(m[p+11])
+	m1 += int64(m13)
+
+	m30 = uint32(m[p+12])
+	m21 <<= 8
+	m2 += int64(m20)
+
+	m31 = uint32(m[p+13])
+	m22 <<= 16
+	m2 += int64(m21)
+
+	m32 = uint32(m[p+14])
+	m23 <<= 24
+	m2 += int64(m22)
+
+	m33 = uint64(m[p+15])
+	m2 += int64(m23)
+
+	d0 = m0
+	m31 <<= 8
+	m3 += int64(m30)
+
+	d1 = m1
+	m32 <<= 16
+	m3 += int64(m31)
+
+	d2 = m2
+	m33 += 256
+
+	m33 <<= 24
+	m3 += int64(m32)
+
+	m3 += int64(m33)
+	d3 = m3
+
+	p += 16
+	l -= 16
+
+	z0 = math.Float64frombits(uint64(d0))
+
+	z1 = math.Float64frombits(uint64(d1))
+
+	z2 = math.Float64frombits(uint64(d2))
+
+	z3 = math.Float64frombits(uint64(d3))
+
+	z0 -= alpha0
+
+	z1 -= alpha32
+
+	z2 -= alpha64
+
+	z3 -= alpha96
+
+	h0 += z0
+
+	h1 += z1
+
+	h3 += z2
+
+	h5 += z3
+
+	if l < 16 {
+		goto multiplyaddatmost15bytes
+	}
+
+multiplyaddatleast16bytes:
+
+	m2 = 2279
+	m20 = uint32(m[p+8])
+	y7 = h7 + alpha130
+
+	m2 <<= 51
+	m3 = 2343
+	m21 = uint32(m[p+9])
+	y6 = h6 + alpha130
+
+	m3 <<= 51
+	m0 = 2151
+	m22 = uint32(m[p+10])
+	y1 = h1 + alpha32
+
+	m0 <<= 51
+	m1 = 2215
+	m23 = uint32(m[p+11])
+	y0 = h0 + alpha32
+
+	m1 <<= 51
+	m30 = uint32(m[p+12])
+	y7 -= alpha130
+
+	m21 <<= 8
+	m2 += int64(m20)
+	m31 = uint32(m[p+13])
+	y6 -= alpha130
+
+	m22 <<= 16
+	m2 += int64(m21)
+	m32 = uint32(m[p+14])
+	y1 -= alpha32
+
+	m23 <<= 24
+	m2 += int64(m22)
+	m33 = uint64(m[p+15])
+	y0 -= alpha32
+
+	m2 += int64(m23)
+	m00 = uint32(m[p+0])
+	y5 = h5 + alpha96
+
+	m31 <<= 8
+	m3 += int64(m30)
+	m01 = uint32(m[p+1])
+	y4 = h4 + alpha96
+
+	m32 <<= 16
+	m02 = uint32(m[p+2])
+	x7 = h7 - y7
+	y7 *= scale
+
+	m33 += 256
+	m03 = uint32(m[p+3])
+	x6 = h6 - y6
+	y6 *= scale
+
+	m33 <<= 24
+	m3 += int64(m31)
+	m10 = uint32(m[p+4])
+	x1 = h1 - y1
+
+	m01 <<= 8
+	m3 += int64(m32)
+	m11 = uint32(m[p+5])
+	x0 = h0 - y0
+
+	m3 += int64(m33)
+	m0 += int64(m00)
+	m12 = uint32(m[p+6])
+	y5 -= alpha96
+
+	m02 <<= 16
+	m0 += int64(m01)
+	m13 = uint32(m[p+7])
+	y4 -= alpha96
+
+	m03 <<= 24
+	m0 += int64(m02)
+	d2 = m2
+	x1 += y7
+
+	m0 += int64(m03)
+	d3 = m3
+	x0 += y6
+
+	m11 <<= 8
+	m1 += int64(m10)
+	d0 = m0
+	x7 += y5
+
+	m12 <<= 16
+	m1 += int64(m11)
+	x6 += y4
+
+	m13 <<= 24
+	m1 += int64(m12)
+	y3 = h3 + alpha64
+
+	m1 += int64(m13)
+	d1 = m1
+	y2 = h2 + alpha64
+
+	x0 += x1
+
+	x6 += x7
+
+	y3 -= alpha64
+	r3low = r3low_stack
+
+	y2 -= alpha64
+	r0low = r0low_stack
+
+	x5 = h5 - y5
+	r3lowx0 = r3low * x0
+	r3high = r3high_stack
+
+	x4 = h4 - y4
+	r0lowx6 = r0low * x6
+	r0high = r0high_stack
+
+	x3 = h3 - y3
+	r3highx0 = r3high * x0
+	sr1low = sr1low_stack
+
+	x2 = h2 - y2
+	r0highx6 = r0high * x6
+	sr1high = sr1high_stack
+
+	x5 += y3
+	r0lowx0 = r0low * x0
+	r1low = r1low_stack
+
+	h6 = r3lowx0 + r0lowx6
+	sr1lowx6 = sr1low * x6
+	r1high = r1high_stack
+
+	x4 += y2
+	r0highx0 = r0high * x0
+	sr2low = sr2low_stack
+
+	h7 = r3highx0 + r0highx6
+	sr1highx6 = sr1high * x6
+	sr2high = sr2high_stack
+
+	x3 += y1
+	r1lowx0 = r1low * x0
+	r2low = r2low_stack
+
+	h0 = r0lowx0 + sr1lowx6
+	sr2lowx6 = sr2low * x6
+	r2high = r2high_stack
+
+	x2 += y0
+	r1highx0 = r1high * x0
+	sr3low = sr3low_stack
+
+	h1 = r0highx0 + sr1highx6
+	sr2highx6 = sr2high * x6
+	sr3high = sr3high_stack
+
+	x4 += x5
+	r2lowx0 = r2low * x0
+	z2 = math.Float64frombits(uint64(d2))
+
+	h2 = r1lowx0 + sr2lowx6
+	sr3lowx6 = sr3low * x6
+
+	x2 += x3
+	r2highx0 = r2high * x0
+	z3 = math.Float64frombits(uint64(d3))
+
+	h3 = r1highx0 + sr2highx6
+	sr3highx6 = sr3high * x6
+
+	r1highx4 = r1high * x4
+	z2 -= alpha64
+
+	h4 = r2lowx0 + sr3lowx6
+	r1lowx4 = r1low * x4
+
+	r0highx4 = r0high * x4
+	z3 -= alpha96
+
+	h5 = r2highx0 + sr3highx6
+	r0lowx4 = r0low * x4
+
+	h7 += r1highx4
+	sr3highx4 = sr3high * x4
+
+	h6 += r1lowx4
+	sr3lowx4 = sr3low * x4
+
+	h5 += r0highx4
+	sr2highx4 = sr2high * x4
+
+	h4 += r0lowx4
+	sr2lowx4 = sr2low * x4
+
+	h3 += sr3highx4
+	r0lowx2 = r0low * x2
+
+	h2 += sr3lowx4
+	r0highx2 = r0high * x2
+
+	h1 += sr2highx4
+	r1lowx2 = r1low * x2
+
+	h0 += sr2lowx4
+	r1highx2 = r1high * x2
+
+	h2 += r0lowx2
+	r2lowx2 = r2low * x2
+
+	h3 += r0highx2
+	r2highx2 = r2high * x2
+
+	h4 += r1lowx2
+	sr3lowx2 = sr3low * x2
+
+	h5 += r1highx2
+	sr3highx2 = sr3high * x2
+
+	p += 16
+	l -= 16
+	h6 += r2lowx2
+
+	h7 += r2highx2
+
+	z1 = math.Float64frombits(uint64(d1))
+	h0 += sr3lowx2
+
+	z0 = math.Float64frombits(uint64(d0))
+	h1 += sr3highx2
+
+	z1 -= alpha32
+
+	z0 -= alpha0
+
+	h5 += z3
+
+	h3 += z2
+
+	h1 += z1
+
+	h0 += z0
+
+	if l >= 16 {
+		goto multiplyaddatleast16bytes
+	}
+
+multiplyaddatmost15bytes:
+
+	y7 = h7 + alpha130
+
+	y6 = h6 + alpha130
+
+	y1 = h1 + alpha32
+
+	y0 = h0 + alpha32
+
+	y7 -= alpha130
+
+	y6 -= alpha130
+
+	y1 -= alpha32
+
+	y0 -= alpha32
+
+	y5 = h5 + alpha96
+
+	y4 = h4 + alpha96
+
+	x7 = h7 - y7
+	y7 *= scale
+
+	x6 = h6 - y6
+	y6 *= scale
+
+	x1 = h1 - y1
+
+	x0 = h0 - y0
+
+	y5 -= alpha96
+
+	y4 -= alpha96
+
+	x1 += y7
+
+	x0 += y6
+
+	x7 += y5
+
+	x6 += y4
+
+	y3 = h3 + alpha64
+
+	y2 = h2 + alpha64
+
+	x0 += x1
+
+	x6 += x7
+
+	y3 -= alpha64
+	r3low = r3low_stack
+
+	y2 -= alpha64
+	r0low = r0low_stack
+
+	x5 = h5 - y5
+	r3lowx0 = r3low * x0
+	r3high = r3high_stack
+
+	x4 = h4 - y4
+	r0lowx6 = r0low * x6
+	r0high = r0high_stack
+
+	x3 = h3 - y3
+	r3highx0 = r3high * x0
+	sr1low = sr1low_stack
+
+	x2 = h2 - y2
+	r0highx6 = r0high * x6
+	sr1high = sr1high_stack
+
+	x5 += y3
+	r0lowx0 = r0low * x0
+	r1low = r1low_stack
+
+	h6 = r3lowx0 + r0lowx6
+	sr1lowx6 = sr1low * x6
+	r1high = r1high_stack
+
+	x4 += y2
+	r0highx0 = r0high * x0
+	sr2low = sr2low_stack
+
+	h7 = r3highx0 + r0highx6
+	sr1highx6 = sr1high * x6
+	sr2high = sr2high_stack
+
+	x3 += y1
+	r1lowx0 = r1low * x0
+	r2low = r2low_stack
+
+	h0 = r0lowx0 + sr1lowx6
+	sr2lowx6 = sr2low * x6
+	r2high = r2high_stack
+
+	x2 += y0
+	r1highx0 = r1high * x0
+	sr3low = sr3low_stack
+
+	h1 = r0highx0 + sr1highx6
+	sr2highx6 = sr2high * x6
+	sr3high = sr3high_stack
+
+	x4 += x5
+	r2lowx0 = r2low * x0
+
+	h2 = r1lowx0 + sr2lowx6
+	sr3lowx6 = sr3low * x6
+
+	x2 += x3
+	r2highx0 = r2high * x0
+
+	h3 = r1highx0 + sr2highx6
+	sr3highx6 = sr3high * x6
+
+	r1highx4 = r1high * x4
+
+	h4 = r2lowx0 + sr3lowx6
+	r1lowx4 = r1low * x4
+
+	r0highx4 = r0high * x4
+
+	h5 = r2highx0 + sr3highx6
+	r0lowx4 = r0low * x4
+
+	h7 += r1highx4
+	sr3highx4 = sr3high * x4
+
+	h6 += r1lowx4
+	sr3lowx4 = sr3low * x4
+
+	h5 += r0highx4
+	sr2highx4 = sr2high * x4
+
+	h4 += r0lowx4
+	sr2lowx4 = sr2low * x4
+
+	h3 += sr3highx4
+	r0lowx2 = r0low * x2
+
+	h2 += sr3lowx4
+	r0highx2 = r0high * x2
+
+	h1 += sr2highx4
+	r1lowx2 = r1low * x2
+
+	h0 += sr2lowx4
+	r1highx2 = r1high * x2
+
+	h2 += r0lowx2
+	r2lowx2 = r2low * x2
+
+	h3 += r0highx2
+	r2highx2 = r2high * x2
+
+	h4 += r1lowx2
+	sr3lowx2 = sr3low * x2
+
+	h5 += r1highx2
+	sr3highx2 = sr3high * x2
+
+	h6 += r2lowx2
+
+	h7 += r2highx2
+
+	h0 += sr3lowx2
+
+	h1 += sr3highx2
+
+addatmost15bytes:
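+	// Absorb the final 1..15 message bytes, if any. The loads are branchless:
+	// each (l - N) >> 31 mask selects either a real message byte or zero, and
+	// the trailing 0x01 pad is folded in at position l by the mask arithmetic.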
+
+	if l == 0 {
+		goto nomorebytes
+	}
+
+	lbelow2 = l - 2
+
+	lbelow3 = l - 3
+
+	lbelow2 >>= 31
+	lbelow4 = l - 4
+
+	m00 = uint32(m[p+0])
+	lbelow3 >>= 31
+	p += lbelow2
+
+	m01 = uint32(m[p+1])
+	lbelow4 >>= 31
+	p += lbelow3
+
+	m02 = uint32(m[p+2])
+	p += lbelow4
+	m0 = 2151
+
+	m03 = uint32(m[p+3])
+	m0 <<= 51
+	m1 = 2215
+
+	m0 += int64(m00)
+	m01 &^= uint32(lbelow2)
+
+	m02 &^= uint32(lbelow3)
+	m01 -= uint32(lbelow2)
+
+	m01 <<= 8
+	m03 &^= uint32(lbelow4)
+
+	m0 += int64(m01)
+	lbelow2 -= lbelow3
+
+	m02 += uint32(lbelow2)
+	lbelow3 -= lbelow4
+
+	m02 <<= 16
+	m03 += uint32(lbelow3)
+
+	m03 <<= 24
+	m0 += int64(m02)
+
+	m0 += int64(m03)
+	lbelow5 = l - 5
+
+	lbelow6 = l - 6
+	lbelow7 = l - 7
+
+	lbelow5 >>= 31
+	lbelow8 = l - 8
+
+	lbelow6 >>= 31
+	p += lbelow5
+
+	m10 = uint32(m[p+4])
+	lbelow7 >>= 31
+	p += lbelow6
+
+	m11 = uint32(m[p+5])
+	lbelow8 >>= 31
+	p += lbelow7
+
+	m12 = uint32(m[p+6])
+	m1 <<= 51
+	p += lbelow8
+
+	m13 = uint32(m[p+7])
+	m10 &^= uint32(lbelow5)
+	lbelow4 -= lbelow5
+
+	m10 += uint32(lbelow4)
+	lbelow5 -= lbelow6
+
+	m11 &^= uint32(lbelow6)
+	m11 += uint32(lbelow5)
+
+	m11 <<= 8
+	m1 += int64(m10)
+
+	m1 += int64(m11)
+	m12 &^= uint32(lbelow7)
+
+	lbelow6 -= lbelow7
+	m13 &^= uint32(lbelow8)
+
+	m12 += uint32(lbelow6)
+	lbelow7 -= lbelow8
+
+	m12 <<= 16
+	m13 += uint32(lbelow7)
+
+	m13 <<= 24
+	m1 += int64(m12)
+
+	m1 += int64(m13)
+	m2 = 2279
+
+	lbelow9 = l - 9
+	m3 = 2343
+
+	lbelow10 = l - 10
+	lbelow11 = l - 11
+
+	lbelow9 >>= 31
+	lbelow12 = l - 12
+
+	lbelow10 >>= 31
+	p += lbelow9
+
+	m20 = uint32(m[p+8])
+	lbelow11 >>= 31
+	p += lbelow10
+
+	m21 = uint32(m[p+9])
+	lbelow12 >>= 31
+	p += lbelow11
+
+	m22 = uint32(m[p+10])
+	m2 <<= 51
+	p += lbelow12
+
+	m23 = uint32(m[p+11])
+	m20 &^= uint32(lbelow9)
+	lbelow8 -= lbelow9
+
+	m20 += uint32(lbelow8)
+	lbelow9 -= lbelow10
+
+	m21 &^= uint32(lbelow10)
+	m21 += uint32(lbelow9)
+
+	m21 <<= 8
+	m2 += int64(m20)
+
+	m2 += int64(m21)
+	m22 &^= uint32(lbelow11)
+
+	lbelow10 -= lbelow11
+	m23 &^= uint32(lbelow12)
+
+	m22 += uint32(lbelow10)
+	lbelow11 -= lbelow12
+
+	m22 <<= 16
+	m23 += uint32(lbelow11)
+
+	m23 <<= 24
+	m2 += int64(m22)
+
+	m3 <<= 51
+	lbelow13 = l - 13
+
+	lbelow13 >>= 31
+	lbelow14 = l - 14
+
+	lbelow14 >>= 31
+	p += lbelow13
+	lbelow15 = l - 15
+
+	m30 = uint32(m[p+12])
+	lbelow15 >>= 31
+	p += lbelow14
+
+	m31 = uint32(m[p+13])
+	p += lbelow15
+	m2 += int64(m23)
+
+	m32 = uint32(m[p+14])
+	m30 &^= uint32(lbelow13)
+	lbelow12 -= lbelow13
+
+	m30 += uint32(lbelow12)
+	lbelow13 -= lbelow14
+
+	m3 += int64(m30)
+	m31 &^= uint32(lbelow14)
+
+	m31 += uint32(lbelow13)
+	m32 &^= uint32(lbelow15)
+
+	m31 <<= 8
+	lbelow14 -= lbelow15
+
+	m3 += int64(m31)
+	m32 += uint32(lbelow14)
+	d0 = m0
+
+	m32 <<= 16
+	m33 = uint64(lbelow15 + 1)
+	d1 = m1
+
+	m33 <<= 24
+	m3 += int64(m32)
+	d2 = m2
+
+	m3 += int64(m33)
+	d3 = m3
+
+	z3 = math.Float64frombits(uint64(d3))
+
+	z2 = math.Float64frombits(uint64(d2))
+
+	z1 = math.Float64frombits(uint64(d1))
+
+	z0 = math.Float64frombits(uint64(d0))
+
+	z3 -= alpha96
+
+	z2 -= alpha64
+
+	z1 -= alpha32
+
+	z0 -= alpha0
+
+	h5 += z3
+
+	h3 += z2
+
+	h1 += z1
+
+	h0 += z0
+
+	y7 = h7 + alpha130
+
+	y6 = h6 + alpha130
+
+	y1 = h1 + alpha32
+
+	y0 = h0 + alpha32
+
+	y7 -= alpha130
+
+	y6 -= alpha130
+
+	y1 -= alpha32
+
+	y0 -= alpha32
+
+	y5 = h5 + alpha96
+
+	y4 = h4 + alpha96
+
+	x7 = h7 - y7
+	y7 *= scale
+
+	x6 = h6 - y6
+	y6 *= scale
+
+	x1 = h1 - y1
+
+	x0 = h0 - y0
+
+	y5 -= alpha96
+
+	y4 -= alpha96
+
+	x1 += y7
+
+	x0 += y6
+
+	x7 += y5
+
+	x6 += y4
+
+	y3 = h3 + alpha64
+
+	y2 = h2 + alpha64
+
+	x0 += x1
+
+	x6 += x7
+
+	y3 -= alpha64
+	r3low = r3low_stack
+
+	y2 -= alpha64
+	r0low = r0low_stack
+
+	x5 = h5 - y5
+	r3lowx0 = r3low * x0
+	r3high = r3high_stack
+
+	x4 = h4 - y4
+	r0lowx6 = r0low * x6
+	r0high = r0high_stack
+
+	x3 = h3 - y3
+	r3highx0 = r3high * x0
+	sr1low = sr1low_stack
+
+	x2 = h2 - y2
+	r0highx6 = r0high * x6
+	sr1high = sr1high_stack
+
+	x5 += y3
+	r0lowx0 = r0low * x0
+	r1low = r1low_stack
+
+	h6 = r3lowx0 + r0lowx6
+	sr1lowx6 = sr1low * x6
+	r1high = r1high_stack
+
+	x4 += y2
+	r0highx0 = r0high * x0
+	sr2low = sr2low_stack
+
+	h7 = r3highx0 + r0highx6
+	sr1highx6 = sr1high * x6
+	sr2high = sr2high_stack
+
+	x3 += y1
+	r1lowx0 = r1low * x0
+	r2low = r2low_stack
+
+	h0 = r0lowx0 + sr1lowx6
+	sr2lowx6 = sr2low * x6
+	r2high = r2high_stack
+
+	x2 += y0
+	r1highx0 = r1high * x0
+	sr3low = sr3low_stack
+
+	h1 = r0highx0 + sr1highx6
+	sr2highx6 = sr2high * x6
+	sr3high = sr3high_stack
+
+	x4 += x5
+	r2lowx0 = r2low * x0
+
+	h2 = r1lowx0 + sr2lowx6
+	sr3lowx6 = sr3low * x6
+
+	x2 += x3
+	r2highx0 = r2high * x0
+
+	h3 = r1highx0 + sr2highx6
+	sr3highx6 = sr3high * x6
+
+	r1highx4 = r1high * x4
+
+	h4 = r2lowx0 + sr3lowx6
+	r1lowx4 = r1low * x4
+
+	r0highx4 = r0high * x4
+
+	h5 = r2highx0 + sr3highx6
+	r0lowx4 = r0low * x4
+
+	h7 += r1highx4
+	sr3highx4 = sr3high * x4
+
+	h6 += r1lowx4
+	sr3lowx4 = sr3low * x4
+
+	h5 += r0highx4
+	sr2highx4 = sr2high * x4
+
+	h4 += r0lowx4
+	sr2lowx4 = sr2low * x4
+
+	h3 += sr3highx4
+	r0lowx2 = r0low * x2
+
+	h2 += sr3lowx4
+	r0highx2 = r0high * x2
+
+	h1 += sr2highx4
+	r1lowx2 = r1low * x2
+
+	h0 += sr2lowx4
+	r1highx2 = r1high * x2
+
+	h2 += r0lowx2
+	r2lowx2 = r2low * x2
+
+	h3 += r0highx2
+	r2highx2 = r2high * x2
+
+	h4 += r1lowx2
+	sr3lowx2 = sr3low * x2
+
+	h5 += r1highx2
+	sr3highx2 = sr3high * x2
+
+	h6 += r2lowx2
+
+	h7 += r2highx2
+
+	h0 += sr3lowx2
+
+	h1 += sr3highx2
+
+nomorebytes:
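+	// Freeze the result: convert the float limbs back to integers, propagate
+	// carries, conditionally subtract 2^130 - 5, add the secret pad s, and
+	// serialize the 16-byte tag into out.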
+
+	y7 = h7 + alpha130
+
+	y0 = h0 + alpha32
+
+	y1 = h1 + alpha32
+
+	y2 = h2 + alpha64
+
+	y7 -= alpha130
+
+	y3 = h3 + alpha64
+
+	y4 = h4 + alpha96
+
+	y5 = h5 + alpha96
+
+	x7 = h7 - y7
+	y7 *= scale
+
+	y0 -= alpha32
+
+	y1 -= alpha32
+
+	y2 -= alpha64
+
+	h6 += x7
+
+	y3 -= alpha64
+
+	y4 -= alpha96
+
+	y5 -= alpha96
+
+	y6 = h6 + alpha130
+
+	x0 = h0 - y0
+
+	x1 = h1 - y1
+
+	x2 = h2 - y2
+
+	y6 -= alpha130
+
+	x0 += y7
+
+	x3 = h3 - y3
+
+	x4 = h4 - y4
+
+	x5 = h5 - y5
+
+	x6 = h6 - y6
+
+	y6 *= scale
+
+	x2 += y0
+
+	x3 += y1
+
+	x4 += y2
+
+	x0 += y6
+
+	x5 += y3
+
+	x6 += y4
+
+	x2 += x3
+
+	x0 += x1
+
+	x4 += x5
+
+	x6 += y5
+
+	x2 += offset1
+	d1 = int64(math.Float64bits(x2))
+
+	x0 += offset0
+	d0 = int64(math.Float64bits(x0))
+
+	x4 += offset2
+	d2 = int64(math.Float64bits(x4))
+
+	x6 += offset3
+	d3 = int64(math.Float64bits(x6))
+
+	f0 = uint64(d0)
+
+	f1 = uint64(d1)
+	bits32 = math.MaxUint64
+
+	f2 = uint64(d2)
+	bits32 >>= 32
+
+	f3 = uint64(d3)
+	f = f0 >> 32
+
+	f0 &= bits32
+	f &= 255
+
+	f1 += f
+	g0 = f0 + 5
+
+	g = g0 >> 32
+	g0 &= bits32
+
+	f = f1 >> 32
+	f1 &= bits32
+
+	f &= 255
+	g1 = f1 + g
+
+	g = g1 >> 32
+	f2 += f
+
+	f = f2 >> 32
+	g1 &= bits32
+
+	f2 &= bits32
+	f &= 255
+
+	f3 += f
+	g2 = f2 + g
+
+	g = g2 >> 32
+	g2 &= bits32
+
+	f4 = f3 >> 32
+	f3 &= bits32
+
+	f4 &= 255
+	g3 = f3 + g
+
+	g = g3 >> 32
+	g3 &= bits32
+
+	g4 = f4 + g
+
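+	// g holds h + 5 carried through; subtracting 4 from the top limb g4 gives
+	// h - (2^130 - 5). Its sign bit selects, in constant time, between the
+	// f limbs (h) and the g limbs (h - (2^130 - 5)).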
+	g4 = g4 - 4
+	s00 = uint32(s[0])
+
+	f = uint64(int64(g4) >> 63)
+	s01 = uint32(s[1])
+
+	f0 &= f
+	g0 &^= f
+	s02 = uint32(s[2])
+
+	f1 &= f
+	f0 |= g0
+	s03 = uint32(s[3])
+
+	g1 &^= f
+	f2 &= f
+	s10 = uint32(s[4])
+
+	f3 &= f
+	g2 &^= f
+	s11 = uint32(s[5])
+
+	g3 &^= f
+	f1 |= g1
+	s12 = uint32(s[6])
+
+	f2 |= g2
+	f3 |= g3
+	s13 = uint32(s[7])
+
+	s01 <<= 8
+	f0 += uint64(s00)
+	s20 = uint32(s[8])
+
+	s02 <<= 16
+	f0 += uint64(s01)
+	s21 = uint32(s[9])
+
+	s03 <<= 24
+	f0 += uint64(s02)
+	s22 = uint32(s[10])
+
+	s11 <<= 8
+	f1 += uint64(s10)
+	s23 = uint32(s[11])
+
+	s12 <<= 16
+	f1 += uint64(s11)
+	s30 = uint32(s[12])
+
+	s13 <<= 24
+	f1 += uint64(s12)
+	s31 = uint32(s[13])
+
+	f0 += uint64(s03)
+	f1 += uint64(s13)
+	s32 = uint32(s[14])
+
+	s21 <<= 8
+	f2 += uint64(s20)
+	s33 = uint32(s[15])
+
+	s22 <<= 16
+	f2 += uint64(s21)
+
+	s23 <<= 24
+	f2 += uint64(s22)
+
+	s31 <<= 8
+	f3 += uint64(s30)
+
+	s32 <<= 16
+	f3 += uint64(s31)
+
+	s33 <<= 24
+	f3 += uint64(s32)
+
+	f2 += uint64(s23)
+	f3 += uint64(s33)
+
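+	// Write the 128-bit tag little-endian, folding remaining carries forward.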
+	out[0] = byte(f0)
+	f0 >>= 8
+	out[1] = byte(f0)
+	f0 >>= 8
+	out[2] = byte(f0)
+	f0 >>= 8
+	out[3] = byte(f0)
+	f0 >>= 8
+	f1 += f0
+
+	out[4] = byte(f1)
+	f1 >>= 8
+	out[5] = byte(f1)
+	f1 >>= 8
+	out[6] = byte(f1)
+	f1 >>= 8
+	out[7] = byte(f1)
+	f1 >>= 8
+	f2 += f1
+
+	out[8] = byte(f2)
+	f2 >>= 8
+	out[9] = byte(f2)
+	f2 >>= 8
+	out[10] = byte(f2)
+	f2 >>= 8
+	out[11] = byte(f2)
+	f2 >>= 8
+	f3 += f2
+
+	out[12] = byte(f3)
+	f3 >>= 8
+	out[13] = byte(f3)
+	f3 >>= 8
+	out[14] = byte(f3)
+	f3 >>= 8
+	out[15] = byte(f3)
+}

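For orientation, the vendored poly1305 package keeps upstream x/crypto's two-function API: Sum computes the 16-byte authenticator and Verify checks one in constant time. A minimal usage sketch against the import path added in this commit (illustrative only; in the TLS stack the one-time key is derived per record from ChaCha20, not drawn at random as here):

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tls/crypto/poly1305"
)

func main() {
	// Poly1305 keys are one-time: a key must never authenticate two messages.
	var key [32]byte
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}

	msg := []byte("example message")

	var tag [16]byte
	poly1305.Sum(&tag, msg, &key) // 16-byte authenticator

	// Verify recomputes the tag and compares in constant time.
	fmt.Println(poly1305.Verify(&tag, msg, &key)) // true
}

Reusing a Poly1305 key across two messages breaks the MAC, which is why the AEAD derives a fresh one-time key for every nonce.
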
+ 1 - 1
psiphon/common/tls/key_agreement.go

@@ -17,7 +17,7 @@ import (
 	"io"
 	"math/big"
 
-	"github.com/Psiphon-Inc/crypto/curve25519"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tls/crypto/curve25519"
 )
 
 var errClientKeyExchange = errors.New("tls: invalid ClientKeyExchange message")

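key_agreement.go consumes this package through the classic x/crypto curve25519 API: ScalarBaseMult derives a public key from a 32-byte scalar, and ScalarMult combines a scalar with a peer's public key. A minimal X25519 exchange sketch (illustrative; the real handshake adds message framing and key derivation on top):

package main

import (
	"bytes"
	"crypto/rand"
	"fmt"

	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tls/crypto/curve25519"
)

func main() {
	// Each side draws a random 32-byte scalar as its private key.
	var aPriv, bPriv [32]byte
	if _, err := rand.Read(aPriv[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(bPriv[:]); err != nil {
		panic(err)
	}

	// Public keys: scalar-multiply the curve's base point.
	// (The package clamps the scalar internally.)
	var aPub, bPub [32]byte
	curve25519.ScalarBaseMult(&aPub, &aPriv)
	curve25519.ScalarBaseMult(&bPub, &bPriv)

	// Each side combines its own scalar with the peer's public key;
	// both arrive at the same 32-byte shared secret.
	var aShared, bShared [32]byte
	curve25519.ScalarMult(&aShared, &aPriv, &bPub)
	curve25519.ScalarMult(&bShared, &bPriv, &aPub)

	fmt.Println(bytes.Equal(aShared[:], bShared[:])) // true
}

In ECDHE with this curve, that shared secret becomes the pre-master secret that the TLS code feeds into key derivation.
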
Some files were not shown because too many files changed in this diff