Optimize architecture (优化架构)

This commit is contained in:
huangxiaolei
2022-11-23 18:05:11 +08:00
parent 3efed3defe
commit 43403202b5
6760 changed files with 33748 additions and 554768 deletions

64
gate/kcp/autotune.go Normal file

@@ -0,0 +1,64 @@
package kcp
const maxAutoTuneSamples = 258
// pulse represents a 0/1 signal with time sequence
type pulse struct {
bit bool // 0 or 1
seq uint32 // sequence of the signal
}
// autoTune object
type autoTune struct {
pulses [maxAutoTuneSamples]pulse
}
// Sample adds a signal sample to the pulse buffer
func (tune *autoTune) Sample(bit bool, seq uint32) {
tune.pulses[seq%maxAutoTuneSamples] = pulse{bit, seq}
}
// Find a period for a given signal
// returns -1 if not found
//
// ---              ------
//    |            |
//    |____________|
//         Period
//  Falling Edge    Rising Edge
func (tune *autoTune) FindPeriod(bit bool) int {
// last pulse and initial index setup
lastPulse := tune.pulses[0]
idx := 1
// left edge
var leftEdge int
for ; idx < len(tune.pulses); idx++ {
if lastPulse.bit != bit && tune.pulses[idx].bit == bit { // edge found
if lastPulse.seq+1 == tune.pulses[idx].seq { // ensure edge continuity
leftEdge = idx
break
}
}
lastPulse = tune.pulses[idx]
}
// right edge
var rightEdge int
lastPulse = tune.pulses[leftEdge]
idx = leftEdge + 1
for ; idx < len(tune.pulses); idx++ {
if lastPulse.seq+1 == tune.pulses[idx].seq { // ensure pulses at this level are contiguous in sequence
if lastPulse.bit == bit && tune.pulses[idx].bit != bit { // edge found
rightEdge = idx
break
}
} else {
return -1
}
lastPulse = tune.pulses[idx]
}
return rightEdge - leftEdge
}

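A hypothetical in-package test (the test name is mine, not part of this commit) sketching how the FEC decoder below is expected to use this sampler: data packets are sampled as 1-bits, parity packets as 0-bits, and after two lossless shard groups FindPeriod recovers both shard counts.

package kcp

import "testing"

// TestAutoTuneInferShards is a hypothetical example: 10 data + 3 parity
// packets per group, sampled over two consecutive lossless groups.
func TestAutoTuneInferShards(t *testing.T) {
    const dataShards, parityShards = 10, 3
    var tune autoTune
    for seq := uint32(0); seq < 2*(dataShards+parityShards); seq++ {
        // data shards occupy the first dataShards positions of each group
        tune.Sample(seq%(dataShards+parityShards) < dataShards, seq)
    }
    if got := tune.FindPeriod(true); got != dataShards {
        t.Fatalf("data period = %d, want %d", got, dataShards)
    }
    if got := tune.FindPeriod(false); got != parityShards {
        t.Fatalf("parity period = %d, want %d", got, parityShards)
    }
}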
47
gate/kcp/autotune_test.go Normal file

@@ -0,0 +1,47 @@
package kcp
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestAutoTune(t *testing.T) {
signals := []uint32{0, 0, 0, 0, 0, 0}
tune := autoTune{}
for i := 0; i < len(signals); i++ {
if signals[i] == 0 {
tune.Sample(false, uint32(i))
} else {
tune.Sample(true, uint32(i))
}
}
assert.Equal(t, -1, tune.FindPeriod(false))
assert.Equal(t, -1, tune.FindPeriod(true))
signals = []uint32{1, 0, 1, 0, 0, 1}
tune = autoTune{}
for i := 0; i < len(signals); i++ {
if signals[i] == 0 {
tune.Sample(false, uint32(i))
} else {
tune.Sample(true, uint32(i))
}
}
assert.Equal(t, 1, tune.FindPeriod(false))
assert.Equal(t, 1, tune.FindPeriod(true))
signals = []uint32{1, 0, 0, 0, 0, 1}
tune = autoTune{}
for i := 0; i < len(signals); i++ {
if signals[i] == 0 {
tune.Sample(false, uint32(i))
} else {
tune.Sample(true, uint32(i))
}
}
assert.Equal(t, -1, tune.FindPeriod(true))
assert.Equal(t, 4, tune.FindPeriod(false))
}

12
gate/kcp/batchconn.go Normal file

@@ -0,0 +1,12 @@
package kcp
import "golang.org/x/net/ipv4"
const (
batchSize = 16
)
type batchConn interface {
WriteBatch(ms []ipv4.Message, flags int) (int, error)
ReadBatch(ms []ipv4.Message, flags int) (int, error)
}

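For reference, a hypothetical helper (not part of this commit) mirroring the IPv4 branch of Listener.monitor() in readloop_linux.go below: a plain *net.UDPConn is wrapped so that batched reads and writes become possible.

package kcp

import (
    "net"

    "golang.org/x/net/ipv4"
)

// asBatchConn is a hypothetical helper: wrapping a *net.UDPConn lets
// ReadBatch/WriteBatch move up to batchSize packets per syscall
// (recvmmsg/sendmmsg on Linux).
func asBatchConn(conn net.PacketConn) batchConn {
    if _, ok := conn.(*net.UDPConn); ok {
        return ipv4.NewPacketConn(conn)
    }
    return nil // caller falls back to the single-packet read path
}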
618
gate/kcp/crypt.go Normal file

@@ -0,0 +1,618 @@
package kcp
import (
"crypto/aes"
"crypto/cipher"
"crypto/des"
"crypto/sha1"
"unsafe"
xor "github.com/templexxx/xorsimd"
"github.com/tjfoc/gmsm/sm4"
"golang.org/x/crypto/blowfish"
"golang.org/x/crypto/cast5"
"golang.org/x/crypto/pbkdf2"
"golang.org/x/crypto/salsa20"
"golang.org/x/crypto/tea"
"golang.org/x/crypto/twofish"
"golang.org/x/crypto/xtea"
)
var (
initialVector = []byte{167, 115, 79, 156, 18, 172, 27, 1, 164, 21, 242, 193, 252, 120, 230, 107}
saltxor = `sH3CIVoF#rWLtJo6`
)
// BlockCrypt defines encryption/decryption methods for a given byte slice.
// Note for implementers: the data to be encrypted carries a built-in
// nonce in its first 16 bytes
type BlockCrypt interface {
// Encrypt encrypts the whole block in src into dst.
// Dst and src may point at the same memory.
Encrypt(dst, src []byte)
// Decrypt decrypts the whole block in src into dst.
// Dst and src may point at the same memory.
Decrypt(dst, src []byte)
}
type salsa20BlockCrypt struct {
key [32]byte
}
// NewSalsa20BlockCrypt https://en.wikipedia.org/wiki/Salsa20
func NewSalsa20BlockCrypt(key []byte) (BlockCrypt, error) {
c := new(salsa20BlockCrypt)
copy(c.key[:], key)
return c, nil
}
func (c *salsa20BlockCrypt) Encrypt(dst, src []byte) {
salsa20.XORKeyStream(dst[8:], src[8:], src[:8], &c.key)
copy(dst[:8], src[:8])
}
func (c *salsa20BlockCrypt) Decrypt(dst, src []byte) {
salsa20.XORKeyStream(dst[8:], src[8:], src[:8], &c.key)
copy(dst[:8], src[:8])
}
type sm4BlockCrypt struct {
encbuf [sm4.BlockSize]byte // 64bit alignment enc/dec buffer
decbuf [2 * sm4.BlockSize]byte
block cipher.Block
}
// NewSM4BlockCrypt https://github.com/tjfoc/gmsm/tree/master/sm4
func NewSM4BlockCrypt(key []byte) (BlockCrypt, error) {
c := new(sm4BlockCrypt)
block, err := sm4.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
return c, nil
}
func (c *sm4BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
func (c *sm4BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
type twofishBlockCrypt struct {
encbuf [twofish.BlockSize]byte
decbuf [2 * twofish.BlockSize]byte
block cipher.Block
}
// NewTwofishBlockCrypt https://en.wikipedia.org/wiki/Twofish
func NewTwofishBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(twofishBlockCrypt)
block, err := twofish.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
return c, nil
}
func (c *twofishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
func (c *twofishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
type tripleDESBlockCrypt struct {
encbuf [des.BlockSize]byte
decbuf [2 * des.BlockSize]byte
block cipher.Block
}
// NewTripleDESBlockCrypt https://en.wikipedia.org/wiki/Triple_DES
func NewTripleDESBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(tripleDESBlockCrypt)
block, err := des.NewTripleDESCipher(key)
if err != nil {
return nil, err
}
c.block = block
return c, nil
}
func (c *tripleDESBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
func (c *tripleDESBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
type cast5BlockCrypt struct {
encbuf [cast5.BlockSize]byte
decbuf [2 * cast5.BlockSize]byte
block cipher.Block
}
// NewCast5BlockCrypt https://en.wikipedia.org/wiki/CAST-128
func NewCast5BlockCrypt(key []byte) (BlockCrypt, error) {
c := new(cast5BlockCrypt)
block, err := cast5.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
return c, nil
}
func (c *cast5BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
func (c *cast5BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
type blowfishBlockCrypt struct {
encbuf [blowfish.BlockSize]byte
decbuf [2 * blowfish.BlockSize]byte
block cipher.Block
}
// NewBlowfishBlockCrypt https://en.wikipedia.org/wiki/Blowfish_(cipher)
func NewBlowfishBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(blowfishBlockCrypt)
block, err := blowfish.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
return c, nil
}
func (c *blowfishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
func (c *blowfishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
type aesBlockCrypt struct {
encbuf [aes.BlockSize]byte
decbuf [2 * aes.BlockSize]byte
block cipher.Block
}
// NewAESBlockCrypt https://en.wikipedia.org/wiki/Advanced_Encryption_Standard
func NewAESBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(aesBlockCrypt)
block, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
return c, nil
}
func (c *aesBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
func (c *aesBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
type teaBlockCrypt struct {
encbuf [tea.BlockSize]byte
decbuf [2 * tea.BlockSize]byte
block cipher.Block
}
// NewTEABlockCrypt https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm
func NewTEABlockCrypt(key []byte) (BlockCrypt, error) {
c := new(teaBlockCrypt)
block, err := tea.NewCipherWithRounds(key, 16)
if err != nil {
return nil, err
}
c.block = block
return c, nil
}
func (c *teaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
func (c *teaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
type xteaBlockCrypt struct {
encbuf [xtea.BlockSize]byte
decbuf [2 * xtea.BlockSize]byte
block cipher.Block
}
// NewXTEABlockCrypt https://en.wikipedia.org/wiki/XTEA
func NewXTEABlockCrypt(key []byte) (BlockCrypt, error) {
c := new(xteaBlockCrypt)
block, err := xtea.NewCipher(key)
if err != nil {
return nil, err
}
c.block = block
return c, nil
}
func (c *xteaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
func (c *xteaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
type simpleXORBlockCrypt struct {
xortbl []byte
}
// NewSimpleXORBlockCrypt simple XOR with key expansion
func NewSimpleXORBlockCrypt(key []byte) (BlockCrypt, error) {
c := new(simpleXORBlockCrypt)
c.xortbl = pbkdf2.Key(key, []byte(saltxor), 32, mtuLimit, sha1.New)
return c, nil
}
func (c *simpleXORBlockCrypt) Encrypt(dst, src []byte) { xor.Bytes(dst, src, c.xortbl) }
func (c *simpleXORBlockCrypt) Decrypt(dst, src []byte) { xor.Bytes(dst, src, c.xortbl) }
type noneBlockCrypt struct{}
// NewNoneBlockCrypt does nothing but copy
func NewNoneBlockCrypt(key []byte) (BlockCrypt, error) {
return new(noneBlockCrypt), nil
}
func (c *noneBlockCrypt) Encrypt(dst, src []byte) { copy(dst, src) }
func (c *noneBlockCrypt) Decrypt(dst, src []byte) { copy(dst, src) }
// packet encryption with local CFB mode
func encrypt(block cipher.Block, dst, src, buf []byte) {
switch block.BlockSize() {
case 8:
encrypt8(block, dst, src, buf)
case 16:
encrypt16(block, dst, src, buf)
default:
panic("unsupported cipher block size")
}
}
// optimized encryption for ciphers that work on 8-byte blocks
func encrypt8(block cipher.Block, dst, src, buf []byte) {
tbl := buf[:8]
block.Encrypt(tbl, initialVector)
n := len(src) / 8
base := 0
repeat := n / 8
left := n % 8
ptr_tbl := (*uint64)(unsafe.Pointer(&tbl[0]))
for i := 0; i < repeat; i++ {
s := src[base:][0:64]
d := dst[base:][0:64]
// 1
*(*uint64)(unsafe.Pointer(&d[0])) = *(*uint64)(unsafe.Pointer(&s[0])) ^ *ptr_tbl
block.Encrypt(tbl, d[0:8])
// 2
*(*uint64)(unsafe.Pointer(&d[8])) = *(*uint64)(unsafe.Pointer(&s[8])) ^ *ptr_tbl
block.Encrypt(tbl, d[8:16])
// 3
*(*uint64)(unsafe.Pointer(&d[16])) = *(*uint64)(unsafe.Pointer(&s[16])) ^ *ptr_tbl
block.Encrypt(tbl, d[16:24])
// 4
*(*uint64)(unsafe.Pointer(&d[24])) = *(*uint64)(unsafe.Pointer(&s[24])) ^ *ptr_tbl
block.Encrypt(tbl, d[24:32])
// 5
*(*uint64)(unsafe.Pointer(&d[32])) = *(*uint64)(unsafe.Pointer(&s[32])) ^ *ptr_tbl
block.Encrypt(tbl, d[32:40])
// 6
*(*uint64)(unsafe.Pointer(&d[40])) = *(*uint64)(unsafe.Pointer(&s[40])) ^ *ptr_tbl
block.Encrypt(tbl, d[40:48])
// 7
*(*uint64)(unsafe.Pointer(&d[48])) = *(*uint64)(unsafe.Pointer(&s[48])) ^ *ptr_tbl
block.Encrypt(tbl, d[48:56])
// 8
*(*uint64)(unsafe.Pointer(&d[56])) = *(*uint64)(unsafe.Pointer(&s[56])) ^ *ptr_tbl
block.Encrypt(tbl, d[56:64])
base += 64
}
switch left {
case 7:
*(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *ptr_tbl
block.Encrypt(tbl, dst[base:])
base += 8
fallthrough
case 6:
*(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *ptr_tbl
block.Encrypt(tbl, dst[base:])
base += 8
fallthrough
case 5:
*(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *ptr_tbl
block.Encrypt(tbl, dst[base:])
base += 8
fallthrough
case 4:
*(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *ptr_tbl
block.Encrypt(tbl, dst[base:])
base += 8
fallthrough
case 3:
*(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *ptr_tbl
block.Encrypt(tbl, dst[base:])
base += 8
fallthrough
case 2:
*(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *ptr_tbl
block.Encrypt(tbl, dst[base:])
base += 8
fallthrough
case 1:
*(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *ptr_tbl
block.Encrypt(tbl, dst[base:])
base += 8
fallthrough
case 0:
xorBytes(dst[base:], src[base:], tbl)
}
}
// optimized encryption for ciphers that work on 16-byte blocks
func encrypt16(block cipher.Block, dst, src, buf []byte) {
tbl := buf[:16]
block.Encrypt(tbl, initialVector)
n := len(src) / 16
base := 0
repeat := n / 8
left := n % 8
for i := 0; i < repeat; i++ {
s := src[base:][0:128]
d := dst[base:][0:128]
// 1
xor.Bytes16Align(d[0:16], s[0:16], tbl)
block.Encrypt(tbl, d[0:16])
// 2
xor.Bytes16Align(d[16:32], s[16:32], tbl)
block.Encrypt(tbl, d[16:32])
// 3
xor.Bytes16Align(d[32:48], s[32:48], tbl)
block.Encrypt(tbl, d[32:48])
// 4
xor.Bytes16Align(d[48:64], s[48:64], tbl)
block.Encrypt(tbl, d[48:64])
// 5
xor.Bytes16Align(d[64:80], s[64:80], tbl)
block.Encrypt(tbl, d[64:80])
// 6
xor.Bytes16Align(d[80:96], s[80:96], tbl)
block.Encrypt(tbl, d[80:96])
// 7
xor.Bytes16Align(d[96:112], s[96:112], tbl)
block.Encrypt(tbl, d[96:112])
// 8
xor.Bytes16Align(d[112:128], s[112:128], tbl)
block.Encrypt(tbl, d[112:128])
base += 128
}
switch left {
case 7:
xor.Bytes16Align(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 16
fallthrough
case 6:
xor.Bytes16Align(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 16
fallthrough
case 5:
xor.Bytes16Align(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 16
fallthrough
case 4:
xor.Bytes16Align(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 16
fallthrough
case 3:
xor.Bytes16Align(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 16
fallthrough
case 2:
xor.Bytes16Align(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 16
fallthrough
case 1:
xor.Bytes16Align(dst[base:], src[base:], tbl)
block.Encrypt(tbl, dst[base:])
base += 16
fallthrough
case 0:
xorBytes(dst[base:], src[base:], tbl)
}
}
// decryption
func decrypt(block cipher.Block, dst, src, buf []byte) {
switch block.BlockSize() {
case 8:
decrypt8(block, dst, src, buf)
case 16:
decrypt16(block, dst, src, buf)
default:
panic("unsupported cipher block size")
}
}
// decrypt with an 8-byte block cipher; all byte slices are assumed to be 64-bit aligned
func decrypt8(block cipher.Block, dst, src, buf []byte) {
tbl := buf[0:8]
next := buf[8:16]
block.Encrypt(tbl, initialVector)
n := len(src) / 8
base := 0
repeat := n / 8
left := n % 8
ptr_tbl := (*uint64)(unsafe.Pointer(&tbl[0]))
ptr_next := (*uint64)(unsafe.Pointer(&next[0]))
for i := 0; i < repeat; i++ {
s := src[base:][0:64]
d := dst[base:][0:64]
// 1
block.Encrypt(next, s[0:8])
*(*uint64)(unsafe.Pointer(&d[0])) = *(*uint64)(unsafe.Pointer(&s[0])) ^ *ptr_tbl
// 2
block.Encrypt(tbl, s[8:16])
*(*uint64)(unsafe.Pointer(&d[8])) = *(*uint64)(unsafe.Pointer(&s[8])) ^ *ptr_next
// 3
block.Encrypt(next, s[16:24])
*(*uint64)(unsafe.Pointer(&d[16])) = *(*uint64)(unsafe.Pointer(&s[16])) ^ *ptr_tbl
// 4
block.Encrypt(tbl, s[24:32])
*(*uint64)(unsafe.Pointer(&d[24])) = *(*uint64)(unsafe.Pointer(&s[24])) ^ *ptr_next
// 5
block.Encrypt(next, s[32:40])
*(*uint64)(unsafe.Pointer(&d[32])) = *(*uint64)(unsafe.Pointer(&s[32])) ^ *ptr_tbl
// 6
block.Encrypt(tbl, s[40:48])
*(*uint64)(unsafe.Pointer(&d[40])) = *(*uint64)(unsafe.Pointer(&s[40])) ^ *ptr_next
// 7
block.Encrypt(next, s[48:56])
*(*uint64)(unsafe.Pointer(&d[48])) = *(*uint64)(unsafe.Pointer(&s[48])) ^ *ptr_tbl
// 8
block.Encrypt(tbl, s[56:64])
*(*uint64)(unsafe.Pointer(&d[56])) = *(*uint64)(unsafe.Pointer(&s[56])) ^ *ptr_next
base += 64
}
switch left {
case 7:
block.Encrypt(next, src[base:])
*(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *(*uint64)(unsafe.Pointer(&tbl[0]))
tbl, next = next, tbl
base += 8
fallthrough
case 6:
block.Encrypt(next, src[base:])
*(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *(*uint64)(unsafe.Pointer(&tbl[0]))
tbl, next = next, tbl
base += 8
fallthrough
case 5:
block.Encrypt(next, src[base:])
*(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *(*uint64)(unsafe.Pointer(&tbl[0]))
tbl, next = next, tbl
base += 8
fallthrough
case 4:
block.Encrypt(next, src[base:])
*(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *(*uint64)(unsafe.Pointer(&tbl[0]))
tbl, next = next, tbl
base += 8
fallthrough
case 3:
block.Encrypt(next, src[base:])
*(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *(*uint64)(unsafe.Pointer(&tbl[0]))
tbl, next = next, tbl
base += 8
fallthrough
case 2:
block.Encrypt(next, src[base:])
*(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *(*uint64)(unsafe.Pointer(&tbl[0]))
tbl, next = next, tbl
base += 8
fallthrough
case 1:
block.Encrypt(next, src[base:])
*(*uint64)(unsafe.Pointer(&dst[base])) = *(*uint64)(unsafe.Pointer(&src[base])) ^ *(*uint64)(unsafe.Pointer(&tbl[0]))
tbl, next = next, tbl
base += 8
fallthrough
case 0:
xorBytes(dst[base:], src[base:], tbl)
}
}
func decrypt16(block cipher.Block, dst, src, buf []byte) {
tbl := buf[0:16]
next := buf[16:32]
block.Encrypt(tbl, initialVector)
n := len(src) / 16
base := 0
repeat := n / 8
left := n % 8
for i := 0; i < repeat; i++ {
s := src[base:][0:128]
d := dst[base:][0:128]
// 1
block.Encrypt(next, s[0:16])
xor.Bytes16Align(d[0:16], s[0:16], tbl)
// 2
block.Encrypt(tbl, s[16:32])
xor.Bytes16Align(d[16:32], s[16:32], next)
// 3
block.Encrypt(next, s[32:48])
xor.Bytes16Align(d[32:48], s[32:48], tbl)
// 4
block.Encrypt(tbl, s[48:64])
xor.Bytes16Align(d[48:64], s[48:64], next)
// 5
block.Encrypt(next, s[64:80])
xor.Bytes16Align(d[64:80], s[64:80], tbl)
// 6
block.Encrypt(tbl, s[80:96])
xor.Bytes16Align(d[80:96], s[80:96], next)
// 7
block.Encrypt(next, s[96:112])
xor.Bytes16Align(d[96:112], s[96:112], tbl)
// 8
block.Encrypt(tbl, s[112:128])
xor.Bytes16Align(d[112:128], s[112:128], next)
base += 128
}
switch left {
case 7:
block.Encrypt(next, src[base:])
xor.Bytes16Align(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 16
fallthrough
case 6:
block.Encrypt(next, src[base:])
xor.Bytes16Align(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 16
fallthrough
case 5:
block.Encrypt(next, src[base:])
xor.Bytes16Align(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 16
fallthrough
case 4:
block.Encrypt(next, src[base:])
xor.Bytes16Align(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 16
fallthrough
case 3:
block.Encrypt(next, src[base:])
xor.Bytes16Align(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 16
fallthrough
case 2:
block.Encrypt(next, src[base:])
xor.Bytes16Align(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 16
fallthrough
case 1:
block.Encrypt(next, src[base:])
xor.Bytes16Align(dst[base:], src[base:], tbl)
tbl, next = next, tbl
base += 16
fallthrough
case 0:
xorBytes(dst[base:], src[base:], tbl)
}
}
// xorBytes performs a per-byte XOR of a and b into dst
func xorBytes(dst, a, b []byte) int {
n := len(a)
if len(b) < n {
n = len(b)
}
if n == 0 {
return 0
}
for i := 0; i < n; i++ {
dst[i] = a[i] ^ b[i]
}
return n
}

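The BlockCrypt contract above says the first 16 bytes of every packet carry a nonce; the session layer (in the suppressed sess.go diff) fills them from an Entropy source before calling Encrypt. A minimal round-trip sketch as a hypothetical in-package test (the test name is mine; mtuLimit is defined elsewhere in the package):

package kcp

import (
    "bytes"
    "crypto/rand"
    "io"
    "testing"
)

// TestBlockCryptRoundTrip is a hypothetical example of the Encrypt/Decrypt pair.
func TestBlockCryptRoundTrip(t *testing.T) {
    key := make([]byte, 32)
    io.ReadFull(rand.Reader, key)
    bc, err := NewAESBlockCrypt(key)
    if err != nil {
        t.Fatal(err)
    }
    pkt := make([]byte, mtuLimit)
    io.ReadFull(rand.Reader, pkt[:16]) // per-packet nonce, as the interface note requires
    copy(pkt[16:], "some payload")
    enc := make([]byte, len(pkt))
    dec := make([]byte, len(pkt))
    bc.Encrypt(enc, pkt)
    bc.Decrypt(dec, enc)
    if !bytes.Equal(pkt, dec) {
        t.Fatal("decrypt(encrypt(pkt)) != pkt")
    }
}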
289
gate/kcp/crypt_test.go Normal file

@@ -0,0 +1,289 @@
package kcp
import (
"bytes"
"crypto/aes"
"crypto/md5"
"crypto/rand"
"crypto/sha1"
"hash/crc32"
"io"
"testing"
)
func TestSM4(t *testing.T) {
bc, err := NewSM4BlockCrypt(pass[:16])
if err != nil {
t.Fatal(err)
}
cryptTest(t, bc)
}
func TestAES(t *testing.T) {
bc, err := NewAESBlockCrypt(pass[:32])
if err != nil {
t.Fatal(err)
}
cryptTest(t, bc)
}
func TestTEA(t *testing.T) {
bc, err := NewTEABlockCrypt(pass[:16])
if err != nil {
t.Fatal(err)
}
cryptTest(t, bc)
}
func TestXOR(t *testing.T) {
bc, err := NewSimpleXORBlockCrypt(pass[:32])
if err != nil {
t.Fatal(err)
}
cryptTest(t, bc)
}
func TestBlowfish(t *testing.T) {
bc, err := NewBlowfishBlockCrypt(pass[:32])
if err != nil {
t.Fatal(err)
}
cryptTest(t, bc)
}
func TestNone(t *testing.T) {
bc, err := NewNoneBlockCrypt(pass[:32])
if err != nil {
t.Fatal(err)
}
cryptTest(t, bc)
}
func TestCast5(t *testing.T) {
bc, err := NewCast5BlockCrypt(pass[:16])
if err != nil {
t.Fatal(err)
}
cryptTest(t, bc)
}
func Test3DES(t *testing.T) {
bc, err := NewTripleDESBlockCrypt(pass[:24])
if err != nil {
t.Fatal(err)
}
cryptTest(t, bc)
}
func TestTwofish(t *testing.T) {
bc, err := NewTwofishBlockCrypt(pass[:32])
if err != nil {
t.Fatal(err)
}
cryptTest(t, bc)
}
func TestXTEA(t *testing.T) {
bc, err := NewXTEABlockCrypt(pass[:16])
if err != nil {
t.Fatal(err)
}
cryptTest(t, bc)
}
func TestSalsa20(t *testing.T) {
bc, err := NewSalsa20BlockCrypt(pass[:32])
if err != nil {
t.Fatal(err)
}
cryptTest(t, bc)
}
func cryptTest(t *testing.T, bc BlockCrypt) {
data := make([]byte, mtuLimit)
io.ReadFull(rand.Reader, data)
dec := make([]byte, mtuLimit)
enc := make([]byte, mtuLimit)
bc.Encrypt(enc, data)
bc.Decrypt(dec, enc)
if !bytes.Equal(data, dec) {
t.Fail()
}
}
func BenchmarkSM4(b *testing.B) {
bc, err := NewSM4BlockCrypt(pass[:16])
if err != nil {
b.Fatal(err)
}
benchCrypt(b, bc)
}
func BenchmarkAES128(b *testing.B) {
bc, err := NewAESBlockCrypt(pass[:16])
if err != nil {
b.Fatal(err)
}
benchCrypt(b, bc)
}
func BenchmarkAES192(b *testing.B) {
bc, err := NewAESBlockCrypt(pass[:24])
if err != nil {
b.Fatal(err)
}
benchCrypt(b, bc)
}
func BenchmarkAES256(b *testing.B) {
bc, err := NewAESBlockCrypt(pass[:32])
if err != nil {
b.Fatal(err)
}
benchCrypt(b, bc)
}
func BenchmarkTEA(b *testing.B) {
bc, err := NewTEABlockCrypt(pass[:16])
if err != nil {
b.Fatal(err)
}
benchCrypt(b, bc)
}
func BenchmarkXOR(b *testing.B) {
bc, err := NewSimpleXORBlockCrypt(pass[:32])
if err != nil {
b.Fatal(err)
}
benchCrypt(b, bc)
}
func BenchmarkBlowfish(b *testing.B) {
bc, err := NewBlowfishBlockCrypt(pass[:32])
if err != nil {
b.Fatal(err)
}
benchCrypt(b, bc)
}
func BenchmarkNone(b *testing.B) {
bc, err := NewNoneBlockCrypt(pass[:32])
if err != nil {
b.Fatal(err)
}
benchCrypt(b, bc)
}
func BenchmarkCast5(b *testing.B) {
bc, err := NewCast5BlockCrypt(pass[:16])
if err != nil {
b.Fatal(err)
}
benchCrypt(b, bc)
}
func Benchmark3DES(b *testing.B) {
bc, err := NewTripleDESBlockCrypt(pass[:24])
if err != nil {
b.Fatal(err)
}
benchCrypt(b, bc)
}
func BenchmarkTwofish(b *testing.B) {
bc, err := NewTwofishBlockCrypt(pass[:32])
if err != nil {
b.Fatal(err)
}
benchCrypt(b, bc)
}
func BenchmarkXTEA(b *testing.B) {
bc, err := NewXTEABlockCrypt(pass[:16])
if err != nil {
b.Fatal(err)
}
benchCrypt(b, bc)
}
func BenchmarkSalsa20(b *testing.B) {
bc, err := NewSalsa20BlockCrypt(pass[:32])
if err != nil {
b.Fatal(err)
}
benchCrypt(b, bc)
}
func benchCrypt(b *testing.B, bc BlockCrypt) {
data := make([]byte, mtuLimit)
io.ReadFull(rand.Reader, data)
dec := make([]byte, mtuLimit)
enc := make([]byte, mtuLimit)
b.ReportAllocs()
b.SetBytes(int64(len(enc) * 2))
b.ResetTimer()
for i := 0; i < b.N; i++ {
bc.Encrypt(enc, data)
bc.Decrypt(dec, enc)
}
}
func BenchmarkCRC32(b *testing.B) {
content := make([]byte, 1024)
b.SetBytes(int64(len(content)))
for i := 0; i < b.N; i++ {
crc32.ChecksumIEEE(content)
}
}
func BenchmarkCsprngSystem(b *testing.B) {
data := make([]byte, md5.Size)
b.SetBytes(int64(len(data)))
for i := 0; i < b.N; i++ {
io.ReadFull(rand.Reader, data)
}
}
func BenchmarkCsprngMD5(b *testing.B) {
var data [md5.Size]byte
b.SetBytes(md5.Size)
for i := 0; i < b.N; i++ {
data = md5.Sum(data[:])
}
}
func BenchmarkCsprngSHA1(b *testing.B) {
var data [sha1.Size]byte
b.SetBytes(sha1.Size)
for i := 0; i < b.N; i++ {
data = sha1.Sum(data[:])
}
}
func BenchmarkCsprngNonceMD5(b *testing.B) {
var ng nonceMD5
ng.Init()
b.SetBytes(md5.Size)
data := make([]byte, md5.Size)
for i := 0; i < b.N; i++ {
ng.Fill(data)
}
}
func BenchmarkCsprngNonceAES128(b *testing.B) {
var ng nonceAES128
ng.Init()
b.SetBytes(aes.BlockSize)
data := make([]byte, aes.BlockSize)
for i := 0; i < b.N; i++ {
ng.Fill(data)
}
}

52
gate/kcp/entropy.go Normal file

@@ -0,0 +1,52 @@
package kcp
import (
"crypto/aes"
"crypto/cipher"
"crypto/md5"
"crypto/rand"
"io"
)
// Entropy defines an entropy source
type Entropy interface {
Init()
Fill(nonce []byte)
}
// nonceMD5 nonce generator for packet header
type nonceMD5 struct {
seed [md5.Size]byte
}
func (n *nonceMD5) Init() { /*nothing required*/ }
func (n *nonceMD5) Fill(nonce []byte) {
if n.seed[0] == 0 { // entropy update
io.ReadFull(rand.Reader, n.seed[:])
}
n.seed = md5.Sum(n.seed[:])
copy(nonce, n.seed[:])
}
// nonceAES128 nonce generator for packet headers
type nonceAES128 struct {
seed [aes.BlockSize]byte
block cipher.Block
}
func (n *nonceAES128) Init() {
var key [16]byte //aes-128
io.ReadFull(rand.Reader, key[:])
io.ReadFull(rand.Reader, n.seed[:])
block, _ := aes.NewCipher(key[:])
n.block = block
}
func (n *nonceAES128) Fill(nonce []byte) {
if n.seed[0] == 0 { // entropy update
io.ReadFull(rand.Reader, n.seed[:])
}
n.block.Encrypt(n.seed[:], n.seed[:])
copy(nonce, n.seed[:])
}

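A hypothetical in-package test (not part of this commit) showing the intended use of an Entropy source: filling the nonce bytes that the BlockCrypt note in crypt.go reserves at the head of each packet.

package kcp

import (
    "bytes"
    "crypto/aes"
    "testing"
)

// TestNonceAES128Fill is a hypothetical example of the Entropy interface.
func TestNonceAES128Fill(t *testing.T) {
    var ng nonceAES128
    ng.Init()
    a := make([]byte, aes.BlockSize)
    b := make([]byte, aes.BlockSize)
    ng.Fill(a)
    ng.Fill(b)
    // consecutive nonces are successive AES encryptions of an evolving seed,
    // so a collision here is cryptographically negligible
    if bytes.Equal(a, b) {
        t.Fatal("nonces should differ between calls")
    }
}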
381
gate/kcp/fec.go Normal file

@@ -0,0 +1,381 @@
package kcp
import (
"encoding/binary"
"sync/atomic"
"github.com/klauspost/reedsolomon"
)
const (
fecHeaderSize = 6
fecHeaderSizePlus2 = fecHeaderSize + 2 // plus 2B data size
typeData = 0xf1
typeParity = 0xf2
fecExpire = 60000
rxFECMulti = 3 // FEC keeps rxFECMulti* (dataShard+parityShard) ordered packets in memory
)
// fecPacket is a decoded FEC packet
type fecPacket []byte
func (bts fecPacket) seqid() uint32 { return binary.LittleEndian.Uint32(bts) }
func (bts fecPacket) flag() uint16 { return binary.LittleEndian.Uint16(bts[4:]) }
func (bts fecPacket) data() []byte { return bts[6:] }
// fecElement carries an auxiliary timestamp field
type fecElement struct {
fecPacket
ts uint32
}
// fecDecoder for decoding incoming packets
type fecDecoder struct {
rxlimit int // queue size limit
dataShards int
parityShards int
shardSize int
rx []fecElement // ordered receive queue
// caches
decodeCache [][]byte
flagCache []bool
// zeros
zeros []byte
// RS decoder
codec reedsolomon.Encoder
// auto tune fec parameter
autoTune autoTune
}
func newFECDecoder(dataShards, parityShards int) *fecDecoder {
if dataShards <= 0 || parityShards <= 0 {
return nil
}
dec := new(fecDecoder)
dec.dataShards = dataShards
dec.parityShards = parityShards
dec.shardSize = dataShards + parityShards
dec.rxlimit = rxFECMulti * dec.shardSize
codec, err := reedsolomon.New(dataShards, parityShards)
if err != nil {
return nil
}
dec.codec = codec
dec.decodeCache = make([][]byte, dec.shardSize)
dec.flagCache = make([]bool, dec.shardSize)
dec.zeros = make([]byte, mtuLimit)
return dec
}
// decode a fec packet
func (dec *fecDecoder) decode(in fecPacket) (recovered [][]byte) {
// feed the sample to the FEC auto tuner
if in.flag() == typeData {
dec.autoTune.Sample(true, in.seqid())
} else {
dec.autoTune.Sample(false, in.seqid())
}
// check if the FEC parameters are out of sync
var shouldTune bool
if int(in.seqid())%dec.shardSize < dec.dataShards {
if in.flag() != typeData { // expect typeData
shouldTune = true
}
} else {
if in.flag() != typeParity {
shouldTune = true
}
}
if shouldTune {
autoDS := dec.autoTune.FindPeriod(true)
autoPS := dec.autoTune.FindPeriod(false)
// edges found, we can tune parameters now
if autoDS > 0 && autoPS > 0 && autoDS < 256 && autoPS < 256 {
// and make sure it's different
if autoDS != dec.dataShards || autoPS != dec.parityShards {
dec.dataShards = autoDS
dec.parityShards = autoPS
dec.shardSize = autoDS + autoPS
dec.rxlimit = rxFECMulti * dec.shardSize
codec, err := reedsolomon.New(autoDS, autoPS)
if err != nil {
return nil
}
dec.codec = codec
dec.decodeCache = make([][]byte, dec.shardSize)
dec.flagCache = make([]bool, dec.shardSize)
//log.Println("autotune to :", dec.dataShards, dec.parityShards)
}
}
}
// insertion
n := len(dec.rx) - 1
insertIdx := 0
for i := n; i >= 0; i-- {
if in.seqid() == dec.rx[i].seqid() { // de-duplicate
return nil
} else if _itimediff(in.seqid(), dec.rx[i].seqid()) > 0 { // insertion
insertIdx = i + 1
break
}
}
// make a copy
pkt := fecPacket(xmitBuf.Get().([]byte)[:len(in)])
copy(pkt, in)
elem := fecElement{pkt, currentMs()}
// insert into ordered rx queue
if insertIdx == n+1 {
dec.rx = append(dec.rx, elem)
} else {
dec.rx = append(dec.rx, fecElement{})
copy(dec.rx[insertIdx+1:], dec.rx[insertIdx:]) // shift right
dec.rx[insertIdx] = elem
}
// shard range for current packet
shardBegin := pkt.seqid() - pkt.seqid()%uint32(dec.shardSize)
shardEnd := shardBegin + uint32(dec.shardSize) - 1
// max search range in ordered queue for current shard
searchBegin := insertIdx - int(pkt.seqid()%uint32(dec.shardSize))
if searchBegin < 0 {
searchBegin = 0
}
searchEnd := searchBegin + dec.shardSize - 1
if searchEnd >= len(dec.rx) {
searchEnd = len(dec.rx) - 1
}
// reconstruct data shards
if searchEnd-searchBegin+1 >= dec.dataShards {
var numshard, numDataShard, first, maxlen int
// zero caches
shards := dec.decodeCache
shardsflag := dec.flagCache
for k := range dec.decodeCache {
shards[k] = nil
shardsflag[k] = false
}
// shard assembly
for i := searchBegin; i <= searchEnd; i++ {
seqid := dec.rx[i].seqid()
if _itimediff(seqid, shardEnd) > 0 {
break
} else if _itimediff(seqid, shardBegin) >= 0 {
shards[seqid%uint32(dec.shardSize)] = dec.rx[i].data()
shardsflag[seqid%uint32(dec.shardSize)] = true
numshard++
if dec.rx[i].flag() == typeData {
numDataShard++
}
if numshard == 1 {
first = i
}
if len(dec.rx[i].data()) > maxlen {
maxlen = len(dec.rx[i].data())
}
}
}
if numDataShard == dec.dataShards {
// case 1: no loss on data shards
dec.rx = dec.freeRange(first, numshard, dec.rx)
} else if numshard >= dec.dataShards {
// case 2: loss on data shards, but it's recoverable from parity shards
for k := range shards {
if shards[k] != nil {
dlen := len(shards[k])
shards[k] = shards[k][:maxlen]
copy(shards[k][dlen:], dec.zeros)
} else if k < dec.dataShards {
shards[k] = xmitBuf.Get().([]byte)[:0]
}
}
if err := dec.codec.ReconstructData(shards); err == nil {
for k := range shards[:dec.dataShards] {
if !shardsflag[k] {
// recovered data should be recycled
recovered = append(recovered, shards[k])
}
}
}
dec.rx = dec.freeRange(first, numshard, dec.rx)
}
}
// keep rxlimit
if len(dec.rx) > dec.rxlimit {
if dec.rx[0].flag() == typeData { // track the unrecoverable data
atomic.AddUint64(&DefaultSnmp.FECShortShards, 1)
}
dec.rx = dec.freeRange(0, 1, dec.rx)
}
// timeout policy
current := currentMs()
numExpired := 0
for k := range dec.rx {
if _itimediff(current, dec.rx[k].ts) > fecExpire {
numExpired++
continue
}
break
}
if numExpired > 0 {
dec.rx = dec.freeRange(0, numExpired, dec.rx)
}
return
}
// free a range of fecPacket
func (dec *fecDecoder) freeRange(first, n int, q []fecElement) []fecElement {
for i := first; i < first+n; i++ { // recycle buffer
xmitBuf.Put([]byte(q[i].fecPacket))
}
if first == 0 && n < cap(q)/2 {
return q[n:]
}
copy(q[first:], q[first+n:])
return q[:len(q)-n]
}
// release all segments back to xmitBuf
func (dec *fecDecoder) release() {
if n := len(dec.rx); n > 0 {
dec.rx = dec.freeRange(0, n, dec.rx)
}
}
type (
// fecEncoder for encoding outgoing packets
fecEncoder struct {
dataShards int
parityShards int
shardSize int
paws uint32 // Protect Against Wrapped Sequence numbers
next uint32 // next seqid
shardCount int // count the number of datashards collected
maxSize int // track maximum data length in datashard
headerOffset int // FEC header offset
payloadOffset int // FEC payload offset
// caches
shardCache [][]byte
encodeCache [][]byte
// zeros
zeros []byte
// RS encoder
codec reedsolomon.Encoder
}
)
func newFECEncoder(dataShards, parityShards, offset int) *fecEncoder {
if dataShards <= 0 || parityShards <= 0 {
return nil
}
enc := new(fecEncoder)
enc.dataShards = dataShards
enc.parityShards = parityShards
enc.shardSize = dataShards + parityShards
enc.paws = 0xffffffff / uint32(enc.shardSize) * uint32(enc.shardSize)
enc.headerOffset = offset
enc.payloadOffset = enc.headerOffset + fecHeaderSize
codec, err := reedsolomon.New(dataShards, parityShards)
if err != nil {
return nil
}
enc.codec = codec
// caches
enc.encodeCache = make([][]byte, enc.shardSize)
enc.shardCache = make([][]byte, enc.shardSize)
for k := range enc.shardCache {
enc.shardCache[k] = make([]byte, mtuLimit)
}
enc.zeros = make([]byte, mtuLimit)
return enc
}
// encode encodes the packet and emits parity shards once a full set of data shards has been collected
// note: the contents of 'ps' will be overwritten by subsequent calls
func (enc *fecEncoder) encode(b []byte) (ps [][]byte) {
// The header format:
// | FEC SEQID(4B) | FEC TYPE(2B) | SIZE (2B) | PAYLOAD(SIZE-2) |
// |<-headerOffset |<-payloadOffset
enc.markData(b[enc.headerOffset:])
binary.LittleEndian.PutUint16(b[enc.payloadOffset:], uint16(len(b[enc.payloadOffset:])))
// copy data from payloadOffset to fec shard cache
sz := len(b)
enc.shardCache[enc.shardCount] = enc.shardCache[enc.shardCount][:sz]
copy(enc.shardCache[enc.shardCount][enc.payloadOffset:], b[enc.payloadOffset:])
enc.shardCount++
// track max datashard length
if sz > enc.maxSize {
enc.maxSize = sz
}
// Generation of Reed-Solomon Erasure Code
if enc.shardCount == enc.dataShards {
// fill '0' into the tail of each datashard
for i := 0; i < enc.dataShards; i++ {
shard := enc.shardCache[i]
slen := len(shard)
copy(shard[slen:enc.maxSize], enc.zeros)
}
// construct equal-sized slice with stripped header
cache := enc.encodeCache
for k := range cache {
cache[k] = enc.shardCache[k][enc.payloadOffset:enc.maxSize]
}
// encoding
if err := enc.codec.Encode(cache); err == nil {
ps = enc.shardCache[enc.dataShards:]
for k := range ps {
enc.markParity(ps[k][enc.headerOffset:])
ps[k] = ps[k][:enc.maxSize]
}
}
// counters resetting
enc.shardCount = 0
enc.maxSize = 0
}
return
}
func (enc *fecEncoder) markData(data []byte) {
binary.LittleEndian.PutUint32(data, enc.next)
binary.LittleEndian.PutUint16(data[4:], typeData)
enc.next++
}
func (enc *fecEncoder) markParity(data []byte) {
binary.LittleEndian.PutUint32(data, enc.next)
binary.LittleEndian.PutUint16(data[4:], typeParity)
// sequence wrap will only happen at parity shard
enc.next = (enc.next + 1) % enc.paws
}

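A hypothetical in-package test (not part of this commit) exercising the header layout documented in encode(): three data shards are encoded, one is dropped, and the decoder rebuilds it from the two parity shards. It leans on xmitBuf and the time helpers from the suppressed kcp.go/sess.go diffs.

package kcp

import (
    "bytes"
    "encoding/binary"
    "testing"
)

// TestFECRoundTrip is a hypothetical example of the encoder/decoder pair.
func TestFECRoundTrip(t *testing.T) {
    const dataShards, parityShards = 3, 2
    enc := newFECEncoder(dataShards, parityShards, 0)
    dec := newFECDecoder(dataShards, parityShards)

    payloads := [][]byte{[]byte("alpha"), []byte("bravo"), []byte("charlie")}
    var packets, parity [][]byte
    for _, p := range payloads {
        // reserve | SEQID(4B) | TYPE(2B) | SIZE(2B) | in front of the payload
        pkt := make([]byte, fecHeaderSizePlus2+len(p))
        copy(pkt[fecHeaderSizePlus2:], p)
        packets = append(packets, pkt)
        if ps := enc.encode(pkt); ps != nil {
            for _, shard := range ps {
                parity = append(parity, append([]byte(nil), shard...)) // ps is reused, so copy
            }
        }
    }

    var recovered [][]byte
    for i, pkt := range packets {
        if i == 1 {
            continue // simulate the loss of data shard #1 ("bravo")
        }
        recovered = append(recovered, dec.decode(pkt)...)
    }
    for _, pkt := range parity {
        recovered = append(recovered, dec.decode(pkt)...)
    }
    if len(recovered) != 1 {
        t.Fatalf("expected 1 recovered shard, got %d", len(recovered))
    }
    // a recovered shard is | SIZE(2B) | PAYLOAD(SIZE-2) | zero padding |
    sz := binary.LittleEndian.Uint16(recovered[0])
    if !bytes.Equal(recovered[0][2:sz], []byte("bravo")) {
        t.Fatal("recovered payload mismatch")
    }
}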
43
gate/kcp/fec_test.go Normal file

@@ -0,0 +1,43 @@
package kcp
import (
"encoding/binary"
"math/rand"
"testing"
)
func BenchmarkFECDecode(b *testing.B) {
const dataSize = 10
const paritySize = 3
const payLoad = 1500
decoder := newFECDecoder(dataSize, paritySize)
b.ReportAllocs()
b.SetBytes(payLoad)
for i := 0; i < b.N; i++ {
if rand.Int()%(dataSize+paritySize) == 0 { // random loss
continue
}
pkt := make([]byte, payLoad)
binary.LittleEndian.PutUint32(pkt, uint32(i))
if i%(dataSize+paritySize) >= dataSize {
binary.LittleEndian.PutUint16(pkt[4:], typeParity)
} else {
binary.LittleEndian.PutUint16(pkt[4:], typeData)
}
decoder.decode(pkt)
}
}
func BenchmarkFECEncode(b *testing.B) {
const dataSize = 10
const paritySize = 3
const payLoad = 1500
b.ReportAllocs()
b.SetBytes(payLoad)
encoder := newFECEncoder(dataSize, paritySize, 0)
for i := 0; i < b.N; i++ {
data := make([]byte, payLoad)
encoder.encode(data)
}
}

1094
gate/kcp/kcp.go Normal file

File diff suppressed because it is too large

135
gate/kcp/kcp_test.go Normal file

@@ -0,0 +1,135 @@
package kcp
import (
"io"
"net"
"sync"
"testing"
"time"
"github.com/xtaci/lossyconn"
)
const repeat = 16
func TestLossyConn1(t *testing.T) {
t.Log("testing loss rate 10%, rtt 200ms")
t.Log("testing link with nodelay parameters:1 10 2 1")
client, err := lossyconn.NewLossyConn(0.1, 100)
if err != nil {
t.Fatal(err)
}
server, err := lossyconn.NewLossyConn(0.1, 100)
if err != nil {
t.Fatal(err)
}
testlink(t, client, server, 1, 10, 2, 1)
}
func TestLossyConn2(t *testing.T) {
t.Log("testing loss rate 20%, rtt 200ms")
t.Log("testing link with nodelay parameters:1 10 2 1")
client, err := lossyconn.NewLossyConn(0.2, 100)
if err != nil {
t.Fatal(err)
}
server, err := lossyconn.NewLossyConn(0.2, 100)
if err != nil {
t.Fatal(err)
}
testlink(t, client, server, 1, 10, 2, 1)
}
func TestLossyConn3(t *testing.T) {
t.Log("testing loss rate 30%, rtt 200ms")
t.Log("testing link with nodelay parameters:1 10 2 1")
client, err := lossyconn.NewLossyConn(0.3, 100)
if err != nil {
t.Fatal(err)
}
server, err := lossyconn.NewLossyConn(0.3, 100)
if err != nil {
t.Fatal(err)
}
testlink(t, client, server, 1, 10, 2, 1)
}
func TestLossyConn4(t *testing.T) {
t.Log("testing loss rate 10%, rtt 200ms")
t.Log("testing link with nodelay parameters:1 10 2 0")
client, err := lossyconn.NewLossyConn(0.1, 100)
if err != nil {
t.Fatal(err)
}
server, err := lossyconn.NewLossyConn(0.1, 100)
if err != nil {
t.Fatal(err)
}
testlink(t, client, server, 1, 10, 2, 0)
}
func testlink(t *testing.T, client *lossyconn.LossyConn, server *lossyconn.LossyConn, nodelay, interval, resend, nc int) {
t.Log("testing with nodelay parameters:", nodelay, interval, resend, nc)
sess, _ := NewConn2(server.LocalAddr(), nil, 0, 0, client)
listener, _ := ServeConn(nil, 0, 0, server)
echoServer := func(l *Listener) {
for {
conn, err := l.AcceptKCP()
if err != nil {
return
}
go func() {
conn.SetNoDelay(nodelay, interval, resend, nc)
buf := make([]byte, 65536)
for {
n, err := conn.Read(buf)
if err != nil {
return
}
conn.Write(buf[:n])
}
}()
}
}
echoTester := func(s *UDPSession, raddr net.Addr) {
s.SetNoDelay(nodelay, interval, resend, nc)
buf := make([]byte, 64)
var rtt time.Duration
for i := 0; i < repeat; i++ {
start := time.Now()
s.Write(buf)
io.ReadFull(s, buf)
rtt += time.Since(start)
}
t.Log("client:", client)
t.Log("server:", server)
t.Log("avg rtt:", rtt/repeat)
t.Logf("total time: %v for %v round trip:", rtt, repeat)
}
go echoServer(listener)
echoTester(sess, server.LocalAddr())
}
func BenchmarkFlush(b *testing.B) {
kcp := NewKCP(1, func(buf []byte, size int) {})
kcp.snd_buf = make([]segment, 1024)
for k := range kcp.snd_buf {
kcp.snd_buf[k].xmit = 1
kcp.snd_buf[k].resendts = currentMs() + 10000
}
b.ResetTimer()
b.ReportAllocs()
var mu sync.Mutex
for i := 0; i < b.N; i++ {
mu.Lock()
kcp.flush(false)
mu.Unlock()
}
}

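For reference, a hypothetical snippet (not part of this commit) spelling out the SetNoDelay arguments that these tests pass as "1 10 2 1"; the parameter semantics follow the upstream KCP convention and are not defined in the files shown here.

package kcp

// tuneForLowLatency is a hypothetical helper documenting the knobs used above:
// nodelay enables the aggressive RTO schedule, interval is the internal update
// tick in milliseconds, resend triggers fast retransmit once a segment has been
// skipped by that many later ACKs, and nc=1 disables congestion control.
func tuneForLowLatency(s *UDPSession) {
    s.SetNoDelay(1, 10, 2, 1) // the "turbo" profile used throughout these tests
}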
126
gate/kcp/readloop.go Normal file

@@ -0,0 +1,126 @@
package kcp
import (
"bytes"
"encoding/binary"
"github.com/pkg/errors"
)
func (s *UDPSession) defaultReadLoop() {
buf := make([]byte, mtuLimit)
var src string
for {
if n, addr, err := s.conn.ReadFrom(buf); err == nil {
udpPayload := buf[:n]
// make sure the packet is from the same source
if src == "" { // set source address
src = addr.String()
} else if addr.String() != src {
//atomic.AddUint64(&DefaultSnmp.InErrs, 1)
//continue
s.remote = addr
src = addr.String()
}
s.packetInput(udpPayload)
} else {
s.notifyReadError(errors.WithStack(err))
return
}
}
}
func (l *Listener) defaultMonitor() {
buf := make([]byte, mtuLimit)
for {
if n, from, err := l.conn.ReadFrom(buf); err == nil {
udpPayload := buf[:n]
var convId uint64 = 0
if n == 20 {
// Enet protocol of the 原神 (Genshin Impact) flavored KCP
// extract convId
convId += uint64(udpPayload[4]) << 24
convId += uint64(udpPayload[5]) << 16
convId += uint64(udpPayload[6]) << 8
convId += uint64(udpPayload[7]) << 0
convId += uint64(udpPayload[8]) << 56
convId += uint64(udpPayload[9]) << 48
convId += uint64(udpPayload[10]) << 40
convId += uint64(udpPayload[11]) << 32
// extract the Enet magic numbers at the head and tail of the payload
udpPayloadEnetHead := udpPayload[:4]
udpPayloadEnetTail := udpPayload[len(udpPayload)-4:]
// extract the Enet packet type
enetTypeData := udpPayload[12:16]
enetTypeDataBuffer := bytes.NewBuffer(enetTypeData)
var enetType uint32
_ = binary.Read(enetTypeDataBuffer, binary.BigEndian, &enetType)
equalHead := bytes.Compare(udpPayloadEnetHead, MagicEnetSynHead)
equalTail := bytes.Compare(udpPayloadEnetTail, MagicEnetSynTail)
if equalHead == 0 && equalTail == 0 {
// client pre-connection handshake to obtain the conv id
l.EnetNotify <- &Enet{
Addr: from.String(),
ConvId: convId,
ConnType: ConnEnetSyn,
EnetType: enetType,
}
continue
}
equalHead = bytes.Compare(udpPayloadEnetHead, MagicEnetEstHead)
equalTail = bytes.Compare(udpPayloadEnetTail, MagicEnetEstTail)
if equalHead == 0 && equalTail == 0 {
// connection established
l.EnetNotify <- &Enet{
Addr: from.String(),
ConvId: convId,
ConnType: ConnEnetEst,
EnetType: enetType,
}
continue
}
equalHead = bytes.Compare(udpPayloadEnetHead, MagicEnetFinHead)
equalTail = bytes.Compare(udpPayloadEnetTail, MagicEnetFinTail)
if equalHead == 0 && equalTail == 0 {
// connection closed
l.EnetNotify <- &Enet{
Addr: from.String(),
ConvId: convId,
ConnType: ConnEnetFin,
EnetType: enetType,
}
continue
}
} else {
// regular KCP packet
convId += uint64(udpPayload[0]) << 0
convId += uint64(udpPayload[1]) << 8
convId += uint64(udpPayload[2]) << 16
convId += uint64(udpPayload[3]) << 24
convId += uint64(udpPayload[4]) << 32
convId += uint64(udpPayload[5]) << 40
convId += uint64(udpPayload[6]) << 48
convId += uint64(udpPayload[7]) << 56
}
l.sessionLock.RLock()
conn, exist := l.sessions[convId]
l.sessionLock.RUnlock()
if exist {
if conn.remote.String() != from.String() {
conn.remote = from
// connection address changed
l.EnetNotify <- &Enet{
Addr: conn.remote.String(),
ConvId: convId,
ConnType: ConnEnetAddrChange,
}
}
}
l.packetInput(udpPayload, from, convId)
} else {
l.notifyReadError(errors.WithStack(err))
return
}
}
}

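The 20-byte handshake branch above follows a fixed layout. A hypothetical helper (not part of this commit) restating it in one place; the MagicEnet*Head/Tail values and the Enet struct it feeds are defined elsewhere in this package, outside the files shown in this diff.

package kcp

import "encoding/binary"

// parseEnetHandshake is a hypothetical restatement of the inline decoding in
// defaultMonitor()/monitor():
//
//   bytes  0..3   magic head (MagicEnetSyn/Est/FinHead)
//   bytes  4..7   low  32 bits of convId, big-endian
//   bytes  8..11  high 32 bits of convId, big-endian
//   bytes 12..15  enetType, big-endian
//   bytes 16..19  magic tail (MagicEnetSyn/Est/FinTail)
//
// Regular KCP packets are distinguished by length and carry conv in their
// first 8 bytes, little-endian.
func parseEnetHandshake(p []byte) (convId uint64, enetType uint32, ok bool) {
    if len(p) != 20 {
        return 0, 0, false
    }
    lo := binary.BigEndian.Uint32(p[4:8])
    hi := binary.BigEndian.Uint32(p[8:12])
    return uint64(hi)<<32 | uint64(lo), binary.BigEndian.Uint32(p[12:16]), true
}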

@@ -0,0 +1,12 @@
//go:build !linux
// +build !linux
package kcp
func (s *UDPSession) readLoop() {
s.defaultReadLoop()
}
func (l *Listener) monitor() {
l.defaultMonitor()
}

199
gate/kcp/readloop_linux.go Normal file

@@ -0,0 +1,199 @@
//go:build linux
// +build linux
package kcp
import (
"bytes"
"encoding/binary"
"github.com/pkg/errors"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
"net"
"os"
)
// the read loop for a client session
func (s *UDPSession) readLoop() {
// default version
if s.xconn == nil {
s.defaultReadLoop()
return
}
// x/net version
var src string
msgs := make([]ipv4.Message, batchSize)
for k := range msgs {
msgs[k].Buffers = [][]byte{make([]byte, mtuLimit)}
}
for {
if count, err := s.xconn.ReadBatch(msgs, 0); err == nil {
for i := 0; i < count; i++ {
msg := &msgs[i]
// make sure the packet is from the same source
if src == "" { // set source address if nil
src = msg.Addr.String()
} else if msg.Addr.String() != src {
//atomic.AddUint64(&DefaultSnmp.InErrs, 1)
//continue
s.remote = msg.Addr
src = msg.Addr.String()
}
udpPayload := msg.Buffers[0][:msg.N]
// source and size have been validated
s.packetInput(udpPayload)
}
} else {
// compatibility issue:
// for linux kernel <= 2.6.32, recvmmsg/sendmmsg are not available,
// and an error of type os.SyscallError will be returned
if operr, ok := err.(*net.OpError); ok {
if se, ok := operr.Err.(*os.SyscallError); ok {
if se.Syscall == "recvmmsg" {
s.defaultReadLoop()
return
}
}
}
s.notifyReadError(errors.WithStack(err))
return
}
}
}
// monitor incoming data for all connections of the server
func (l *Listener) monitor() {
var xconn batchConn
if _, ok := l.conn.(*net.UDPConn); ok {
addr, err := net.ResolveUDPAddr("udp", l.conn.LocalAddr().String())
if err == nil {
if addr.IP.To4() != nil {
xconn = ipv4.NewPacketConn(l.conn)
} else {
xconn = ipv6.NewPacketConn(l.conn)
}
}
}
// default version
if xconn == nil {
l.defaultMonitor()
return
}
// x/net version
msgs := make([]ipv4.Message, batchSize)
for k := range msgs {
msgs[k].Buffers = [][]byte{make([]byte, mtuLimit)}
}
for {
if count, err := xconn.ReadBatch(msgs, 0); err == nil {
for i := 0; i < count; i++ {
msg := &msgs[i]
udpPayload := msg.Buffers[0][:msg.N]
var convId uint64 = 0
if msg.N == 20 {
// Enet protocol of the 原神 (Genshin Impact) flavored KCP
// extract convId
convId += uint64(udpPayload[4]) << 24
convId += uint64(udpPayload[5]) << 16
convId += uint64(udpPayload[6]) << 8
convId += uint64(udpPayload[7]) << 0
convId += uint64(udpPayload[8]) << 56
convId += uint64(udpPayload[9]) << 48
convId += uint64(udpPayload[10]) << 40
convId += uint64(udpPayload[11]) << 32
// extract the Enet magic numbers at the head and tail of the payload
udpPayloadEnetHead := udpPayload[:4]
udpPayloadEnetTail := udpPayload[len(udpPayload)-4:]
// extract the Enet packet type
enetTypeData := udpPayload[12:16]
enetTypeDataBuffer := bytes.NewBuffer(enetTypeData)
var enetType uint32
_ = binary.Read(enetTypeDataBuffer, binary.BigEndian, &enetType)
equalHead := bytes.Compare(udpPayloadEnetHead, MagicEnetSynHead)
equalTail := bytes.Compare(udpPayloadEnetTail, MagicEnetSynTail)
if equalHead == 0 && equalTail == 0 {
// client pre-connection handshake to obtain the conv id
l.EnetNotify <- &Enet{
Addr: msg.Addr.String(),
ConvId: convId,
ConnType: ConnEnetSyn,
EnetType: enetType,
}
continue
}
equalHead = bytes.Compare(udpPayloadEnetHead, MagicEnetEstHead)
equalTail = bytes.Compare(udpPayloadEnetTail, MagicEnetEstTail)
if equalHead == 0 && equalTail == 0 {
// connection established
l.EnetNotify <- &Enet{
Addr: msg.Addr.String(),
ConvId: convId,
ConnType: ConnEnetEst,
EnetType: enetType,
}
continue
}
equalHead = bytes.Compare(udpPayloadEnetHead, MagicEnetFinHead)
equalTail = bytes.Compare(udpPayloadEnetTail, MagicEnetFinTail)
if equalHead == 0 && equalTail == 0 {
// connection closed
l.EnetNotify <- &Enet{
Addr: msg.Addr.String(),
ConvId: convId,
ConnType: ConnEnetFin,
EnetType: enetType,
}
continue
}
} else {
// regular KCP packet
convId += uint64(udpPayload[0]) << 0
convId += uint64(udpPayload[1]) << 8
convId += uint64(udpPayload[2]) << 16
convId += uint64(udpPayload[3]) << 24
convId += uint64(udpPayload[4]) << 32
convId += uint64(udpPayload[5]) << 40
convId += uint64(udpPayload[6]) << 48
convId += uint64(udpPayload[7]) << 56
}
l.sessionLock.RLock()
conn, exist := l.sessions[convId]
l.sessionLock.RUnlock()
if exist {
if conn.remote.String() != msg.Addr.String() {
conn.remote = msg.Addr
// connection address changed
l.EnetNotify <- &Enet{
Addr: conn.remote.String(),
ConvId: convId,
ConnType: ConnEnetAddrChange,
}
}
}
l.packetInput(udpPayload, msg.Addr, convId)
}
} else {
// compatibility issue:
// for linux kernel <= 2.6.32, recvmmsg/sendmmsg are not available,
// and an error of type os.SyscallError will be returned
if operr, ok := err.(*net.OpError); ok {
if se, ok := operr.Err.(*os.SyscallError); ok {
if se.Syscall == "recvmmsg" {
l.defaultMonitor()
return
}
}
}
l.notifyReadError(errors.WithStack(err))
return
}
}
}

1144
gate/kcp/sess.go Normal file

File diff suppressed because it is too large

703
gate/kcp/sess_test.go Normal file

@@ -0,0 +1,703 @@
package kcp
import (
"crypto/sha1"
"fmt"
"io"
"log"
"net"
"net/http"
_ "net/http/pprof"
"sync"
"sync/atomic"
"testing"
"time"
"golang.org/x/crypto/pbkdf2"
)
var baseport = uint32(10000)
var key = []byte("testkey")
var pass = pbkdf2.Key(key, []byte("testsalt"), 4096, 32, sha1.New)
func init() {
go func() {
log.Println(http.ListenAndServe("0.0.0.0:6060", nil))
}()
log.Println("beginning tests, encryption:salsa20, fec:10/3")
}
func dialEcho(port int) (*UDPSession, error) {
//block, _ := NewNoneBlockCrypt(pass)
//block, _ := NewSimpleXORBlockCrypt(pass)
//block, _ := NewTEABlockCrypt(pass[:16])
//block, _ := NewAESBlockCrypt(pass)
block, _ := NewSalsa20BlockCrypt(pass)
sess, err := DialWithOptions(fmt.Sprintf("127.0.0.1:%v", port), block, 10, 3)
if err != nil {
panic(err)
}
sess.SetStreamMode(true)
sess.SetStreamMode(false)
sess.SetStreamMode(true)
sess.SetWindowSize(1024, 1024)
sess.SetReadBuffer(16 * 1024 * 1024)
sess.SetWriteBuffer(16 * 1024 * 1024)
sess.SetStreamMode(true)
sess.SetNoDelay(1, 10, 2, 1)
sess.SetMtu(1400)
sess.SetMtu(1600)
sess.SetMtu(1400)
sess.SetACKNoDelay(true)
sess.SetACKNoDelay(false)
sess.SetDeadline(time.Now().Add(time.Minute))
return sess, err
}
func dialSink(port int) (*UDPSession, error) {
sess, err := DialWithOptions(fmt.Sprintf("127.0.0.1:%v", port), nil, 0, 0)
if err != nil {
panic(err)
}
sess.SetStreamMode(true)
sess.SetWindowSize(1024, 1024)
sess.SetReadBuffer(16 * 1024 * 1024)
sess.SetWriteBuffer(16 * 1024 * 1024)
sess.SetStreamMode(true)
sess.SetNoDelay(1, 10, 2, 1)
sess.SetMtu(1400)
sess.SetACKNoDelay(false)
sess.SetDeadline(time.Now().Add(time.Minute))
return sess, err
}
func dialTinyBufferEcho(port int) (*UDPSession, error) {
//block, _ := NewNoneBlockCrypt(pass)
//block, _ := NewSimpleXORBlockCrypt(pass)
//block, _ := NewTEABlockCrypt(pass[:16])
//block, _ := NewAESBlockCrypt(pass)
block, _ := NewSalsa20BlockCrypt(pass)
sess, err := DialWithOptions(fmt.Sprintf("127.0.0.1:%v", port), block, 10, 3)
if err != nil {
panic(err)
}
return sess, err
}
// ////////////////////////
func listenEcho(port int) (net.Listener, error) {
//block, _ := NewNoneBlockCrypt(pass)
//block, _ := NewSimpleXORBlockCrypt(pass)
//block, _ := NewTEABlockCrypt(pass[:16])
//block, _ := NewAESBlockCrypt(pass)
block, _ := NewSalsa20BlockCrypt(pass)
return ListenWithOptions(fmt.Sprintf("127.0.0.1:%v", port), block, 10, 0)
}
func listenTinyBufferEcho(port int) (net.Listener, error) {
//block, _ := NewNoneBlockCrypt(pass)
//block, _ := NewSimpleXORBlockCrypt(pass)
//block, _ := NewTEABlockCrypt(pass[:16])
//block, _ := NewAESBlockCrypt(pass)
block, _ := NewSalsa20BlockCrypt(pass)
return ListenWithOptions(fmt.Sprintf("127.0.0.1:%v", port), block, 10, 3)
}
func listenSink(port int) (net.Listener, error) {
return ListenWithOptions(fmt.Sprintf("127.0.0.1:%v", port), nil, 0, 0)
}
func echoServer(port int) net.Listener {
l, err := listenEcho(port)
if err != nil {
panic(err)
}
go func() {
kcplistener := l.(*Listener)
kcplistener.SetReadBuffer(4 * 1024 * 1024)
kcplistener.SetWriteBuffer(4 * 1024 * 1024)
kcplistener.SetDSCP(46)
for {
s, err := l.Accept()
if err != nil {
return
}
// coverage test
s.(*UDPSession).SetReadBuffer(4 * 1024 * 1024)
s.(*UDPSession).SetWriteBuffer(4 * 1024 * 1024)
go handleEcho(s.(*UDPSession))
}
}()
return l
}
func sinkServer(port int) net.Listener {
l, err := listenSink(port)
if err != nil {
panic(err)
}
go func() {
kcplistener := l.(*Listener)
kcplistener.SetReadBuffer(4 * 1024 * 1024)
kcplistener.SetWriteBuffer(4 * 1024 * 1024)
kcplistener.SetDSCP(46)
for {
s, err := l.Accept()
if err != nil {
return
}
go handleSink(s.(*UDPSession))
}
}()
return l
}
func tinyBufferEchoServer(port int) net.Listener {
l, err := listenTinyBufferEcho(port)
if err != nil {
panic(err)
}
go func() {
for {
s, err := l.Accept()
if err != nil {
return
}
go handleTinyBufferEcho(s.(*UDPSession))
}
}()
return l
}
///////////////////////////
func handleEcho(conn *UDPSession) {
conn.SetStreamMode(true)
conn.SetWindowSize(4096, 4096)
conn.SetNoDelay(1, 10, 2, 1)
conn.SetDSCP(46)
conn.SetMtu(1400)
conn.SetACKNoDelay(false)
conn.SetReadDeadline(time.Now().Add(time.Hour))
conn.SetWriteDeadline(time.Now().Add(time.Hour))
buf := make([]byte, 65536)
for {
n, err := conn.Read(buf)
if err != nil {
return
}
conn.Write(buf[:n])
}
}
func handleSink(conn *UDPSession) {
conn.SetStreamMode(true)
conn.SetWindowSize(4096, 4096)
conn.SetNoDelay(1, 10, 2, 1)
conn.SetDSCP(46)
conn.SetMtu(1400)
conn.SetACKNoDelay(false)
conn.SetReadDeadline(time.Now().Add(time.Hour))
conn.SetWriteDeadline(time.Now().Add(time.Hour))
buf := make([]byte, 65536)
for {
_, err := conn.Read(buf)
if err != nil {
return
}
}
}
func handleTinyBufferEcho(conn *UDPSession) {
conn.SetStreamMode(true)
buf := make([]byte, 2)
for {
n, err := conn.Read(buf)
if err != nil {
return
}
conn.Write(buf[:n])
}
}
///////////////////////////
func TestTimeout(t *testing.T) {
port := int(atomic.AddUint32(&baseport, 1))
l := echoServer(port)
defer l.Close()
cli, err := dialEcho(port)
if err != nil {
panic(err)
}
buf := make([]byte, 10)
//timeout
cli.SetDeadline(time.Now().Add(time.Second))
<-time.After(2 * time.Second)
n, err := cli.Read(buf)
if n != 0 || err == nil {
t.Fail()
}
cli.Close()
}
func TestSendRecv(t *testing.T) {
port := int(atomic.AddUint32(&baseport, 1))
l := echoServer(port)
defer l.Close()
cli, err := dialEcho(port)
if err != nil {
panic(err)
}
cli.SetWriteDelay(true)
cli.SetDUP(1)
const N = 100
buf := make([]byte, 10)
for i := 0; i < N; i++ {
msg := fmt.Sprintf("hello%v", i)
cli.Write([]byte(msg))
if n, err := cli.Read(buf); err == nil {
if string(buf[:n]) != msg {
t.Fail()
}
} else {
panic(err)
}
}
cli.Close()
}
func TestSendVector(t *testing.T) {
port := int(atomic.AddUint32(&baseport, 1))
l := echoServer(port)
defer l.Close()
cli, err := dialEcho(port)
if err != nil {
panic(err)
}
cli.SetWriteDelay(false)
const N = 100
buf := make([]byte, 20)
v := make([][]byte, 2)
for i := 0; i < N; i++ {
v[0] = []byte(fmt.Sprintf("hello%v", i))
v[1] = []byte(fmt.Sprintf("world%v", i))
msg := fmt.Sprintf("hello%vworld%v", i, i)
cli.WriteBuffers(v)
if n, err := cli.Read(buf); err == nil {
if string(buf[:n]) != msg {
t.Error(string(buf[:n]), msg)
}
} else {
panic(err)
}
}
cli.Close()
}
func TestTinyBufferReceiver(t *testing.T) {
port := int(atomic.AddUint32(&baseport, 1))
l := tinyBufferEchoServer(port)
defer l.Close()
cli, err := dialTinyBufferEcho(port)
if err != nil {
panic(err)
}
const N = 100
snd := byte(0)
fillBuffer := func(buf []byte) {
for i := 0; i < len(buf); i++ {
buf[i] = snd
snd++
}
}
rcv := byte(0)
check := func(buf []byte) bool {
for i := 0; i < len(buf); i++ {
if buf[i] != rcv {
return false
}
rcv++
}
return true
}
sndbuf := make([]byte, 7)
rcvbuf := make([]byte, 7)
for i := 0; i < N; i++ {
fillBuffer(sndbuf)
cli.Write(sndbuf)
if n, err := io.ReadFull(cli, rcvbuf); err == nil {
if !check(rcvbuf[:n]) {
t.Fail()
}
} else {
panic(err)
}
}
cli.Close()
}
func TestClose(t *testing.T) {
var n int
var err error
port := int(atomic.AddUint32(&baseport, 1))
l := echoServer(port)
defer l.Close()
cli, err := dialEcho(port)
if err != nil {
panic(err)
}
// double close
cli.Close()
if cli.Close() == nil {
t.Fatal("double close misbehavior")
}
// write after close
buf := make([]byte, 10)
n, err = cli.Write(buf)
if n != 0 || err == nil {
t.Fatal("write after close misbehavior")
}
// write, close, read, read
cli, err = dialEcho(port)
if err != nil {
panic(err)
}
if n, err = cli.Write(buf); err != nil {
t.Fatal("write misbehavior")
}
// wait until data arrival
time.Sleep(2 * time.Second)
// drain
cli.Close()
n, err = io.ReadFull(cli, buf)
if err != nil {
t.Fatal("closed conn drain bytes failed", err, n)
}
// after drain, read should return error
n, err = cli.Read(buf)
if n != 0 || err == nil {
t.Fatal("write->close->drain->read misbehavior", err, n)
}
cli.Close()
}
func TestParallel1024CLIENT_64BMSG_64CNT(t *testing.T) {
port := int(atomic.AddUint32(&baseport, 1))
l := echoServer(port)
defer l.Close()
var wg sync.WaitGroup
wg.Add(1024)
for i := 0; i < 1024; i++ {
go parallel_client(&wg, port)
}
wg.Wait()
}
func parallel_client(wg *sync.WaitGroup, port int) (err error) {
cli, err := dialEcho(port)
if err != nil {
panic(err)
}
err = echo_tester(cli, 64, 64)
cli.Close()
wg.Done()
return
}
func BenchmarkEchoSpeed4K(b *testing.B) {
speedclient(b, 4096)
}
func BenchmarkEchoSpeed64K(b *testing.B) {
speedclient(b, 65536)
}
func BenchmarkEchoSpeed512K(b *testing.B) {
speedclient(b, 524288)
}
func BenchmarkEchoSpeed1M(b *testing.B) {
speedclient(b, 1048576)
}
func speedclient(b *testing.B, nbytes int) {
port := int(atomic.AddUint32(&baseport, 1))
l := echoServer(port)
defer l.Close()
b.ReportAllocs()
cli, err := dialEcho(port)
if err != nil {
panic(err)
}
if err := echo_tester(cli, nbytes, b.N); err != nil {
b.Fail()
}
b.SetBytes(int64(nbytes))
cli.Close()
}
func BenchmarkSinkSpeed4K(b *testing.B) {
sinkclient(b, 4096)
}
func BenchmarkSinkSpeed64K(b *testing.B) {
sinkclient(b, 65536)
}
func BenchmarkSinkSpeed256K(b *testing.B) {
sinkclient(b, 524288)
}
func BenchmarkSinkSpeed1M(b *testing.B) {
sinkclient(b, 1048576)
}
func sinkclient(b *testing.B, nbytes int) {
port := int(atomic.AddUint32(&baseport, 1))
l := sinkServer(port)
defer l.Close()
b.ReportAllocs()
cli, err := dialSink(port)
if err != nil {
panic(err)
}
sink_tester(cli, nbytes, b.N)
b.SetBytes(int64(nbytes))
cli.Close()
}
func echo_tester(cli net.Conn, msglen, msgcount int) error {
buf := make([]byte, msglen)
for i := 0; i < msgcount; i++ {
// send packet
if _, err := cli.Write(buf); err != nil {
return err
}
// receive packet
nrecv := 0
for {
n, err := cli.Read(buf)
if err != nil {
return err
} else {
nrecv += n
if nrecv == msglen {
break
}
}
}
}
return nil
}
func sink_tester(cli *UDPSession, msglen, msgcount int) error {
// sender
buf := make([]byte, msglen)
for i := 0; i < msgcount; i++ {
if _, err := cli.Write(buf); err != nil {
return err
}
}
return nil
}
func TestSNMP(t *testing.T) {
t.Log(DefaultSnmp.Copy())
t.Log(DefaultSnmp.Header())
t.Log(DefaultSnmp.ToSlice())
DefaultSnmp.Reset()
t.Log(DefaultSnmp.ToSlice())
}
func TestListenerClose(t *testing.T) {
port := int(atomic.AddUint32(&baseport, 1))
l, err := ListenWithOptions(fmt.Sprintf("127.0.0.1:%v", port), nil, 10, 3)
if err != nil {
t.Fail()
}
l.SetReadDeadline(time.Now().Add(time.Second))
l.SetWriteDeadline(time.Now().Add(time.Second))
l.SetDeadline(time.Now().Add(time.Second))
time.Sleep(2 * time.Second)
if _, err := l.Accept(); err == nil {
t.Fail()
}
l.Close()
//fakeaddr, _ := net.ResolveUDPAddr("udp6", "127.0.0.1:1111")
fakeConvId := uint64(0)
if l.closeSession(fakeConvId) {
t.Fail()
}
}
// A wrapper for net.PacketConn that remembers when Close has been called.
type closedFlagPacketConn struct {
net.PacketConn
Closed bool
}
func (c *closedFlagPacketConn) Close() error {
c.Closed = true
return c.PacketConn.Close()
}
func newClosedFlagPacketConn(c net.PacketConn) *closedFlagPacketConn {
return &closedFlagPacketConn{c, false}
}
// Listener should close a net.PacketConn that it created.
// https://github.com/xtaci/kcp-go/issues/165
func TestListenerOwnedPacketConn(t *testing.T) {
// ListenWithOptions creates its own net.PacketConn.
l, err := ListenWithOptions("127.0.0.1:0", nil, 0, 0)
if err != nil {
panic(err)
}
defer l.Close()
// Replace the internal net.PacketConn with one that remembers when it
// has been closed.
pconn := newClosedFlagPacketConn(l.conn)
l.conn = pconn
if pconn.Closed {
t.Fatal("owned PacketConn closed before Listener.Close()")
}
err = l.Close()
if err != nil {
panic(err)
}
if !pconn.Closed {
t.Fatal("owned PacketConn not closed after Listener.Close()")
}
}
// Listener should not close a net.PacketConn that it did not create.
// https://github.com/xtaci/kcp-go/issues/165
func TestListenerNonOwnedPacketConn(t *testing.T) {
// Create a net.PacketConn not owned by the Listener.
c, err := net.ListenPacket("udp", "127.0.0.1:0")
if err != nil {
panic(err)
}
defer c.Close()
// Make it remember when it has been closed.
pconn := newClosedFlagPacketConn(c)
l, err := ServeConn(nil, 0, 0, pconn)
if err != nil {
panic(err)
}
defer l.Close()
if pconn.Closed {
t.Fatal("non-owned PacketConn closed before Listener.Close()")
}
err = l.Close()
if err != nil {
panic(err)
}
if pconn.Closed {
t.Fatal("non-owned PacketConn closed after Listener.Close()")
}
}
// UDPSession should close a net.PacketConn that it created.
// https://github.com/xtaci/kcp-go/issues/165
func TestUDPSessionOwnedPacketConn(t *testing.T) {
l := sinkServer(0)
defer l.Close()
// DialWithOptions creates its own net.PacketConn.
client, err := DialWithOptions(l.Addr().String(), nil, 0, 0)
if err != nil {
panic(err)
}
defer client.Close()
// Replace the internal net.PacketConn with one that remembers when it
// has been closed.
pconn := newClosedFlagPacketConn(client.conn)
client.conn = pconn
if pconn.Closed {
t.Fatal("owned PacketConn closed before UDPSession.Close()")
}
err = client.Close()
if err != nil {
panic(err)
}
if !pconn.Closed {
t.Fatal("owned PacketConn not closed after UDPSession.Close()")
}
}
// UDPSession should not close a net.PacketConn that it did not create.
// https://github.com/xtaci/kcp-go/issues/165
func TestUDPSessionNonOwnedPacketConn(t *testing.T) {
l := sinkServer(0)
defer l.Close()
// Create a net.PacketConn not owned by the UDPSession.
c, err := net.ListenPacket("udp", "127.0.0.1:0")
if err != nil {
panic(err)
}
defer c.Close()
// Make it remember when it has been closed.
pconn := newClosedFlagPacketConn(c)
client, err := NewConn2(l.Addr(), nil, 0, 0, pconn)
if err != nil {
panic(err)
}
defer client.Close()
if pconn.Closed {
t.Fatal("non-owned PacketConn closed before UDPSession.Close()")
}
err = client.Close()
if err != nil {
panic(err)
}
if pconn.Closed {
t.Fatal("non-owned PacketConn closed after UDPSession.Close()")
}
}
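// Illustrative sketch, not one of the original tests: the four ownership tests
// above establish that a Listener/UDPSession closes the underlying
// net.PacketConn only when it created it (ListenWithOptions/DialWithOptions),
// while a conn injected via ServeConn/NewConn2 stays open and remains the
// caller's responsibility. The hypothetical helper below, reusing this file's
// imports, shows the caller-owned path.
func callerOwnedConnSketch() error {
	// The caller creates, and therefore owns, the packet conn.
	pc, err := net.ListenPacket("udp", "127.0.0.1:0")
	if err != nil {
		return err
	}
	defer pc.Close() // caller must close it; the Listener will not

	// ServeConn wraps the conn without taking ownership.
	l, err := ServeConn(nil, 0, 0, pc)
	if err != nil {
		return err
	}
	// Closing the listener leaves pc open (see TestListenerNonOwnedPacketConn).
	return l.Close()
}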

164
gate/kcp/snmp.go Normal file
View File

@@ -0,0 +1,164 @@
package kcp
import (
"fmt"
"sync/atomic"
)
// Snmp defines network statistics indicators
type Snmp struct {
BytesSent uint64 // bytes sent from upper level
BytesReceived uint64 // bytes received to upper level
MaxConn uint64 // max number of connections ever reached
ActiveOpens uint64 // accumulated active open connections
PassiveOpens uint64 // accumulated passive open connections
CurrEstab uint64 // current number of established connections
InErrs uint64 // UDP read errors reported from net.PacketConn
InCsumErrors uint64 // checksum errors from CRC32
KCPInErrors uint64 // packet input errors reported from KCP
InPkts uint64 // incoming packets count
OutPkts uint64 // outgoing packets count
InSegs uint64 // incoming KCP segments
OutSegs uint64 // outgoing KCP segments
InBytes uint64 // UDP bytes received
OutBytes uint64 // UDP bytes sent
RetransSegs uint64 // accumulated retransmitted segments
FastRetransSegs uint64 // accumulated fast retransmitted segments
EarlyRetransSegs uint64 // accumulated early retransmitted segments
LostSegs uint64 // number of segs inferred as lost
RepeatSegs uint64 // number of segs duplicated
FECRecovered uint64 // correct packets recovered from FEC
FECErrs uint64 // incorrect packets recovered from FEC
FECParityShards uint64 // FEC segments received
FECShortShards uint64 // number of data shards insufficient for recovery
}
func newSnmp() *Snmp {
return new(Snmp)
}
// Header returns all field names
func (s *Snmp) Header() []string {
return []string{
"BytesSent",
"BytesReceived",
"MaxConn",
"ActiveOpens",
"PassiveOpens",
"CurrEstab",
"InErrs",
"InCsumErrors",
"KCPInErrors",
"InPkts",
"OutPkts",
"InSegs",
"OutSegs",
"InBytes",
"OutBytes",
"RetransSegs",
"FastRetransSegs",
"EarlyRetransSegs",
"LostSegs",
"RepeatSegs",
"FECParityShards",
"FECErrs",
"FECRecovered",
"FECShortShards",
}
}
// ToSlice returns the current snmp counters as a string slice
func (s *Snmp) ToSlice() []string {
snmp := s.Copy()
return []string{
fmt.Sprint(snmp.BytesSent),
fmt.Sprint(snmp.BytesReceived),
fmt.Sprint(snmp.MaxConn),
fmt.Sprint(snmp.ActiveOpens),
fmt.Sprint(snmp.PassiveOpens),
fmt.Sprint(snmp.CurrEstab),
fmt.Sprint(snmp.InErrs),
fmt.Sprint(snmp.InCsumErrors),
fmt.Sprint(snmp.KCPInErrors),
fmt.Sprint(snmp.InPkts),
fmt.Sprint(snmp.OutPkts),
fmt.Sprint(snmp.InSegs),
fmt.Sprint(snmp.OutSegs),
fmt.Sprint(snmp.InBytes),
fmt.Sprint(snmp.OutBytes),
fmt.Sprint(snmp.RetransSegs),
fmt.Sprint(snmp.FastRetransSegs),
fmt.Sprint(snmp.EarlyRetransSegs),
fmt.Sprint(snmp.LostSegs),
fmt.Sprint(snmp.RepeatSegs),
fmt.Sprint(snmp.FECParityShards),
fmt.Sprint(snmp.FECErrs),
fmt.Sprint(snmp.FECRecovered),
fmt.Sprint(snmp.FECShortShards),
}
}
// Copy makes a copy of the current snmp snapshot
func (s *Snmp) Copy() *Snmp {
d := newSnmp()
d.BytesSent = atomic.LoadUint64(&s.BytesSent)
d.BytesReceived = atomic.LoadUint64(&s.BytesReceived)
d.MaxConn = atomic.LoadUint64(&s.MaxConn)
d.ActiveOpens = atomic.LoadUint64(&s.ActiveOpens)
d.PassiveOpens = atomic.LoadUint64(&s.PassiveOpens)
d.CurrEstab = atomic.LoadUint64(&s.CurrEstab)
d.InErrs = atomic.LoadUint64(&s.InErrs)
d.InCsumErrors = atomic.LoadUint64(&s.InCsumErrors)
d.KCPInErrors = atomic.LoadUint64(&s.KCPInErrors)
d.InPkts = atomic.LoadUint64(&s.InPkts)
d.OutPkts = atomic.LoadUint64(&s.OutPkts)
d.InSegs = atomic.LoadUint64(&s.InSegs)
d.OutSegs = atomic.LoadUint64(&s.OutSegs)
d.InBytes = atomic.LoadUint64(&s.InBytes)
d.OutBytes = atomic.LoadUint64(&s.OutBytes)
d.RetransSegs = atomic.LoadUint64(&s.RetransSegs)
d.FastRetransSegs = atomic.LoadUint64(&s.FastRetransSegs)
d.EarlyRetransSegs = atomic.LoadUint64(&s.EarlyRetransSegs)
d.LostSegs = atomic.LoadUint64(&s.LostSegs)
d.RepeatSegs = atomic.LoadUint64(&s.RepeatSegs)
d.FECParityShards = atomic.LoadUint64(&s.FECParityShards)
d.FECErrs = atomic.LoadUint64(&s.FECErrs)
d.FECRecovered = atomic.LoadUint64(&s.FECRecovered)
d.FECShortShards = atomic.LoadUint64(&s.FECShortShards)
return d
}
// Reset values to zero
func (s *Snmp) Reset() {
atomic.StoreUint64(&s.BytesSent, 0)
atomic.StoreUint64(&s.BytesReceived, 0)
atomic.StoreUint64(&s.MaxConn, 0)
atomic.StoreUint64(&s.ActiveOpens, 0)
atomic.StoreUint64(&s.PassiveOpens, 0)
atomic.StoreUint64(&s.CurrEstab, 0)
atomic.StoreUint64(&s.InErrs, 0)
atomic.StoreUint64(&s.InCsumErrors, 0)
atomic.StoreUint64(&s.KCPInErrors, 0)
atomic.StoreUint64(&s.InPkts, 0)
atomic.StoreUint64(&s.OutPkts, 0)
atomic.StoreUint64(&s.InSegs, 0)
atomic.StoreUint64(&s.OutSegs, 0)
atomic.StoreUint64(&s.InBytes, 0)
atomic.StoreUint64(&s.OutBytes, 0)
atomic.StoreUint64(&s.RetransSegs, 0)
atomic.StoreUint64(&s.FastRetransSegs, 0)
atomic.StoreUint64(&s.EarlyRetransSegs, 0)
atomic.StoreUint64(&s.LostSegs, 0)
atomic.StoreUint64(&s.RepeatSegs, 0)
atomic.StoreUint64(&s.FECParityShards, 0)
atomic.StoreUint64(&s.FECErrs, 0)
atomic.StoreUint64(&s.FECRecovered, 0)
atomic.StoreUint64(&s.FECShortShards, 0)
}
// DefaultSnmp is the global KCP connection statistics collector
var DefaultSnmp *Snmp
func init() {
DefaultSnmp = newSnmp()
}
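// Illustrative sketch, not part of the original file: DefaultSnmp is intended
// to be read through Copy/Header/ToSlice so that counters are loaded
// atomically. The hypothetical helper below pairs field names with their
// current values; it relies only on this file's "fmt" import.
func dumpSnmpSketch() {
	names := DefaultSnmp.Header()
	values := DefaultSnmp.ToSlice() // ToSlice snapshots via Copy() internally
	for i := range names {
		fmt.Printf("%s=%s\n", names[i], values[i])
	}
}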

146
gate/kcp/timedsched.go Normal file
View File

@@ -0,0 +1,146 @@
package kcp
import (
"container/heap"
"runtime"
"sync"
"time"
)
// SystemTimedSched is the library-level timed scheduler
var SystemTimedSched = NewTimedSched(runtime.NumCPU())
type timedFunc struct {
execute func()
ts time.Time
}
// a heap of timed functions ordered by timestamp
type timedFuncHeap []timedFunc
func (h timedFuncHeap) Len() int { return len(h) }
func (h timedFuncHeap) Less(i, j int) bool { return h[i].ts.Before(h[j].ts) }
func (h timedFuncHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h *timedFuncHeap) Push(x interface{}) { *h = append(*h, x.(timedFunc)) }
func (h *timedFuncHeap) Pop() interface{} {
old := *h
n := len(old)
x := old[n-1]
old[n-1].execute = nil // avoid memory leak
*h = old[0 : n-1]
return x
}
// TimedSched represents the control struct for the timed parallel scheduler
type TimedSched struct {
// prepending tasks
prependTasks []timedFunc
prependLock sync.Mutex
chPrependNotify chan struct{}
// tasks will be distributed through chTask
chTask chan timedFunc
dieOnce sync.Once
die chan struct{}
}
// NewTimedSched creates a parallel scheduler with the given degree of parallelism
func NewTimedSched(parallel int) *TimedSched {
ts := new(TimedSched)
ts.chTask = make(chan timedFunc)
ts.die = make(chan struct{})
ts.chPrependNotify = make(chan struct{}, 1)
for i := 0; i < parallel; i++ {
go ts.sched()
}
go ts.prepend()
return ts
}
func (ts *TimedSched) sched() {
var tasks timedFuncHeap
timer := time.NewTimer(0)
drained := false
for {
select {
case task := <-ts.chTask:
now := time.Now()
if now.After(task.ts) {
// already delayed! execute immediately
task.execute()
} else {
heap.Push(&tasks, task)
// properly reset timer to trigger based on the top element
stopped := timer.Stop()
if !stopped && !drained {
<-timer.C
}
timer.Reset(tasks[0].ts.Sub(now))
drained = false
}
case now := <-timer.C:
drained = true
for tasks.Len() > 0 {
if now.After(tasks[0].ts) {
heap.Pop(&tasks).(timedFunc).execute()
} else {
timer.Reset(tasks[0].ts.Sub(now))
drained = false
break
}
}
case <-ts.die:
return
}
}
}
func (ts *TimedSched) prepend() {
var tasks []timedFunc
for {
select {
case <-ts.chPrependNotify:
ts.prependLock.Lock()
// keep cap to reuse slice
if cap(tasks) < cap(ts.prependTasks) {
tasks = make([]timedFunc, 0, cap(ts.prependTasks))
}
tasks = tasks[:len(ts.prependTasks)]
copy(tasks, ts.prependTasks)
for k := range ts.prependTasks {
ts.prependTasks[k].execute = nil // avoid memory leak
}
ts.prependTasks = ts.prependTasks[:0]
ts.prependLock.Unlock()
for k := range tasks {
select {
case ts.chTask <- tasks[k]:
tasks[k].execute = nil // avoid memory leak
case <-ts.die:
return
}
}
tasks = tasks[:0]
case <-ts.die:
return
}
}
}
// Put schedules function 'f' to be executed at 'deadline'
func (ts *TimedSched) Put(f func(), deadline time.Time) {
ts.prependLock.Lock()
ts.prependTasks = append(ts.prependTasks, timedFunc{f, deadline})
ts.prependLock.Unlock()
select {
case ts.chPrependNotify <- struct{}{}:
default:
}
}
// Close terminates this scheduler
func (ts *TimedSched) Close() { ts.dieOnce.Do(func() { close(ts.die) }) }
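// Illustrative sketch, not part of the original file: typical usage is to hand
// Put a callback together with an absolute deadline; one of the scheduler's
// worker goroutines runs it at (or shortly after) that time. The 50ms delay is
// an arbitrary value chosen for the example.
func timedSchedSketch() {
	done := make(chan struct{})
	// Schedule a callback on the library-level scheduler.
	SystemTimedSched.Put(func() { close(done) }, time.Now().Add(50*time.Millisecond))
	<-done // the callback fires on a scheduler goroutine, not on the caller's
}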

80
gate/kcp/tx.go Normal file
View File

@@ -0,0 +1,80 @@
package kcp
import (
"net"
"sync/atomic"
"github.com/pkg/errors"
"golang.org/x/net/ipv4"
)
func buildEnet(connType uint8, enetType uint32, conv uint64) []byte {
data := make([]byte, 20)
if connType == ConnEnetSyn {
copy(data[0:4], MagicEnetSynHead)
copy(data[16:20], MagicEnetSynTail)
} else if connType == ConnEnetEst {
copy(data[0:4], MagicEnetEstHead)
copy(data[16:20], MagicEnetEstTail)
} else if connType == ConnEnetFin {
copy(data[0:4], MagicEnetFinHead)
copy(data[16:20], MagicEnetFinTail)
} else {
return nil
}
// the high 4 bytes and the low 4 bytes of conv are split apart: the low 4 bytes come first, then the high 4 bytes
// e.g. 00 00 01 45 | LL LL LL LL | HH HH HH HH | 49 96 02 d2 | 14 51 45 45
data[4] = uint8(conv >> 24)
data[5] = uint8(conv >> 16)
data[6] = uint8(conv >> 8)
data[7] = uint8(conv >> 0)
data[8] = uint8(conv >> 56)
data[9] = uint8(conv >> 48)
data[10] = uint8(conv >> 40)
data[11] = uint8(conv >> 32)
// Enet
data[12] = uint8(enetType >> 24)
data[13] = uint8(enetType >> 16)
data[14] = uint8(enetType >> 8)
data[15] = uint8(enetType >> 0)
return data
}
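// Illustrative sketch, not part of the original file: the inverse of the byte
// layout above, reassembling the 64-bit conv from a handshake packet. Bytes
// 4-7 carry the low 32 bits and bytes 8-11 the high 32 bits, each big-endian,
// mirroring how buildEnet splits them.
func parseEnetConvSketch(data []byte) uint64 {
	if len(data) < 20 {
		return 0
	}
	low := uint64(data[4])<<24 | uint64(data[5])<<16 | uint64(data[6])<<8 | uint64(data[7])
	high := uint64(data[8])<<24 | uint64(data[9])<<16 | uint64(data[10])<<8 | uint64(data[11])
	return high<<32 | low
}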
func (l *Listener) defaultSendEnetNotifyToClient(enet *Enet) {
remoteAddr, err := net.ResolveUDPAddr("udp", enet.Addr)
if err != nil {
return
}
data := buildEnet(enet.ConnType, enet.EnetType, enet.ConvId)
if data == nil {
return
}
_, _ = l.conn.WriteTo(data, remoteAddr)
}
func (s *UDPSession) defaultSendEnetNotify(enet *Enet) {
data := buildEnet(enet.ConnType, enet.EnetType, s.GetConv())
if data == nil {
return
}
s.defaultTx([]ipv4.Message{{
Buffers: [][]byte{data},
Addr: s.remote,
}})
}
func (s *UDPSession) defaultTx(txqueue []ipv4.Message) {
nbytes := 0
npkts := 0
for k := range txqueue {
if n, err := s.conn.WriteTo(txqueue[k].Buffers[0], txqueue[k].Addr); err == nil {
nbytes += n
npkts++
} else {
s.notifyWriteError(errors.WithStack(err))
break
}
}
atomic.AddUint64(&DefaultSnmp.OutPkts, uint64(npkts))
atomic.AddUint64(&DefaultSnmp.OutBytes, uint64(nbytes))
}

20
gate/kcp/tx_generic.go Normal file
View File

@@ -0,0 +1,20 @@
//go:build !linux
// +build !linux
package kcp
import (
"golang.org/x/net/ipv4"
)
func (l *Listener) SendEnetNotifyToClient(enet *Enet) {
l.defaultSendEnetNotifyToClient(enet)
}
func (s *UDPSession) SendEnetNotify(enet *Enet) {
s.defaultSendEnetNotify(enet)
}
func (s *UDPSession) tx(txqueue []ipv4.Message) {
s.defaultTx(txqueue)
}

102
gate/kcp/tx_linux.go Normal file
View File

@@ -0,0 +1,102 @@
//go:build linux
// +build linux
package kcp
import (
"golang.org/x/net/ipv6"
"net"
"os"
"sync/atomic"
"github.com/pkg/errors"
"golang.org/x/net/ipv4"
)
func (l *Listener) SendEnetNotifyToClient(enet *Enet) {
var xconn batchConn
_, ok := l.conn.(*net.UDPConn)
if !ok {
return
}
localAddr, err := net.ResolveUDPAddr("udp", l.conn.LocalAddr().String())
if err != nil {
return
}
if localAddr.IP.To4() != nil {
xconn = ipv4.NewPacketConn(l.conn)
} else {
xconn = ipv6.NewPacketConn(l.conn)
}
// default version
if xconn == nil {
l.defaultSendEnetNotifyToClient(enet)
return
}
remoteAddr, err := net.ResolveUDPAddr("udp", enet.Addr)
if err != nil {
return
}
data := buildEnet(enet.ConnType, enet.EnetType, enet.ConvId)
if data == nil {
return
}
_, _ = xconn.WriteBatch([]ipv4.Message{{
Buffers: [][]byte{data},
Addr: remoteAddr,
}}, 0)
}
func (s *UDPSession) SendEnetNotify(enet *Enet) {
data := buildEnet(enet.ConnType, enet.EnetType, s.GetConv())
if data == nil {
return
}
s.tx([]ipv4.Message{{
Buffers: [][]byte{data},
Addr: s.remote,
}})
}
func (s *UDPSession) tx(txqueue []ipv4.Message) {
// default version
if s.xconn == nil || s.xconnWriteError != nil {
s.defaultTx(txqueue)
return
}
// x/net version
nbytes := 0
npkts := 0
for len(txqueue) > 0 {
if n, err := s.xconn.WriteBatch(txqueue, 0); err == nil {
for k := range txqueue[:n] {
nbytes += len(txqueue[k].Buffers[0])
}
npkts += n
txqueue = txqueue[n:]
} else {
// compatibility issue:
// for linux kernel <= 2.6.32, sendmmsg is not supported,
// and an error of type os.SyscallError will be returned
if operr, ok := err.(*net.OpError); ok {
if se, ok := operr.Err.(*os.SyscallError); ok {
if se.Syscall == "sendmmsg" {
s.xconnWriteError = se
s.defaultTx(txqueue)
return
}
}
}
s.notifyWriteError(errors.WithStack(err))
break
}
}
atomic.AddUint64(&DefaultSnmp.OutPkts, uint64(npkts))
atomic.AddUint64(&DefaultSnmp.OutBytes, uint64(nbytes))
}
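// Illustrative sketch, not part of the original file: tx consumes a slice of
// ipv4.Message, one buffer per message, all addressed to the session's peer.
// The hypothetical helper below shows how such a queue might be assembled
// before calling s.tx; payload contents are placeholders.
func buildTxQueueSketch(s *UDPSession, payloads [][]byte) []ipv4.Message {
	msgs := make([]ipv4.Message, 0, len(payloads))
	for _, p := range payloads {
		msgs = append(msgs, ipv4.Message{
			Buffers: [][]byte{p},
			Addr:    s.remote, // same remote addr as the session
		})
	}
	return msgs
}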