Chore: wait for system stack to close

gVisor bot 2022-04-22 05:37:44 +08:00
parent 751f3e8abd
commit ba507fc5e0
2 changed files with 29 additions and 25 deletions

@@ -27,10 +27,8 @@ func StartListener(device io.ReadWriteCloser, gateway, portal, broadcast netip.A
 }
 
 func (t *StackListener) Close() error {
-    _ = t.tcp.Close()
     _ = t.udp.Close()
-
-    return t.device.Close()
+    return t.tcp.Close()
 }
 
 func (t *StackListener) TCP() *nat.TCP {
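
Note on the hunk above: the listener no longer closes the device itself. It tears down the UDP handler best-effort and surfaces only the TCP handler's close error; the device is left for its owner to close. Closing the TCP side is what later lets the accept loop in the system stack observe shutdown. A minimal sketch of that ordering, with illustrative stand-ins for the real nat.TCP and nat.UDP types:

package main

import (
    "errors"
    "fmt"
)

// closer is a stand-in for the real nat.TCP/nat.UDP handlers.
type closer struct{ err error }

func (c closer) Close() error { return c.err }

// stackListener mirrors the shape of StackListener for illustration only.
type stackListener struct {
    tcp closer
    udp closer
}

// Close follows the ordering from the hunk: the UDP error is dropped,
// the TCP close error is the one reported, and the device is not touched.
func (t *stackListener) Close() error {
    _ = t.udp.Close()
    return t.tcp.Close()
}

func main() {
    l := &stackListener{udp: closer{err: errors.New("udp teardown failed")}}
    fmt.Println(l.Close()) // <nil>: only the TCP error is surfaced
}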

@@ -7,6 +7,7 @@ import (
     "net/netip"
     "runtime"
     "strconv"
+    "sync"
     "time"
 
     "github.com/Dreamacro/clash/adapter/inbound"
@@ -28,6 +29,8 @@ type sysStack struct {
     device device.Device
     closed bool
+    once   sync.Once
+    wg     sync.WaitGroup
 }
 
 func (s *sysStack) Close() error {
@@ -38,10 +41,12 @@ func (s *sysStack) Close() error {
     }()
 
     s.closed = true
-    if s.stack != nil {
-        return s.stack.Close()
-    }
-    return nil
+
+    err := s.stack.Close()
+
+    s.wg.Wait()
+
+    return err
 }
 
 func New(device device.Device, dnsHijack []netip.AddrPort, tunAddress netip.Prefix, tcpIn chan<- C.ConnContext, udpIn chan<- *inbound.PacketAdapter) (ipstack.Stack, error) {
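
This is the heart of the commit: Close records the stack's close error, then blocks on the WaitGroup until the TCP and UDP loops have returned, and only then hands the error back, so callers know the stack has fully stopped. A reduced, self-contained sketch of that close-then-wait shape (type and field names are illustrative, not the project's):

package main

import (
    "fmt"
    "sync"
    "time"
)

// nopCloser stands in for the wrapped system-stack listener.
type nopCloser struct{}

func (nopCloser) Close() error { return nil }

// stack reduces sysStack to its close-then-wait essentials.
type stack struct {
    inner  nopCloser
    closed bool
    wg     sync.WaitGroup
}

func (s *stack) Close() error {
    s.closed = true        // ask the read loops to wind down
    err := s.inner.Close() // unblocks any Accept/ReadFrom they sit in
    s.wg.Wait()            // block until every loop has called Done
    return err             // report the inner close error, not nil
}

func main() {
    s := &stack{}
    s.wg.Add(1)
    go func() {
        defer s.wg.Done()
        for !s.closed { // same unsynchronized flag as the real loops
            time.Sleep(10 * time.Millisecond)
        }
    }()
    fmt.Println("closed:", s.Close()) // prints only after the loop has exited
}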
@@ -67,16 +72,10 @@ func New(device device.Device, dnsHijack []netip.AddrPort, tunAddress netip.Pref
             _ = tcp.Close()
         }(stack.TCP())
 
-        defer log.Debugln("TCP: closed")
-
         for !ipStack.closed {
-            if err = stack.TCP().SetDeadline(time.Time{}); err != nil {
-                break
-            }
-
             conn, err := stack.TCP().Accept()
             if err != nil {
-                log.Debugln("Accept connection: %v", err)
+                log.Debugln("[STACK] accept connection error: %v", err)
                 continue
             }
 
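
With shutdown now signalled by closing the handlers, the accept loop can drop the per-iteration SetDeadline call: once the TCP handler is closed, the pending Accept returns an error, the loop continues into its !ipStack.closed check (already false at that point), and exits. Standard-library listeners behave exactly this way, and the stack here presumably relies on the analogous property; a small stdlib-only demonstration:

package main

import (
    "errors"
    "fmt"
    "net"
    "time"
)

func main() {
    ln, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        panic(err)
    }

    done := make(chan error, 1)
    go func() {
        _, err := ln.Accept() // blocks: nobody connects
        done <- err
    }()

    time.Sleep(100 * time.Millisecond) // let the goroutine block in Accept
    _ = ln.Close()                     // then close out from under it

    fmt.Println(errors.Is(<-done, net.ErrClosed)) // true: Close unblocked Accept
}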
@@ -146,6 +145,8 @@ func New(device device.Device, dnsHijack []netip.AddrPort, tunAddress netip.Pref
 
             tcpIn <- context.NewConnContext(conn, metadata)
         }
+
+        ipStack.wg.Done()
     }
 
     udp := func() {
@@ -153,14 +154,13 @@ func New(device device.Device, dnsHijack []netip.AddrPort, tunAddress netip.Pref
             _ = udp.Close()
         }(stack.UDP())
 
-        defer log.Debugln("UDP: closed")
-
         for !ipStack.closed {
             buf := pool.Get(pool.UDPBufferSize)
             n, lRAddr, rRAddr, err := stack.UDP().ReadFrom(buf)
             if err != nil {
-                return
+                _ = pool.Put(buf)
+                break
             }
 
             raw := buf[:n]
 
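
Two things change on the UDP error path: the pooled buffer is returned before leaving the loop, and return becomes break so control still reaches the ipStack.wg.Done() added at the end of the function; since Done is not deferred, an early return would leave the WaitGroup counter high and stall Close. A sketch of the buffer-hygiene half, with sync.Pool standing in for the project's pool package:

package main

import (
    "errors"
    "fmt"
    "sync"
)

// bufPool stands in for the project's pool package.
var bufPool = sync.Pool{
    New: func() any { return make([]byte, 64*1024) },
}

// readLoop takes a buffer from the pool every iteration, so the error
// path must hand the buffer back before breaking out, or it is lost.
func readLoop(read func([]byte) (int, error)) {
    for {
        buf := bufPool.Get().([]byte)
        n, err := read(buf)
        if err != nil {
            bufPool.Put(buf) // the fix: return the buffer, then break
            break
        }
        _ = buf[:n] // hand the datagram off for processing...
        bufPool.Put(buf)
    }
    // ...control reaches here, where the commit calls ipStack.wg.Done()
}

func main() {
    reads := 0
    readLoop(func(b []byte) (int, error) {
        if reads++; reads > 3 {
            return 0, errors.New("stack closed")
        }
        return copy(b, "datagram"), nil
    })
    fmt.Println("reads:", reads) // 4: three datagrams plus the failing read
}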
@@ -209,17 +209,23 @@ func New(device device.Device, dnsHijack []netip.AddrPort, tunAddress netip.Pref
             default:
             }
         }
+
+        ipStack.wg.Done()
     }
 
-    go tcp()
+    ipStack.once.Do(func() {
+        ipStack.wg.Add(1)
+        go tcp()
 
-    numUDPWorkers := 4
-    if num := runtime.GOMAXPROCS(0); num > numUDPWorkers {
-        numUDPWorkers = num
-    }
-    for i := 0; i < numUDPWorkers; i++ {
-        go udp()
-    }
+        numUDPWorkers := 4
+        if num := runtime.GOMAXPROCS(0); num > numUDPWorkers {
+            numUDPWorkers = num
+        }
+        for i := 0; i < numUDPWorkers; i++ {
+            ipStack.wg.Add(1)
+            go udp()
+        }
+    })
 
     return ipStack, nil
 }
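
Startup is wrapped in once.Do so the goroutines can only ever be launched once, and each wg.Add(1) is placed before its go statement, so a concurrent Close cannot Wait past a worker that has not registered yet. A sketch of the same pattern (the commit calls Done at the end of each loop body; defer is used below as the more defensive spelling):

package main

import (
    "fmt"
    "runtime"
    "sync"
)

// workers mirrors the once/wg pair added to sysStack.
type workers struct {
    once sync.Once
    wg   sync.WaitGroup
}

func (w *workers) start(worker func()) {
    w.once.Do(func() {
        n := 4 // at least four UDP readers,
        if g := runtime.GOMAXPROCS(0); g > n {
            n = g // more on machines with more parallelism
        }
        for i := 0; i < n; i++ {
            w.wg.Add(1) // register before the goroutine exists
            go func() {
                defer w.wg.Done()
                worker()
            }()
        }
    })
}

func main() {
    w := &workers{}
    w.start(func() { fmt.Println("worker ran") })
    w.start(func() { fmt.Println("never printed") }) // second start is a no-op
    w.wg.Wait()
}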