package quic

import (
	"context"
	"sync"
	"time"

	"github.com/quic-go/quic-go"
	"github.com/quic-go/quic-go/logging"
	"github.com/quic-go/quic-go/qlog"
	"github.com/xtls/xray-core/common"
	"github.com/xtls/xray-core/common/net"
	"github.com/xtls/xray-core/common/task"
	"github.com/xtls/xray-core/transport/internet"
	"github.com/xtls/xray-core/transport/internet/stat"
	"github.com/xtls/xray-core/transport/internet/tls"
)

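// connectionContext pairs the wrapped raw packet connection with the QUIC
// connection multiplexed on top of it, so both can be closed together.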
type connectionContext struct {
	rawConn *sysConn
	conn    quic.Connection
}

var errConnectionClosed = newError("connection closed")

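// openStream opens a new QUIC stream on this connection and wraps it as an
// interConn. It fails fast with errConnectionClosed if the underlying QUIC
// connection has already been closed.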
func (c *connectionContext) openStream(destAddr net.Addr) (*interConn, error) {
	if !isActive(c.conn) {
		return nil, errConnectionClosed
	}

	stream, err := c.conn.OpenStream()
	if err != nil {
		return nil, err
	}

	conn := &interConn{
		stream: stream,
		local:  c.conn.LocalAddr(),
		remote: destAddr,
	}

	return conn, nil
}

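// clientConnections tracks the pool of live QUIC connections per destination.
// The conns map is guarded by access; cleanup periodically prunes dead entries.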
type clientConnections struct {
	access  sync.Mutex
	conns   map[net.Destination][]*connectionContext
	cleanup *task.Periodic
}

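// isActive reports whether the QUIC connection is still usable: its context is
// cancelled once the connection has been closed locally or by the peer.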
func isActive(s quic.Connection) bool {
	select {
	case <-s.Context().Done():
		return false
	default:
		return true
	}
}

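// removeInactiveConnections filters out closed connections, closing their QUIC
// and raw sockets as it goes, and returns the connections that remain active.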
func removeInactiveConnections(conns []*connectionContext) []*connectionContext {
	activeConnections := make([]*connectionContext, 0, len(conns))
	for i, s := range conns {
		if isActive(s.conn) {
			activeConnections = append(activeConnections, s)
			continue
		}
		newError("closing quic connection at index: ", i).WriteToLog()
		if err := s.conn.CloseWithError(0, ""); err != nil {
			newError("failed to close connection").Base(err).WriteToLog()
		}
		if err := s.rawConn.Close(); err != nil {
			newError("failed to close raw connection").Base(err).WriteToLog()
		}
	}

	if len(activeConnections) < len(conns) {
		newError("active quic connection reduced from ", len(conns), " to ", len(activeConnections)).WriteToLog()
		return activeConnections
	}

	return conns
}

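// cleanConnections runs periodically to drop closed connections from the pool
// and remove destinations that no longer have any live connection.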
func (s *clientConnections) cleanConnections() error {
	s.access.Lock()
	defer s.access.Unlock()

	if len(s.conns) == 0 {
		return nil
	}

	newConnMap := make(map[net.Destination][]*connectionContext)

	for dest, conns := range s.conns {
		conns = removeInactiveConnections(conns)
		if len(conns) > 0 {
			newConnMap[dest] = conns
		}
	}

	s.conns = newConnMap
	return nil
}

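// openConnection returns a stream on an existing QUIC connection to destAddr
// when possible, and dials a new connection otherwise. Per the stream
// allocation mode introduced in #915, only the most recently opened connection
// is reused; older connections are left to drain and close naturally.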
func (s *clientConnections) openConnection(ctx context.Context, destAddr net.Addr, config *Config, tlsConfig *tls.Config, sockopt *internet.SocketConfig) (stat.Connection, error) {
	s.access.Lock()
	defer s.access.Unlock()

	if s.conns == nil {
		s.conns = make(map[net.Destination][]*connectionContext)
	}

	dest := net.DestinationFromAddr(destAddr)

	var conns []*connectionContext
	if s, found := s.conns[dest]; found {
		conns = s
	}

	if len(conns) > 0 {
		s := conns[len(conns)-1]
		if isActive(s.conn) {
			conn, err := s.openStream(destAddr)
			if err == nil {
				return conn, nil
			}
			newError("failed to openStream: ").Base(err).WriteToLog()
		} else {
			newError("current quic connection is not active!").WriteToLog()
		}
	}

	conns = removeInactiveConnections(conns)
	newError("dialing quic to ", dest).WriteToLog()
	rawConn, err := internet.DialSystem(ctx, dest, sockopt)
	if err != nil {
		return nil, newError("failed to dial to dest: ", err).AtWarning().Base(err)
	}

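	// Keep-alives are deliberately disabled (see #915): idle connections are
	// allowed to time out so that traffic migrates to freshly dialed ones.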
	quicConfig := &quic.Config{
		KeepAlivePeriod:      0,
		HandshakeIdleTimeout: time.Second * 8,
		MaxIdleTimeout:       time.Second * 300,
		Tracer: func(ctx context.Context, p logging.Perspective, ci quic.ConnectionID) *logging.ConnectionTracer {
			return qlog.NewConnectionTracer(&QlogWriter{connID: ci}, p, ci)
		},
	}

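	// internet.DialSystem may hand back either a plain *net.UDPConn or a
	// PacketConnWrapper around one; anything else cannot carry QUIC here.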
	var udpConn *net.UDPConn
	switch conn := rawConn.(type) {
	case *net.UDPConn:
		udpConn = conn
	case *internet.PacketConnWrapper:
		udpConn = conn.Conn.(*net.UDPConn)
	default:
		// TODO: Support sockopt for QUIC
		rawConn.Close()
		return nil, newError("QUIC with sockopt is unsupported").AtWarning()
	}

	sysConn, err := wrapSysConn(udpConn, config)
	if err != nil {
		rawConn.Close()
		return nil, err
	}

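	// Dial the QUIC connection over the wrapped UDP socket; the resulting
	// connectionContext is appended to the pool so later calls can reuse it.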
	tr := quic.Transport{
		ConnectionIDLength: 12,
		Conn:               sysConn,
	}
	conn, err := tr.Dial(context.Background(), destAddr, tlsConfig.GetTLSConfig(tls.WithDestination(dest)), quicConfig)
	if err != nil {
		sysConn.Close()
		return nil, err
	}

	context := &connectionContext{
		conn:    conn,
		rawConn: sysConn,
	}
	s.conns[dest] = append(conns, context)
	return context.openStream(destAddr)
}

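// client is the package-level connection pool shared by all QUIC dials.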
var client clientConnections

func init() {
	client.conns = make(map[net.Destination][]*connectionContext)
	client.cleanup = &task.Periodic{
		Interval: time.Minute,
		Execute:  client.cleanConnections,
	}
	common.Must(client.cleanup.Start())
}

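// Dial implements the QUIC transport dialer for this package. It resolves the
// destination to a UDP address, applies the stream's TLS settings (falling back
// to an insecure internal config when none are provided), and opens a stream
// through the shared connection pool. Callers do not invoke it directly; it is
// reached via the transport dialer registry (see the final init in this file),
// roughly:
//
//	// sketch, assuming the usual xray-core dispatch path
//	conn, err := internet.Dial(ctx, dest, streamSettings)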
func Dial(ctx context.Context, dest net.Destination, streamSettings *internet.MemoryStreamConfig) (stat.Connection, error) {
	tlsConfig := tls.ConfigFromStreamSettings(streamSettings)
	if tlsConfig == nil {
		tlsConfig = &tls.Config{
			ServerName:    internalDomain,
			AllowInsecure: true,
		}
	}

	var destAddr *net.UDPAddr
	if dest.Address.Family().IsIP() {
		destAddr = &net.UDPAddr{
			IP:   dest.Address.IP(),
			Port: int(dest.Port),
		}
	} else {
		dialerIp := internet.DestIpAddress()
		if dialerIp != nil {
			destAddr = &net.UDPAddr{
				IP:   dialerIp,
				Port: int(dest.Port),
			}
			newError("quic Dial use dialer dest addr: ", destAddr).WriteToLog()
		} else {
			addr, err := net.ResolveUDPAddr("udp", dest.NetAddr())
			if err != nil {
				return nil, err
			}
			destAddr = addr
		}
	}

	config := streamSettings.ProtocolSettings.(*Config)

	return client.openConnection(ctx, destAddr, config, tlsConfig, streamSettings.SocketSettings)
}

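// init registers Dial as this package's transport dialer under protocolName.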
func init() {
	common.Must(internet.RegisterTransportDialer(protocolName, Dial))
}