Browse Source

Fix: packet tunnel stops relaying while tunnel connected

- Client will create a new channel within the current
  tunnel if necessary.

- Document that server will close idle packet tunnel
  port forwards.
Rod Hynes 8 years ago
parent
commit
345a46cea3
3 changed files with 22 additions and 6 deletions
  1. 7 0
      psiphon/common/tun/tun.go
  2. 2 2
      psiphon/controller.go
  3. 13 4
      psiphon/packetTunnelTransport.go

+ 7 - 0
psiphon/common/tun/tun.go

@@ -529,6 +529,13 @@ func (server *Server) runSessionReaper() {
 	// sessions. This action, removing the index from server.indexToSession,
 	// releases the IP addresses assigned to the session.
 
+	// TODO: As-is, this will discard sessions for live SSH tunnels,
+	// as long as the SSH channel for such a session has been idle for
+	// a sufficient period. Should the session be retained as long as
+	// the SSH tunnel is alive (e.g., expose and call session.touch()
+	// on keepalive events)? Or is it better to free up resources held
+	// by idle sessions?
+
 	idleExpiry := server.sessionIdleExpiry()
 
 	ticker := time.NewTicker(idleExpiry / 2)

+ 2 - 2
psiphon/controller.go

@@ -692,7 +692,7 @@ loop:
 
 			// Set the new tunnel as the transport for the packet tunnel. The packet tunnel
 			// client remains up when reestablishing, but no packets are relayed while there
-			// is no connected tunnel. UseNewTunnel will establish a new packet tunnel SSH
+			// is no connected tunnel. UseTunnel will establish a new packet tunnel SSH
 			// channel over the new SSH tunnel and configure the packet tunnel client to use
 			// the new SSH channel as its transport.
 			//
@@ -702,7 +702,7 @@ loop:
 			// the packet tunnel is used.
 
 			if controller.packetTunnelTransport != nil {
-				controller.packetTunnelTransport.UseNewTunnel(establishedTunnel)
+				controller.packetTunnelTransport.UseTunnel(establishedTunnel)
 			}
 
 			// TODO: design issue -- might not be enough server entries with region/caps to ever fill tunnel slots;

+ 13 - 4
psiphon/packetTunnelTransport.go

@@ -180,10 +180,10 @@ func (p *PacketTunnelTransport) Close() error {
 	return nil
 }
 
-// UseNewTunnel sets the PacketTunnelTransport to use a new transport channel within
-// the specified tunnel. UseNewTunnel does not block on the open channel call; it spawns
+// UseTunnel sets the PacketTunnelTransport to use a new transport channel within
+// the specified tunnel. UseTunnel does not block on the open channel call; it spawns
 // a worker that calls tunnel.DialPacketTunnelChannel and uses the resulting channel.
-func (p *PacketTunnelTransport) UseNewTunnel(tunnel *Tunnel) {
+func (p *PacketTunnelTransport) UseTunnel(tunnel *Tunnel) {
 
 	p.workers.Add(1)
 	go func(tunnel *Tunnel) {
@@ -215,7 +215,7 @@ func (p *PacketTunnelTransport) setChannel(
 	p.channelMutex.Lock()
 
 	// Concurrency note: this check is within the mutex to ensure that a
-	// UseNewTunnel call concurrent with a Close call doesn't leave a channel
+	// UseTunnel call concurrent with a Close call doesn't leave a channel
 	// set.
 	select {
 	case <-p.runContext.Done():
@@ -290,6 +290,15 @@ func (p *PacketTunnelTransport) failedChannel(
 		p.channelTunnel = nil
 	}
 	p.channelMutex.Unlock()
+
+	// Try to establish a new channel within the current tunnel. If this
+	// fails, a port forward failure probe will be triggered which will
+	// ultimately trigger an SSH keep alive probe.
+	//
+	// One case where this is necessary is when the server closes an idle
+	// packet tunnel port forward for a live SSH tunnel.
+
+	p.UseTunnel(channelTunnel)
 }
 
 func (p *PacketTunnelTransport) monitor() {