فهرست منبع

Add OSL session cache

- Enables clients that disconnect from and
  reconnect to the same server in a short
  amount of time to carry OSL progress over
  between tunnels
- When clients disconnect, temporarily retain
  OSL seed state progress in a cache
- When clients connect, check in cache for
  a seed state to resume instead of starting
  fresh
Rod Hynes 9 سال پیش
والد
کامیت
c9f3caee71
2 فایل تغییر یافته به همراه 94 افزوده شده و 1 حذف شده
  1. 34 1
      psiphon/common/osl/osl.go
  2. 60 0
      psiphon/server/tunnelServer.go

+ 34 - 1
psiphon/common/osl/osl.go

@@ -189,9 +189,9 @@ type KeySplit struct {
 // across all schemes the client qualifies for.
 // across all schemes the client qualifies for.
 type ClientSeedState struct {
 type ClientSeedState struct {
 	propagationChannelID string
 	propagationChannelID string
-	signalIssueSLOKs     chan struct{}
 	seedProgress         []*ClientSeedProgress
 	seedProgress         []*ClientSeedProgress
 	mutex                sync.Mutex
 	mutex                sync.Mutex
+	signalIssueSLOKs     chan struct{}
 	issuedSLOKs          map[string]*SLOK
 	issuedSLOKs          map[string]*SLOK
 	payloadSLOKs         []*SLOK
 	payloadSLOKs         []*SLOK
 }
 }
@@ -400,6 +400,36 @@ func (config *Config) NewClientSeedState(
 	return state
 	return state
 }
 }
 
 
+// Hibernate clears references to short-lived objects (currently,
+// signalIssueSLOKs) so that a ClientSeedState can be stored for
+// later resumption without blocking garbage collection of the
+// short-lived objects.
+//
+// The ClientSeedState will still hold references to its Config;
+// the caller is responsible for discarding hibernated seed states
+// when the config changes.
+//
+// The caller should ensure that all ClientSeedPortForwards
+// associated with this ClientSeedState are closed before
+// hibernation.
+func (state *ClientSeedState) Hibernate() {
+	state.mutex.Lock()
+	defer state.mutex.Unlock()
+
+	state.signalIssueSLOKs = nil
+}
+
+// Resume resumes a hibernated ClientSeedState by resetting the required
+// objects (currently, signalIssueSLOKs) cleared by Hibernate.
+func (state *ClientSeedState) Resume(
+	signalIssueSLOKs chan struct{}) {
+
+	state.mutex.Lock()
+	defer state.mutex.Unlock()
+
+	state.signalIssueSLOKs = signalIssueSLOKs
+}
+
 // NewClientSeedPortForwardState creates a new client port forward
 // NewClientSeedPortForwardState creates a new client port forward
 // traffic progress tracker. Port forward progress reported to the
 // traffic progress tracker. Port forward progress reported to the
 // ClientSeedPortForward is added to seed state progress for all
 // ClientSeedPortForward is added to seed state progress for all
@@ -453,6 +483,9 @@ func (state *ClientSeedState) NewClientSeedPortForward(
 }
 }
 
 
 func (state *ClientSeedState) sendIssueSLOKsSignal() {
 func (state *ClientSeedState) sendIssueSLOKsSignal() {
+	state.mutex.Lock()
+	defer state.mutex.Unlock()
+
 	if state.signalIssueSLOKs != nil {
 	if state.signalIssueSLOKs != nil {
 		select {
 		select {
 		case state.signalIssueSLOKs <- *new(struct{}):
 		case state.signalIssueSLOKs <- *new(struct{}):

+ 60 - 0
psiphon/server/tunnelServer.go

@@ -34,6 +34,7 @@ import (
 	"time"
 	"time"
 
 
 	"github.com/Psiphon-Inc/crypto/ssh"
 	"github.com/Psiphon-Inc/crypto/ssh"
+	cache "github.com/Psiphon-Inc/go-cache"
 	"github.com/Psiphon-Inc/goarista/monotime"
 	"github.com/Psiphon-Inc/goarista/monotime"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/osl"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/osl"
@@ -47,6 +48,7 @@ const (
 	SSH_TCP_PORT_FORWARD_QUEUE_SIZE       = 1024
 	SSH_TCP_PORT_FORWARD_QUEUE_SIZE       = 1024
 	SSH_SEND_OSL_INITIAL_RETRY_DELAY      = 30 * time.Second
 	SSH_SEND_OSL_INITIAL_RETRY_DELAY      = 30 * time.Second
 	SSH_SEND_OSL_RETRY_FACTOR             = 2
 	SSH_SEND_OSL_RETRY_FACTOR             = 2
+	OSL_SESSION_CACHE_TTL                 = 5 * time.Minute
 )
 )
 
 
 // TunnelServer is the main server that accepts Psiphon client
 // TunnelServer is the main server that accepts Psiphon client
@@ -242,6 +244,8 @@ type sshServer struct {
 	stoppingClients      bool
 	stoppingClients      bool
 	acceptedClientCounts map[string]map[string]int64
 	acceptedClientCounts map[string]map[string]int64
 	clients              map[string]*sshClient
 	clients              map[string]*sshClient
+	oslSessionCacheMutex sync.Mutex
+	oslSessionCache      *cache.Cache
 }
 }
 
 
 func newSSHServer(
 func newSSHServer(
@@ -259,6 +263,19 @@ func newSSHServer(
 		return nil, common.ContextError(err)
 		return nil, common.ContextError(err)
 	}
 	}
 
 
+	// The OSL session cache temporarily retains OSL seed state
+	// progress for disconnected clients. This enables clients
+	// that disconnect and immediately reconnect to the same
+	// server to resume their OSL progress. Cached progress
+	// is referenced by session ID and is retained for
+	// OSL_SESSION_CACHE_TTL after disconnect.
+	//
+	// Note: session IDs are assumed to be unpredictable. If a
+	// rogue client could guess the session ID of another client,
+	// it could resume its OSL progress and, if the OSL config
+	// were known, infer some activity.
+	oslSessionCache := cache.New(OSL_SESSION_CACHE_TTL, 1*time.Minute)
+
 	return &sshServer{
 	return &sshServer{
 		support:              support,
 		support:              support,
 		establishTunnels:     1,
 		establishTunnels:     1,
@@ -266,6 +283,7 @@ func newSSHServer(
 		sshHostKey:           signer,
 		sshHostKey:           signer,
 		acceptedClientCounts: make(map[string]map[string]int64),
 		acceptedClientCounts: make(map[string]map[string]int64),
 		clients:              make(map[string]*sshClient),
 		clients:              make(map[string]*sshClient),
+		oslSessionCache:      oslSessionCache,
 	}, nil
 	}, nil
 }
 }
 
 
@@ -570,6 +588,13 @@ func (sshServer *sshServer) resetAllClientTrafficRules() {
 
 
 func (sshServer *sshServer) resetAllClientOSLConfigs() {
 func (sshServer *sshServer) resetAllClientOSLConfigs() {
 
 
+	// Flush cached seed state. This has the same effect
+	// and same limitations as calling setOSLConfig for
+	// currently connected clients -- all progress is lost.
+	sshServer.oslSessionCacheMutex.Lock()
+	sshServer.oslSessionCache.Flush()
+	sshServer.oslSessionCacheMutex.Unlock()
+
 	sshServer.clientsMutex.Lock()
 	sshServer.clientsMutex.Lock()
 	clients := make(map[string]*sshClient)
 	clients := make(map[string]*sshClient)
 	for sessionID, client := range sshServer.clients {
 	for sessionID, client := range sshServer.clients {
@@ -847,6 +872,22 @@ func (sshClient *sshClient) run(clientConn net.Conn) {
 
 
 	sshClient.logTunnel()
 	sshClient.logTunnel()
 
 
+	// Transfer OSL seed state -- the OSL progress -- from the closing
+	// client to the session cache so the client can resume its progress
+	// if it reconnects to this same server.
+	// Note: following setOSLConfig order of locking.
+
+	sshClient.Lock()
+	if sshClient.oslClientSeedState != nil {
+		sshClient.sshServer.oslSessionCacheMutex.Lock()
+		sshClient.oslClientSeedState.Hibernate()
+		sshClient.sshServer.oslSessionCache.Set(
+			sshClient.sessionID, sshClient.oslClientSeedState, cache.DefaultExpiration)
+		sshClient.sshServer.oslSessionCacheMutex.Unlock()
+		sshClient.oslClientSeedState = nil
+	}
+	sshClient.Unlock()
+
 	// Initiate cleanup of the GeoIP session cache. To allow for post-tunnel
 	// Initiate cleanup of the GeoIP session cache. To allow for post-tunnel
 	// final status requests, the lifetime of cached GeoIP records exceeds the
 	// final status requests, the lifetime of cached GeoIP records exceeds the
 	// lifetime of the sshClient.
 	// lifetime of the sshClient.
@@ -1417,6 +1458,25 @@ func (sshClient *sshClient) setOSLConfig() {
 		return
 		return
 	}
 	}
 
 
+	// Use a cached seed state if one is found for the client's
+	// session ID. This enables resuming progress made in a previous
+	// tunnel.
+	// Note: go-cache is already concurrency safe; the additional mutex
+	// is necessary to guarantee that Get/Delete is atomic; although in
+	// practice no two concurrent clients should ever supply the same
+	// session ID.
+
+	sshClient.sshServer.oslSessionCacheMutex.Lock()
+	oslClientSeedState, found := sshClient.sshServer.oslSessionCache.Get(sshClient.sessionID)
+	if found {
+		sshClient.sshServer.oslSessionCache.Delete(sshClient.sessionID)
+		sshClient.sshServer.oslSessionCacheMutex.Unlock()
+		sshClient.oslClientSeedState = oslClientSeedState.(*osl.ClientSeedState)
+		sshClient.oslClientSeedState.Resume(sshClient.signalIssueSLOKs)
+		return
+	}
+	sshClient.sshServer.oslSessionCacheMutex.Unlock()
+
 	// Two limitations when setOSLConfig() is invoked due to an
 	// Two limitations when setOSLConfig() is invoked due to an
 	// OSL config hot reload:
 	// OSL config hot reload:
 	//
 	//