Просмотр исходного кода

Download OSLs (in progress)
- Refactor remote server list code to
handle classic “common” RSL and new
OSL files. Like the common RSL, the
OSL fetch is triggered after failure
to connect (and runs in parallel).
In addition, the OSL fetch is triggered
when a new SLOK is received.
- Log remote server list download
completed events. These logs use
the same persist-until-reported logic
as tunnel stat logs.

Rod Hynes 9 лет назад
Родитель
Commit
efeb14c412
9 измененных файлов с 623 добавлено и 242 удалено
  1. 16 2
      psiphon/common/osl/osl.go
  2. 37 4
      psiphon/config.go
  3. 83 50
      psiphon/controller.go
  4. 154 99
      psiphon/dataStore.go
  5. 1 1
      psiphon/net.go
  6. 6 6
      psiphon/notice.go
  7. 249 46
      psiphon/remoteServerList.go
  8. 70 28
      psiphon/serverApi.go
  9. 7 6
      psiphon/tunnel.go

+ 16 - 2
psiphon/common/osl/osl.go

@@ -41,6 +41,7 @@ import (
 	"net"
 	"net/url"
 	"path"
+	"path/filepath"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -910,12 +911,12 @@ func divideKeyWithSeedSpecSLOKs(
 	}, nil
 }
 
-// GetDirectoryURL returns the URL for an OSL directory. Clients
+// GetOSLDirectoryURL returns the URL for an OSL directory. Clients
 // call this when fetching the directory from out-of-band
 // distribution sites.
 // Clients are responsible for tracking whether the remote file has
 // changed or not before downloading.
-func GetDirectoryURL(baseURL string) string {
+func GetOSLDirectoryURL(baseURL string) string {
 	u, err := url.Parse(baseURL)
 	if err != nil {
 		return ""
@@ -924,6 +925,12 @@ func GetDirectoryURL(baseURL string) string {
 	return u.String()
 }
 
+// GetOSLDirectoryFilename returns an appropriate filename for
+// the resumable download destination for the OSL directory.
+func GetOSLDirectoryFilename(baseDirectory string) string {
+	return filepath.Join(baseDirectory, DIRECTORY_FILENAME)
+}
+
 // GetOSLFileURL returns the URL for an OSL file. Once the client
 // has determined, from GetSeededOSLIDs, which OSLs it has sufficiently
 // seeded, it calls this to fetch the OSLs for download and decryption.
@@ -939,6 +946,13 @@ func GetOSLFileURL(baseURL string, oslID []byte) string {
 	return u.String()
 }
 
+// GetOSLFilename returns an appropriate filename for the resumable
+// download destination for the OSL file.
+func GetOSLFilename(baseDirectory string, oslID []byte) string {
+	return filepath.Join(
+		baseDirectory, fmt.Sprintf(OSL_FILENAME_FORMAT, hex.EncodeToString(oslID)))
+}
+
 // LoadDirectory authenticates the signed directory package -- which is the
 // contents of the paved directory file. It then returns the directory data.
 // Clients call this to process downloaded directory files.

+ 37 - 4
psiphon/config.go

@@ -67,7 +67,7 @@ const (
 	PSIPHON_API_STATUS_REQUEST_PADDING_MAX_BYTES         = 256
 	PSIPHON_API_CONNECTED_REQUEST_PERIOD                 = 24 * time.Hour
 	PSIPHON_API_CONNECTED_REQUEST_RETRY_PERIOD           = 5 * time.Second
-	PSIPHON_API_TUNNEL_STATS_MAX_COUNT                   = 100
+	PSIPHON_API_PERSISTENT_STATS_MAX_COUNT               = 100
 	PSIPHON_API_CLIENT_VERIFICATION_REQUEST_RETRY_PERIOD = 5 * time.Second
 	PSIPHON_API_CLIENT_VERIFICATION_REQUEST_MAX_RETRIES  = 10
 	FETCH_ROUTES_TIMEOUT_SECONDS                         = 60
@@ -123,9 +123,7 @@ type Config struct {
 	// RemoteServerListDownloadFilename specifies a target filename for
 	// storing the remote server list download. Data is stored in co-located
 	// files (RemoteServerListDownloadFilename.part*) to allow for resumable
-	// downloading. If not specified, the default is to use the
-	// remote object name as the filename, stored in the current working
-	// directory.
+	// downloading.
 	RemoteServerListDownloadFilename string
 
 	// RemoteServerListSignaturePublicKey specifies a public key that's
@@ -134,6 +132,18 @@ type Config struct {
 	// typically embedded in the client binary.
 	RemoteServerListSignaturePublicKey string
 
+	// ObfuscatedServerListRootURL is a URL which specifies the root location
+	// from which to fetch obfuscated server list files.
+	// This value is supplied by and depends on the Psiphon Network, and is
+	// typically embedded in the client binary.
+	ObfuscatedServerListRootURL string
+
+	// ObfuscatedServerListDownloadDirectory specifies a target directory for
+	// storing the obfuscated remote server list downloads. Data is stored in
+	// co-located files (<OSL filename>.part*) to allow for resumable
+	// downloading.
+	ObfuscatedServerListDownloadDirectory string
+
 	// ClientVersion is the client version number that the client reports
 	// to the server. The version number refers to the host client application,
 	// not the core tunnel library. One purpose of this value is to enable
@@ -496,6 +506,29 @@ func LoadConfig(configJson []byte) (*Config, error) {
 			"UpgradeDownloadUrl requires UpgradeDownloadClientVersionHeader and UpgradeDownloadFilename"))
 	}
 
+	if !config.DisableRemoteServerListFetcher {
+
+		if config.RemoteServerListSignaturePublicKey == "" {
+			return nil, common.ContextError(errors.New("missing RemoteServerListSignaturePublicKey"))
+		}
+
+		if config.RemoteServerListUrl == "" {
+			return nil, common.ContextError(errors.New("missing RemoteServerListUrl"))
+		}
+
+		if config.RemoteServerListDownloadFilename == "" {
+			return nil, common.ContextError(errors.New("missing RemoteServerListDownloadFilename"))
+		}
+
+		if config.ObfuscatedServerListRootURL == "" {
+			return nil, common.ContextError(errors.New("missing ObfuscatedServerListRootURL"))
+		}
+
+		if config.ObfuscatedServerListDownloadDirectory == "" {
+			return nil, common.ContextError(errors.New("missing ObfuscatedServerListDownloadDirectory"))
+		}
+	}
+
 	if config.TunnelConnectTimeoutSeconds == nil {
 		defaultTunnelConnectTimeoutSeconds := TUNNEL_CONNECT_TIMEOUT_SECONDS
 		config.TunnelConnectTimeoutSeconds = &defaultTunnelConnectTimeoutSeconds

+ 83 - 50
psiphon/controller.go

@@ -39,32 +39,33 @@ import (
 // connect to; establishes and monitors tunnels; and runs local proxies which
 // route traffic through the tunnels.
 type Controller struct {
-	config                         *Config
-	sessionId                      string
-	componentFailureSignal         chan struct{}
-	shutdownBroadcast              chan struct{}
-	runWaitGroup                   *sync.WaitGroup
-	establishedTunnels             chan *Tunnel
-	failedTunnels                  chan *Tunnel
-	tunnelMutex                    sync.Mutex
-	establishedOnce                bool
-	tunnels                        []*Tunnel
-	nextTunnel                     int
-	startedConnectedReporter       bool
-	isEstablishing                 bool
-	establishWaitGroup             *sync.WaitGroup
-	stopEstablishingBroadcast      chan struct{}
-	candidateServerEntries         chan *candidateServerEntry
-	establishPendingConns          *common.Conns
-	untunneledPendingConns         *common.Conns
-	untunneledDialConfig           *DialConfig
-	splitTunnelClassifier          *SplitTunnelClassifier
-	signalFetchRemoteServerList    chan struct{}
-	signalDownloadUpgrade          chan string
-	impairedProtocolClassification map[string]int
-	signalReportConnected          chan struct{}
-	serverAffinityDoneBroadcast    chan struct{}
-	newClientVerificationPayload   chan string
+	config                            *Config
+	sessionId                         string
+	componentFailureSignal            chan struct{}
+	shutdownBroadcast                 chan struct{}
+	runWaitGroup                      *sync.WaitGroup
+	establishedTunnels                chan *Tunnel
+	failedTunnels                     chan *Tunnel
+	tunnelMutex                       sync.Mutex
+	establishedOnce                   bool
+	tunnels                           []*Tunnel
+	nextTunnel                        int
+	startedConnectedReporter          bool
+	isEstablishing                    bool
+	establishWaitGroup                *sync.WaitGroup
+	stopEstablishingBroadcast         chan struct{}
+	candidateServerEntries            chan *candidateServerEntry
+	establishPendingConns             *common.Conns
+	untunneledPendingConns            *common.Conns
+	untunneledDialConfig              *DialConfig
+	splitTunnelClassifier             *SplitTunnelClassifier
+	signalFetchCommonRemoteServerList chan struct{}
+	signalFetchObfuscatedServerLists  chan struct{}
+	signalDownloadUpgrade             chan string
+	impairedProtocolClassification    map[string]int
+	signalReportConnected             chan struct{}
+	serverAffinityDoneBroadcast       chan struct{}
+	newClientVerificationPayload      chan string
 }
 
 type candidateServerEntry struct {
@@ -133,9 +134,10 @@ func NewController(config *Config) (controller *Controller, err error) {
 		// TODO: Add a buffer of 1 so we don't miss a signal while receiver is
 		// starting? Trade-off is potential back-to-back fetch remotes. As-is,
 		// establish will eventually signal another fetch remote.
-		signalFetchRemoteServerList: make(chan struct{}),
-		signalDownloadUpgrade:       make(chan string),
-		signalReportConnected:       make(chan struct{}),
+		signalFetchCommonRemoteServerList: make(chan struct{}),
+		signalFetchObfuscatedServerLists:  make(chan struct{}),
+		signalDownloadUpgrade:             make(chan string),
+		signalReportConnected:             make(chan struct{}),
 		// Buffer allows SetClientVerificationPayloadForActiveTunnels to submit one
 		// new payload without blocking or dropping it.
 		newClientVerificationPayload: make(chan string, 1),
@@ -183,8 +185,25 @@ func (controller *Controller) Run(shutdownBroadcast <-chan struct{}) {
 	defer httpProxy.Close()
 
 	if !controller.config.DisableRemoteServerListFetcher {
+
+		retryPeriod := time.Duration(
+			*controller.config.FetchRemoteServerListRetryPeriodSeconds) * time.Second
+
 		controller.runWaitGroup.Add(1)
-		go controller.remoteServerListFetcher()
+		go controller.remoteServerListFetcher(
+			"common",
+			FetchCommonRemoteServerList,
+			controller.signalFetchCommonRemoteServerList,
+			retryPeriod,
+			FETCH_REMOTE_SERVER_LIST_STALE_PERIOD)
+
+		controller.runWaitGroup.Add(1)
+		go controller.remoteServerListFetcher(
+			"obfuscated",
+			FetchObfuscatedServerLists,
+			controller.signalFetchObfuscatedServerLists,
+			retryPeriod,
+			FETCH_REMOTE_SERVER_LIST_STALE_PERIOD)
 	}
 
 	if controller.config.UpgradeDownloadUrl != "" &&
@@ -279,17 +298,13 @@ func (controller *Controller) SetClientVerificationPayloadForActiveTunnels(clien
 // remoteServerListFetcher fetches an out-of-band list of server entries
 // for more tunnel candidates. It fetches when signalled, with retries
 // on failure.
-func (controller *Controller) remoteServerListFetcher() {
-	defer controller.runWaitGroup.Done()
+func (controller *Controller) remoteServerListFetcher(
+	name string,
+	fetcher RemoteServerListFetcher,
+	signal <-chan struct{},
+	retryPeriod, stalePeriod time.Duration) {
 
-	if controller.config.RemoteServerListUrl == "" {
-		NoticeAlert("remote server list URL is blank")
-		return
-	}
-	if controller.config.RemoteServerListSignaturePublicKey == "" {
-		NoticeAlert("remote server list signature public key blank")
-		return
-	}
+	defer controller.runWaitGroup.Done()
 
 	var lastFetchTime monotime.Time
 
@@ -297,7 +312,7 @@ fetcherLoop:
 	for {
 		// Wait for a signal before fetching
 		select {
-		case <-controller.signalFetchRemoteServerList:
+		case <-signal:
 		case <-controller.shutdownBroadcast:
 			break fetcherLoop
 		}
@@ -305,7 +320,7 @@ fetcherLoop:
 		// Skip fetch entirely (i.e., send no request at all, even when ETag would save
 		// on response size) when a recent fetch was successful
 		if lastFetchTime != 0 &&
-			lastFetchTime.Add(FETCH_REMOTE_SERVER_LIST_STALE_PERIOD).After(monotime.Now()) {
+			lastFetchTime.Add(stalePeriod).After(monotime.Now()) {
 			continue
 		}
 
@@ -323,7 +338,7 @@ fetcherLoop:
 			// no active tunnel, the untunneledDialConfig will be used.
 			tunnel := controller.getNextActiveTunnel()
 
-			err := FetchRemoteServerList(
+			err := fetcher(
 				controller.config,
 				tunnel,
 				controller.untunneledDialConfig)
@@ -333,10 +348,9 @@ fetcherLoop:
 				break retryLoop
 			}
 
-			NoticeAlert("failed to fetch remote server list: %s", err)
+			NoticeAlert("failed to fetch %s remote server list: %s", name, err)
 
-			timeout := time.After(
-				time.Duration(*controller.config.FetchRemoteServerListRetryPeriodSeconds) * time.Second)
+			timeout := time.After(retryPeriod)
 			select {
 			case <-timeout:
 			case <-controller.shutdownBroadcast:
@@ -345,7 +359,7 @@ fetcherLoop:
 		}
 	}
 
-	NoticeInfo("exiting remote server list fetcher")
+	NoticeInfo("exiting %s remote server list fetcher", name)
 }
 
 // establishTunnelWatcher terminates the controller if a tunnel
@@ -711,6 +725,17 @@ func (controller *Controller) isImpairedProtocol(protocol string) bool {
 	return ok && count >= IMPAIRED_PROTOCOL_CLASSIFICATION_THRESHOLD
 }
 
+// SignalSeededNewSLOK implements the TunnelOwner interface. This function
+// is called by Tunnel.operateTunnel when the tunnel has received a new,
+// previously unknown SLOK from the server. The Controller triggers an OSL
+// fetch, as the new SLOK may be sufficient to access new OSLs.
+func (controller *Controller) SignalSeededNewSLOK() {
+	select {
+	case controller.signalFetchObfuscatedServerLists <- *new(struct{}):
+	default:
+	}
+}
+
 // SignalTunnelFailure implements the TunnelOwner interface. This function
 // is called by Tunnel.operateTunnel when the tunnel has detected that it
 // has failed. The Controller will signal runTunnels to create a new
@@ -1105,15 +1130,23 @@ loop:
 		// Free up resources now, but don't reset until after the pause.
 		iterator.Close()
 
-		// Trigger a fetch remote server list, since we may have failed to
-		// connect with all known servers. Don't block sending signal, since
+		// Trigger a common remote server list fetch, since we may have failed
+		// to connect with all known servers. Don't block sending signal, since
 		// this signal may have already been sent.
 		// Don't wait for fetch remote to succeed, since it may fail and
 		// enter a retry loop and we're better off trying more known servers.
 		// TODO: synchronize the fetch response, so it can be incorporated
 		// into the server entry iterator as soon as available.
 		select {
-		case controller.signalFetchRemoteServerList <- *new(struct{}):
+		case controller.signalFetchCommonRemoteServerList <- *new(struct{}):
+		default:
+		}
+
+		// Trigger an OSL fetch in parallel. Both fetches are run in parallel
+		// so that if one out of the common RSL and OSL set is large, it
+		// doesn't entirely block fetching the other.
+		select {
+		case controller.signalFetchObfuscatedServerLists <- *new(struct{}):
 		default:
 		}
 

+ 154 - 99
psiphon/dataStore.go

@@ -58,10 +58,18 @@ const (
 	urlETagsBucket              = "urlETags"
 	keyValueBucket              = "keyValues"
 	tunnelStatsBucket           = "tunnelStats"
+	remoteServerListStatsBucket = "remoteServerListStats"
 	slokBucket                  = "SLOKs"
 	rankedServerEntryCount      = 100
 )
 
+const (
+	DATA_STORE_LAST_CONNECTED_KEY           = "lastConnected"
+	DATA_STORE_OSL_DIRECTORY_KEY            = "OSLDirectory"
+	PERSISTENT_STAT_TYPE_TUNNEL             = tunnelStatsBucket
+	PERSISTENT_STAT_TYPE_REMOTE_SERVER_LIST = remoteServerListStatsBucket
+)
+
 var singleton dataStore
 
 // InitDataStore initializes the singleton instance of dataStore. This
@@ -105,6 +113,7 @@ func InitDataStore(config *Config) (err error) {
 				urlETagsBucket,
 				keyValueBucket,
 				tunnelStatsBucket,
+				remoteServerListStatsBucket,
 				slokBucket,
 			}
 			for _, bucket := range requiredBuckets {
@@ -139,7 +148,7 @@ func InitDataStore(config *Config) (err error) {
 			migrateEntries(migratableServerEntries, filepath.Join(config.DataStoreDirectory, LEGACY_DATA_STORE_FILENAME))
 		}
 
-		resetAllTunnelStatsToUnreported()
+		resetAllPersistentStatsToUnreported()
 	})
 
 	return err
@@ -806,196 +815,241 @@ func GetKeyValue(key string) (value string, err error) {
 	return value, nil
 }
 
-// Tunnel stats records in the tunnelStatsStateUnreported
+// Persistent stat records in the persistentStatStateUnreported
 // state are available for take out.
-// Records in the tunnelStatsStateReporting have been
-// taken out and are pending either deleting (for a
-// successful request) or change to StateUnreported (for
-// a failed request).
-// All tunnel stats records are reverted to StateUnreported
+//
+// Records in the persistentStatStateReporting have been taken
+// out and are pending either deletion (for a successful request)
+// or change to StateUnreported (for a failed request).
+//
+// All persistent stat records are reverted to StateUnreported
 // when the datastore is initialized at start up.
 
-var tunnelStatsStateUnreported = []byte("0")
-var tunnelStatsStateReporting = []byte("1")
+var persistentStatStateUnreported = []byte("0")
+var persistentStatStateReporting = []byte("1")
 
-// StoreTunnelStats adds a new tunnel stats record, which is
-// set to StateUnreported and is an immediate candidate for
+var persistentStatTypes = []string{
+	PERSISTENT_STAT_TYPE_REMOTE_SERVER_LIST,
+	PERSISTENT_STAT_TYPE_TUNNEL,
+}
+
+// StorePersistentStats adds a new persistent stat record, which
+// is set to StateUnreported and is an immediate candidate for
 // reporting.
-// tunnelStats is a JSON byte array containing fields as
-// required by the Psiphon server API (see RecordTunnelStats).
-// It's assumed that the JSON value contains enough unique
-// information for the value to function as a key in the
-// key/value datastore. This assumption is currently satisfied
-// by the fields sessionId + tunnelNumber.
-func StoreTunnelStats(tunnelStats []byte) error {
+//
+// The stat is a JSON byte array containing fields as
+// required by the Psiphon server API. It's assumed that the
+// JSON value contains enough unique information for the value to
+// function as a key in the key/value datastore. This assumption
+// is currently satisfied by the fields sessionId + tunnelNumber
+// for tunnel stats, and URL + ETag for remote server list stats.
+func StorePersistentStat(statType string, stat []byte) error {
 	checkInitDataStore()
 
+	if !common.Contains(persistentStatTypes, statType) {
+		return common.ContextError(fmt.Errorf("invalid persistent stat type: %s", statType))
+	}
+
 	err := singleton.db.Update(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(tunnelStatsBucket))
-		err := bucket.Put(tunnelStats, tunnelStatsStateUnreported)
+		bucket := tx.Bucket([]byte(statType))
+		err := bucket.Put(stat, persistentStatStateUnreported)
 		return err
 	})
 
 	if err != nil {
 		return common.ContextError(err)
 	}
+
 	return nil
 }
 
-// CountUnreportedTunnelStats returns the number of tunnel
-// stats records in StateUnreported.
-func CountUnreportedTunnelStats() int {
+// CountUnreportedPersistentStats returns the number of persistent
+// stat records in StateUnreported.
+func CountUnreportedPersistentStats() int {
 	checkInitDataStore()
 
 	unreported := 0
 
 	err := singleton.db.Update(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(tunnelStatsBucket))
-		cursor := bucket.Cursor()
-		for key, value := cursor.First(); key != nil; key, value = cursor.Next() {
-			if 0 == bytes.Compare(value, tunnelStatsStateUnreported) {
-				unreported++
-				break
+
+		for _, statType := range persistentStatTypes {
+
+			bucket := tx.Bucket([]byte(statType))
+			cursor := bucket.Cursor()
+			for key, value := cursor.First(); key != nil; key, value = cursor.Next() {
+				if 0 == bytes.Compare(value, persistentStatStateUnreported) {
+					unreported++
+					break
+				}
 			}
 		}
 		return nil
 	})
 
 	if err != nil {
-		NoticeAlert("CountUnreportedTunnelStats failed: %s", err)
+		NoticeAlert("CountUnreportedPersistentStats failed: %s", err)
 		return 0
 	}
 
 	return unreported
 }
 
-// TakeOutUnreportedTunnelStats returns up to maxCount tunnel
-// stats records that are in StateUnreported. The records are set
-// to StateReporting. If the records are successfully reported,
-// clear them with ClearReportedTunnelStats. If the records are
-// not successfully reported, restore them with
-// PutBackUnreportedTunnelStats.
-func TakeOutUnreportedTunnelStats(maxCount int) ([][]byte, error) {
+// TakeOutUnreportedPersistentStats returns up to maxCount persistent
+// stats records that are in StateUnreported. The records are set to
+// StateReporting. If the records are successfully reported, clear them
+// with ClearReportedPersistentStats. If the records are not successfully
+// reported, restore them with PutBackUnreportedPersistentStats.
+func TakeOutUnreportedPersistentStats(maxCount int) (map[string][][]byte, error) {
 	checkInitDataStore()
 
-	tunnelStats := make([][]byte, 0)
+	stats := make(map[string][][]byte)
 
 	err := singleton.db.Update(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(tunnelStatsBucket))
-		cursor := bucket.Cursor()
-		for key, value := cursor.First(); key != nil; key, value = cursor.Next() {
 
-			// Perform a test JSON unmarshaling. In case of data corruption or a bug,
-			// skip the record.
-			var jsonData interface{}
-			err := json.Unmarshal(key, &jsonData)
-			if err != nil {
-				NoticeAlert(
-					"Invalid key in TakeOutUnreportedTunnelStats: %s: %s",
-					string(key), err)
-				continue
-			}
+		count := 0
 
-			if 0 == bytes.Compare(value, tunnelStatsStateUnreported) {
-				// Must make a copy as slice is only valid within transaction.
-				data := make([]byte, len(key))
-				copy(data, key)
-				tunnelStats = append(tunnelStats, data)
-				if len(tunnelStats) >= maxCount {
+		for _, statType := range persistentStatTypes {
+
+			stats[statType] = make([][]byte, 0)
+
+			bucket := tx.Bucket([]byte(statType))
+			cursor := bucket.Cursor()
+			for key, value := cursor.First(); key != nil; key, value = cursor.Next() {
+
+				if count >= maxCount {
 					break
 				}
+
+				// Perform a test JSON unmarshaling. In case of data corruption or a bug,
+				// skip the record.
+				var jsonData interface{}
+				err := json.Unmarshal(key, &jsonData)
+				if err != nil {
+					NoticeAlert(
+						"Invalid key in TakeOutUnreportedPersistentStats: %s: %s",
+						string(key), err)
+					continue
+				}
+
+				if 0 == bytes.Compare(value, persistentStatStateUnreported) {
+					// Must make a copy as slice is only valid within transaction.
+					data := make([]byte, len(key))
+					copy(data, key)
+					stats[statType] = append(stats[statType], data)
+					count += 1
+				}
 			}
-		}
-		for _, key := range tunnelStats {
-			err := bucket.Put(key, tunnelStatsStateReporting)
-			if err != nil {
-				return err
+
+			for _, key := range stats[statType] {
+				err := bucket.Put(key, persistentStatStateReporting)
+				if err != nil {
+					return err
+				}
 			}
-		}
 
+		}
 		return nil
 	})
 
 	if err != nil {
 		return nil, common.ContextError(err)
 	}
-	return tunnelStats, nil
+
+	return stats, nil
 }
 
-// PutBackUnreportedTunnelStats restores a list of tunnel
-// stats records to StateUnreported.
-func PutBackUnreportedTunnelStats(tunnelStats [][]byte) error {
+// PutBackUnreportedPersistentStats restores a list of persistent
+// stat records to StateUnreported.
+func PutBackUnreportedPersistentStats(stats map[string][][]byte) error {
 	checkInitDataStore()
 
 	err := singleton.db.Update(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(tunnelStatsBucket))
-		for _, key := range tunnelStats {
-			err := bucket.Put(key, tunnelStatsStateUnreported)
-			if err != nil {
-				return err
+
+		for _, statType := range persistentStatTypes {
+
+			bucket := tx.Bucket([]byte(statType))
+			for _, key := range stats[statType] {
+				err := bucket.Put(key, persistentStatStateUnreported)
+				if err != nil {
+					return err
+				}
 			}
 		}
+
 		return nil
 	})
 
 	if err != nil {
 		return common.ContextError(err)
 	}
+
 	return nil
 }
 
-// ClearReportedTunnelStats deletes a list of tunnel
-// stats records that were succesdfully reported.
-func ClearReportedTunnelStats(tunnelStats [][]byte) error {
+// ClearReportedPersistentStats deletes a list of persistent
+// stat records that were successfully reported.
+func ClearReportedPersistentStats(stats map[string][][]byte) error {
 	checkInitDataStore()
 
 	err := singleton.db.Update(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(tunnelStatsBucket))
-		for _, key := range tunnelStats {
-			err := bucket.Delete(key)
-			if err != nil {
-				return err
+
+		for _, statType := range persistentStatTypes {
+
+			bucket := tx.Bucket([]byte(statType))
+			for _, key := range stats[statType] {
+				err := bucket.Delete(key)
+				if err != nil {
+					return err
+				}
 			}
 		}
+
 		return nil
 	})
 
 	if err != nil {
 		return common.ContextError(err)
 	}
+
 	return nil
 }
 
-// resetAllTunnelStatsToUnreported sets all tunnel
-// stats records to StateUnreported. This reset is called
-// when the datastore is initialized at start up, as we do
-// not know if tunnel records in StateReporting were reported
-// or not.
-func resetAllTunnelStatsToUnreported() error {
+// resetAllPersistentStatsToUnreported sets all persistent stat
+// records to StateUnreported. This reset is called when the
+// datastore is initialized at start up, as we do not know if
+// persistent records in StateReporting were reported or not.
+func resetAllPersistentStatsToUnreported() error {
 	checkInitDataStore()
 
 	err := singleton.db.Update(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(tunnelStatsBucket))
-		resetKeys := make([][]byte, 0)
-		cursor := bucket.Cursor()
-		for key, _ := cursor.First(); key != nil; key, _ = cursor.Next() {
-			resetKeys = append(resetKeys, key)
-		}
-		// TODO: data mutation is done outside cursor. Is this
-		// strictly necessary in this case?
-		// https://godoc.org/github.com/boltdb/bolt#Cursor
-		for _, key := range resetKeys {
-			err := bucket.Put(key, tunnelStatsStateUnreported)
-			if err != nil {
-				return err
+
+		for _, statType := range persistentStatTypes {
+
+			bucket := tx.Bucket([]byte(statType))
+			resetKeys := make([][]byte, 0)
+			cursor := bucket.Cursor()
+			for key, _ := cursor.First(); key != nil; key, _ = cursor.Next() {
+				resetKeys = append(resetKeys, key)
+			}
+			// TODO: data mutation is done outside cursor. Is this
+			// strictly necessary in this case? As is, this means
+			// all stats need to be loaded into memory at once.
+			// https://godoc.org/github.com/boltdb/bolt#Cursor
+			for _, key := range resetKeys {
+				err := bucket.Put(key, persistentStatStateUnreported)
+				if err != nil {
+					return err
+				}
 			}
 		}
+
 		return nil
 	})
 
 	if err != nil {
 		return common.ContextError(err)
 	}
+
 	return nil
 }
 
@@ -1014,6 +1068,7 @@ func DeleteSLOKs() error {
 	if err != nil {
 		return common.ContextError(err)
 	}
+
 	return nil
 }
 

+ 1 - 1
psiphon/net.go

@@ -373,7 +373,7 @@ func MakeDownloadHttpClient(
 // downloadFilename.part and downloadFilename.part.etag.
 // Any existing downloadFilename file will be overwritten.
 //
-// In the case where the remote object has change while a partial download
+// In the case where the remote object has changed while a partial download
 // is to be resumed, the partial state is reset and resumeDownload fails.
 // The caller must restart the download.
 //

+ 6 - 6
psiphon/notice.go

@@ -364,15 +364,15 @@ func NoticeExiting() {
 	outputNotice("Exiting", 0)
 }
 
-// NoticeRemoteServerListDownloadedBytes reports remote server list download progress.
-func NoticeRemoteServerListDownloadedBytes(bytes int64) {
-	outputNotice("RemoteServerListDownloadedBytes", noticeIsDiagnostic, "bytes", bytes)
+// NoticeRemoteServerListResourceDownloadedBytes reports remote server list download progress.
+func NoticeRemoteServerListResourceDownloadedBytes(url string, bytes int64) {
+	outputNotice("RemoteServerListResourceDownloadedBytes", noticeIsDiagnostic, "url", url, "bytes", bytes)
 }
 
-// NoticeRemoteServerListDownloaded indicates that a remote server list download
+// NoticeRemoteServerListResourceDownloaded indicates that a remote server list download
 // completed successfully.
-func NoticeRemoteServerListDownloaded(filename string) {
-	outputNotice("RemoteServerListDownloaded", noticeIsDiagnostic, "filename", filename)
+func NoticeRemoteServerListResourceDownloaded(url string) {
+	outputNotice("RemoteServerListResourceDownloaded", noticeIsDiagnostic, "url", url)
 }
 
 func NoticeClientVerificationRequestCompleted(ipAddress string) {

+ 249 - 46
psiphon/remoteServerList.go

@@ -21,116 +21,319 @@ package psiphon
 
 import (
 	"compress/zlib"
+	"errors"
+	"fmt"
 	"io/ioutil"
 	"os"
-	"strings"
 	"time"
 
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/osl"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 )
 
-// FetchRemoteServerList downloads a remote server list JSON record from
-// config.RemoteServerListUrl; validates its digital signature using the
-// public key config.RemoteServerListSignaturePublicKey; and parses the
+type RemoteServerListFetcher func(
+	config *Config, tunnel *Tunnel, untunneledDialConfig *DialConfig) error
+
+// FetchCommonRemoteServerList downloads the common remote server list from
+// config.RemoteServerListUrl. It validates its digital signature using the
+// public key config.RemoteServerListSignaturePublicKey and parses the
 // data field into ServerEntry records.
-func FetchRemoteServerList(
+// config.RemoteServerListDownloadFilename is the location to store the
+// download. As the download is resumed after failure, this filename must
+// be unique and persistent.
+func FetchCommonRemoteServerList(
 	config *Config,
 	tunnel *Tunnel,
 	untunneledDialConfig *DialConfig) error {
 
-	NoticeInfo("fetching remote server list")
-
-	// Select tunneled or untunneled configuration
+	NoticeInfo("fetching common remote server list")
 
-	httpClient, requestUrl, err := MakeDownloadHttpClient(
+	newETag, err := downloadRemoteServerListFile(
 		config,
 		tunnel,
 		untunneledDialConfig,
 		config.RemoteServerListUrl,
-		time.Duration(*config.FetchRemoteServerListTimeoutSeconds)*time.Second)
+		config.RemoteServerListDownloadFilename)
 	if err != nil {
-		return common.ContextError(err)
+		return fmt.Errorf("failed to download common remote server list: %s", common.ContextError(err))
+	}
+
+	// When the resource is unchanged, skip.
+	if newETag == "" {
+		return nil
 	}
 
-	// Proceed with download
+	serverListPayload, err := unpackRemoteServerListFile(config, config.RemoteServerListDownloadFilename)
+	if err != nil {
+		return fmt.Errorf("failed to unpack common remote server list: %s", common.ContextError(err))
+	}
 
-	downloadFilename := config.RemoteServerListDownloadFilename
-	if downloadFilename == "" {
-		splitPath := strings.Split(config.RemoteServerListUrl, "/")
-		downloadFilename = splitPath[len(splitPath)-1]
+	err = storeServerEntries(serverListPayload)
+	if err != nil {
+		return fmt.Errorf("failed to store common remote server list: %s", common.ContextError(err))
 	}
 
-	lastETag, err := GetUrlETag(config.RemoteServerListUrl)
+	// Now that the server entries are successfully imported, store the response
+	// ETag so we won't re-download this same data again.
+	err = SetUrlETag(config.RemoteServerListUrl, newETag)
 	if err != nil {
-		return common.ContextError(err)
+		NoticeAlert("failed to set ETag for common remote server list: %s", common.ContextError(err))
+		// This fetch is still reported as a success, even if we can't store the etag
+	}
+
+	return nil
+}
+
+// FetchObfuscatedServerLists downloads the obfuscated remote server lists
+// from config.ObfuscatedServerListRootURL.
+// It first downloads the OSL directory, and then downloads each seeded OSL
+// advertised in the directory. All downloads are resumable, ETags are used
+// to skip both an unchanged directory or unchanged OSL files, and when an
+// individual download fails, the fetch proceeds if it can.
+// Authenticated package digital signatures are validated using the
+// public key config.RemoteServerListSignaturePublicKey.
+// config.ObfuscatedServerListDownloadDirectory is the location to store the
+// downloaded files. As  downloads are resumed after failure, this directory
+// must be unique and persistent.
+// FetchObfuscatedServerLists downloads the obfuscated remote server lists
+// from config.ObfuscatedServerListRootURL.
+// It first downloads the OSL directory, and then downloads each seeded OSL
+// advertised in the directory. All downloads are resumable, ETags are used
+// to skip both an unchanged directory or unchanged OSL files, and when an
+// individual download fails, the fetch proceeds if it can.
+// Authenticated package digital signatures are validated using the
+// public key config.RemoteServerListSignaturePublicKey.
+// config.ObfuscatedServerListDownloadDirectory is the location to store the
+// downloaded files. As downloads are resumed after failure, this directory
+// must be unique and persistent.
+func FetchObfuscatedServerLists(
+	config *Config,
+	tunnel *Tunnel,
+	untunneledDialConfig *DialConfig) error {
+
+	NoticeInfo("fetching obfuscated remote server lists")
+
+	downloadFilename := osl.GetOSLDirectoryFilename(config.ObfuscatedServerListDownloadDirectory)
+	downloadURL := osl.GetOSLDirectoryURL(config.ObfuscatedServerListRootURL)
+
+	// failed is set if any operation fails and should trigger a retry. When the OSL directory
+	// fails to download, any cached directory is used instead; when any single OSL fails
+	// to download, the overall operation proceeds. So this flag records whether to report
+	// failure at the end when downloading has proceeded after a failure.
+	// TODO: should disk-full conditions not trigger retries?
+	var failed bool
+
+	var oslDirectoryPayload string
+
+	newETag, err := downloadRemoteServerListFile(
+		config,
+		tunnel,
+		untunneledDialConfig,
+		downloadURL,
+		downloadFilename)
+	if err != nil {
+		failed = true
+		NoticeAlert("failed to download obfuscated server list directory: %s", common.ContextError(err))
+	} else if newETag != "" {
+		oslDirectoryPayload, err = unpackRemoteServerListFile(config, downloadFilename)
+		if err != nil {
+			failed = true
+			NoticeAlert("failed to unpack obfuscated server list directory: %s", common.ContextError(err))
+		} else {
+			// Cache the directory only after it unpacks and validates successfully,
+			// so a corrupt download cannot clobber a previously cached good directory.
+			err = SetKeyValue(DATA_STORE_OSL_DIRECTORY_KEY, oslDirectoryPayload)
+			if err != nil {
+				failed = true
+				NoticeAlert("failed to set cached obfuscated server list directory: %s", common.ContextError(err))
+			}
+		}
+	}
+
+	if failed || newETag == "" {
+		// Proceed with the cached OSL directory.
+		oslDirectoryPayload, err = GetKeyValue(DATA_STORE_OSL_DIRECTORY_KEY)
+		if err != nil {
+			return fmt.Errorf("failed to get cached obfuscated server list directory: %s", common.ContextError(err))
+		}
+	}
+
+	// *TODO* fix double authenticated package unwrapping: make LoadDirectory take JSON string
+
+	oslDirectory, err := osl.LoadDirectory([]byte(oslDirectoryPayload), config.RemoteServerListSignaturePublicKey)
+	if err != nil {
+		return fmt.Errorf("failed to load obfuscated server list directory: %s", common.ContextError(err))
+	}
+
+	// When a new directory is downloaded, validated, and parsed, store the
+	// response ETag so we won't re-download this same data again. The ETag
+	// is keyed by the directory URL, which is what downloadRemoteServerListFile
+	// will look up on the next fetch.
+	if !failed && newETag != "" {
+		err = SetUrlETag(downloadURL, newETag)
+		if err != nil {
+			NoticeAlert("failed to set ETag for obfuscated server list directory: %s", common.ContextError(err))
+			// This fetch is still reported as a success, even if we can't store the etag
+		}
+	}
+
+	// Note: we proceed to check individual OSLs even if the directory is unchanged,
+	// as the set of local SLOKs may have changed.
+
+	oslIDs := oslDirectory.GetSeededOSLIDs(
+
+		// Lookup SLOKs in local datastore
+		func(slokID []byte) []byte {
+			key, err := GetSLOK(slokID)
+			if err != nil {
+				NoticeAlert("GetSLOK failed: %s", err)
+			}
+			return key
+		},
+
+		func(err error) {
+			NoticeAlert("GetSeededOSLIDs failed: %s", err)
+		})
+
+	for _, oslID := range oslIDs {
+		downloadFilename := osl.GetOSLFilename(config.ObfuscatedServerListDownloadDirectory, oslID)
+		downloadURL := osl.GetOSLFileURL(config.ObfuscatedServerListRootURL, oslID)
+
+		// *TODO* ETags in OSL directory to enable skipping request entirely
+
+		newETag, err := downloadRemoteServerListFile(
+			config,
+			tunnel,
+			untunneledDialConfig,
+			downloadURL,
+			downloadFilename)
+		if err != nil {
+			failed = true
+			NoticeAlert("failed to download obfuscated server list file (%s): %s", oslID, common.ContextError(err))
+			continue
+		}
+
+		// When the resource is unchanged, skip.
+		if newETag == "" {
+			continue
+		}
+
+		// *TODO* DecryptOSL; also, compress before encrypt?
+
+		serverListPayload, err := unpackRemoteServerListFile(config, downloadFilename)
+		if err != nil {
+			failed = true
+			NoticeAlert("failed to unpack obfuscated server list file (%s): %s", oslID, common.ContextError(err))
+			continue
+		}
+
+		err = storeServerEntries(serverListPayload)
+		if err != nil {
+			failed = true
+			NoticeAlert("failed to store obfuscated server list file (%s): %s", oslID, common.ContextError(err))
+			continue
+		}
+
+		// Now that the server entries are successfully imported, store the
+		// response ETag, keyed by this OSL file's URL, so we won't re-download
+		// this same data again.
+		err = SetUrlETag(downloadURL, newETag)
+		if err != nil {
+			NoticeAlert("failed to set ETag for obfuscated server list file (%s): %s", oslID, common.ContextError(err))
+			// This fetch is still reported as a success, even if we can't store the etag
+		}
+	}
+
+	if failed {
+		return errors.New("failed to fetch obfuscated remote server lists")
+	}
+	return nil
+}
+
+// downloadRemoteServerListFile downloads the source URL to
+// the destination file, performing a resumable download. When
+// the download completes and the file content has changed, the
+// new resource ETag is returned. Otherwise, blank is returned.
+// The caller is responsible for calling SetUrlETag once the file
+// content has been validated.
+func downloadRemoteServerListFile(
+	config *Config,
+	tunnel *Tunnel,
+	untunneledDialConfig *DialConfig,
+	sourceURL, destinationFilename string) (string, error) {
+
+	// MakeDownloadHttpClient will select either a tunneled
+	// or untunneled configuration.
+
+	httpClient, requestURL, err := MakeDownloadHttpClient(
+		config,
+		tunnel,
+		untunneledDialConfig,
+		sourceURL,
+		time.Duration(*config.FetchRemoteServerListTimeoutSeconds)*time.Second)
+	if err != nil {
+		return "", common.ContextError(err)
+	}
+
+	lastETag, err := GetUrlETag(sourceURL)
+	if err != nil {
+		return "", common.ContextError(err)
 	}
 
 	n, responseETag, err := ResumeDownload(
-		httpClient, requestUrl, downloadFilename, lastETag)
+		httpClient, requestURL, destinationFilename, lastETag)
 
-	NoticeRemoteServerListDownloadedBytes(n)
+	NoticeRemoteServerListResourceDownloadedBytes(sourceURL, n)
 
 	if err != nil {
-		return common.ContextError(err)
+		return "", common.ContextError(err)
 	}
 
 	if responseETag == lastETag {
-		// The remote server list is unchanged and no data was downloaded
-		return nil
+		return "", nil
 	}
 
-	NoticeRemoteServerListDownloaded(downloadFilename)
+	NoticeRemoteServerListResourceDownloaded(sourceURL)
+
+	RecordRemoteServerListStat(sourceURL, responseETag)
+
+	return responseETag, nil
+}
 
-	// The downloaded content is a zlib compressed authenticated
-	// data package containing a list of encoded server entries.
+// unpackRemoteServerListFile reads a file that contains a
+// zlib compressed authenticated data package, validates
+// the package, and returns the payload.
+func unpackRemoteServerListFile(
+	config *Config, filename string) (string, error) {
 
-	downloadContent, err := os.Open(downloadFilename)
+	fileReader, err := os.Open(filename)
 	if err != nil {
-		return common.ContextError(err)
+		return "", common.ContextError(err)
 	}
-	defer downloadContent.Close()
+	defer fileReader.Close()
 
-	zlibReader, err := zlib.NewReader(downloadContent)
+	zlibReader, err := zlib.NewReader(fileReader)
 	if err != nil {
-		return common.ContextError(err)
+		return "", common.ContextError(err)
 	}
 
 	dataPackage, err := ioutil.ReadAll(zlibReader)
 	zlibReader.Close()
 	if err != nil {
-		return common.ContextError(err)
+		return "", common.ContextError(err)
 	}
 
-	remoteServerList, err := common.ReadAuthenticatedDataPackage(
+	payload, err := common.ReadAuthenticatedDataPackage(
 		dataPackage, config.RemoteServerListSignaturePublicKey)
 	if err != nil {
-		return common.ContextError(err)
+		return "", common.ContextError(err)
 	}
 
+	return payload, nil
+}
+
+func storeServerEntries(serverList string) error {
+
 	serverEntries, err := DecodeAndValidateServerEntryList(
-		remoteServerList,
+		serverList,
 		common.GetCurrentTimestamp(),
 		protocol.SERVER_ENTRY_SOURCE_REMOTE)
 	if err != nil {
 		return common.ContextError(err)
 	}
 
+	// TODO: record stats for newly discovered servers
+
 	err = StoreServerEntries(serverEntries, true)
 	if err != nil {
 		return common.ContextError(err)
 	}
 
-	// Now that the server entries are successfully imported, store the response
-	// ETag so we won't re-download this same data again.
-
-	if responseETag != "" {
-		err := SetUrlETag(config.RemoteServerListUrl, responseETag)
-		if err != nil {
-			NoticeAlert("failed to set remote server list ETag: %s", common.ContextError(err))
-			// This fetch is still reported as a success, even if we can't store the etag
-		}
-	}
-
 	return nil
 }

+ 70 - 28
psiphon/serverApi.go

@@ -252,7 +252,6 @@ func (serverContext *ServerContext) DoConnectedRequest() error {
 
 	params := serverContext.getBaseParams()
 
-	const DATA_STORE_LAST_CONNECTED_KEY = "lastConnected"
 	lastConnected, err := GetKeyValue(DATA_STORE_LAST_CONNECTED_KEY)
 	if err != nil {
 		return common.ContextError(err)
@@ -397,25 +396,25 @@ func (serverContext *ServerContext) getStatusParams(isTunneled bool) requestJSON
 // either "clear" or "put back" status request payload data depending
 // on whether or not the request succeeded.
 type statusRequestPayloadInfo struct {
-	serverId      string
-	transferStats *transferstats.AccumulatedStats
-	tunnelStats   [][]byte
+	serverId        string
+	transferStats   *transferstats.AccumulatedStats
+	persistentStats map[string][][]byte
 }
 
 func makeStatusRequestPayload(
 	serverId string) ([]byte, *statusRequestPayloadInfo, error) {
 
 	transferStats := transferstats.TakeOutStatsForServer(serverId)
-	tunnelStats, err := TakeOutUnreportedTunnelStats(
-		PSIPHON_API_TUNNEL_STATS_MAX_COUNT)
+	persistentStats, err := TakeOutUnreportedPersistentStats(
+		PSIPHON_API_PERSISTENT_STATS_MAX_COUNT)
 	if err != nil {
 		NoticeAlert(
-			"TakeOutUnreportedTunnelStats failed: %s", common.ContextError(err))
-		tunnelStats = nil
+			"TakeOutUnreportedPersistentStats failed: %s", common.ContextError(err))
+		persistentStats = nil
 		// Proceed with transferStats only
 	}
 	payloadInfo := &statusRequestPayloadInfo{
-		serverId, transferStats, tunnelStats}
+		serverId, transferStats, persistentStats}
 
 	payload := make(map[string]interface{})
 
@@ -427,12 +426,19 @@ func makeStatusRequestPayload(
 	payload["page_views"] = make([]string, 0)
 	payload["https_requests"] = make([]string, 0)
 
-	// Tunnel stats records are already in JSON format
-	jsonTunnelStats := make([]json.RawMessage, len(tunnelStats))
-	for i, tunnelStatsRecord := range tunnelStats {
-		jsonTunnelStats[i] = json.RawMessage(tunnelStatsRecord)
+	persistentStatPayloadNames := make(map[string]string)
+	persistentStatPayloadNames[PERSISTENT_STAT_TYPE_TUNNEL] = "tunnel_stats"
+	persistentStatPayloadNames[PERSISTENT_STAT_TYPE_REMOTE_SERVER_LIST] = "remote_server_list"
+
+	for statType, stats := range persistentStats {
+
+		// Persistent stats records are already in JSON format
+		jsonStats := make([]json.RawMessage, len(stats))
+		for i, stat := range stats {
+			jsonStats[i] = json.RawMessage(stat)
+		}
+		payload[persistentStatPayloadNames[statType]] = jsonStats
 	}
-	payload["tunnel_stats"] = jsonTunnelStats
 
 	jsonPayload, err := json.Marshal(payload)
 	if err != nil {
@@ -449,21 +455,21 @@ func makeStatusRequestPayload(
 func putBackStatusRequestPayload(payloadInfo *statusRequestPayloadInfo) {
 	transferstats.PutBackStatsForServer(
 		payloadInfo.serverId, payloadInfo.transferStats)
-	err := PutBackUnreportedTunnelStats(payloadInfo.tunnelStats)
+	err := PutBackUnreportedPersistentStats(payloadInfo.persistentStats)
 	if err != nil {
-		// These tunnel stats records won't be resent under after a
+		// These persistent stats records won't be resent until after a
 		// datastore re-initialization.
 		NoticeAlert(
-			"PutBackUnreportedTunnelStats failed: %s", common.ContextError(err))
+			"PutBackUnreportedPersistentStats failed: %s", common.ContextError(err))
 	}
 }
 
 func confirmStatusRequestPayload(payloadInfo *statusRequestPayloadInfo) {
-	err := ClearReportedTunnelStats(payloadInfo.tunnelStats)
+	err := ClearReportedPersistentStats(payloadInfo.persistentStats)
 	if err != nil {
-		// These tunnel stats records may be resent.
+		// These persistent stats records may be resent.
 		NoticeAlert(
-			"ClearReportedTunnelStats failed: %s", common.ContextError(err))
+			"ClearReportedPersistentStats failed: %s", common.ContextError(err))
 	}
 }
 
@@ -552,7 +558,7 @@ func (serverContext *ServerContext) doUntunneledStatusRequest(
 	return nil
 }
 
-// RecordTunnelStats records a tunnel duration and bytes
+// RecordTunnelStat records a tunnel duration and bytes
 // sent and received for subsequent reporting and quality
 // analysis.
 //
@@ -595,7 +601,7 @@ func (serverContext *ServerContext) doUntunneledStatusRequest(
 // Duplicate reporting may also occur when a server receives and
 // processes a status request but the client fails to receive
 // the response.
-func RecordTunnelStats(
+func RecordTunnelStat(
 	sessionId string,
 	tunnelNumber int64,
 	tunnelServerIpAddress string,
@@ -605,7 +611,7 @@ func RecordTunnelStats(
 	totalBytesSent int64,
 	totalBytesReceived int64) error {
 
-	tunnelStats := struct {
+	tunnelStat := struct {
 		SessionId                string `json:"session_id"`
 		TunnelNumber             int64  `json:"tunnel_number"`
 		TunnelServerIpAddress    string `json:"tunnel_server_ip_address"`
@@ -625,12 +631,38 @@ func RecordTunnelStats(
 		totalBytesReceived,
 	}
 
-	tunnelStatsJson, err := json.Marshal(tunnelStats)
+	tunnelStatJson, err := json.Marshal(tunnelStat)
 	if err != nil {
 		return common.ContextError(err)
 	}
 
-	return StoreTunnelStats(tunnelStatsJson)
+	return StorePersistentStat(
+		PERSISTENT_STAT_TYPE_TUNNEL, tunnelStatJson)
+}
+
+// RecordRemoteServerListStat records a completed common or OSL
+// remote server list resource download. These stats use the same
+// persist-until-reported mechanism described in RecordTunnelStats.
+func RecordRemoteServerListStat(
+	url, etag string) error {
+
+	remoteServerListStat := struct {
+		ClientDownloadTimestamp string `json:"client_download_timestamp"`
+		URL                     string `json:"url"`
+		ETag                    string `json:"etag"`
+	}{
+		common.TruncateTimestampToHour(common.GetCurrentTimestamp()),
+		url,
+		etag,
+	}
+
+	remoteServerListStatJson, err := json.Marshal(remoteServerListStat)
+	if err != nil {
+		return common.ContextError(err)
+	}
+
+	return StorePersistentStat(
+		PERSISTENT_STAT_TYPE_REMOTE_SERVER_LIST, remoteServerListStatJson)
 }
 
 // DoClientVerificationRequest performs the "client_verification" API
@@ -906,17 +938,19 @@ func makePsiphonHttpsClient(tunnel *Tunnel) (httpsClient *http.Client, err error
 	}, nil
 }
 
-func HandleServerRequest(tunnel *Tunnel, name string, payload []byte) error {
+func HandleServerRequest(
+	tunnelOwner TunnelOwner, tunnel *Tunnel, name string, payload []byte) error {
 
 	switch name {
 	case protocol.PSIPHON_API_OSL_REQUEST_NAME:
-		return HandleOSLRequest(tunnel, payload)
+		return HandleOSLRequest(tunnelOwner, tunnel, payload)
 	}
 
 	return common.ContextError(fmt.Errorf("invalid request name: %s", name))
 }
 
-func HandleOSLRequest(tunnel *Tunnel, payload []byte) error {
+func HandleOSLRequest(
+	tunnelOwner TunnelOwner, tunnel *Tunnel, payload []byte) error {
 
 	var oslRequest protocol.OSLRequest
 	err := json.Unmarshal(payload, &oslRequest)
@@ -928,11 +962,15 @@ func HandleOSLRequest(tunnel *Tunnel, payload []byte) error {
 		DeleteSLOKs()
 	}
 
+	seededNewSLOK := false
+
 	for _, slok := range oslRequest.SeedPayload.SLOKs {
 		duplicate, err := SetSLOK(slok.ID, slok.Key)
 		if err != nil {
 			// TODO: return error to trigger retry?
 			NoticeAlert("SetSLOK failed: %s", common.ContextError(err))
+		} else if !duplicate {
+			seededNewSLOK = true
 		}
 
 		if tunnel.config.ReportSLOKs {
@@ -940,5 +978,9 @@ func HandleOSLRequest(tunnel *Tunnel, payload []byte) error {
 		}
 	}
 
+	if seededNewSLOK {
+		tunnelOwner.SignalSeededNewSLOK()
+	}
+
 	return nil
 }

+ 7 - 6
psiphon/tunnel.go

@@ -60,6 +60,7 @@ type Tunneler interface {
 // owner when it has failed. The owner may, as in the case of the Controller,
 // remove the tunnel from its list of active tunnels.
 type TunnelOwner interface {
+	SignalSeededNewSLOK()
 	SignalTunnelFailure(tunnel *Tunnel)
 }
 
@@ -842,16 +843,16 @@ func (tunnel *Tunnel) operateTunnel(tunnelOwner TunnelOwner) {
 	defer statsTimer.Stop()
 
 	// Schedule an immediate status request to deliver any unreported
-	// tunnel stats.
+	// persistent stats.
 	// Note: this may not be effective when there's an outstanding
 	// asynchronous untunneled final status request is holding the
-	// tunnel stats records. It may also conflict with other
+	// persistent stats records. It may also conflict with other
 	// tunnel candidates which attempt to send an immediate request
 	// before being discarded. For now, we mitigate this with a short,
 	// random delay.
-	unreported := CountUnreportedTunnelStats()
+	unreported := CountUnreportedPersistentStats()
 	if unreported > 0 {
-		NoticeInfo("Unreported tunnel stats: %d", unreported)
+		NoticeInfo("Unreported persistent stats: %d", unreported)
 		statsTimer.Reset(makeRandomPeriod(
 			PSIPHON_API_STATUS_REQUEST_SHORT_PERIOD_MIN,
 			PSIPHON_API_STATUS_REQUEST_SHORT_PERIOD_MAX))
@@ -1001,7 +1002,7 @@ func (tunnel *Tunnel) operateTunnel(tunnelOwner TunnelOwner) {
 
 		case serverRequest := <-tunnel.sshServerRequests:
 			if serverRequest != nil {
-				err := HandleServerRequest(tunnel, serverRequest.Type, serverRequest.Payload)
+				err := HandleServerRequest(tunnelOwner, tunnel, serverRequest.Type, serverRequest.Payload)
 				if err == nil {
 					serverRequest.Reply(true, nil)
 				} else {
@@ -1064,7 +1065,7 @@ func (tunnel *Tunnel) operateTunnel(tunnelOwner TunnelOwner) {
 
 		tunnelDuration := tunnel.conn.GetLastActivityMonotime().Sub(tunnel.establishedTime)
 
-		err := RecordTunnelStats(
+		err := RecordTunnelStat(
 			tunnel.serverContext.sessionId,
 			tunnel.serverContext.tunnelNumber,
 			tunnel.serverEntry.IpAddress,