Parcourir la source

Mitigate slow datastore operations

- Remove potentially slow, establishment-blocking, and redundant server entry
scans used for CandidateServers and AvailableEgressRegions and replace with
asynchronous and multipurpose serverEntriesReporter.

- Allow establishment to begin and possibly end before these notices emit.

- Emit one CandidateServers notice per establishment, instead of one per round.

- In MobileLibrary and ClientLibrary, asynchronously import embedded server
entries unless no entries (for MobileLibrary, this reinstates
https://github.com/Psiphon-Labs/psiphon-tunnel-core/commit/6bd3e268ffd61e5d2b88ebbb17bda67c36efb233);
and failure to import is no longer fatal.

- Fix: in ConsoleClient, asynchronously import embedded server entries unless
no entries.

- In all of ConsoleClient/MobileLibrary/ClientLibrary, replace server entry
scan with fast HasServerEntries; and post a notice when import is synchronous.

- In ClientLibrary, set up notices before calling OpenDataStore, which emits
notices.

- Change mutex scheme in datastore to allow nested Open/Close to not block on
open transactions.

- Update error notices in controller.go: consistent severity, tracing, etc.

- Fix: AvailEgressRegionsNotice now posts [] instead of "nil" for empty lists.
Rod Hynes il y a 5 ans
Parent
commit
2c8a63fa13

+ 49 - 33
ClientLibrary/clientlib/clientlib.go

@@ -28,9 +28,7 @@ import (
 	"sync"
 
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon"
-	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors"
-	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 )
 
 // Parameters provide an easier way to modify the tunnel config at runtime.
@@ -72,8 +70,9 @@ type Parameters struct {
 // PsiphonTunnel is the tunnel object. It can be used for stopping the tunnel and
 // retrieving proxy ports.
 type PsiphonTunnel struct {
-	controllerWaitGroup sync.WaitGroup
-	stopController      context.CancelFunc
+	embeddedServerListWaitGroup sync.WaitGroup
+	controllerWaitGroup         sync.WaitGroup
+	stopController              context.CancelFunc
 
 	// The port on which the HTTP proxy is running
 	HTTPProxyPort int
@@ -118,7 +117,7 @@ var ErrTimeout = std_errors.New("clientlib: tunnel establishment timeout")
 func StartTunnel(ctx context.Context,
 	configJSON []byte, embeddedServerEntryList string,
 	params Parameters, paramsDelta ParametersDelta,
-	noticeReceiver func(NoticeEvent)) (tunnel *PsiphonTunnel, retErr error) {
+	noticeReceiver func(NoticeEvent)) (retTunnel *PsiphonTunnel, retErr error) {
 
 	config, err := psiphon.LoadConfig(configJSON)
 	if err != nil {
@@ -170,31 +169,6 @@ func StartTunnel(ctx context.Context,
 		}
 	}
 
-	err = psiphon.OpenDataStore(config)
-	if err != nil {
-		return nil, errors.TraceMsg(err, "failed to open data store")
-	}
-	// Make sure we close the datastore in case of error
-	defer func() {
-		if retErr != nil {
-			psiphon.CloseDataStore()
-		}
-	}()
-
-	// Store embedded server entries
-	serverEntries, err := protocol.DecodeServerEntryList(
-		embeddedServerEntryList,
-		common.TruncateTimestampToHour(common.GetCurrentTimestamp()),
-		protocol.SERVER_ENTRY_SOURCE_EMBEDDED)
-	if err != nil {
-		return nil, errors.TraceMsg(err, "failed to decode server entry list")
-	}
-
-	err = psiphon.StoreServerEntries(config, serverEntries, false)
-	if err != nil {
-		return nil, errors.TraceMsg(err, "failed to store server entries")
-	}
-
 	// Will receive a value when the tunnel has successfully connected.
 	connected := make(chan struct{})
 	// Will receive a value if the tunnel times out trying to connect.
@@ -203,7 +177,7 @@ func StartTunnel(ctx context.Context,
 	errored := make(chan error)
 
 	// Create the tunnel object
-	tunnel = new(PsiphonTunnel)
+	tunnel := new(PsiphonTunnel)
 
 	// Set up notice handling
 	psiphon.SetNoticeWriter(psiphon.NewNoticeReceiver(
@@ -249,6 +223,49 @@ func StartTunnel(ctx context.Context,
 			}
 		}))
 
+	err = psiphon.OpenDataStore(config)
+	if err != nil {
+		return nil, errors.TraceMsg(err, "failed to open data store")
+	}
+	// Make sure we close the datastore in case of error
+	defer func() {
+		if retErr != nil {
+			tunnel.controllerWaitGroup.Wait()
+			tunnel.embeddedServerListWaitGroup.Wait()
+			psiphon.CloseDataStore()
+		}
+	}()
+
+	// If specified, the embedded server list is loaded and stored. When there
+	// are no server candidates at all, we wait for this import to complete
+	// before starting the Psiphon controller. Otherwise, we import while
+	// concurrently starting the controller to minimize delay before attempting
+	// to connect to existing candidate servers.
+	//
+	// If the import fails, an error notice is emitted, but the controller is
+	// still started: either existing candidate servers may suffice, or the
+	// remote server list fetch may obtain candidate servers.
+	//
+	// TODO: abort import if controller run ctx is cancelled. Currently, this
+	// import will block shutdown.
+	tunnel.embeddedServerListWaitGroup.Add(1)
+	go func() {
+		defer tunnel.embeddedServerListWaitGroup.Done()
+
+		err = psiphon.ImportEmbeddedServerEntries(
+			config,
+			"",
+			embeddedServerEntryList)
+		if err != nil {
+			psiphon.NoticeError("error importing embedded server entry list: %s", err)
+			return
+		}
+	}()
+	if !psiphon.HasServerEntries() {
+		psiphon.NoticeInfo("awaiting embedded server entry list import")
+		tunnel.embeddedServerListWaitGroup.Wait()
+	}
+
 	// Create the Psiphon controller
 	controller, err := psiphon.NewController(config)
 	if err != nil {
@@ -292,8 +309,7 @@ func (tunnel *PsiphonTunnel) Stop() {
 	if tunnel.stopController != nil {
 		tunnel.stopController()
 	}
-
 	tunnel.controllerWaitGroup.Wait()
-
+	tunnel.embeddedServerListWaitGroup.Wait()
 	psiphon.CloseDataStore()
 }

+ 21 - 27
ConsoleClient/main.go

@@ -37,7 +37,6 @@ import (
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/buildinfo"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors"
-	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tun"
 )
 
@@ -363,6 +362,7 @@ type Worker interface {
 // TunnelWorker is the Worker protocol implementation used for tunnel mode.
 type TunnelWorker struct {
 	embeddedServerEntryListFilename string
+	embeddedServerListWaitGroup     *sync.WaitGroup
 	controller                      *psiphon.Controller
 }
 
@@ -377,47 +377,38 @@ func (w *TunnelWorker) Init(config *psiphon.Config) error {
 		os.Exit(1)
 	}
 
-	// Handle optional embedded server list file parameter
 	// If specified, the embedded server list is loaded and stored. When there
 	// are no server candidates at all, we wait for this import to complete
 	// before starting the Psiphon controller. Otherwise, we import while
 	// concurrently starting the controller to minimize delay before attempting
 	// to connect to existing candidate servers.
+	//
 	// If the import fails, an error notice is emitted, but the controller is
 	// still started: either existing candidate servers may suffice, or the
 	// remote server list fetch may obtain candidate servers.
+	//
+	// TODO: abort import if controller run ctx is cancelled. Currently, this
+	// import will block shutdown.
 	if w.embeddedServerEntryListFilename != "" {
-		embeddedServerListWaitGroup := new(sync.WaitGroup)
-		embeddedServerListWaitGroup.Add(1)
+		w.embeddedServerListWaitGroup = new(sync.WaitGroup)
+		w.embeddedServerListWaitGroup.Add(1)
 		go func() {
-			defer embeddedServerListWaitGroup.Done()
-			serverEntryList, err := ioutil.ReadFile(w.embeddedServerEntryListFilename)
-			if err != nil {
-				psiphon.NoticeError("error loading embedded server entry list file: %s", err)
-				return
-			}
-			// TODO: stream embedded server list data? also, the cast makes an unnecessary copy of a large buffer?
-			serverEntries, err := protocol.DecodeServerEntryList(
-				string(serverEntryList),
-				common.GetCurrentTimestamp(),
-				protocol.SERVER_ENTRY_SOURCE_EMBEDDED)
-			if err != nil {
-				psiphon.NoticeError("error decoding embedded server entry list file: %s", err)
-				return
-			}
-			// Since embedded server list entries may become stale, they will not
-			// overwrite existing stored entries for the same server.
-			err = psiphon.StoreServerEntries(config, serverEntries, false)
+			defer w.embeddedServerListWaitGroup.Done()
+
+			err = psiphon.ImportEmbeddedServerEntries(
+				config,
+				w.embeddedServerEntryListFilename,
+				"")
+
 			if err != nil {
-				psiphon.NoticeError("error storing embedded server entry list data: %s", err)
+				psiphon.NoticeError("error importing embedded server entry list: %s", err)
 				return
 			}
 		}()
 
-		if psiphon.CountServerEntries() == 0 {
-			embeddedServerListWaitGroup.Wait()
-		} else {
-			defer embeddedServerListWaitGroup.Wait()
+		if !psiphon.HasServerEntries() {
+			psiphon.NoticeInfo("awaiting embedded server entry list import")
+			w.embeddedServerListWaitGroup.Wait()
 		}
 	}
 
@@ -434,6 +425,9 @@ func (w *TunnelWorker) Init(config *psiphon.Config) error {
 // Run implements the Worker interface.
 func (w *TunnelWorker) Run(ctx context.Context) error {
 	defer psiphon.CloseDataStore()
+	if w.embeddedServerListWaitGroup != nil {
+		defer w.embeddedServerListWaitGroup.Wait()
+	}
 
 	w.controller.Run(ctx)
 	return nil

+ 32 - 51
MobileLibrary/psi/psi.go

@@ -36,7 +36,6 @@ import (
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/buildinfo"
-	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tun"
 )
 
@@ -109,6 +108,7 @@ func UpgradeDownloadFilePath(rootDataDirectoryPath string) string {
 }
 
 var controllerMutex sync.Mutex
+var embeddedServerListWaitGroup *sync.WaitGroup
 var controller *psiphon.Controller
 var controllerCtx context.Context
 var stopController context.CancelFunc
@@ -184,17 +184,40 @@ func Start(
 		return fmt.Errorf("error initializing datastore: %s", err)
 	}
 
-	// Stores list of server entries.
-	err = storeServerEntries(
-		config,
-		embeddedServerEntryListFilename,
-		embeddedServerEntryList)
-	if err != nil {
-		return err
+	// If specified, the embedded server list is loaded and stored. When there
+	// are no server candidates at all, we wait for this import to complete
+	// before starting the Psiphon controller. Otherwise, we import while
+	// concurrently starting the controller to minimize delay before attempting
+	// to connect to existing candidate servers.
+	//
+	// If the import fails, an error notice is emitted, but the controller is
+	// still started: either existing candidate servers may suffice, or the
+	// remote server list fetch may obtain candidate servers.
+	//
+	// TODO: abort import if controller run ctx is cancelled. Currently, this
+	// import will block shutdown.
+	embeddedServerListWaitGroup = new(sync.WaitGroup)
+	embeddedServerListWaitGroup.Add(1)
+	go func() {
+		defer embeddedServerListWaitGroup.Done()
+
+		err = psiphon.ImportEmbeddedServerEntries(
+			config,
+			embeddedServerEntryListFilename,
+			embeddedServerEntryList)
+		if err != nil {
+			psiphon.NoticeError("error importing embedded server entry list: %s", err)
+			return
+		}
+	}()
+	if !psiphon.HasServerEntries() {
+		psiphon.NoticeInfo("awaiting embedded server entry list import")
+		embeddedServerListWaitGroup.Wait()
 	}
 
 	controller, err = psiphon.NewController(config)
 	if err != nil {
+		embeddedServerListWaitGroup.Wait()
 		psiphon.CloseDataStore()
 		return fmt.Errorf("error initializing controller: %s", err)
 	}
@@ -219,6 +242,7 @@ func Stop() {
 	if controller != nil {
 		stopController()
 		controllerWaitGroup.Wait()
+		embeddedServerListWaitGroup.Wait()
 		psiphon.CloseDataStore()
 		controller = nil
 		controllerCtx = nil
@@ -448,49 +472,6 @@ func WriteRuntimeProfiles(outputDirectory string, cpuSampleDurationSeconds, bloc
 		blockSampleDurationSeconds)
 }
 
-// Helper function to store a list of server entries.
-// if embeddedServerEntryListFilename is not empty, embeddedServerEntryList will be ignored.
-func storeServerEntries(
-	config *psiphon.Config,
-	embeddedServerEntryListFilename, embeddedServerEntryList string) error {
-
-	if embeddedServerEntryListFilename != "" {
-
-		file, err := os.Open(embeddedServerEntryListFilename)
-		if err != nil {
-			return fmt.Errorf("error reading embedded server list file: %s", err)
-		}
-		defer file.Close()
-
-		err = psiphon.StreamingStoreServerEntries(
-			config,
-			protocol.NewStreamingServerEntryDecoder(
-				file,
-				common.TruncateTimestampToHour(common.GetCurrentTimestamp()),
-				protocol.SERVER_ENTRY_SOURCE_EMBEDDED),
-			false)
-		if err != nil {
-			return fmt.Errorf("error storing embedded server list: %s", err)
-		}
-
-	} else {
-
-		serverEntries, err := protocol.DecodeServerEntryList(
-			embeddedServerEntryList,
-			common.TruncateTimestampToHour(common.GetCurrentTimestamp()),
-			protocol.SERVER_ENTRY_SOURCE_EMBEDDED)
-		if err != nil {
-			return fmt.Errorf("error decoding embedded server list: %s", err)
-		}
-		err = psiphon.StoreServerEntries(config, serverEntries, false)
-		if err != nil {
-			return fmt.Errorf("error storing embedded server list: %s", err)
-		}
-	}
-
-	return nil
-}
-
 type mutexPsiphonProvider struct {
 	sync.Mutex
 	p PsiphonProvider

+ 310 - 116
psiphon/controller.go

@@ -74,6 +74,7 @@ type Controller struct {
 	signalFetchCommonRemoteServerList       chan struct{}
 	signalFetchObfuscatedServerLists        chan struct{}
 	signalDownloadUpgrade                   chan string
+	signalReportServerEntries               chan *serverEntriesReportRequest
 	signalReportConnected                   chan struct{}
 	signalRestartEstablishing               chan struct{}
 	serverAffinityDoneBroadcast             chan struct{}
@@ -117,6 +118,7 @@ func NewController(config *Config) (controller *Controller, err error) {
 		establishedOnce:      false,
 		isEstablishing:       false,
 		untunneledDialConfig: untunneledDialConfig,
+
 		// TODO: Add a buffer of 1 so we don't miss a signal while receiver is
 		// starting? Trade-off is potential back-to-back fetch remotes. As-is,
 		// establish will eventually signal another fetch remote.
@@ -125,6 +127,12 @@ func NewController(config *Config) (controller *Controller, err error) {
 		signalDownloadUpgrade:             make(chan string),
 		signalReportConnected:             make(chan struct{}),
 
+		// Using a buffer of 1 to ensure there's no race between the first signal
+		// sent and a channel receiver initializing; a side effect is that this
+		// allows 1 additional scan to enqueue while a scan is in progress, possibly
+		// resulting in one unnecessary scan.
+		signalReportServerEntries: make(chan *serverEntriesReportRequest, 1),
+
 		// signalRestartEstablishing has a buffer of 1 to ensure sending the
 		// signal doesn't block and receiving won't miss a signal.
 		signalRestartEstablishing: make(chan struct{}, 1),
@@ -189,7 +197,7 @@ func (controller *Controller) Run(ctx context.Context) {
 			err = fmt.Errorf("no IPv4 address for interface %s", controller.config.ListenInterface)
 		}
 		if err != nil {
-			NoticeError("error getting listener IP: %s", errors.Trace(err))
+			NoticeError("error getting listener IP: %v", errors.Trace(err))
 			return
 		}
 		listenIP = IPv4Address.String()
@@ -198,7 +206,7 @@ func (controller *Controller) Run(ctx context.Context) {
 	if !controller.config.DisableLocalSocksProxy {
 		socksProxy, err := NewSocksProxy(controller.config, controller, listenIP)
 		if err != nil {
-			NoticeWarning("error initializing local SOCKS proxy: %s", err)
+			NoticeError("error initializing local SOCKS proxy: %v", errors.Trace(err))
 			return
 		}
 		defer socksProxy.Close()
@@ -207,7 +215,7 @@ func (controller *Controller) Run(ctx context.Context) {
 	if !controller.config.DisableLocalHTTPProxy {
 		httpProxy, err := NewHttpProxy(controller.config, controller, listenIP)
 		if err != nil {
-			NoticeWarning("error initializing local HTTP proxy: %s", err)
+			NoticeError("error initializing local HTTP proxy: %v", errors.Trace(err))
 			return
 		}
 		defer httpProxy.Close()
@@ -237,6 +245,9 @@ func (controller *Controller) Run(ctx context.Context) {
 		go controller.upgradeDownloader()
 	}
 
+	controller.runWaitGroup.Add(1)
+	go controller.serverEntriesReporter()
+
 	controller.runWaitGroup.Add(1)
 	go controller.connectedReporter()
 
@@ -394,7 +405,8 @@ fetcherLoop:
 				break retryLoop
 			}
 
-			NoticeWarning("failed to fetch %s remote server list: %s", name, err)
+			NoticeWarning("failed to fetch %s remote server list: %v",
+				name, errors.Trace(err))
 
 			retryPeriod := controller.config.GetParameters().Get().Duration(
 				parameters.FetchRemoteServerListRetryPeriod)
@@ -482,7 +494,7 @@ downloadLoop:
 				break retryLoop
 			}
 
-			NoticeWarning("failed to download upgrade: %s", err)
+			NoticeWarning("failed to download upgrade: %v", errors.Trace(err))
 
 			timeout := controller.config.GetParameters().Get().Duration(
 				parameters.FetchUpgradeRetryPeriod)
@@ -500,6 +512,167 @@ downloadLoop:
 	NoticeInfo("exiting upgrade downloader")
 }
 
+type serverEntriesReportRequest struct {
+	constraints   *protocolSelectionConstraints
+	awaitResponse chan *serverEntriesReportResponse
+}
+
+type serverEntriesReportResponse struct {
+	err                              error
+	candidates                       int
+	initialCandidates                int
+	initialCandidatesAnyEgressRegion int
+	availableEgressRegions           []string
+}
+
+// serverEntriesReporter performs scans over all server entries to report on
+// available tunnel candidates, subject to protocol selection constraints, and
+// available egress regions.
+//
+// Because scans may be slow, depending on the client device and server entry
+// list size, serverEntriesReporter is used to perform asynchronous, background
+// operations that would otherwise block establishment. This includes emitting
+// diagnostic notices that are informational (CandidateServers) or which do not
+// need to emit before establishment starts (AvailableEgressRegions).
+//
+// serverEntriesReporter also serves to combine these scans, which would
+// otherwise be logically independent, due to the performance impact of scans.
+//
+// The underlying datastore implementation _may_ block write transactions
+// while there are open read transactions. For example, bolt write
+// transactions which need to  re-map the data file (when the datastore grows)
+// will block on open read transactions. In these scenarios, a slow scan will
+// still block other operations.
+//
+// serverEntriesReporter runs beyond the establishment phase, since it's
+// important for notices such as AvailableEgressRegions to eventually emit
+// even if already established. serverEntriesReporter scans are cancellable,
+// so controller shutdown is not blocked by slow scans.
+//
+// In some special cases, establishment cannot begin without candidate counts
+// up front. In these cases only, the request contains a non-nil
+// awaitResponse, a channel which is used by the requester to block until the
+// scan is complete and the candidate counts are available.
+func (controller *Controller) serverEntriesReporter() {
+	defer controller.runWaitGroup.Done()
+
+loop:
+	for {
+
+		var request *serverEntriesReportRequest
+
+		select {
+		case request = <-controller.signalReportServerEntries:
+		case <-controller.runCtx.Done():
+			break loop
+		}
+
+		egressRegion := controller.config.EgressRegion
+		constraints := request.constraints
+		// When CountServerEntriesWithConstraints is called only
+		// limitTunnelProtocolState is fixed; excludeIntensive is transitory.
+		excludeIntensive := false
+
+		var response serverEntriesReportResponse
+
+		regions := make(map[string]bool)
+
+		callback := func(serverEntry *protocol.ServerEntry) bool {
+
+			isInitialCandidate := constraints.isInitialCandidate(excludeIntensive, serverEntry)
+			isCandidate := constraints.isCandidate(excludeIntensive, serverEntry)
+
+			if isInitialCandidate {
+				if egressRegion == "" || serverEntry.Region == egressRegion {
+					response.initialCandidates += 1
+				}
+				response.initialCandidatesAnyEgressRegion += 1
+			}
+
+			if isCandidate {
+				response.candidates += 1
+			}
+
+			// Available egress regions is subject to an initial limit constraint, if
+			// present: see AvailableEgressRegions comment in launchEstablishing.
+			if (constraints.hasInitialProtocols() && isInitialCandidate) || isCandidate {
+				// Ignore server entries with no region field.
+				if serverEntry.Region != "" {
+					regions[serverEntry.Region] = true
+				}
+			}
+
+			select {
+			case <-controller.runCtx.Done():
+				// Don't block controller shutdown: cancel the scan.
+				return false
+			default:
+				return true
+			}
+		}
+
+		response.err = ScanServerEntries(callback)
+
+		response.availableEgressRegions = make([]string, 0, len(regions))
+		for region := range regions {
+			response.availableEgressRegions = append(response.availableEgressRegions, region)
+		}
+
+		if response.err != nil {
+
+			// For diagnostics, we'll post this even when cancelled due to shutdown.
+			NoticeWarning("ScanServerEntries failed: %v", errors.Trace(response.err))
+
+			// Continue and send error response. Clear any partial data to avoid
+			// misuse.
+			response.candidates = 0
+			response.initialCandidates = 0
+			response.initialCandidatesAnyEgressRegion = 0
+			response.availableEgressRegions = []string{}
+		}
+
+		if request.awaitResponse != nil {
+			select {
+			case request.awaitResponse <- &response:
+			case <-controller.runCtx.Done():
+				// The receiver may be gone when shutting down.
+			}
+		}
+
+		if response.err == nil {
+
+			NoticeCandidateServers(
+				controller.config.EgressRegion,
+				controller.protocolSelectionConstraints,
+				response.initialCandidates,
+				response.candidates)
+
+			NoticeAvailableEgressRegions(
+				response.availableEgressRegions)
+		}
+	}
+
+	NoticeInfo("exiting server entries reporter")
+}
+
+// signalServerEntriesReporter triggers a new server entry report. Set
+// request.awaitResponse to obtain the report output. When awaitResponse is
+// set, signalServerEntriesReporter blocks until the reporter receives the
+// request, guaranteeing the new report runs. Otherwise, the report is
+// considered to be informational and may or may not run, depending on whether
+// another run is already in progress.
+func (controller *Controller) signalServerEntriesReporter(request *serverEntriesReportRequest) {
+
+	if request.awaitResponse == nil {
+		select {
+		case controller.signalReportServerEntries <- request:
+		default:
+		}
+	} else {
+		controller.signalReportServerEntries <- request
+	}
+}
+
 // connectedReporter sends periodic "connected" requests to the Psiphon API.
 // These requests are for server-side unique user stats calculation. See the
 // comment in DoConnectedRequest for a description of the request mechanism.
@@ -542,7 +715,8 @@ loop:
 			if err == nil {
 				reported = true
 			} else {
-				NoticeWarning("failed to make connected request: %s", err)
+				NoticeWarning("failed to make connected request: %v",
+					errors.Trace(err))
 			}
 		}
 
@@ -710,16 +884,18 @@ loop:
 				err := connectedTunnel.Activate(controller.runCtx, controller)
 
 				if err != nil {
-					NoticeWarning("failed to activate %s: %s",
-						connectedTunnel.dialParams.ServerEntry.GetDiagnosticID(), err)
+					NoticeWarning("failed to activate %s: %v",
+						connectedTunnel.dialParams.ServerEntry.GetDiagnosticID(),
+						errors.Trace(err))
 					discardTunnel = true
 				} else {
 					// It's unlikely that registerTunnel will fail, since only this goroutine
 					// calls registerTunnel -- and after checking numTunnels; so failure is not
 					// expected.
 					if !controller.registerTunnel(connectedTunnel) {
-						NoticeWarning("failed to register %s: %s",
-							connectedTunnel.dialParams.ServerEntry.GetDiagnosticID(), err)
+						NoticeWarning("failed to register %s: %v",
+							connectedTunnel.dialParams.ServerEntry.GetDiagnosticID(),
+							errors.Trace(err))
 						discardTunnel = true
 					}
 				}
@@ -1307,10 +1483,9 @@ func (controller *Controller) launchEstablishing() {
 		}
 	}
 
-	// LimitTunnelProtocols and ConnectionWorkerPoolSize may be set by
-	// tactics.
-
-	// Initial- and LimitTunnelProtocols are set once per establishment, for
+	// Initial- and LimitTunnelProtocols may be set by tactics.
+	//
+	// These protocol limits are fixed once per establishment, for
 	// consistent application of related probabilities (applied by
 	// ParametersAccessor.TunnelProtocols). The
 	// establishLimitTunnelProtocolsState field must be read-only after this
@@ -1326,6 +1501,8 @@ func (controller *Controller) launchEstablishing() {
 		replayCandidateCount:                p.Int(parameters.ReplayCandidateCount),
 	}
 
+	// ConnectionWorkerPoolSize may be set by tactics.
+
 	workerPoolSize := p.Int(parameters.ConnectionWorkerPoolSize)
 
 	// When TargetServerEntry is used, override any worker pool size config or
@@ -1355,98 +1532,123 @@ func (controller *Controller) launchEstablishing() {
 		controller.config.TargetServerEntry != "" {
 		tunnelPoolSize = 1
 	}
-	if tunnelPoolSize > 1 {
-		// Initial count is ignored as count candidates will eventually become
-		// available.
-		_, count := CountServerEntriesWithConstraints(
-			controller.config.UseUpstreamProxy(),
-			controller.config.EgressRegion,
-			controller.protocolSelectionConstraints)
-		if count < tunnelPoolSize {
-			if count < 1 {
-				count = 1
-			}
-			tunnelPoolSize = count
-		}
-	}
-	controller.setTunnelPoolSize(tunnelPoolSize)
 
 	p.Close()
 
-	// If InitialLimitTunnelProtocols is configured but cannot be satisfied,
-	// skip the initial phase in this establishment. This avoids spinning,
-	// unable to connect, in this case. InitialLimitTunnelProtocols is
-	// intended to prioritize certain protocols, but not strictly select them.
-	//
-	// The candidate count check is made with egress region selection unset.
-	// When an egress region is selected, it's the responsibility of the outer
-	// client to react to the following ReportAvailableRegions output and
-	// clear the user's selected region to prevent spinning, unable to
-	// connect. The initial phase is skipped only when
-	// InitialLimitTunnelProtocols cannot be satisfied _regardless_ of region
-	// selection.
-	//
-	// We presume that, in practise, most clients will have embedded server
-	// entries with capabilities for most protocols; and that clients will
-	// often perform RSL checks. So clients should most often have the
-	// necessary capabilities to satisfy InitialLimitTunnelProtocols. When
-	// this check fails, RSL/OSL/upgrade checks are triggered in order to gain
-	// new capabilities.
-	//
-	// LimitTunnelProtocols remains a hard limit, as using prohibited
-	// protocols may have some bad effect, such as a firewall blocking all
-	// traffic from a host.
-
-	if controller.protocolSelectionConstraints.initialLimitProtocolsCandidateCount > 0 {
+	// Trigger CandidateServers and AvailableEgressRegions notices. By default,
+	// this is an asynchronous operation, as the underlying full server entry
+	// list enumeration may be a slow operation. In certain cases, where
+	// candidate counts are required up front, await the result before
+	// proceeding.
 
-		egressRegion := "" // no egress region
+	awaitResponse := tunnelPoolSize > 1 ||
+		controller.protocolSelectionConstraints.initialLimitProtocolsCandidateCount > 0
 
-		initialCount, count := CountServerEntriesWithConstraints(
-			controller.config.UseUpstreamProxy(),
-			egressRegion,
-			controller.protocolSelectionConstraints)
-
-		if initialCount == 0 {
-			NoticeCandidateServers(
-				egressRegion,
-				controller.protocolSelectionConstraints,
-				initialCount,
-				count)
-			NoticeWarning("skipping initial limit tunnel protocols")
-			controller.protocolSelectionConstraints.initialLimitProtocolsCandidateCount = 0
-
-			// Since we were unable to satisfy the InitialLimitTunnelProtocols
-			// tactic, trigger RSL, OSL, and upgrade fetches to potentially
-			// gain new capabilities.
-			controller.triggerFetches()
-		}
-	}
-
-	// Report available egress regions. After a fresh install, the outer
-	// client may not have a list of regions to display; and
-	// LimitTunnelProtocols may reduce the number of available regions.
+	// AvailableEgressRegions: after a fresh install, the outer client may not
+	// have a list of regions to display; and LimitTunnelProtocols may reduce the
+	// number of available regions.
 	//
 	// When the outer client receives NoticeAvailableEgressRegions and the
 	// configured EgressRegion is not included in the region list, the outer
-	// client _should_ stop tunnel-core and prompt the user to change the
-	// region selection, as there are insufficient servers/capabilities to
-	// establish a tunnel in the selected region.
+	// client _should_ stop tunnel-core and prompt the user to change the region
+	// selection, as there are insufficient servers/capabilities to establish a
+	// tunnel in the selected region.
 	//
-	// This report is delayed until after tactics are likely to be applied;
-	// this avoids a ReportAvailableRegions reporting too many regions,
-	// followed shortly by a ReportAvailableRegions reporting fewer regions.
-	// That sequence could cause issues in the outer client UI.
+	// This report is delayed until after tactics are likely to be applied,
+	// above; this avoids a ReportAvailableRegions reporting too many regions,
+	// followed shortly by a ReportAvailableRegions reporting fewer regions. That
+	// sequence could cause issues in the outer client UI.
 	//
-	// The reported regions are limited by protocolSelectionConstraints;
-	// in the case where an initial limit is in place, only regions available
-	// for the initial limit are reported. The initial phase will not complete
-	// if EgressRegion is set such that there are no server entries with the
+	// The reported regions are limited by protocolSelectionConstraints; in the
+	// case where an initial limit is in place, only regions available for the
+	// initial limit are reported. The initial phase will not complete if
+	// EgressRegion is set such that there are no server entries with the
 	// necessary protocol capabilities (either locally or from a remote server
 	// list fetch).
 
-	ReportAvailableRegions(
-		controller.config,
-		controller.protocolSelectionConstraints)
+	// Concurrency note: controller.protocolSelectionConstraints may be
+	// overwritten before serverEntriesReporter reads it, and so cannot be
+	// accessed directly by serverEntriesReporter.
+	reportRequest := &serverEntriesReportRequest{
+		constraints: controller.protocolSelectionConstraints,
+	}
+
+	if awaitResponse {
+	// Buffer size of 1 ensures the sender, serverEntriesReporter, won't block on
+		// sending the response in the case where launchEstablishing exits due to
+		// stopping establishment.
+		reportRequest.awaitResponse = make(chan *serverEntriesReportResponse, 1)
+	}
+
+	controller.signalServerEntriesReporter(reportRequest)
+
+	if awaitResponse {
+
+		var reportResponse *serverEntriesReportResponse
+		select {
+		case reportResponse = <-reportRequest.awaitResponse:
+		case <-controller.establishCtx.Done():
+			// The sender may be gone when shutting down, or may not send until after
+			// stopping establishment.
+			return
+		}
+		if reportResponse.err != nil {
+			NoticeError("failed to report server entries: %v",
+				errors.Trace(reportResponse.err))
+			controller.SignalComponentFailure()
+			return
+		}
+
+		// Make adjustments based on candidate counts.
+
+		if tunnelPoolSize > 1 {
+			// Initial candidate count is ignored as count candidates will eventually
+			// become available.
+			if reportResponse.candidates < tunnelPoolSize {
+				tunnelPoolSize = reportResponse.candidates
+			}
+			if tunnelPoolSize < 1 {
+				tunnelPoolSize = 1
+			}
+		}
+		controller.setTunnelPoolSize(tunnelPoolSize)
+
+		// If InitialLimitTunnelProtocols is configured but cannot be satisfied,
+		// skip the initial phase in this establishment. This avoids spinning,
+		// unable to connect, in this case. InitialLimitTunnelProtocols is
+		// intended to prioritize certain protocols, but not strictly select them.
+		//
+		// The candidate count check ignores egress region selection. When an egress
+		// region is selected, it's the responsibility of the outer client to react
+		// to the following ReportAvailableRegions output and clear the user's
+		// selected region to prevent spinning, unable to connect. The initial phase
+		// is skipped only when InitialLimitTunnelProtocols cannot be satisfied
+		// _regardless_ of region selection.
+		//
+		// We presume that, in practice, most clients will have embedded server
+		// entries with capabilities for most protocols; and that clients will
+		// often perform RSL checks. So clients should most often have the
+		// necessary capabilities to satisfy InitialLimitTunnelProtocols. When
+		// this check fails, RSL/OSL/upgrade checks are triggered in order to gain
+		// new capabilities.
+		//
+		// LimitTunnelProtocols remains a hard limit, as using prohibited
+		// protocols may have some bad effect, such as a firewall blocking all
+		// traffic from a host.
+
+		if controller.protocolSelectionConstraints.initialLimitProtocolsCandidateCount > 0 {
+
+			if reportResponse.initialCandidatesAnyEgressRegion == 0 {
+				NoticeWarning("skipping initial limit tunnel protocols")
+				controller.protocolSelectionConstraints.initialLimitProtocolsCandidateCount = 0
+
+				// Since we were unable to satisfy the InitialLimitTunnelProtocols
+				// tactic, trigger RSL, OSL, and upgrade fetches to potentially
+				// gain new capabilities.
+				controller.triggerFetches()
+			}
+		}
+	}
 
 	for i := 0; i < workerPoolSize; i++ {
 		controller.establishWaitGroup.Add(1)
@@ -1508,7 +1710,7 @@ func (controller *Controller) establishCandidateGenerator() {
 
 	applyServerAffinity, iterator, err := NewServerEntryIterator(controller.config)
 	if err != nil {
-		NoticeWarning("failed to iterate over candidates: %s", err)
+		NoticeError("failed to iterate over candidates: %s", errors.Trace(err))
 		controller.SignalComponentFailure()
 		return
 	}
@@ -1529,23 +1731,6 @@ loop:
 	// Repeat until stopped
 	for {
 
-		// For diagnostics, emits counts of the number of known server
-		// entries that satisfy both the egress region and tunnel protocol
-		// requirements (excluding excludeIntensive logic).
-		// Counts may change during establishment due to remote server
-		// list fetches, etc.
-
-		initialCount, count := CountServerEntriesWithConstraints(
-			controller.config.UseUpstreamProxy(),
-			controller.config.EgressRegion,
-			controller.protocolSelectionConstraints)
-
-		NoticeCandidateServers(
-			controller.config.EgressRegion,
-			controller.protocolSelectionConstraints,
-			initialCount,
-			count)
-
 		// A "round" consists of a new shuffle of the server entries and attempted
 		// connections up to the end of the server entry iterator, or
 		// parameters.EstablishTunnelWorkTime elapsed. Time spent waiting for
@@ -1560,6 +1745,12 @@ loop:
 		// candidates, in which case rounds end instantly due to the complete server
 		// entry iteration. An exception is made for an empty server entry iterator;
 		// in that case fetches may be triggered immediately.
+		//
+		// The number of server candidates may change during this loop, due to
+		// remote server list fetches. Due to the performance impact, we will not
+		// trigger additional, informational CandidateServer notices while in the
+		// establishing loop. Clients typically re-establish often enough that we
+		// will see the effect of the remote server list fetch in diagnostics.
 
 		roundStartTime := time.Now()
 		var roundNetworkWaitDuration time.Duration
@@ -1584,7 +1775,7 @@ loop:
 
 			serverEntry, err := iterator.Next()
 			if err != nil {
-				NoticeWarning("failed to get next candidate: %s", err)
+				NoticeError("failed to get next candidate: %v", errors.Trace(err))
 				controller.SignalComponentFailure()
 				break loop
 			}
@@ -1710,6 +1901,7 @@ func (controller *Controller) establishTunnelWorker() {
 	defer controller.establishWaitGroup.Done()
 loop:
 	for candidateServerEntry := range controller.candidateServerEntries {
+
 		// Note: don't receive from candidateServerEntries and isStopEstablishing
 		// in the same select, since we want to prioritize receiving the stop signal
 		if controller.isStopEstablishing() {
@@ -1861,8 +2053,9 @@ loop:
 			// and the excludeIntensive flag.
 			// Silently skip the candidate in this case. Otherwise, emit error.
 			if err != nil {
-				NoticeInfo("failed to select protocol for %s: %s",
-					candidateServerEntry.serverEntry.GetDiagnosticID(), err)
+				NoticeInfo("failed to select protocol for %s: %v",
+					candidateServerEntry.serverEntry.GetDiagnosticID(),
+					errors.Trace(err))
 			}
 
 			// Unblock other candidates immediately when server affinity
@@ -1968,8 +2161,9 @@ loop:
 				break loop
 			}
 
-			NoticeInfo("failed to connect to %s: %s",
-				candidateServerEntry.serverEntry.GetDiagnosticID(), err)
+			NoticeInfo("failed to connect to %s: %v",
+				candidateServerEntry.serverEntry.GetDiagnosticID(),
+				errors.Trace(err))
 
 			continue
 		}

+ 30 - 1
psiphon/controller_test.go

@@ -617,6 +617,8 @@ func controllerRun(t *testing.T, runConfig *controllerRunConfig) {
 	upgradeDownloaded := make(chan struct{}, 1)
 	remoteServerListDownloaded := make(chan struct{}, 1)
 	confirmedLatestVersion := make(chan struct{}, 1)
+	candidateServers := make(chan struct{}, 1)
+	availableEgressRegions := make(chan struct{}, 1)
 
 	var clientUpgradeDownloadedBytesCount int32
 	var remoteServerListDownloadedBytesCount int32
@@ -696,6 +698,20 @@ func controllerRun(t *testing.T, runConfig *controllerRunConfig) {
 					default:
 					}
 				}
+
+			case "CandidateServers":
+
+				select {
+				case candidateServers <- struct{}{}:
+				default:
+				}
+
+			case "AvailableEgressRegions":
+
+				select {
+				case availableEgressRegions <- struct{}{}:
+				default:
+				}
 			}
 		}))
 
@@ -740,11 +756,24 @@ func controllerRun(t *testing.T, runConfig *controllerRunConfig) {
 
 		select {
 		case <-tunnelEstablished:
-
 		case <-establishTimeout.C:
 			t.Fatalf("tunnel establish timeout exceeded")
 		}
 
+		// Test: asynchronous server entry scans must complete
+
+		select {
+		case <-candidateServers:
+		case <-establishTimeout.C:
+			t.Fatalf("missing candidate servers notice")
+		}
+
+		select {
+		case <-availableEgressRegions:
+		case <-establishTimeout.C:
+			t.Fatalf("missing available egress regions notice")
+		}
+
 		// Test: if starting with no server entries, a fetch remote
 		// server list must have succeeded. With disruptNetwork, the
 		// fetch must have been resumed at least once.

+ 142 - 86
psiphon/dataStore.go

@@ -23,6 +23,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"math"
+	"os"
 	"sync"
 	"time"
 
@@ -54,9 +55,10 @@ var (
 	datastorePersistentStatTypeFailedTunnel     = string(datastoreFailedTunnelStatsBucket)
 	datastoreServerEntryFetchGCThreshold        = 10
 
-	datastoreMutex          sync.RWMutex
-	datastoreReferenceCount int64
-	activeDatastoreDB       *datastoreDB
+	datastoreReferenceCountMutex sync.RWMutex
+	datastoreReferenceCount      int64
+	datastoreMutex               sync.RWMutex
+	activeDatastoreDB            *datastoreDB
 )
 
 // OpenDataStore opens and initializes the singleton datastore instance.
@@ -78,30 +80,60 @@ func OpenDataStoreWithoutReset(config *Config) error {
 
 func openDataStore(config *Config, retryAndReset bool) error {
 
-	datastoreMutex.Lock()
+	// The datastoreReferenceCountMutex/datastoreMutex mutex pair allow for:
+	//
+	// _Nested_ OpenDataStore/CloseDataStore calls to not block when a
+	// datastoreView is in progress (for example, a GetDialParameters call while
+	// a slow ScanServerEntries is running). In this case the nested
+	// OpenDataStore/CloseDataStore calls will lock only
+	// datastoreReferenceCountMutex and not datastoreMutex.
+	//
+	// Synchronized access, for OpenDataStore/CloseDataStore, to
+	// activeDatastoreDB based on a consistent view of datastoreReferenceCount
+	// via locking first datastoreReferenceCountMutex and then datastoreMutex
+	// while holding datastoreReferenceCountMutex.
+	//
+	// Concurrent access, for datastoreView/datastoreUpdate, to activeDatastoreDB
+	// via datastoreMutex read locks.
+	//
+	// Exclusive access, for OpenDataStore/CloseDataStore, to activeDatastoreDB,
+	// with no running datastoreView/datastoreUpdate, by acquiring a
+	// datastoreMutex write lock.
+
+	datastoreReferenceCountMutex.Lock()
 
 	if datastoreReferenceCount < 0 || datastoreReferenceCount == math.MaxInt64 {
-		datastoreMutex.Unlock()
+		datastoreReferenceCountMutex.Unlock()
 		return errors.Tracef(
 			"invalid datastore reference count: %d", datastoreReferenceCount)
 	}
 
 	if datastoreReferenceCount > 0 {
 
-		if activeDatastoreDB == nil {
-			datastoreMutex.Unlock()
+		// For this sanity check, we need only the read-only lock; and must use the
+		// read-only lock to allow concurrent datastoreView calls.
+
+		datastoreMutex.RLock()
+		isNil := activeDatastoreDB == nil
+		datastoreMutex.RUnlock()
+		if isNil {
 			return errors.TraceNew("datastore unexpectedly closed")
 		}
 
 		// Add a reference to the open datastore.
 
 		datastoreReferenceCount += 1
-		datastoreMutex.Unlock()
+		datastoreReferenceCountMutex.Unlock()
 		return nil
 	}
 
+	// Only lock datastoreMutex now that it's necessary.
+	// datastoreReferenceCountMutex remains locked.
+	datastoreMutex.Lock()
+
 	if activeDatastoreDB != nil {
 		datastoreMutex.Unlock()
+		datastoreReferenceCountMutex.Unlock()
 		return errors.TraceNew("datastore unexpectedly open")
 	}
 
@@ -111,12 +143,14 @@ func openDataStore(config *Config, retryAndReset bool) error {
 		config.GetDataStoreDirectory(), retryAndReset)
 	if err != nil {
 		datastoreMutex.Unlock()
+		datastoreReferenceCountMutex.Unlock()
 		return errors.Trace(err)
 	}
 
 	datastoreReferenceCount = 1
 	activeDatastoreDB = newDB
 	datastoreMutex.Unlock()
+	datastoreReferenceCountMutex.Unlock()
 
 	_ = resetAllPersistentStatsToUnreported()
 
@@ -126,8 +160,8 @@ func openDataStore(config *Config, retryAndReset bool) error {
 // CloseDataStore closes the singleton datastore instance, if open.
 func CloseDataStore() {
 
-	datastoreMutex.Lock()
-	defer datastoreMutex.Unlock()
+	datastoreReferenceCountMutex.Lock()
+	defer datastoreReferenceCountMutex.Unlock()
 
 	if datastoreReferenceCount <= 0 {
 		NoticeWarning(
@@ -139,6 +173,11 @@ func CloseDataStore() {
 		return
 	}
 
+	// Only lock datastoreMutex now that it's necessary.
+	// datastoreReferenceCountMutex remains locked.
+	datastoreMutex.Lock()
+	defer datastoreMutex.Unlock()
+
 	if activeDatastoreDB == nil {
 		return
 	}
@@ -362,6 +401,52 @@ func StreamingStoreServerEntries(
 	return nil
 }
 
+// ImportEmbeddedServerEntries loads, decodes, and stores a list of server
+// entries. If embeddedServerEntryListFilename is not empty,
+// embeddedServerEntryList will be ignored and the encoded server entry list
+// will be loaded from the specified file.
+func ImportEmbeddedServerEntries(
+	config *Config,
+	embeddedServerEntryListFilename string,
+	embeddedServerEntryList string) error {
+
+	if embeddedServerEntryListFilename != "" {
+
+		file, err := os.Open(embeddedServerEntryListFilename)
+		if err != nil {
+			return errors.Trace(err)
+		}
+		defer file.Close()
+
+		err = StreamingStoreServerEntries(
+			config,
+			protocol.NewStreamingServerEntryDecoder(
+				file,
+				common.TruncateTimestampToHour(common.GetCurrentTimestamp()),
+				protocol.SERVER_ENTRY_SOURCE_EMBEDDED),
+			false)
+		if err != nil {
+			return errors.Trace(err)
+		}
+
+	} else {
+
+		serverEntries, err := protocol.DecodeServerEntryList(
+			embeddedServerEntryList,
+			common.TruncateTimestampToHour(common.GetCurrentTimestamp()),
+			protocol.SERVER_ENTRY_SOURCE_EMBEDDED)
+		if err != nil {
+			return errors.Trace(err)
+		}
+		err = StoreServerEntries(config, serverEntries, false)
+		if err != nil {
+			return errors.Trace(err)
+		}
+	}
+
+	return nil
+}
+
 // PromoteServerEntry sets the server affinity server entry ID to the
 // specified server entry IP address.
 func PromoteServerEntry(config *Config, ipAddress string) error {
@@ -1179,21 +1264,36 @@ func deleteServerEntryHelper(
 	return nil
 }
 
-func scanServerEntries(scanner func(*protocol.ServerEntry)) error {
+// ScanServerEntries iterates over all stored server entries, unmarshals each,
+// and passes it to callback for processing. If callback returns false, the
+// iteration is cancelled and an error is returned.
+//
+// ScanServerEntries may be slow to execute, particularly for older devices
+// and/or very large server lists. Callers should avoid blocking on
+// ScanServerEntries where possible; and use the cancel option to interrupt
+// scans that are no longer required.
+func ScanServerEntries(callback func(*protocol.ServerEntry) bool) error {
 	err := datastoreView(func(tx *datastoreTx) error {
+
 		bucket := tx.bucket(datastoreServerEntriesBucket)
 		cursor := bucket.cursor()
 		n := 0
+
 		for key, value := cursor.first(); key != nil; key, value = cursor.next() {
+
 			var serverEntry *protocol.ServerEntry
 			err := json.Unmarshal(value, &serverEntry)
 			if err != nil {
 				// In case of data corruption or a bug causing this condition,
 				// do not stop iterating.
-				NoticeWarning("scanServerEntries: %s", errors.Trace(err))
+				NoticeWarning("ScanServerEntries: %s", errors.Trace(err))
 				continue
 			}
-			scanner(serverEntry)
+
+			if !callback(serverEntry) {
+				cursor.close()
+				return errors.TraceNew("scan cancelled")
+			}
 
 			n += 1
 			if n == datastoreServerEntryFetchGCThreshold {
@@ -1212,96 +1312,52 @@ func scanServerEntries(scanner func(*protocol.ServerEntry)) error {
 	return nil
 }
 
-// CountServerEntries returns a count of stored server entries.
-func CountServerEntries() int {
-	count := 0
-	err := scanServerEntries(func(_ *protocol.ServerEntry) {
-		count += 1
-	})
-
-	if err != nil {
-		NoticeWarning("CountServerEntries failed: %s", err)
-		return 0
-	}
+// HasServerEntries returns a bool indicating if the data store contains at
+// least one server entry. This is a faster operation than CountServerEntries.
+// On failure, HasServerEntries returns false.
+func HasServerEntries() bool {
 
-	return count
-}
+	hasServerEntries := false
 
-// CountServerEntriesWithConstraints returns a count of stored server entries for
-// the specified region and tunnel protocol limits.
-func CountServerEntriesWithConstraints(
-	useUpstreamProxy bool,
-	region string,
-	constraints *protocolSelectionConstraints) (int, int) {
-
-	// When CountServerEntriesWithConstraints is called only
-	// limitTunnelProtocolState is fixed; excludeIntensive is transitory.
-	excludeIntensive := false
-
-	initialCount := 0
-	count := 0
-	err := scanServerEntries(func(serverEntry *protocol.ServerEntry) {
-		if region == "" || serverEntry.Region == region {
-
-			if constraints.isInitialCandidate(excludeIntensive, serverEntry) {
-				initialCount += 1
-			}
-
-			if constraints.isCandidate(excludeIntensive, serverEntry) {
-				count += 1
-			}
-
-		}
+	err := datastoreView(func(tx *datastoreTx) error {
+		bucket := tx.bucket(datastoreServerEntriesBucket)
+		cursor := bucket.cursor()
+		key, _ := cursor.first()
+		hasServerEntries = (key != nil)
+		cursor.close()
+		return nil
 	})
 
 	if err != nil {
-		NoticeWarning("CountServerEntriesWithConstraints failed: %s", err)
-		return 0, 0
+		NoticeWarning("HasServerEntries failed: %s", errors.Trace(err))
+		return false
 	}
 
-	return initialCount, count
+	return hasServerEntries
 }
 
-// ReportAvailableRegions prints a notice with the available egress regions.
-// When limitState has initial protocols, the available regions are limited
-// to those available for the initial protocols; or if limitState has general
-// limited protocols, the available regions are similarly limited.
-func ReportAvailableRegions(config *Config, constraints *protocolSelectionConstraints) {
-
-	// When ReportAvailableRegions is called only limitTunnelProtocolState is
-	// fixed; excludeIntensive is transitory.
-	excludeIntensive := false
-
-	regions := make(map[string]bool)
-	err := scanServerEntries(func(serverEntry *protocol.ServerEntry) {
+// CountServerEntries returns a count of stored server entries. On failure,
+// CountServerEntries returns 0.
+func CountServerEntries() int {
 
-		isCandidate := false
-		if constraints.hasInitialProtocols() {
-			isCandidate = constraints.isInitialCandidate(excludeIntensive, serverEntry)
-		} else {
-			isCandidate = constraints.isCandidate(excludeIntensive, serverEntry)
-		}
+	count := 0
 
-		if isCandidate {
-			regions[serverEntry.Region] = true
+	err := datastoreView(func(tx *datastoreTx) error {
+		bucket := tx.bucket(datastoreServerEntriesBucket)
+		cursor := bucket.cursor()
+		for key, _ := cursor.first(); key != nil; key, _ = cursor.next() {
+			count += 1
 		}
+		cursor.close()
+		return nil
 	})
 
 	if err != nil {
-		NoticeWarning("ReportAvailableRegions failed: %s", err)
-		return
-	}
-
-	regionList := make([]string, 0, len(regions))
-	for region := range regions {
-		// Some server entries do not have a region, but it makes no sense to return
-		// an empty string as an "available region".
-		if region != "" {
-			regionList = append(regionList, region)
-		}
+		NoticeWarning("CountServerEntries failed: %s", err)
+		return 0
 	}
 
-	NoticeAvailableEgressRegions(regionList)
+	return count
 }
 
 // SetSplitTunnelRoutes updates the cached routes data for

+ 1 - 1
psiphon/notice.go

@@ -417,7 +417,7 @@ func NoticeCandidateServers(
 // NoticeAvailableEgressRegions is what regions are available for egress from.
 // Consecutive reports of the same list of regions are suppressed.
 func NoticeAvailableEgressRegions(regions []string) {
-	sortedRegions := append([]string(nil), regions...)
+	sortedRegions := append([]string{}, regions...)
 	sort.Strings(sortedRegions)
 	repetitionMessage := strings.Join(sortedRegions, "")
 	outputRepetitiveNotice(