Просмотр исходного кода

Merge branch 'master' into staging-client

Rod Hynes 1 год назад
Родитель
Commit
8fcc4b9a51
46 измененных файлов с 3159 добавлено и 325 удалено
  1. 8 0
      MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java
  2. 7 0
      MobileLibrary/Android/SampleApps/TunneledWebView/app/build.gradle
  3. 9 0
      MobileLibrary/Android/SampleApps/TunneledWebView/app/src/main/java/ca/psiphon/tunneledwebview/MainActivity.java
  4. 8 0
      MobileLibrary/go-mobile/cmd/gomobile/bind_iosapp.go
  5. 6 0
      MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.h
  6. 12 0
      MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m
  7. 4 0
      MobileLibrary/iOS/SampleApps/TunneledWebRequest/TunneledWebRequest/AppDelegate.swift
  8. 16 0
      README.md
  9. 2 0
      go.mod
  10. 8 0
      go.sum
  11. 53 15
      psiphon/common/osl/osl.go
  12. 76 13
      psiphon/common/osl/osl_test.go
  13. 3 0
      psiphon/common/parameters/parameters.go
  14. 2 1
      psiphon/controller.go
  15. 3 2
      psiphon/notice.go
  16. 1 1
      psiphon/remoteServerList_test.go
  17. 1 19
      psiphon/server/api.go
  18. 166 0
      psiphon/server/discovery.go
  19. 182 0
      psiphon/server/discovery/classic.go
  20. 143 0
      psiphon/server/discovery/classic_test.go
  21. 114 0
      psiphon/server/discovery/consistent.go
  22. 67 0
      psiphon/server/discovery/consistent_test.go
  23. 254 0
      psiphon/server/discovery/discovery.go
  24. 374 0
      psiphon/server/discovery/discovery_test.go
  25. 2 1
      psiphon/server/geoip.go
  26. 51 126
      psiphon/server/psinet/psinet.go
  27. 20 128
      psiphon/server/psinet/psinet_test.go
  28. 188 16
      psiphon/server/server_test.go
  29. 38 0
      psiphon/server/services.go
  30. 9 3
      psiphon/server/tunnelServer.go
  31. 24 0
      vendor/github.com/Psiphon-Labs/consistent/.gitignore
  32. 1 0
      vendor/github.com/Psiphon-Labs/consistent/.travis.yml
  33. 21 0
      vendor/github.com/Psiphon-Labs/consistent/LICENSE
  34. 255 0
      vendor/github.com/Psiphon-Labs/consistent/README.md
  35. 397 0
      vendor/github.com/Psiphon-Labs/consistent/consistent.go
  36. 22 0
      vendor/github.com/cespare/xxhash/LICENSE.txt
  37. 50 0
      vendor/github.com/cespare/xxhash/README.md
  38. 14 0
      vendor/github.com/cespare/xxhash/rotate.go
  39. 14 0
      vendor/github.com/cespare/xxhash/rotate19.go
  40. 168 0
      vendor/github.com/cespare/xxhash/xxhash.go
  41. 12 0
      vendor/github.com/cespare/xxhash/xxhash_amd64.go
  42. 233 0
      vendor/github.com/cespare/xxhash/xxhash_amd64.s
  43. 75 0
      vendor/github.com/cespare/xxhash/xxhash_other.go
  44. 10 0
      vendor/github.com/cespare/xxhash/xxhash_safe.go
  45. 30 0
      vendor/github.com/cespare/xxhash/xxhash_unsafe.go
  46. 6 0
      vendor/modules.txt

+ 8 - 0
MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java

@@ -131,6 +131,11 @@ public class PsiphonTunnel {
         default public void onTrafficRateLimits(long upstreamBytesPerSecond, long downstreamBytesPerSecond) {}
         default public void onApplicationParameters(Object parameters) {}
         default public void onServerAlert(String reason, String subject, List<String> actionURLs) {}
+        /**
+         * Called when tunnel-core reports connected server region information.
+         * @param region The server region received.
+         */
+        default public void onConnectedServerRegion(String region) {}
         default public void onExiting() {}
     }
 
@@ -1079,6 +1084,9 @@ public class PsiphonTunnel {
                       enableUdpGwKeepalive();
                     }
                 }
+                // Also report the tunnel's egress region to the host service
+                mHostService.onConnectedServerRegion(
+                        notice.getJSONObject("data").getString("serverRegion"));
             } else if (noticeType.equals("ApplicationParameters")) {
                 mHostService.onApplicationParameters(
                     notice.getJSONObject("data").get("parameters"));

+ 7 - 0
MobileLibrary/Android/SampleApps/TunneledWebView/app/build.gradle

@@ -35,4 +35,11 @@ dependencies {
     implementation 'androidx.appcompat:appcompat:1.0.0'
     // always specify exact library version in your real project to avoid non-deterministic builds
     implementation 'ca.psiphon:psiphontunnel:2.+'
+
+    // For the latest version compile the library from source, see MobileLibrary/Android/README.md
+    // in the Psiphon-Labs/psiphon-tunnel-core repository, copy the ca.psiphon.aar artifact to
+    // the libs folder under the app module and replace the above line
+    // (e.g. replace implementation 'ca.psiphon:psiphontunnel:2.+')
+    // with the following line:
+    // implementation files('libs/ca.psiphon.aar')
 }

+ 9 - 0
MobileLibrary/Android/SampleApps/TunneledWebView/app/src/main/java/ca/psiphon/tunneledwebview/MainActivity.java

@@ -8,6 +8,8 @@ package ca.psiphon.tunneledwebview;
 import android.content.Context;
 import android.os.Bundle;
 import androidx.appcompat.app.AppCompatActivity;
+
+import android.util.Log;
 import android.webkit.WebSettings;
 import android.webkit.WebView;
 import android.widget.ArrayAdapter;
@@ -61,6 +63,7 @@ import ca.psiphon.PsiphonTunnel;
 public class MainActivity extends AppCompatActivity
         implements PsiphonTunnel.HostService {
 
+    private static final String TAG = "TunneledWebView";
     private ListView mListView;
     private WebView mWebView;
 
@@ -152,6 +155,7 @@ public class MainActivity extends AppCompatActivity
             public void run() {
                 mLogMessages.add(message);
                 mListView.setSelection(mLogMessages.getCount() - 1);
+                Log.d(TAG, "logMessage: " + message);
             }
         });
     }
@@ -249,6 +253,11 @@ public class MainActivity extends AppCompatActivity
         loadWebView();
     }
 
+    @Override
+    public void onConnectedServerRegion(String region) {
+        logMessage("connected server region: " + region);
+    }
+
     @Override
     public void onHomepage(String url) {
         logMessage("home page: " + url);

+ 8 - 0
MobileLibrary/go-mobile/cmd/gomobile/bind_iosapp.go

@@ -269,6 +269,14 @@ func goAppleBind(gobind string, pkgs []*packages.Package, targets []targetInfo)
 	xcframeworkArgs := []string{"-create-xcframework"}
 
 	for _, dir := range frameworkDirs {
+		// On macOS, a temporary directory starts with /var, which is a symbolic link to /private/var.
+		// And in gomobile, a temporary directory is usually used as a working directory.
+		// Unfortunately, xcodebuild in Xcode 15 seems to have a bug and might not be able to understand full paths with symbolic links.
+		// As a workaround, resolve the path with symbolic links by filepath.EvalSymlinks.
+		dir, err := filepath.EvalSymlinks(dir)
+		if err != nil {
+			return err
+		}
 		xcframeworkArgs = append(xcframeworkArgs, "-framework", dir)
 	}
 

+ 6 - 0
MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.h

@@ -299,6 +299,12 @@ WWAN or vice versa or VPN state changed
  */
 - (void)onApplicationParameters:(NSDictionary * _Nonnull)parameters;
 
+
+/*!
+ Called when tunnel-core reports connected server region information
+ @param region The server region received.
+ */
+- (void)onConnectedServerRegion:(NSString * _Nonnull)region;
 @end
 
 /*!

+ 12 - 0
MobileLibrary/iOS/PsiphonTunnel/PsiphonTunnel/PsiphonTunnel.m

@@ -1174,6 +1174,18 @@ typedef NS_ERROR_ENUM(PsiphonTunnelErrorDomain, PsiphonTunnelErrorCode) {
             });
         }
     }
+    else if ([noticeType isEqualToString:@"ActiveTunnel"]) {
+        id region = [notice valueForKeyPath:@"data.serverRegion"];
+        if (![region isKindOfClass:[NSString class]]) {
+            [self logMessage:[NSString stringWithFormat: @"ActiveTunnel notice missing data.serverRegion: %@", noticeJSON]];
+            return;
+        }
+        if ([self.tunneledAppDelegate respondsToSelector:@selector(onConnectedServerRegion:)]) {
+            dispatch_sync(self->callbackQueue, ^{
+                [self.tunneledAppDelegate onConnectedServerRegion:region];
+            });
+        }
+    }
     else if ([noticeType isEqualToString:@"InternalError"]) {
         internalError = TRUE;
     }

+ 4 - 0
MobileLibrary/iOS/SampleApps/TunneledWebRequest/TunneledWebRequest/AppDelegate.swift

@@ -365,4 +365,8 @@ extension AppDelegate: TunneledAppDelegate {
             self.httpProxyPort = port
         }
     }
+
+    func onConnectedServerRegion(_ region: String) {
+        NSLog("onConnectedServerRegion(%@)", region)
+    }
 }

+ 16 - 0
README.md

@@ -146,6 +146,22 @@ $ ./ConsoleClient -config ./client.config
 Use the local SOCKS proxy (port 1080) or HTTP proxy (port 8080) to tunnel traffic.
 
 
+Using Psiphon with Go modules
+--------------------------------------------------------------------------------
+
+The github.com/Psiphon-Labs/psiphon-tunnel-core Go module may be imported into
+other Go programs. Due to legacy release tags predating use of Go modules in
+this repository, neither `go get ...@latest` nor `go get ...@tag` are
+supported at this time. To use the psiphon-tunnel-core Go module and its
+dependencies, reference a specific commit, or reference the `staging-client`
+branch, which is the client-side, production-ready branch:
+
+```
+% go get github.com/Psiphon-Labs/psiphon-tunnel-core@staging-client
+go: added github.com/Psiphon-Labs/psiphon-tunnel-core v1.0.11-0.20240424194431-3612a5a6fb4c
+```
+
+
 Acknowledgements
 --------------------------------------------------------------------------------
 

+ 2 - 0
go.mod

@@ -9,11 +9,13 @@ replace github.com/pion/dtls/v2 => github.com/mingyech/dtls/v2 v2.0.0
 require (
 	github.com/Psiphon-Inc/rotate-safe-writer v0.0.0-20210303140923-464a7a37606e
 	github.com/Psiphon-Labs/bolt v0.0.0-20200624191537-23cedaef7ad7
+	github.com/Psiphon-Labs/consistent v0.0.0-20240322131436-20aaa4e05737
 	github.com/Psiphon-Labs/goptlib v0.0.0-20200406165125-c0e32a7a3464
 	github.com/Psiphon-Labs/psiphon-tls v0.0.0-20240424193802-52b2602ec60c
 	github.com/Psiphon-Labs/quic-go v0.0.0-20240424181006-45545f5e1536
 	github.com/armon/go-proxyproto v0.0.0-20180202201750-5b7edb60ff5f
 	github.com/bifurcation/mint v0.0.0-20180306135233-198357931e61
+	github.com/cespare/xxhash v1.1.0
 	github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9
 	github.com/cognusion/go-cache-lru v0.0.0-20170419142635-f73e2280ecea
 	github.com/deckarep/golang-set v0.0.0-20171013212420-1d4478f51bed

+ 8 - 0
go.sum

@@ -7,10 +7,14 @@ github.com/AndreasBriese/bbloom v0.0.0-20170702084017-28f7e881ca57/go.mod h1:bOv
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
 github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/Psiphon-Inc/rotate-safe-writer v0.0.0-20210303140923-464a7a37606e h1:NPfqIbzmijrl0VclX2t8eO5EPBhqe47LLGKpRrcVjXk=
 github.com/Psiphon-Inc/rotate-safe-writer v0.0.0-20210303140923-464a7a37606e/go.mod h1:ZdY5pBfat/WVzw3eXbIf7N1nZN0XD5H5+X8ZMDWbCs4=
 github.com/Psiphon-Labs/bolt v0.0.0-20200624191537-23cedaef7ad7 h1:Hx/NCZTnvoKZuIBwSmxE58KKoNLXIGG6hBJYN7pj9Ag=
 github.com/Psiphon-Labs/bolt v0.0.0-20200624191537-23cedaef7ad7/go.mod h1:alTtZBo3j4AWFvUrAH6F5ZaHcTj4G5Y01nHz8dkU6vU=
+github.com/Psiphon-Labs/consistent v0.0.0-20240322131436-20aaa4e05737 h1:QTMy7Uc2Xc7fz6O/Khy1xi0VBND13GqzLUE2mHw6HUU=
+github.com/Psiphon-Labs/consistent v0.0.0-20240322131436-20aaa4e05737/go.mod h1:Enj/Gszv2zCbuRbHbabmNvfO9EM+5kmaGj8CyjwNPlY=
 github.com/Psiphon-Labs/goptlib v0.0.0-20200406165125-c0e32a7a3464 h1:VmnMMMheFXwLV0noxYhbJbLmkV4iaVW3xNnj6xcCNHo=
 github.com/Psiphon-Labs/goptlib v0.0.0-20200406165125-c0e32a7a3464/go.mod h1:Pe5BqN2DdIdChorAXl6bDaQd/wghpCleJfid2NoSli0=
 github.com/Psiphon-Labs/psiphon-tls v0.0.0-20240305020009-09f917290799 h1:dHFQz6jeIr2RdtlioyGIdJw2UfKF7G+g7GYnQxhbgrk=
@@ -29,6 +33,8 @@ github.com/armon/go-proxyproto v0.0.0-20180202201750-5b7edb60ff5f h1:SaJ6yqg936T
 github.com/armon/go-proxyproto v0.0.0-20180202201750-5b7edb60ff5f/go.mod h1:QmP9hvJ91BbJmGVGSbutW19IC0Q9phDCLGaomwTJbgU=
 github.com/bifurcation/mint v0.0.0-20180306135233-198357931e61 h1:BU+NxuoaYPIvvp8NNkNlLr8aA0utGyuunf4Q3LJ0bh0=
 github.com/bifurcation/mint v0.0.0-20180306135233-198357931e61/go.mod h1:zVt7zX3K/aDCk9Tj+VM7YymsX66ERvzCJzw8rFCX2JU=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9 h1:a1zrFsLFac2xoM6zG1u72DWJwZG3ayttYLfmLbxVETk=
@@ -201,6 +207,8 @@ github.com/sergeyfrolov/bsbuffer v0.0.0-20180903213811-94e85abb8507 h1:ML7ZNtcln
 github.com/sergeyfrolov/bsbuffer v0.0.0-20180903213811-94e85abb8507/go.mod h1:DbI1gxrXI2jRGw7XGEUZQOOMd6PsnKzRrCKabvvMrwM=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=

+ 53 - 15
psiphon/common/osl/osl.go

@@ -103,7 +103,7 @@ type Scheme struct {
 	// SeedSpecs is the set of different client network activity patterns
 	// that will result in issuing SLOKs. For a given time period, a distinct
 	// SLOK is issued for each SeedSpec.
-	// Duplicate subnets may appear in multiple SeedSpecs.
+	// Duplicate subnets and ASNs may appear in multiple SeedSpecs.
 	SeedSpecs []*SeedSpec
 
 	// SeedSpecThreshold is the threshold scheme for combining SLOKs to
@@ -135,7 +135,7 @@ type Scheme struct {
 	//   SeedPeriodNanoseconds = 100,000,000 = 100 milliseconds
 	//   SeedPeriodKeySplits = [{10, 7}, {60, 5}]
 	//
-	//   In these scheme, up to 3 distinct SLOKs, one per spec, are issued
+	//   In this scheme, up to 3 distinct SLOKs, one per spec, are issued
 	//   every 100 milliseconds.
 	//
 	//   Distinct OSLs are paved for every minute (60 seconds). Each OSL
@@ -156,15 +156,16 @@ type Scheme struct {
 // SeedSpec defines a client traffic pattern that results in a seeded SLOK.
 // For each time period, a unique SLOK is issued to a client that meets the
 // traffic levels specified in Targets. All upstream port forward traffic to
-// UpstreamSubnets is counted towards the targets.
+// UpstreamSubnets and UpstreamASNs is counted towards the targets.
 //
 // ID is a SLOK key derivation component and must be 32 random bytes, base64
-// encoded. UpstreamSubnets is a list of CIDRs. Description is not used; it's
-// for JSON config file comments.
+// encoded. UpstreamSubnets is a list of CIDRs. UpstreamASNs is a list of
+// ASNs. Description is not used; it's for JSON config file comments.
 type SeedSpec struct {
 	Description     string
 	ID              []byte
 	UpstreamSubnets []string
+	UpstreamASNs    []string
 	Targets         TrafficValues
 }
 
@@ -213,7 +214,7 @@ type ClientSeedProgress struct {
 
 // ClientSeedPortForward map a client port forward, which is relaying
 // traffic to a specific upstream address, to all seed state progress
-// counters for SeedSpecs with subnets containing the upstream address.
+// counters for SeedSpecs with subnets and ASNs containing the upstream address.
 // As traffic is relayed through the port forwards, the bytes transferred
 // and duration count towards the progress of these SeedSpecs and
 // associated SLOKs.
@@ -342,6 +343,16 @@ func LoadConfig(configJSON []byte) (*Config, error) {
 			}
 
 			scheme.subnetLookups[index] = subnetLookup
+
+			// Ensure there are no duplicates.
+			ASNs := make(map[string]struct{}, len(seedSpec.UpstreamASNs))
+			for _, ASN := range seedSpec.UpstreamASNs {
+				if _, ok := ASNs[ASN]; ok {
+					return nil, errors.Tracef("invalid upstream ASNs, duplicate ASN: %s", ASN)
+				} else {
+					ASNs[ASN] = struct{}{}
+				}
+			}
 		}
 
 		if !isValidShamirSplit(len(scheme.SeedSpecs), scheme.SeedSpecThreshold) {
@@ -450,13 +461,14 @@ func (state *ClientSeedState) Resume(
 // NewClientSeedPortForward creates a new client port forward
 // traffic progress tracker. Port forward progress reported to the
 // ClientSeedPortForward is added to seed state progress for all
-// seed specs containing upstreamIPAddress in their subnets.
+// seed specs containing upstreamIPAddress in their subnets or ASNs.
 // The return value will be nil when activity for upstreamIPAddress
 // does not count towards any progress.
 // NewClientSeedPortForward may be invoked concurrently by many
 // psiphond port forward establishment goroutines.
 func (state *ClientSeedState) NewClientSeedPortForward(
-	upstreamIPAddress net.IP) *ClientSeedPortForward {
+	upstreamIPAddress net.IP,
+	lookupASN func(net.IP) string) *ClientSeedPortForward {
 
 	// Concurrency: access to ClientSeedState is unsynchronized
 	// but references only read-only fields.
@@ -467,18 +479,46 @@ func (state *ClientSeedState) NewClientSeedPortForward(
 
 	var progressReferences []progressReference
 
-	// Determine which seed spec subnets contain upstreamIPAddress
+	// Determine which seed spec subnets and ASNs contain upstreamIPAddress
 	// and point to the progress for each. When progress is reported,
 	// it is added directly to all of these TrafficValues instances.
-	// Assumes state.progress entries correspond 1-to-1 with
+	// Assumes state.seedProgress entries correspond 1-to-1 with
 	// state.scheme.subnetLookups.
 	// Note: this implementation assumes a small number of schemes and
 	// seed specs. For larger numbers, instead of N SubnetLookups, create
 	// a single SubnetLookup which returns, for a given IP address, all
 	// matching subnets and associated seed specs.
 	for seedProgressIndex, seedProgress := range state.seedProgress {
-		for trafficProgressIndex, subnetLookup := range seedProgress.scheme.subnetLookups {
-			if subnetLookup.ContainsIPAddress(upstreamIPAddress) {
+
+		var upstreamASN string
+		var upstreamASNSet bool
+
+		for trafficProgressIndex, seedSpec := range seedProgress.scheme.SeedSpecs {
+
+			matchesSeedSpec := false
+
+			// First check for subnet match before performing more expensive
+			// check for ASN match.
+			subnetLookup := seedProgress.scheme.subnetLookups[trafficProgressIndex]
+			matchesSeedSpec = subnetLookup.ContainsIPAddress(upstreamIPAddress)
+
+			if !matchesSeedSpec && lookupASN != nil {
+				// No subnet match. Check for ASN match.
+				if len(seedSpec.UpstreamASNs) > 0 {
+					// Lookup ASN on demand and only once.
+					if !upstreamASNSet {
+						upstreamASN = lookupASN(upstreamIPAddress)
+						upstreamASNSet = true
+					}
+					// TODO: use a map for faster lookups when the number of
+					// string values to compare against exceeds a threshold
+					// where benchmarks show maps are faster than looping
+					// through a string slice.
+					matchesSeedSpec = common.Contains(seedSpec.UpstreamASNs, upstreamASN)
+				}
+			}
+
+			if matchesSeedSpec {
 				progressReferences = append(
 					progressReferences,
 					progressReference{
@@ -671,9 +711,7 @@ func (state *ClientSeedState) GetSeedPayload() *SeedPayload {
 	state.issueSLOKs()
 
 	sloks := make([]*SLOK, len(state.payloadSLOKs))
-	for index, slok := range state.payloadSLOKs {
-		sloks[index] = slok
-	}
+	copy(sloks, state.payloadSLOKs)
 
 	return &SeedPayload{
 		SLOKs: sloks,

+ 76 - 13
psiphon/common/osl/osl_test.go

@@ -62,6 +62,7 @@ func TestOSL(t *testing.T) {
           "Description": "spec2",
           "ID" : "qvpIcORLE2Pi5TZmqRtVkEp+OKov0MhfsYPLNV7FYtI=",
           "UpstreamSubnets" : ["192.168.0.0/16", "10.0.0.0/8"],
+          "UpstreamASNs" : ["0000"],
           "Targets" :
           {
               "BytesRead" : 10,
@@ -171,11 +172,16 @@ func TestOSL(t *testing.T) {
 		t.Fatalf("LoadConfig failed: %s", err)
 	}
 
+	portForwardASN := new(string)
+	lookupASN := func(net.IP) string {
+		return *portForwardASN
+	}
+
 	t.Run("ineligible client, sufficient transfer", func(t *testing.T) {
 
 		clientSeedState := config.NewClientSeedState("US", "C5E8D2EDFD093B50D8D65CF59D0263CA", nil)
 
-		seedPortForward := clientSeedState.NewClientSeedPortForward(net.ParseIP("192.168.0.1"))
+		seedPortForward := clientSeedState.NewClientSeedPortForward(net.ParseIP("192.168.0.1"), lookupASN)
 
 		if seedPortForward != nil {
 			t.Fatalf("expected nil client seed port forward")
@@ -195,7 +201,7 @@ func TestOSL(t *testing.T) {
 
 	t.Run("eligible client, insufficient transfer", func(t *testing.T) {
 
-		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1")).UpdateProgress(5, 5, 5)
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1"), lookupASN).UpdateProgress(5, 5, 5)
 
 		if len(clientSeedState.GetSeedPayload().SLOKs) != 0 {
 			t.Fatalf("expected 0 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
@@ -212,18 +218,18 @@ func TestOSL(t *testing.T) {
 
 		rolloverToNextSLOKTime()
 
-		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1")).UpdateProgress(5, 5, 5)
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1"), lookupASN).UpdateProgress(5, 5, 5)
 
 		if len(clientSeedState.GetSeedPayload().SLOKs) != 0 {
 			t.Fatalf("expected 0 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
 		}
 	})
 
-	t.Run("eligible client, sufficient transfer, one port forward", func(t *testing.T) {
+	t.Run("eligible client, sufficient transfer, one port forward, match by ip", func(t *testing.T) {
 
 		rolloverToNextSLOKTime()
 
-		clientSeedPortForward := clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1"))
+		clientSeedPortForward := clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1"), lookupASN)
 
 		clientSeedPortForward.UpdateProgress(5, 5, 5)
 
@@ -240,13 +246,19 @@ func TestOSL(t *testing.T) {
 		}
 	})
 
-	t.Run("eligible client, sufficient transfer, multiple port forwards", func(t *testing.T) {
+	t.Run("eligible client, sufficient transfer, one port forward, match by asn", func(t *testing.T) {
 
 		rolloverToNextSLOKTime()
 
-		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1")).UpdateProgress(5, 5, 5)
+		*portForwardASN = "0000"
+
+		clientSeedPortForward := clientSeedState.NewClientSeedPortForward(net.ParseIP("11.0.0.1"), lookupASN)
+
+		clientSeedPortForward.UpdateProgress(5, 5, 5)
+
+		clientSeedPortForward.UpdateProgress(5, 5, 5)
 
-		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1")).UpdateProgress(5, 5, 5)
+		*portForwardASN = ""
 
 		select {
 		case <-signalIssueSLOKs:
@@ -260,13 +272,44 @@ func TestOSL(t *testing.T) {
 		}
 	})
 
-	t.Run("eligible client, sufficient transfer multiple SLOKs", func(t *testing.T) {
+	t.Run("eligible client, sufficient transfer, one port forward, match by ip and asn", func(t *testing.T) {
+
+		rolloverToNextSLOKTime()
+
+		*portForwardASN = "0000"
+
+		clientSeedPortForward := clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1"), lookupASN)
+
+		clientSeedPortForward.UpdateProgress(5, 5, 5)
+
+		// Check that progress is not double counted.
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 2 {
+			t.Fatalf("expected 2 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+
+		clientSeedPortForward.UpdateProgress(5, 5, 5)
+
+		*portForwardASN = ""
+
+		select {
+		case <-signalIssueSLOKs:
+		default:
+			t.Fatalf("expected issue SLOKs signal")
+		}
+
+		// Expect 3 SLOKS: 1 new, and 2 remaining in payload.
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 3 {
+			t.Fatalf("expected 3 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+	})
+
+	t.Run("eligible client, sufficient transfer, multiple port forwards", func(t *testing.T) {
 
 		rolloverToNextSLOKTime()
 
-		clientSeedState.NewClientSeedPortForward(net.ParseIP("192.168.0.1")).UpdateProgress(5, 5, 5)
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1"), lookupASN).UpdateProgress(5, 5, 5)
 
-		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1")).UpdateProgress(5, 5, 5)
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1"), lookupASN).UpdateProgress(5, 5, 5)
 
 		select {
 		case <-signalIssueSLOKs:
@@ -274,12 +317,32 @@ func TestOSL(t *testing.T) {
 			t.Fatalf("expected issue SLOKs signal")
 		}
 
-		// Expect 4 SLOKS: 2 new, and 2 remaining in payload.
+		// Expect 4 SLOKS: 1 new, and 3 remaining in payload.
 		if len(clientSeedState.GetSeedPayload().SLOKs) != 4 {
 			t.Fatalf("expected 4 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
 		}
 	})
 
+	t.Run("eligible client, sufficient transfer multiple SLOKs", func(t *testing.T) {
+
+		rolloverToNextSLOKTime()
+
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("192.168.0.1"), lookupASN).UpdateProgress(5, 5, 5)
+
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1"), lookupASN).UpdateProgress(5, 5, 5)
+
+		select {
+		case <-signalIssueSLOKs:
+		default:
+			t.Fatalf("expected issue SLOKs signal")
+		}
+
+		// Expect 6 SLOKS: 2 new, and 4 remaining in payload.
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 6 {
+			t.Fatalf("expected 6 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+	})
+
 	t.Run("clear payload", func(t *testing.T) {
 		clientSeedState.ClearSeedPayload()
 
@@ -305,7 +368,7 @@ func TestOSL(t *testing.T) {
 
 		clientSeedState := config.NewClientSeedState("US", "B4A780E67695595FA486E9B900EA7335", nil)
 
-		clientSeedPortForward := clientSeedState.NewClientSeedPortForward(net.ParseIP("192.168.0.1"))
+		clientSeedPortForward := clientSeedState.NewClientSeedPortForward(net.ParseIP("192.168.0.1"), lookupASN)
 
 		clientSeedPortForward.UpdateProgress(10, 10, 10)
 

+ 3 - 0
psiphon/common/parameters/parameters.go

@@ -366,6 +366,7 @@ const (
 	SteeringIPCacheTTL                               = "SteeringIPCacheTTL"
 	SteeringIPCacheMaxEntries                        = "SteeringIPCacheMaxEntries"
 	SteeringIPProbability                            = "SteeringIPProbability"
+	ServerDiscoveryStrategy                          = "ServerDiscoveryStrategy"
 
 	// Retired parameters
 
@@ -782,6 +783,8 @@ var defaultParameters = map[string]struct {
 	SteeringIPCacheTTL:        {value: 1 * time.Hour, minimum: time.Duration(0)},
 	SteeringIPCacheMaxEntries: {value: 65536, minimum: 0},
 	SteeringIPProbability:     {value: 1.0, minimum: 0.0},
+
+	ServerDiscoveryStrategy: {value: "", flags: serverSideOnly},
 }
 
 // IsServerSideOnly indicates if the parameter specified by name is used

+ 2 - 1
psiphon/controller.go

@@ -1001,7 +1001,8 @@ loop:
 			NoticeActiveTunnel(
 				connectedTunnel.dialParams.ServerEntry.GetDiagnosticID(),
 				connectedTunnel.dialParams.TunnelProtocol,
-				connectedTunnel.dialParams.ServerEntry.SupportsSSHAPIRequests())
+				connectedTunnel.dialParams.ServerEntry.SupportsSSHAPIRequests(),
+				connectedTunnel.dialParams.ServerEntry.Region)
 
 			if isFirstTunnel {
 

+ 3 - 2
psiphon/notice.go

@@ -678,12 +678,13 @@ func NoticeRequestedTactics(dialParams *DialParameters) {
 }
 
 // NoticeActiveTunnel is a successful connection that is used as an active tunnel for port forwarding
-func NoticeActiveTunnel(diagnosticID, protocol string, isTCS bool) {
+func NoticeActiveTunnel(diagnosticID, protocol string, isTCS bool, serverRegion string) {
 	singletonNoticeLogger.outputNotice(
 		"ActiveTunnel", noticeIsDiagnostic,
 		"diagnosticID", diagnosticID,
 		"protocol", protocol,
-		"isTCS", isTCS)
+		"isTCS", isTCS,
+		"serverRegion", serverRegion)
 }
 
 // NoticeSocksProxyPortInUse is a failure to use the configured LocalSocksProxyPort

+ 1 - 1
psiphon/remoteServerList_test.go

@@ -230,7 +230,7 @@ func testObfuscatedRemoteServerLists(t *testing.T, omitMD5Sums bool) {
 	}
 
 	seedState := oslConfig.NewClientSeedState("", propagationChannelID, nil)
-	seedPortForward := seedState.NewClientSeedPortForward(net.ParseIP("0.0.0.0"))
+	seedPortForward := seedState.NewClientSeedPortForward(net.ParseIP("0.0.0.0"), nil)
 	seedPortForward.UpdateProgress(1, 1, 1)
 	payload := seedState.GetSeedPayload()
 	if len(payload.SLOKs) != 1 {

+ 1 - 19
psiphon/server/api.go

@@ -20,8 +20,6 @@
 package server
 
 import (
-	"crypto/hmac"
-	"crypto/sha256"
 	"crypto/subtle"
 	"encoding/base64"
 	"encoding/json"
@@ -348,8 +346,7 @@ func handshakeAPIRequestHandler(
 			return nil, errors.TraceNew("missing client IP")
 		}
 
-		encodedServerList = db.DiscoverServers(
-			calculateDiscoveryValue(support.Config.DiscoveryValueHMACKey, clientIP))
+		encodedServerList = support.discovery.DiscoverServers(clientIP)
 	}
 
 	// When the client indicates that it used an out-of-date server entry for
@@ -413,21 +410,6 @@ func handshakeAPIRequestHandler(
 	return responsePayload, nil
 }
 
-// calculateDiscoveryValue derives a value from the client IP address to be
-// used as input in the server discovery algorithm.
-// See https://github.com/Psiphon-Inc/psiphon-automation/tree/master/Automation/psi_ops_discovery.py
-// for full details.
-func calculateDiscoveryValue(discoveryValueHMACKey string, ipAddress net.IP) int {
-	// From: psi_ops_discovery.calculate_ip_address_strategy_value:
-	//     # Mix bits from all octets of the client IP address to determine the
-	//     # bucket. An HMAC is used to prevent pre-calculation of buckets for IPs.
-	//     return ord(hmac.new(HMAC_KEY, ip_address, hashlib.sha256).digest()[0])
-	// TODO: use 3-octet algorithm?
-	hash := hmac.New(sha256.New, []byte(discoveryValueHMACKey))
-	hash.Write([]byte(ipAddress.String()))
-	return int(hash.Sum(nil)[0])
-}
-
 // uniqueUserParams are the connected request parameters which are logged for
 // unique_user events.
 var uniqueUserParams = append(

+ 166 - 0
psiphon/server/discovery.go

@@ -0,0 +1,166 @@
+/*
+ * Copyright (c) 2024, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package server
+
+import (
+	"net"
+	"sync"
+
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/parameters"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/server/discovery"
+)
+
+const (
+	DISCOVERY_STRATEGY_CLASSIC    = "classic"
+	DISCOVERY_STRATEGY_CONSISTENT = "consistent"
+)
+
+// Discovery handles the discovery step of the "handshake" API request. It's
+// safe for concurrent usage.
+type Discovery struct {
+	support         *SupportServices
+	currentStrategy string
+	discovery       *discovery.Discovery
+
+	sync.RWMutex
+}
+
+func makeDiscovery(support *SupportServices) *Discovery {
+	return &Discovery{
+		support: support,
+	}
+}
+
+// Start starts discovery.
+func (d *Discovery) Start() error {
+
+	err := d.reload(false)
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	return nil
+}
+
+// reload reinitializes the underlying discovery component. If reloadedTactics
+// is set and the target discovery strategy has not changed, then the
+// underlying discovery component is not reinitialized.
+func (d *Discovery) reload(reloadedTactics bool) error {
+
+	// Determine which discovery strategy to use. Assumes no GeoIP targeting
+	// for the ServerDiscoveryStrategy tactic.
+
+	p, err := d.support.ServerTacticsParametersCache.Get(NewGeoIPData())
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	strategy := ""
+	if !p.IsNil() {
+		strategy = p.String(parameters.ServerDiscoveryStrategy)
+	}
+	if strategy == "" {
+		// No tactics are configured; default to consistent discovery.
+		strategy = DISCOVERY_STRATEGY_CONSISTENT
+	}
+
+	// Do not reinitialize underlying discovery component if only tactics have
+	// been reloaded and the discovery strategy has not changed.
+	if reloadedTactics && d.support.discovery.currentStrategy == strategy {
+		return nil
+	}
+
+	// Initialize new discovery strategy.
+	// TODO: do not reinitialize discovery if the discovery strategy and
+	// discovery servers have not changed.
+	var discoveryStrategy discovery.DiscoveryStrategy
+	if strategy == DISCOVERY_STRATEGY_CONSISTENT {
+		discoveryStrategy, err = discovery.NewConsistentHashingDiscovery()
+		if err != nil {
+			return errors.Trace(err)
+		}
+	} else if strategy == DISCOVERY_STRATEGY_CLASSIC {
+		discoveryStrategy, err = discovery.NewClassicDiscovery(
+			d.support.Config.DiscoveryValueHMACKey)
+		if err != nil {
+			return errors.Trace(err)
+		}
+	} else {
+		return errors.Tracef("unknown strategy %s", strategy)
+	}
+
+	// Initialize and set underlying discovery component. Replaces old
+	// component if discovery is already initialized.
+
+	oldDiscovery := d.discovery
+
+	discovery := discovery.MakeDiscovery(
+		d.support.PsinetDatabase.GetDiscoveryServers(),
+		discoveryStrategy)
+
+	discovery.Start()
+
+	d.Lock()
+
+	d.discovery = discovery
+	d.currentStrategy = strategy
+
+	d.Unlock()
+
+	// Ensure resources used by previous underlying discovery component are
+	// cleaned up.
+	// Note: a more efficient impementation would not recreate the underlying
+	// discovery instance if the discovery strategy has not changed, but
+	// instead would update the underlying set of discovery servers if the set
+	// of discovery servers has changed.
+	if oldDiscovery != nil {
+		oldDiscovery.Stop()
+	}
+
+	log.WithTraceFields(
+		LogFields{"discovery_strategy": strategy}).Infof("reloaded discovery")
+
+	return nil
+}
+
+// Stop stops discovery and cleans up underlying resources.
+func (d *Discovery) Stop() {
+	d.discovery.Stop()
+}
+
+// DiscoverServers selects new encoded server entries to be "discovered" by
+// the client, using the client's IP address as the input into the discovery
+// algorithm.
+func (d *Discovery) DiscoverServers(clientIP net.IP) []string {
+
+	d.RLock()
+	defer d.RUnlock()
+
+	servers := d.discovery.SelectServers(clientIP)
+
+	encodedServerEntries := make([]string, 0)
+
+	for _, server := range servers {
+		encodedServerEntries = append(encodedServerEntries, server.EncodedServerEntry)
+	}
+
+	return encodedServerEntries
+}

+ 182 - 0
psiphon/server/discovery/classic.go

@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2024, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package discovery
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"math"
+	"net"
+	"sync"
+	"time"
+
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/server/psinet"
+)
+
+type classicDiscovery struct {
+	clk                   clock
+	buckets               [][]*psinet.DiscoveryServer
+	discoveryValueHMACKey string
+
+	sync.RWMutex
+}
+
+func NewClassicDiscovery(discoveryValueHMACKey string) (*classicDiscovery, error) {
+	return newClassicDiscovery(discoveryValueHMACKey, realClock{})
+}
+
+func newClassicDiscovery(discoveryValueHMACKey string, clk clock) (*classicDiscovery, error) {
+	return &classicDiscovery{
+		clk:                   clk,
+		discoveryValueHMACKey: discoveryValueHMACKey,
+	}, nil
+}
+
+func (c *classicDiscovery) serversChanged(servers []*psinet.DiscoveryServer) {
+
+	var buckets [][]*psinet.DiscoveryServer
+	if len(servers) != 0 {
+		// Divide servers into buckets. The bucket count is chosen such that the number
+		// of buckets and the number of items in each bucket are close (using sqrt).
+		// IP address selects the bucket, time selects the item in the bucket.
+		bucketCount := calculateBucketCount(len(servers))
+		buckets = bucketizeServerList(servers, bucketCount)
+	}
+	c.RWMutex.Lock()
+	c.buckets = buckets
+	c.RWMutex.Unlock()
+}
+
+func calculateDiscoveryValue(discoveryValueHMACKey string, ipAddress net.IP) int {
+	// From: psi_ops_discovery.calculate_ip_address_strategy_value:
+	//     # Mix bits from all octets of the client IP address to determine the
+	//     # bucket. An HMAC is used to prevent pre-calculation of buckets for IPs.
+	//     return ord(hmac.new(HMAC_KEY, ip_address, hashlib.sha256).digest()[0])
+	// TODO: use 3-octet algorithm?
+	hash := hmac.New(sha256.New, []byte(discoveryValueHMACKey))
+	hash.Write([]byte(ipAddress.String()))
+	return int(hash.Sum(nil)[0])
+}
+
+func (c *classicDiscovery) selectServers(clientIP net.IP) []*psinet.DiscoveryServer {
+	discoveryValue := calculateDiscoveryValue(c.discoveryValueHMACKey, clientIP)
+	return c.discoverServers(discoveryValue)
+}
+
+// discoverServers selects new encoded server entries to be "discovered" by
+// the client, using the discoveryValue -- a function of the client's IP
+// address -- as the input into the discovery algorithm.
+func (c *classicDiscovery) discoverServers(discoveryValue int) []*psinet.DiscoveryServer {
+
+	discoveryDate := c.clk.Now().UTC()
+
+	c.RWMutex.RLock()
+	buckets := c.buckets
+	c.RWMutex.RUnlock()
+
+	if len(buckets) == 0 {
+		return nil
+	}
+
+	timeInSeconds := int(discoveryDate.Unix())
+	servers := selectServers(buckets, timeInSeconds, discoveryValue, discoveryDate)
+
+	return servers
+}
+
+// Combine client IP address and time-of-day strategies to give out different
+// discovery servers to different clients. The aim is to achieve defense against
+// enumerability. We also want to achieve a degree of load balancing clients
+// and these strategies are expected to have reasonably random distribution,
+// even for a cluster of users coming from the same network.
+//
+// We only select one server: multiple results makes enumeration easier; the
+// strategies have a built-in load balancing effect; and date range discoverability
+// means a client will actually learn more servers later even if they happen to
+// always pick the same result at this point.
+//
+// This is a blended strategy: as long as there are enough servers to pick from,
+// both aspects determine which server is selected. IP address is given the
+// priority: if there are only a couple of servers, for example, IP address alone
+// determines the outcome.
+func selectServers(
+	buckets [][]*psinet.DiscoveryServer,
+	timeInSeconds,
+	discoveryValue int,
+	discoveryDate time.Time) []*psinet.DiscoveryServer {
+
+	TIME_GRANULARITY := 3600
+
+	// Time truncated to an hour
+	timeStrategyValue := timeInSeconds / TIME_GRANULARITY
+
+	// NOTE: this code assumes that the range of possible timeStrategyValues
+	// and discoveryValues are sufficient to index to all bucket items.
+
+	if len(buckets) == 0 {
+		return nil
+	}
+
+	bucket := buckets[discoveryValue%len(buckets)]
+
+	if len(bucket) == 0 {
+		return nil
+	}
+	server := bucket[timeStrategyValue%len(bucket)]
+
+	// Double check that server is discoverable at this time.
+	if discoveryDate.Before(server.DiscoveryDateRange[0]) ||
+		!discoveryDate.Before(server.DiscoveryDateRange[1]) {
+		return nil
+	}
+
+	serverList := make([]*psinet.DiscoveryServer, 1)
+	serverList[0] = server
+
+	return serverList
+}
+
+// Number of buckets such that first strategy picks among about the same number
+// of choices as the second strategy. Gives an edge to the "outer" strategy.
+func calculateBucketCount(length int) int {
+	return int(math.Ceil(math.Sqrt(float64(length))))
+}
+
+// bucketizeServerList creates nearly equal sized slices of the input list.
+func bucketizeServerList(servers []*psinet.DiscoveryServer, bucketCount int) [][]*psinet.DiscoveryServer {
+
+	// This code creates the same partitions as legacy servers:
+	// https://github.com/Psiphon-Inc/psiphon-automation/blob/685f91a85bcdb33a75a200d936eadcb0686eadd7/Automation/psi_ops_discovery.py
+	//
+	// Both use the same algorithm from:
+	// http://stackoverflow.com/questions/2659900/python-slicing-a-list-into-n-nearly-equal-length-partitions
+
+	buckets := make([][]*psinet.DiscoveryServer, bucketCount)
+
+	division := float64(len(servers)) / float64(bucketCount)
+
+	for i := 0; i < bucketCount; i++ {
+		start := int((division * float64(i)) + 0.5)
+		end := int((division * (float64(i) + 1)) + 0.5)
+		buckets[i] = servers[start:end]
+	}
+
+	return buckets
+}

+ 143 - 0
psiphon/server/discovery/classic_test.go

@@ -0,0 +1,143 @@
+/*
+ * Copyright (c) 2024, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package discovery
+
+import (
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/server/psinet"
+)
+
+func TestDiscoveryBuckets(t *testing.T) {
+
+	checkBuckets := func(buckets [][]*psinet.DiscoveryServer, expectedServerEntries [][]int) {
+		if len(buckets) != len(expectedServerEntries) {
+			t.Errorf(
+				"unexpected bucket count: got %d expected %d",
+				len(buckets), len(expectedServerEntries))
+			return
+		}
+		for i := 0; i < len(buckets); i++ {
+			if len(buckets[i]) != len(expectedServerEntries[i]) {
+				t.Errorf(
+					"unexpected bucket %d size: got %d expected %d",
+					i, len(buckets[i]), len(expectedServerEntries[i]))
+				return
+			}
+			for j := 0; j < len(buckets[i]); j++ {
+				expectedServerEntry := strconv.Itoa(expectedServerEntries[i][j])
+				if buckets[i][j].EncodedServerEntry != expectedServerEntry {
+					t.Errorf(
+						"unexpected bucket %d item %d: got %s expected %s",
+						i, j, buckets[i][j].EncodedServerEntry, expectedServerEntry)
+					return
+				}
+			}
+		}
+	}
+
+	// Partition test cases from:
+	// http://stackoverflow.com/questions/2659900/python-slicing-a-list-into-n-nearly-equal-length-partitions
+
+	servers := make([]*psinet.DiscoveryServer, 0)
+	for i := 0; i < 105; i++ {
+		servers = append(servers, &psinet.DiscoveryServer{
+			EncodedServerEntry: strconv.Itoa(i),
+			DiscoveryDateRange: []time.Time{time.Time{}, time.Now()},
+		})
+	}
+
+	t.Run("5 servers, 5 buckets", func(t *testing.T) {
+		checkBuckets(
+			bucketizeServerList(servers[0:5], 5),
+			[][]int{{0}, {1}, {2}, {3}, {4}})
+	})
+
+	t.Run("5 servers, 2 buckets", func(t *testing.T) {
+		checkBuckets(
+			bucketizeServerList(servers[0:5], 2),
+			[][]int{{0, 1, 2}, {3, 4}})
+	})
+
+	t.Run("5 servers, 3 buckets", func(t *testing.T) {
+		checkBuckets(
+			bucketizeServerList(servers[0:5], 3),
+			[][]int{{0, 1}, {2}, {3, 4}})
+	})
+
+	t.Run("105 servers, 10 buckets", func(t *testing.T) {
+		checkBuckets(
+			bucketizeServerList(servers, 10),
+			[][]int{
+				{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
+				{11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
+				{21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+				{32, 33, 34, 35, 36, 37, 38, 39, 40, 41},
+				{42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52},
+				{53, 54, 55, 56, 57, 58, 59, 60, 61, 62},
+				{63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73},
+				{74, 75, 76, 77, 78, 79, 80, 81, 82, 83},
+				{84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94},
+				{95, 96, 97, 98, 99, 100, 101, 102, 103, 104},
+			})
+	})
+
+	t.Run("repeatedly discover with fixed IP address", func(t *testing.T) {
+
+		// For a fixed IP address value, only one bucket should be used; with enough
+		// iterations, all and only the items in a single bucket should be discovered.
+
+		discoveredServers := make(map[string]bool)
+
+		// discoveryValue is derived from the client's IP address and indexes the bucket;
+		// a value of 0 always maps to the first bucket.
+		discoveryValue := 0
+
+		for i := 0; i < 1000; i++ {
+
+			buckets := bucketizeServerList(servers, calculateBucketCount(len(servers)))
+
+			for _, server := range selectServers(buckets, i*int(time.Hour/time.Second), discoveryValue, time.Time{}) {
+				discoveredServers[server.EncodedServerEntry] = true
+			}
+		}
+
+		bucketCount := calculateBucketCount(len(servers))
+
+		buckets := bucketizeServerList(servers, bucketCount)
+
+		if len(buckets[0]) != len(discoveredServers) {
+			t.Errorf(
+				"unexpected discovered server count: got %d expected %d",
+				len(discoveredServers), len(buckets[0]))
+			return
+		}
+
+		for _, bucketServer := range buckets[0] {
+			if _, ok := discoveredServers[bucketServer.EncodedServerEntry]; !ok {
+				t.Errorf("unexpected missing discovery server: %s", bucketServer.EncodedServerEntry)
+				return
+			}
+		}
+	})
+
+}

+ 114 - 0
psiphon/server/discovery/consistent.go

@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2024, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package discovery
+
+import (
+	"net"
+	"sync"
+
+	"github.com/Psiphon-Labs/consistent"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/server/psinet"
+	"github.com/cespare/xxhash"
+)
+
+type hasher struct{}
+
+// consistent.Hasher implementation.
+func (h hasher) Sum64(data []byte) uint64 {
+	return xxhash.Sum64(data)
+}
+
+type consistentHashingDiscovery struct {
+	clk    clock
+	config *consistent.Config
+	ring   *consistent.Consistent
+
+	sync.RWMutex
+}
+
+func NewConsistentHashingDiscovery() (*consistentHashingDiscovery, error) {
+	return newConsistentHashingDiscovery(realClock{})
+}
+
+func newConsistentHashingDiscovery(clk clock) (*consistentHashingDiscovery, error) {
+	return &consistentHashingDiscovery{
+		clk: clk,
+		config: &consistent.Config{
+			PartitionCount:    0, // set in serversChanged
+			ReplicationFactor: 1, // ensure all servers are discoverable
+			Load:              1, // ensure all servers are discoverable
+			Hasher:            hasher{},
+		},
+	}, nil
+}
+
+func (c *consistentHashingDiscovery) serversChanged(newServers []*psinet.DiscoveryServer) {
+	if len(newServers) == 0 {
+		c.RWMutex.Lock()
+		c.ring = nil
+		c.RWMutex.Unlock()
+	} else {
+
+		members := make([]consistent.Member, len(newServers))
+		for i, server := range newServers {
+			members[i] = server
+		}
+
+		// Note: requires full reinitialization because we cannot change
+		// PartitionCount on the fly. Add/Remove do not update PartitionCount
+		// and updating ParitionCount is required to ensure that there is not
+		// a panic in the Psiphon-Labs/consistent package and that all servers
+		// are discoverable.
+		c.config.PartitionCount = len(newServers)
+
+		c.RWMutex.Lock()
+		c.ring = consistent.New(members, *c.config)
+		c.RWMutex.Unlock()
+	}
+}
+
+func (c *consistentHashingDiscovery) selectServers(clientIP net.IP) []*psinet.DiscoveryServer {
+
+	c.RWMutex.RLock()
+	defer c.RWMutex.RUnlock()
+
+	if c.ring == nil {
+		// No discoverable servers.
+		return nil
+	}
+
+	member := c.ring.LocateKey(clientIP)
+	if member == nil {
+		// Should never happen.
+		return nil
+	}
+
+	server := member.(*psinet.DiscoveryServer)
+
+	discoveryDate := c.clk.Now()
+
+	// Double check that server is discoverable at this time.
+	if discoveryDate.Before(server.DiscoveryDateRange[0]) ||
+		!discoveryDate.Before(server.DiscoveryDateRange[1]) {
+		return nil
+	}
+
+	return []*psinet.DiscoveryServer{server}
+}

+ 67 - 0
psiphon/server/discovery/consistent_test.go

@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2024, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package discovery
+
+import (
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/server/psinet"
+)
+
+func TestConsistentHashingDiscovery(t *testing.T) {
+
+	serverIPs, err := nRandomIPs(100)
+	if err != nil {
+		t.Fatalf("nRandomIPs failed %s", err)
+	}
+
+	servers := make([]*psinet.DiscoveryServer, len(serverIPs))
+	for i := 0; i < len(servers); i++ {
+		servers[i] = newDiscoveryServer(strconv.Itoa(i), []time.Time{{}, time.Now().Add(1 * time.Hour)})
+	}
+
+	c, err := NewConsistentHashingDiscovery()
+	if err != nil {
+		t.Fatalf("newConsistentHashingDiscovery failed %s", err)
+	}
+	c.serversChanged(servers)
+
+	// For a single IP address value, only one server in a set of discovery
+	// servers should be discoverable.
+
+	discoveredServers := make(map[string]bool)
+
+	clientIP, err := randomIP()
+	if err != nil {
+		t.Fatalf("randomIP failed %s", err)
+	}
+
+	for i := 0; i < 1000; i++ {
+		for _, server := range c.selectServers(clientIP) {
+			discoveredServers[server.EncodedServerEntry] = true
+		}
+	}
+
+	if len(discoveredServers) != 1 {
+		t.Fatalf("expected to discover 1 server but discovered %d", len(discoveredServers))
+	}
+}

+ 254 - 0
psiphon/server/discovery/discovery.go

@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2024, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+// Package discovery implements the Psiphon discovery algorithms.
+package discovery
+
+import (
+	"context"
+	"net"
+	"sync"
+	"time"
+
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/server/psinet"
+)
+
+// clock is an interface of functions required by discovery that exist in
+// the time package in the Go standard library, which enables using
+// implementations in tests that do not rely on the monotonic clock or wall
+// clock.
+type clock interface {
+	Now() time.Time
+	Until(t time.Time) time.Duration
+	After(d time.Duration) <-chan time.Time
+	NewTimer(d time.Duration) timer
+}
+
+// realClock implements clock using the time package in the Go standard library.
+type realClock struct{}
+
+func (realClock) Now() time.Time { return time.Now() }
+
+func (realClock) Until(t time.Time) time.Duration { return time.Until(t) }
+
+func (realClock) After(d time.Duration) <-chan time.Time { return time.After(d) }
+
+func (realClock) NewTimer(d time.Duration) timer { return &realTimer{t: time.NewTimer(d)} }
+
+// timer is an interface matching what Timer in the time package provides in
+// the Go standard library, which enables using implementations in tests that
+// do not rely on the monotonic clock or wall clock.
+type timer interface {
+	C() <-chan time.Time
+	Stop() bool
+	Reset(d time.Duration) bool
+}
+
+// realTimer implements timer using the time package in the Go standard library.
+type realTimer struct {
+	t *time.Timer
+}
+
+func (t *realTimer) C() <-chan time.Time {
+	return t.t.C
+}
+
+func (t *realTimer) Stop() bool {
+	return t.t.Stop()
+}
+
+func (t *realTimer) Reset(d time.Duration) bool {
+	return t.t.Reset(d)
+}
+
+// DiscoveryStrategy represents a discovery algorithm that selects server
+// entries to be "discovered" by a client. Implementations must be safe for
+// concurrent usage.
+type DiscoveryStrategy interface {
+	// selectServers selects discovery servers to give out to the client based
+	// on its IP address and, possibly, other strategies that are internal to
+	// the discovery strategy implementation.
+	selectServers(clientIP net.IP) []*psinet.DiscoveryServer
+	// serversChanged is called with the set of currently discoverable servers
+	// whever that set changes. The discovery strategy implementation must
+	// replace its set of discoverable servers with these servers.
+	serversChanged(servers []*psinet.DiscoveryServer)
+}
+
+// Discovery is the combination of a discovery strategy with a set of discovery
+// servers. It's safe for concurrent usage.
+type Discovery struct {
+	clk        clock
+	all        []*psinet.DiscoveryServer
+	strategy   DiscoveryStrategy
+	cancelFunc context.CancelFunc
+	wg         *sync.WaitGroup
+}
+
+// MakeDiscovery creates a new Discovery instance, which uses the specified
+// strategy with the given discovery servers.
+func MakeDiscovery(
+	servers []*psinet.DiscoveryServer,
+	strategy DiscoveryStrategy) *Discovery {
+
+	return makeDiscovery(realClock{}, servers, strategy)
+}
+
+func makeDiscovery(
+	clk clock,
+	servers []*psinet.DiscoveryServer,
+	strategy DiscoveryStrategy) *Discovery {
+
+	d := Discovery{
+		clk:      clk,
+		all:      servers,
+		strategy: strategy,
+		wg:       new(sync.WaitGroup),
+	}
+
+	return &d
+}
+
+// Start starts discovery. Servers are discoverable when the current time
+// falls within their discovery date range, i.e. DiscoveryDateRange[0] <=
+// clk.Now() < DiscoveryDateRange[1].
+func (d *Discovery) Start() {
+
+	current, nextUpdate := discoverableServers(d.all, d.clk)
+
+	d.strategy.serversChanged(current)
+
+	ctx, cancelFunc := context.WithCancel(context.Background())
+	d.cancelFunc = cancelFunc
+	d.wg.Add(1)
+
+	// Update the set of discovery servers used by the chosen discovery
+	// algorithm, and therefore discoverable with SelectServers, every time a
+	// server enters, or exits, its discovery date range.
+	go func() {
+		for ctx.Err() == nil {
+			// Wait until the next time a server enters, or exits, its
+			// discovery date range.
+			//
+			// Warning: NewTimer uses the monotonic clock but discovery uses
+			// the wall clock. If there is wall clock drift, then it is
+			// possible that the wall clock surpasses nextUpdate or, more
+			// generally, by the wall clock time the set of discoverable
+			// servers should change before the timer fires. This scenario is
+			// not handled. One solution would be to periodically check if set
+			// of discoverable servers has changed in conjunction with using a
+			// timer.
+			t := d.clk.NewTimer(d.clk.Until(nextUpdate))
+
+			select {
+			case <-t.C():
+			case <-ctx.Done():
+				t.Stop()
+				continue
+			}
+			t.Stop()
+
+			// Note: servers with a discovery date range in the past are not
+			// removed from d.all in case the wall clock has drifted;
+			// otherwise, we risk removing them prematurely.
+			servers, nextUpdate := discoverableServers(d.all, d.clk)
+
+			// Update the set of discoverable servers.
+			d.strategy.serversChanged(servers)
+
+			if nextUpdate == (time.Time{}) {
+				// The discovery date range of all candidate discovery servers
+				// are in the past. No more serversChanged calls will be made
+				// to DiscoveryStrategy.
+				//
+				// Warning: at this point if the wall clock has drifted but
+				// will correct itself in the future such that the set of
+				// discoverable servers changes, then serversChanged will
+				// not be called on the discovery strategies with the new set
+				// of discoverable servers. One workaround for this scenario
+				// would be to periodically check if set of discoverable
+				// servers has changed after this point and restart this loop
+				// if they have.
+				break
+			}
+		}
+		d.wg.Done()
+	}()
+}
+
+// Stop stops discovery and cleans up underlying resources. Stop should be
+// invoked as soon as Discovery is no longer needed. Discovery should not be
+// used after this because the set of discoverable servers will no longer be
+// updated, so it may contain servers that are no longer discoverable and
+// exclude servers that are.
+func (d *Discovery) Stop() {
+	d.cancelFunc()
+	d.wg.Wait()
+}
+
+// SelectServers selects new server entries to be "discovered" by the client,
+// using the client's IP address as the input into the configured discovery
+// algorithm.
+func (d *Discovery) SelectServers(clientIP net.IP) []*psinet.DiscoveryServer {
+	return d.strategy.selectServers(clientIP)
+}
+
+// discoverableServers returns all servers in discoveryServers that are currently
+// eligible for discovery along with the next time that a server in
+// discoveryServers will enter, or exit, its discovery date range.
+func discoverableServers(
+	discoveryServers []*psinet.DiscoveryServer,
+	clk clock) (discoverableServers []*psinet.DiscoveryServer, nextUpdate time.Time) {
+
+	now := clk.Now().UTC()
+	discoverableServers = make([]*psinet.DiscoveryServer, 0)
+
+	var nextServerAdd time.Time
+	var nextServerRemove time.Time
+
+	for _, server := range discoveryServers {
+		if len(server.DiscoveryDateRange) == 2 {
+			if now.Before(server.DiscoveryDateRange[0]) {
+				// Next server that will enter its discovery date range.
+				if nextServerAdd == (time.Time{}) || server.DiscoveryDateRange[0].Before(nextServerAdd) {
+					nextServerAdd = server.DiscoveryDateRange[0]
+				}
+			} else if now.Before(server.DiscoveryDateRange[1]) {
+				discoverableServers = append(discoverableServers, server)
+
+				// Next server that will exit its discovery date range.
+				if nextServerRemove == (time.Time{}) || server.DiscoveryDateRange[1].Before(nextServerRemove) {
+					nextServerRemove = server.DiscoveryDateRange[1]
+				}
+			}
+		}
+	}
+
+	// The next time the set of servers eligible for discovery changes is
+	// whichever occurs first: the next time a server enters its discovery
+	// discovery date range or the next time a server exits its discovery
+	// date range.
+	nextUpdate = nextServerAdd
+	if nextServerAdd == (time.Time{}) ||
+		(nextServerRemove.Before(nextServerAdd) && nextServerRemove != (time.Time{})) {
+		nextUpdate = nextServerRemove
+	}
+
+	return discoverableServers, nextUpdate
+}

+ 374 - 0
psiphon/server/discovery/discovery_test.go

@@ -0,0 +1,374 @@
+/*
+ * Copyright (c) 2024, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package discovery
+
+import (
+	"math/rand"
+	"net"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/server/psinet"
+)
+
+// testClock is a manually-advanced clock for tests.
+//
+// Not safe for concurrent use. NOTE(review): Now/Until/SetNow do take the
+// embedded RWMutex, but update delivery is single-consumer (see SetNow and
+// NewTimer) — confirm intent before using multiple timers concurrently.
+type testClock struct {
+	// now is the current simulated time, guarded by the embedded RWMutex.
+	now    time.Time
+	// update carries each time set via SetNow to at most one waiting timer
+	// goroutine; sends are dropped when no receiver is ready.
+	update chan time.Time
+
+	sync.RWMutex
+}
+
+// NewTestClock returns a testClock initialized to now with an unbuffered
+// update channel. The clock is returned by value; copying is safe only
+// because the embedded RWMutex has not yet been used — callers take its
+// address before first use.
+func NewTestClock(now time.Time) testClock {
+	return testClock{
+		now:    now,
+		update: make(chan time.Time),
+	}
+}
+
+// Now returns the current simulated time.
+func (clk *testClock) Now() time.Time {
+	clk.RWMutex.RLock()
+	defer clk.RWMutex.RUnlock()
+	return clk.now
+}
+
+// Until returns the duration from the current simulated time until t
+// (negative when t is in the past).
+func (clk *testClock) Until(t time.Time) time.Duration {
+	clk.RWMutex.RLock()
+	defer clk.RWMutex.RUnlock()
+	return t.Sub(clk.now)
+
+}
+
+// After returns a channel that is closed once the simulated clock has
+// advanced by at least d from now (see NewTimer).
+func (clk *testClock) After(d time.Duration) <-chan time.Time {
+	t := clk.NewTimer(d)
+	return t.C()
+}
+
+// SetNow advances (or rewinds) the simulated time to now, then performs a
+// non-blocking send on the update channel: at most one pending timer
+// goroutine observes this change, and the signal is dropped entirely if no
+// timer is currently waiting.
+func (clk *testClock) SetNow(now time.Time) {
+	clk.RWMutex.Lock()
+	clk.now = now
+	clk.RWMutex.Unlock()
+	select {
+	case clk.update <- now:
+	default:
+	}
+}
+
+// NewTimer returns a timer whose channel is closed once the simulated
+// clock has advanced by at least d from the time of this call. A zero d
+// yields an already-closed channel.
+//
+// Not safe for concurrent use.
+// NOTE(review): each SetNow delivers to at most one receiver of
+// clk.update, so concurrently outstanding timers can miss updates; the
+// goroutine also blocks forever if the clock never advances far enough,
+// and Stop does not release it — acceptable for these tests, but confirm.
+func (clk *testClock) NewTimer(d time.Duration) timer {
+
+	clk.RWMutex.RLock()
+	start := clk.now
+	clk.RWMutex.RUnlock()
+
+	c := make(chan time.Time)
+	if d == 0 {
+		// Fire immediately: a closed channel is always ready to receive.
+		close(c)
+	} else {
+		go func() {
+			for {
+				now := <-clk.update
+				if now.Sub(start) >= d {
+					close(c)
+					break
+				}
+			}
+		}()
+	}
+
+	return &testTimer{
+		c: c,
+	}
+}
+
+// testTimer adapts a close-only channel to the timer interface.
+type testTimer struct {
+	c <-chan time.Time
+}
+
+// C returns the channel that is closed when the timer fires.
+func (t *testTimer) C() <-chan time.Time {
+	return t.c
+}
+
+// Stop is a no-op that always reports true; it does not release the
+// goroutine started by testClock.NewTimer.
+func (t *testTimer) Stop() bool {
+	return true
+}
+
+// Reset is a no-op that always reports false.
+func (t *testTimer) Reset(d time.Duration) bool {
+	return false
+}
+
+// check describes a single assertion point within a discoveryTest.
+type check struct {
+	t      time.Time // time check is performed
+	ips    []string  // server IP addresses expected to be discoverable
+	subset int       // if non-zero, then expect a subset of ips of this size to be discovered
+}
+
+// discoveryTest is a table-driven test case pairing a discovery strategy
+// with a fixed server set and a sequence of timed checks.
+type discoveryTest struct {
+	name                 string
+	newDiscoveryStrategy func(clk clock) (DiscoveryStrategy, error)
+	servers              []*psinet.DiscoveryServer
+	checks               []check
+}
+
+// runDiscoveryTest drives one discoveryTest: it builds the strategy and a
+// Discovery backed by a testClock, then for each check advances the clock
+// and verifies which server IPs are discovered.
+//
+// NOTE(review): the 1s real sleeps around each clock change give async
+// discovery code time to react; this is inherently timing-dependent.
+func runDiscoveryTest(tt *discoveryTest, now time.Time) error {
+
+	if len(tt.servers) == 0 {
+		return errors.TraceNew("test requires >=1 discovery servers")
+	}
+
+	clk := NewTestClock(now)
+
+	strategy, err := tt.newDiscoveryStrategy(&clk)
+	if err != nil {
+		return errors.Trace(err)
+	}
+
+	discovery := makeDiscovery(&clk, tt.servers, strategy)
+
+	discovery.Start()
+
+	for _, check := range tt.checks {
+		time.Sleep(1 * time.Second) // let async code complete
+		clk.SetNow(check.t)
+		time.Sleep(1 * time.Second) // let async code complete
+		// An empty client IP: these checks only exercise date-range
+		// eligibility, not per-client selection.
+		discovered := discovery.SelectServers(net.IP{})
+		discoveredIPs := make([]string, len(discovered))
+		for i := range discovered {
+			serverEntry, err := protocol.DecodeServerEntry(discovered[i].EncodedServerEntry, "", "")
+			if err != nil {
+				return errors.Trace(err)
+			}
+			discoveredIPs[i] = serverEntry.IpAddress
+		}
+
+		// Count how many of the expected IPs were actually discovered.
+		matches := 0
+		for _, ip := range check.ips {
+			if common.Contains(discoveredIPs, ip) {
+				matches++
+			}
+		}
+
+		expectedMatches := len(check.ips)
+		if check.subset != 0 {
+			expectedMatches = check.subset
+		}
+
+		if expectedMatches != matches {
+			return errors.Tracef("expected %d of %s to be discovered at %s but discovered servers are %s", expectedMatches, check.ips, check.t, discoveredIPs)
+		}
+	}
+
+	discovery.Stop()
+
+	return nil
+}
+
+// TestDiscoveryTestClock exercises both the classic and consistent-hashing
+// discovery strategies against a simulated clock, checking that each
+// server's discovery date range has an inclusive start and an exclusive
+// end, and that overlapping servers yield a one-element subset.
+func TestDiscoveryTestClock(t *testing.T) {
+
+	now := time.Now()
+
+	serverIPs, err := nRandomIPs(4)
+	if err != nil {
+		t.Fatalf("nRandomIPs failed %s", err)
+	}
+
+	// servers 3 and 4 share a date range to exercise subset selection.
+	server1 := newDiscoveryServer(
+		serverIPs[0].String(),
+		[]time.Time{
+			now.Add(-1 * time.Second).UTC(),
+			now.Add(2 * time.Second).UTC(),
+		})
+	server2 := newDiscoveryServer(
+		serverIPs[1].String(),
+		[]time.Time{
+			now.Add(3 * time.Second).UTC(),
+			now.Add(5 * time.Second).UTC(),
+		})
+	server3 := newDiscoveryServer(
+		serverIPs[2].String(),
+		[]time.Time{
+			now.Add(5 * time.Second).UTC(),
+			now.Add(7 * time.Second).UTC(),
+		})
+	server4 := newDiscoveryServer(
+		serverIPs[3].String(),
+		[]time.Time{
+			now.Add(5 * time.Second).UTC(),
+			now.Add(7 * time.Second).UTC(),
+		})
+
+	tests := []discoveryTest{
+		{
+			name: "classic",
+			newDiscoveryStrategy: func(clk clock) (DiscoveryStrategy, error) {
+				return newClassicDiscovery("discoveryValueHMACKey", clk)
+			},
+			servers: []*psinet.DiscoveryServer{
+				server1,
+				server2,
+				server3,
+				server4,
+			},
+			checks: []check{
+				{
+					t:   now.Add(1 * time.Second),
+					ips: []string{server1.IPAddress},
+				},
+				// discovery end date is noninclusive
+				{
+					t:   now.Add(2 * time.Second),
+					ips: []string{},
+				},
+				// discovery start date is inclusive
+				{
+					t:   now.Add(3 * time.Second),
+					ips: []string{server2.IPAddress},
+				},
+				{
+					t:   now.Add(4 * time.Second),
+					ips: []string{server2.IPAddress},
+				},
+				{
+					t:      now.Add(6 * time.Second),
+					ips:    []string{server3.IPAddress, server4.IPAddress},
+					subset: 1,
+				},
+				{
+					t:   now.Add(8 * time.Second),
+					ips: []string{},
+				},
+			},
+		},
+		{
+			name: "consistent",
+			newDiscoveryStrategy: func(clk clock) (DiscoveryStrategy, error) {
+				return newConsistentHashingDiscovery(clk)
+			},
+			servers: []*psinet.DiscoveryServer{
+				server1,
+				server2,
+				server3,
+				server4,
+			},
+			checks: []check{
+				{
+					t:   now.Add(1 * time.Second),
+					ips: []string{server1.IPAddress},
+				},
+				// discovery end date is noninclusive
+				{
+					t:   now.Add(2 * time.Second),
+					ips: []string{},
+				},
+				// discovery start date is inclusive
+				{
+					t:   now.Add(3 * time.Second),
+					ips: []string{server2.IPAddress},
+				},
+				{
+					t:   now.Add(4 * time.Second),
+					ips: []string{server2.IPAddress},
+				},
+				{
+					t:      now.Add(6 * time.Second),
+					ips:    []string{server3.IPAddress, server4.IPAddress},
+					subset: 1,
+				},
+				{
+					t:   now.Add(8 * time.Second),
+					ips: []string{},
+				},
+			},
+		},
+	}
+
+	// Subtests run sequentially (no t.Parallel), so reusing the loop
+	// variable tt across iterations is safe here.
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+
+			err := runDiscoveryTest(&tt, now)
+			if err != nil {
+				t.Fatalf("runDiscoveryTest failed: %v", err)
+			}
+		})
+	}
+}
+
+// newDiscoveryServer builds a psinet.DiscoveryServer for the given IP
+// address and discovery date range, encoding a minimal server entry.
+// Encoding failures panic, which is acceptable in test helpers.
+func newDiscoveryServer(IPAddress string, discoveryDateRange []time.Time) *psinet.DiscoveryServer {
+
+	entry := &protocol.ServerEntry{IpAddress: IPAddress}
+
+	encoded, err := protocol.EncodeServerEntry(entry)
+	if err != nil {
+		panic(err)
+	}
+
+	server := &psinet.DiscoveryServer{
+		EncodedServerEntry: encoded,
+		DiscoveryDateRange: discoveryDateRange,
+		IPAddress:          IPAddress,
+	}
+
+	return server
+}
+
+// randomIP returns a random IPv4 address built from four random bytes.
+func randomIP() (net.IP, error) {
+
+	addr := make([]byte, 4)
+	if _, err := rand.Read(addr); err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	return net.IP(addr), nil
+}
+
+// nRandomIPs returns numIPs unique random IPs, drawing candidates from
+// randomIP and discarding duplicates until enough distinct addresses
+// have been collected.
+func nRandomIPs(numIPs int) ([]net.IP, error) {
+
+	seen := make(map[string]struct{}, numIPs)
+	ips := make([]net.IP, 0, numIPs)
+
+	for len(ips) < numIPs {
+
+		ip, err := randomIP()
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+
+		key := ip.String()
+		if _, duplicate := seen[key]; duplicate {
+			// Already collected; draw again.
+			continue
+		}
+
+		seen[key] = struct{}{}
+		ips = append(ips, ip)
+	}
+
+	return ips, nil
+}

+ 2 - 1
psiphon/server/geoip.go

@@ -209,7 +209,8 @@ func (geoIP *GeoIPService) LookupIP(IP net.IP) GeoIPData {
 
 // LookupISPForIP determines a GeoIPData for a given client IP address. Only
 // ISP, ASN, and ASO fields will be populated. This lookup is faster than a
-// full lookup.
+// full lookup. Benchmarks show this lookup is <= ~1 microsecond against the
+// production geo IP database.
 func (geoIP *GeoIPService) LookupISPForIP(IP net.IP) GeoIPData {
 	return geoIP.lookupIP(IP, true)
 }

+ 51 - 126
psiphon/server/psinet/psinet.go

@@ -20,13 +20,12 @@
 // Package psinet implements psinet database services. The psinet database is a
 // JSON-format file containing information about the Psiphon network, including
 // sponsors, home pages, stats regexes, available upgrades, and other servers for
-// discovery. This package also implements the Psiphon discovery algorithm.
+// discovery.
 package psinet
 
 import (
 	"crypto/md5"
 	"encoding/json"
-	"math"
 	"math/rand"
 	"strconv"
 	"strings"
@@ -34,15 +33,18 @@ import (
 
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 )
 
 const (
 	MAX_DATABASE_AGE_FOR_SERVER_ENTRY_VALIDITY = 48 * time.Hour
 )
 
-// Database serves Psiphon API data requests. It's safe for
-// concurrent usage. The Reload function supports hot reloading
-// of Psiphon network data while the server is running.
+// Database serves Psiphon API data requests. The Reload function supports hot
+// reloading of Psiphon network data while the server is running.
+//
+// All of the methods on Database are thread-safe, but callers must not mutate
+// any returned data. The struct may be safely shared across goroutines.
 type Database struct {
 	common.ReloadableFile
 
@@ -59,6 +61,18 @@ type Database struct {
 type DiscoveryServer struct {
 	DiscoveryDateRange []time.Time `json:"discovery_date_range"`
 	EncodedServerEntry string      `json:"encoded_server_entry"`
+
+	IPAddress string `json:"-"`
+}
+
+// String implements consistent.Member, identifying this server by its IP
+// address within the consistent-hashing ring. IPAddress is populated and
+// checked for uniqueness and non-emptiness when the psinet database is
+// loaded (see NewDatabase).
+// TODO: move to discovery package. Requires bridging to a new type.
+func (s *DiscoveryServer) String() string {
+	// Other member-key candidates considered: Tag, EncodedServerEntry.
+	return s.IPAddress
+}
 
 type Sponsor struct {
@@ -123,6 +137,32 @@ func NewDatabase(filename string) (*Database, error) {
 				sponsor.domainBytesChecksum = checksum[:]
 			}
 
+			// Decode each encoded server entry for its IP address, which is used in
+			// the consistent.Member implementation in the discovery package.
+			//
+			// Also ensure that no servers share the same IP address, which is
+			// a requirement of consistent hashing discovery; otherwise it will
+			// panic in the underlying Psiphon-Labs/consistent package.
+			serverIPToDiagnosticID := make(map[string]string)
+			for i, server := range database.DiscoveryServers {
+
+				serverEntry, err := protocol.DecodeServerEntry(server.EncodedServerEntry, "", "")
+				if err != nil {
+					return errors.Trace(err)
+				}
+				if serverEntry.IpAddress == "" {
+					return errors.Tracef("unexpected empty IP address in server entry for %s ", serverEntry.GetDiagnosticID())
+				}
+
+				if diagnosticID, ok := serverIPToDiagnosticID[serverEntry.IpAddress]; ok {
+					return errors.Tracef("unexpected %s and %s shared the same IP address", diagnosticID, serverEntry.GetDiagnosticID())
+				} else {
+					serverIPToDiagnosticID[serverEntry.IpAddress] = serverEntry.GetDiagnosticID()
+				}
+
+				database.DiscoveryServers[i].IPAddress = serverEntry.IpAddress
+			}
+
 			return nil
 		})
 
@@ -335,127 +375,6 @@ func (db *Database) GetDomainBytesChecksum(sponsorID string) []byte {
 	return sponsor.domainBytesChecksum
 }
 
-// DiscoverServers selects new encoded server entries to be "discovered" by
-// the client, using the discoveryValue -- a function of the client's IP
-// address -- as the input into the discovery algorithm.
-func (db *Database) DiscoverServers(discoveryValue int) []string {
-	db.ReloadableFile.RLock()
-	defer db.ReloadableFile.RUnlock()
-
-	var servers []*DiscoveryServer
-
-	discoveryDate := time.Now().UTC()
-	candidateServers := make([]*DiscoveryServer, 0)
-
-	for _, server := range db.DiscoveryServers {
-		// All servers that are discoverable on this day are eligible for discovery
-		if len(server.DiscoveryDateRange) == 2 &&
-			discoveryDate.After(server.DiscoveryDateRange[0]) &&
-			discoveryDate.Before(server.DiscoveryDateRange[1]) {
-
-			candidateServers = append(candidateServers, server)
-		}
-	}
-
-	timeInSeconds := int(discoveryDate.Unix())
-	servers = selectServers(candidateServers, timeInSeconds, discoveryValue)
-
-	encodedServerEntries := make([]string, 0)
-
-	for _, server := range servers {
-		encodedServerEntries = append(encodedServerEntries, server.EncodedServerEntry)
-	}
-
-	return encodedServerEntries
-}
-
-// Combine client IP address and time-of-day strategies to give out different
-// discovery servers to different clients. The aim is to achieve defense against
-// enumerability. We also want to achieve a degree of load balancing clients
-// and these strategies are expected to have reasonably random distribution,
-// even for a cluster of users coming from the same network.
-//
-// We only select one server: multiple results makes enumeration easier; the
-// strategies have a built-in load balancing effect; and date range discoverability
-// means a client will actually learn more servers later even if they happen to
-// always pick the same result at this point.
-//
-// This is a blended strategy: as long as there are enough servers to pick from,
-// both aspects determine which server is selected. IP address is given the
-// priority: if there are only a couple of servers, for example, IP address alone
-// determines the outcome.
-func selectServers(
-	servers []*DiscoveryServer, timeInSeconds, discoveryValue int) []*DiscoveryServer {
-
-	TIME_GRANULARITY := 3600
-
-	if len(servers) == 0 {
-		return nil
-	}
-
-	// Time truncated to an hour
-	timeStrategyValue := timeInSeconds / TIME_GRANULARITY
-
-	// Divide servers into buckets. The bucket count is chosen such that the number
-	// of buckets and the number of items in each bucket are close (using sqrt).
-	// IP address selects the bucket, time selects the item in the bucket.
-
-	// NOTE: this code assumes that the range of possible timeStrategyValues
-	// and discoveryValues are sufficient to index to all bucket items.
-
-	bucketCount := calculateBucketCount(len(servers))
-
-	buckets := bucketizeServerList(servers, bucketCount)
-
-	if len(buckets) == 0 {
-		return nil
-	}
-
-	bucket := buckets[discoveryValue%len(buckets)]
-
-	if len(bucket) == 0 {
-		return nil
-	}
-
-	server := bucket[timeStrategyValue%len(bucket)]
-
-	serverList := make([]*DiscoveryServer, 1)
-	serverList[0] = server
-
-	return serverList
-}
-
-// Number of buckets such that first strategy picks among about the same number
-// of choices as the second strategy. Gives an edge to the "outer" strategy.
-func calculateBucketCount(length int) int {
-	return int(math.Ceil(math.Sqrt(float64(length))))
-}
-
-// bucketizeServerList creates nearly equal sized slices of the input list.
-func bucketizeServerList(servers []*DiscoveryServer, bucketCount int) [][]*DiscoveryServer {
-
-	// This code creates the same partitions as legacy servers:
-	// https://github.com/Psiphon-Inc/psiphon-automation/blob/685f91a85bcdb33a75a200d936eadcb0686eadd7/Automation/psi_ops_discovery.py
-	//
-	// Both use the same algorithm from:
-	// http://stackoverflow.com/questions/2659900/python-slicing-a-list-into-n-nearly-equal-length-partitions
-
-	// TODO: this partition is constant for fixed Database content, so it could
-	// be done once and cached in the Database ReloadableFile reloadAction.
-
-	buckets := make([][]*DiscoveryServer, bucketCount)
-
-	division := float64(len(servers)) / float64(bucketCount)
-
-	for i := 0; i < bucketCount; i++ {
-		start := int((division * float64(i)) + 0.5)
-		end := int((division * (float64(i) + 1)) + 0.5)
-		buckets[i] = servers[start:end]
-	}
-
-	return buckets
-}
-
 // IsValidServerEntryTag checks if the specified server entry tag is valid.
 func (db *Database) IsValidServerEntryTag(serverEntryTag string) bool {
 	db.ReloadableFile.RLock()
@@ -473,3 +392,9 @@ func (db *Database) IsValidServerEntryTag(serverEntryTag string) bool {
 	// The tag must be in the map and have the value "true".
 	return db.ValidServerEntryTags[serverEntryTag]
 }
+
+// GetDiscoveryServers returns the database's discovery server list. The
+// returned slice is shared internal state; per the Database contract,
+// callers must not mutate it.
+func (db *Database) GetDiscoveryServers() []*DiscoveryServer {
+	db.ReloadableFile.RLock()
+	defer db.ReloadableFile.RUnlock()
+	return db.DiscoveryServers
+}

+ 20 - 128
psiphon/server/psinet/psinet_test.go

@@ -25,9 +25,9 @@ import (
 	"io/ioutil"
 	"os"
 	"path/filepath"
-	"strconv"
 	"testing"
-	"time"
+
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 )
 
 func TestDatabase(t *testing.T) {
@@ -38,7 +38,21 @@ func TestDatabase(t *testing.T) {
 	}
 	defer os.RemoveAll(testDataDirName)
 
-	databaseJSON := `
+	server1, err := protocol.EncodeServerEntry(&protocol.ServerEntry{
+		IpAddress: "1",
+	})
+	if err != nil {
+		t.Fatalf("EncodeServerEntry failed: %s\n", err)
+	}
+
+	server2, err := protocol.EncodeServerEntry(&protocol.ServerEntry{
+		IpAddress: "2",
+	})
+	if err != nil {
+		t.Fatalf("EncodeServerEntry failed: %s\n", err)
+	}
+
+	databaseJSON := fmt.Sprintf(`
     {
         "sponsors" : {
             "SPONSOR-ID" : {
@@ -92,16 +106,10 @@ func TestDatabase(t *testing.T) {
         },
 
         "discovery_servers" : [
-            {"discovery_date_range" : ["1900-01-01T00:00:00Z", "2000-01-01T00:00:00Z"], "encoded_server_entry" : "0"},
-            {"discovery_date_range" : ["1900-01-01T00:00:00Z", "2000-01-01T00:00:00Z"], "encoded_server_entry" : "0"},
-            {"discovery_date_range" : ["1900-01-01T00:00:00Z", "2000-01-01T00:00:00Z"], "encoded_server_entry" : "0"},
-            {"discovery_date_range" : ["1900-01-01T00:00:00Z", "2000-01-01T00:00:00Z"], "encoded_server_entry" : "0"},
-            {"discovery_date_range" : ["2000-01-01T00:00:00Z", "2100-01-01T00:00:00Z"], "encoded_server_entry" : "1"},
-            {"discovery_date_range" : ["2000-01-01T00:00:00Z", "2100-01-01T00:00:00Z"], "encoded_server_entry" : "1"},
-            {"discovery_date_range" : ["2000-01-01T00:00:00Z", "2100-01-01T00:00:00Z"], "encoded_server_entry" : "1"},
-            {"discovery_date_range" : ["2000-01-01T00:00:00Z", "2100-01-01T00:00:00Z"], "encoded_server_entry" : "1"}
+            {"discovery_date_range" : ["1900-01-01T00:00:00Z", "2000-01-01T00:00:00Z"], "encoded_server_entry" : "%s"},
+            {"discovery_date_range" : ["2000-01-01T00:00:00Z", "2100-01-01T00:00:00Z"], "encoded_server_entry" : "%s"}
         ]
-    }`
+    }`, server1, server2)
 
 	filename := filepath.Join(testDataDirName, "psinet.json")
 
@@ -214,13 +222,6 @@ func TestDatabase(t *testing.T) {
 		})
 	}
 
-	for i := 0; i < 1000; i++ {
-		encodedServerEntries := db.DiscoverServers(i)
-		if len(encodedServerEntries) != 1 || encodedServerEntries[0] != "1" {
-			t.Fatalf("unexpected discovery server list: %+v", encodedServerEntries)
-		}
-	}
-
 	if !db.IsValidServerEntryTag("SERVER-ENTRY-TAG") {
 		t.Fatalf("unexpected invalid server entry tag")
 	}
@@ -229,112 +230,3 @@ func TestDatabase(t *testing.T) {
 		t.Fatalf("unexpected valid server entry tag")
 	}
 }
-
-func TestDiscoveryBuckets(t *testing.T) {
-
-	checkBuckets := func(buckets [][]*DiscoveryServer, expectedServerEntries [][]int) {
-		if len(buckets) != len(expectedServerEntries) {
-			t.Errorf(
-				"unexpected bucket count: got %d expected %d",
-				len(buckets), len(expectedServerEntries))
-			return
-		}
-		for i := 0; i < len(buckets); i++ {
-			if len(buckets[i]) != len(expectedServerEntries[i]) {
-				t.Errorf(
-					"unexpected bucket %d size: got %d expected %d",
-					i, len(buckets[i]), len(expectedServerEntries[i]))
-				return
-			}
-			for j := 0; j < len(buckets[i]); j++ {
-				expectedServerEntry := strconv.Itoa(expectedServerEntries[i][j])
-				if buckets[i][j].EncodedServerEntry != expectedServerEntry {
-					t.Errorf(
-						"unexpected bucket %d item %d: got %s expected %s",
-						i, j, buckets[i][j].EncodedServerEntry, expectedServerEntry)
-					return
-				}
-			}
-		}
-	}
-
-	// Partition test cases from:
-	// http://stackoverflow.com/questions/2659900/python-slicing-a-list-into-n-nearly-equal-length-partitions
-
-	servers := make([]*DiscoveryServer, 0)
-	for i := 0; i < 105; i++ {
-		servers = append(servers, &DiscoveryServer{EncodedServerEntry: strconv.Itoa(i)})
-	}
-
-	t.Run("5 servers, 5 buckets", func(t *testing.T) {
-		checkBuckets(
-			bucketizeServerList(servers[0:5], 5),
-			[][]int{{0}, {1}, {2}, {3}, {4}})
-	})
-
-	t.Run("5 servers, 2 buckets", func(t *testing.T) {
-		checkBuckets(
-			bucketizeServerList(servers[0:5], 2),
-			[][]int{{0, 1, 2}, {3, 4}})
-	})
-
-	t.Run("5 servers, 3 buckets", func(t *testing.T) {
-		checkBuckets(
-			bucketizeServerList(servers[0:5], 3),
-			[][]int{{0, 1}, {2}, {3, 4}})
-	})
-
-	t.Run("105 servers, 10 buckets", func(t *testing.T) {
-		checkBuckets(
-			bucketizeServerList(servers, 10),
-			[][]int{
-				{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
-				{11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
-				{21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
-				{32, 33, 34, 35, 36, 37, 38, 39, 40, 41},
-				{42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52},
-				{53, 54, 55, 56, 57, 58, 59, 60, 61, 62},
-				{63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73},
-				{74, 75, 76, 77, 78, 79, 80, 81, 82, 83},
-				{84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94},
-				{95, 96, 97, 98, 99, 100, 101, 102, 103, 104},
-			})
-	})
-
-	t.Run("repeatedly discover with fixed IP address", func(t *testing.T) {
-
-		// For a IP address values, only one bucket should be used; with enough
-		// iterations, all and only the items in a single bucket should be discovered.
-
-		discoveredServers := make(map[string]bool)
-
-		// discoveryValue is derived from the client's IP address and indexes the bucket;
-		// a value of 0 always maps to the first bucket.
-		discoveryValue := 0
-
-		for i := 0; i < 1000; i++ {
-			for _, server := range selectServers(servers, i*int(time.Hour/time.Second), discoveryValue) {
-				discoveredServers[server.EncodedServerEntry] = true
-			}
-		}
-
-		bucketCount := calculateBucketCount(len(servers))
-
-		buckets := bucketizeServerList(servers, bucketCount)
-
-		if len(buckets[0]) != len(discoveredServers) {
-			t.Errorf(
-				"unexpected discovered server count: got %d expected %d",
-				len(discoveredServers), len(buckets[0]))
-			return
-		}
-
-		for _, bucketServer := range buckets[0] {
-			if _, ok := discoveredServers[bucketServer.EncodedServerEntry]; !ok {
-				t.Errorf("unexpected missing discovery server: %s", bucketServer.EncodedServerEntry)
-				return
-			}
-		}
-	})
-
-}

+ 188 - 16
psiphon/server/server_test.go

@@ -58,6 +58,7 @@ import (
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tactics"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/transforms"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/values"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/server/psinet"
 	lrucache "github.com/cognusion/go-cache-lru"
 	"github.com/miekg/dns"
 	"golang.org/x/net/proxy"
@@ -396,6 +397,19 @@ func TestHotReload(t *testing.T) {
 		})
 }
 
+func TestHotReloadWithTactics(t *testing.T) {
+	runServer(t,
+		&runServerConfig{
+			tunnelProtocol:       "UNFRONTED-MEEK-OSSH",
+			enableSSHAPIRequests: true,
+			doHotReload:          true,
+			requireAuthorization: true,
+			doTunneledWebRequest: true,
+			doTunneledNTPRequest: true,
+			doLogHostProvider:    true,
+		})
+}
+
 func TestDefaultSponsorID(t *testing.T) {
 	runServer(t,
 		&runServerConfig{
@@ -743,6 +757,11 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 
 	// customize server config
 
+	discoveryServers, err := newDiscoveryServers([]string{"1.1.1.1", "2.2.2.2"})
+	if err != nil {
+		t.Fatalf("newDiscoveryServers failed: %s\n", err)
+	}
+
 	// Initialize prune server entry test cases and associated data to pave into psinet.
 	pruneServerEntryTestCases, psinetValidServerEntryTags, expectedNumPruneNotices :=
 		initializePruneServerEntriesTest(t, runConfig)
@@ -750,7 +769,7 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 	// Pave psinet with random values to test handshake homepages.
 	psinetFilename := filepath.Join(testDataDirName, "psinet.json")
 	sponsorID, expectedHomepageURL := pavePsinetDatabaseFile(
-		t, psinetFilename, "", runConfig.doDefaultSponsorID, true, psinetValidServerEntryTags)
+		t, psinetFilename, "", runConfig.doDefaultSponsorID, true, psinetValidServerEntryTags, discoveryServers)
 
 	// Pave OSL config for SLOK testing
 	oslConfigFilename := filepath.Join(testDataDirName, "osl_config.json")
@@ -771,15 +790,17 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 		livenessTestSize)
 
 	var tacticsConfigFilename string
+	var tacticsTunnelProtocol string
 
 	// Only pave the tactics config when tactics are required. This exercises the
 	// case where the tactics config is omitted.
 	if doServerTactics {
 		tacticsConfigFilename = filepath.Join(testDataDirName, "tactics_config.json")
 
-		tacticsTunnelProtocol := runConfig.tunnelProtocol
 		if runConfig.clientTunnelProtocol != "" {
 			tacticsTunnelProtocol = runConfig.clientTunnelProtocol
+		} else {
+			tacticsTunnelProtocol = runConfig.tunnelProtocol
 		}
 
 		paveTacticsConfigFile(
@@ -795,6 +816,7 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 			runConfig.doDestinationBytes,
 			runConfig.applyPrefix,
 			runConfig.forceFragmenting,
+			"classic",
 		)
 	}
 
@@ -864,6 +886,11 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 	uniqueUserLog := make(chan map[string]interface{}, 1)
 	domainBytesLog := make(chan map[string]interface{}, 1)
 	serverTunnelLog := make(chan map[string]interface{}, 1)
+	// Max 3 discovery logs:
+	// 1. server startup
+	// 2. hot reload of psinet db (runConfig.doHotReload)
+	// 3. hot reload of server tactics (runConfig.doHotReload && doServerTactics)
+	discoveryLog := make(chan map[string]interface{}, 3)
 
 	setLogCallback(func(log []byte) {
 
@@ -875,6 +902,12 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 		}
 
 		if logFields["event_name"] == nil {
+			if logFields["discovery_strategy"] != nil {
+				select {
+				case discoveryLog <- logFields:
+				default:
+				}
+			}
 			return
 		}
 
@@ -969,9 +1002,16 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 
 	if runConfig.doHotReload {
 
+		// Change discovery servers. Tests that discovery switches over to
+		// these new servers.
+		discoveryServers, err = newDiscoveryServers([]string{"3.3.3.3"})
+		if err != nil {
+			t.Fatalf("newDiscoveryServers failed: %s\n", err)
+		}
+
 		// Pave new config files with different random values.
 		sponsorID, expectedHomepageURL = pavePsinetDatabaseFile(
-			t, psinetFilename, "", runConfig.doDefaultSponsorID, true, psinetValidServerEntryTags)
+			t, psinetFilename, "", runConfig.doDefaultSponsorID, true, psinetValidServerEntryTags, discoveryServers)
 
 		propagationChannelID = paveOSLConfigFile(t, oslConfigFilename)
 
@@ -985,6 +1025,26 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 			runConfig.denyTrafficRules,
 			livenessTestSize)
 
+		if doServerTactics {
+			// Pave new tactics file with different discovery strategy. Tests
+			// that discovery switches over to the new strategy.
+			paveTacticsConfigFile(
+				t,
+				tacticsConfigFilename,
+				tacticsRequestPublicKey,
+				tacticsRequestPrivateKey,
+				tacticsRequestObfuscatedKey,
+				tacticsTunnelProtocol,
+				propagationChannelID,
+				livenessTestSize,
+				runConfig.doBurstMonitor,
+				runConfig.doDestinationBytes,
+				runConfig.applyPrefix,
+				runConfig.forceFragmenting,
+				"consistent",
+			)
+		}
+
 		p, _ := os.FindProcess(os.Getpid())
 		p.Signal(syscall.SIGUSR1)
 
@@ -1356,12 +1416,7 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 		// random homepage URLs will change, but this has no effect on the
 		// already connected client.
 		_, _ = pavePsinetDatabaseFile(
-			t, psinetFilename, sponsorID, runConfig.doDefaultSponsorID, false, psinetValidServerEntryTags)
-
-		tacticsTunnelProtocol := runConfig.tunnelProtocol
-		if runConfig.clientTunnelProtocol != "" {
-			tacticsTunnelProtocol = runConfig.clientTunnelProtocol
-		}
+			t, psinetFilename, sponsorID, runConfig.doDefaultSponsorID, false, psinetValidServerEntryTags, discoveryServers)
 
 		// Pave tactics without destination bytes.
 		paveTacticsConfigFile(
@@ -1375,7 +1430,8 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 			livenessTestSize,
 			runConfig.doBurstMonitor,
 			false,
-			false, false)
+			false, false,
+			"consistent")
 
 		p, _ := os.FindProcess(os.Getpid())
 		p.Signal(syscall.SIGUSR1)
@@ -1584,6 +1640,41 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 		}
 	}
 
+	// Check logs emitted by discovery.
+
+	var expectedDiscoveryStrategy []string
+
+	// Discovery emits 1 log on startup.
+	if doServerTactics {
+		expectedDiscoveryStrategy = append(expectedDiscoveryStrategy, "classic")
+	} else {
+		expectedDiscoveryStrategy = append(expectedDiscoveryStrategy, "consistent")
+	}
+	if runConfig.doHotReload {
+		if doServerTactics {
+			// Discovery emits 1 log when tactics are reloaded, which happens
+			// before the psinet database is reloaded.
+			expectedDiscoveryStrategy = append(expectedDiscoveryStrategy, "classic")
+		}
+		// Discovery emits 1 log when the psinet database is reloaded.
+		expectedDiscoveryStrategy = append(expectedDiscoveryStrategy, "consistent")
+	}
+
+	for _, expectedStrategy := range expectedDiscoveryStrategy {
+		select {
+		case logFields := <-discoveryLog:
+			if strategy, ok := logFields["discovery_strategy"].(string); ok {
+				if strategy != expectedStrategy {
+					t.Fatalf("expected discovery strategy \"%s\"", expectedStrategy)
+				}
+			} else {
+				t.Fatalf("missing discovery_strategy field")
+			}
+		default:
+			t.Fatalf("missing discovery log")
+		}
+	}
+
 	// Check that datastore had retained/pruned server entries as expected.
 	checkPruneServerEntriesTest(t, runConfig, testDataDirName, pruneServerEntryTestCases)
 
@@ -1670,6 +1761,49 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 			t.Fatalf("unexpected cached steering IP: %v", entry)
 		}
 	}
+
+	// Check that the client discovered one of the discovery servers.
+
+	discoveredServers := make(map[string]*protocol.ServerEntry)
+
+	// Otherwise NewServerEntryIterator only returns TargetServerEntry.
+	clientConfig.TargetServerEntry = ""
+
+	_, iterator, err := psiphon.NewServerEntryIterator(clientConfig)
+	if err != nil {
+		t.Fatalf("NewServerEntryIterator failed: %s", err)
+	}
+	defer iterator.Close()
+
+	for {
+		serverEntry, err := iterator.Next()
+		if err != nil {
+			t.Fatalf("ServerIterator.Next failed: %s", err)
+		}
+		if serverEntry == nil {
+			break
+		}
+		discoveredServers[serverEntry.IpAddress] = serverEntry
+	}
+
+	foundOne := false
+	for _, server := range discoveryServers {
+
+		serverEntry, err := protocol.DecodeServerEntry(server.EncodedServerEntry, "", "")
+		if err != nil {
+			t.Fatalf("protocol.DecodeServerEntry failed: %s", err)
+		}
+
+		if v, ok := discoveredServers[serverEntry.IpAddress]; ok {
+			if v.Tag == serverEntry.Tag {
+				foundOne = true
+				break
+			}
+		}
+	}
+	if !foundOne {
+		t.Fatalf("expected client to discover at least one server")
+	}
 }
 
 func sendNotificationReceived(c chan<- struct{}) {
@@ -2549,7 +2683,8 @@ func pavePsinetDatabaseFile(
 	sponsorID string,
 	useDefaultSponsorID bool,
 	doDomainBytes bool,
-	validServerEntryTags []string) (string, string) {
+	validServerEntryTags []string,
+	discoveryServers []*psinet.DiscoveryServer) (string, string) {
 
 	if sponsorID == "" {
 		sponsorID = prng.HexString(8)
@@ -2564,6 +2699,11 @@ func pavePsinetDatabaseFile(
 	fakePath := prng.HexString(4)
 	expectedHomepageURL := fmt.Sprintf("https://%s.com/%s", fakeDomain, fakePath)
 
+	discoverServersJSON, err := json.Marshal(discoveryServers)
+	if err != nil {
+		t.Fatalf("json.Marshal failed: %s\n", err)
+	}
+
 	psinetJSONFormat := `
     {
         "default_sponsor_id" : "%s",
@@ -2585,7 +2725,8 @@ func pavePsinetDatabaseFile(
         },
         "valid_server_entry_tags" : {
             %s
-        }
+        },
+        "discovery_servers" : %s
     }
 	`
 
@@ -2619,9 +2760,10 @@ func pavePsinetDatabaseFile(
 		expectedHomepageURL,
 		protocol.PSIPHON_API_ALERT_DISALLOWED_TRAFFIC,
 		actionURLsJSON,
-		validServerEntryTagsJSON)
+		validServerEntryTagsJSON,
+		discoverServersJSON)
 
-	err := ioutil.WriteFile(psinetFilename, []byte(psinetJSON), 0600)
+	err = ioutil.WriteFile(psinetFilename, []byte(psinetJSON), 0600)
 	if err != nil {
 		t.Fatalf("error paving psinet database file: %s", err)
 	}
@@ -2827,7 +2969,8 @@ func paveTacticsConfigFile(
 	doBurstMonitor bool,
 	doDestinationBytes bool,
 	applyOsshPrefix bool,
-	enableOsshPrefixFragmenting bool) {
+	enableOsshPrefixFragmenting bool,
+	discoveryStategy string) {
 
 	// Setting LimitTunnelProtocols passively exercises the
 	// server-side LimitTunnelProtocols enforcement.
@@ -2877,7 +3020,8 @@ func paveTacticsConfigFile(
           "BPFClientTCPProbability" : 1.0,
           "ServerPacketManipulationSpecs" : [{"Name": "test-packetman-spec", "PacketSpecs": [["TCP-flags S"]]}],
           "ServerPacketManipulationProbability" : 1.0,
-          "ServerProtocolPacketManipulations": {"All" : ["test-packetman-spec"]}
+          "ServerProtocolPacketManipulations": {"All" : ["test-packetman-spec"]},
+		  "ServerDiscoveryStrategy": "%s"
         }
       },
       "FilteredTactics" : [
@@ -2953,6 +3097,7 @@ func paveTacticsConfigFile(
 		tunnelProtocol,
 		tunnelProtocol,
 		livenessTestSize, livenessTestSize, livenessTestSize, livenessTestSize,
+		discoveryStategy,
 		propagationChannelID,
 		strings.ReplaceAll(testCustomHostNameRegex, `\`, `\\`),
 		tunnelProtocol)
@@ -3481,3 +3626,30 @@ func (f *flows) Write(p []byte) (n int, err error) {
 
 	return n, err
 }
+
+// newDiscoveryServers returns len(ipAddresses) discovery servers with the
+// given IP addresses and randomly generated tags.
+func newDiscoveryServers(ipAddresses []string) ([]*psinet.DiscoveryServer, error) {
+
+	servers := make([]*psinet.DiscoveryServer, len(ipAddresses))
+
+	for i, ipAddress := range ipAddresses {
+
+		encodedServer, err := protocol.EncodeServerEntry(&protocol.ServerEntry{
+			IpAddress: ipAddress,
+			Tag:       prng.HexString(16),
+		})
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+
+		servers[i] = &psinet.DiscoveryServer{
+			DiscoveryDateRange: []time.Time{
+				time.Now().Add(-time.Hour).UTC(),
+				time.Now().Add(time.Hour).UTC(),
+			},
+			EncodedServerEntry: encodedServer,
+		}
+	}
+	return servers, nil
+}

+ 38 - 0
psiphon/server/services.go

@@ -126,6 +126,8 @@ func RunServices(configJSON []byte) (retErr error) {
 		support.PacketManipulator = packetManipulator
 	}
 
+	support.discovery = makeDiscovery(support)
+
 	// After this point, errors should be delivered to the errors channel and
 	// orderly shutdown should flow through to the end of the function to ensure
 	// all workers are synchronously stopped.
@@ -157,6 +159,21 @@ func RunServices(configJSON []byte) (retErr error) {
 		}
 	}
 
+	err = support.discovery.Start()
+	if err != nil {
+		select {
+		case errorChannel <- err:
+		default:
+		}
+	} else {
+		waitGroup.Add(1)
+		go func() {
+			defer waitGroup.Done()
+			<-shutdownBroadcast
+			support.discovery.Stop()
+		}()
+	}
+
 	if config.RunLoadMonitor() {
 		waitGroup.Add(1)
 		go func() {
@@ -479,6 +496,8 @@ func logIrregularTunnel(
 // components, which allows these data components to be refreshed
 // without restarting the server process.
 type SupportServices struct {
+	// TODO: make all fields non-exported, none are accessed outside
+	// of this package.
 	Config                       *Config
 	TrafficRulesSet              *TrafficRulesSet
 	OSLConfig                    *osl.Config
@@ -492,6 +511,8 @@ type SupportServices struct {
 	PacketManipulator            *packetman.Manipulator
 	ReplayCache                  *ReplayCache
 	ServerTacticsParametersCache *ServerTacticsParametersCache
+
+	discovery *Discovery
 }
 
 // NewSupportServices initializes a new SupportServices.
@@ -569,6 +590,16 @@ func (support *SupportServices) Reload() {
 			support.Blocklist},
 		support.GeoIPService.Reloaders()...)
 
+	reloadDiscovery := func(reloadedTactics bool) {
+		err := support.discovery.reload(reloadedTactics)
+		if err != nil {
+			log.WithTraceFields(
+				LogFields{"error": errors.Trace(err)}).Warning(
+				"failed to reload discovery")
+			return
+		}
+	}
+
 	// Note: established clients aren't notified when tactics change after a
 	// reload; new tactics will be obtained on the next client handshake or
 	// tactics request.
@@ -587,15 +618,22 @@ func (support *SupportServices) Reload() {
 					"failed to reload packet manipulation specs")
 			}
 		}
+
+		reloadDiscovery(true)
 	}
 
 	// Take these actions only after the corresponding Reloader has reloaded.
 	// In both the traffic rules and OSL cases, there is some impact from state
 	// reset, so the reset should be avoided where possible.
+	//
+	// Note: if both tactics and psinet are reloaded at the same time and
+	// the discovery strategy tactic has changed, then discovery will be reloaded
+	// twice.
 	reloadPostActions := map[common.Reloader]func(){
 		support.TrafficRulesSet: func() { support.TunnelServer.ResetAllClientTrafficRules() },
 		support.OSLConfig:       func() { support.TunnelServer.ResetAllClientOSLConfigs() },
 		support.TacticsServer:   reloadTactics,
+		support.PsinetDatabase:  func() { reloadDiscovery(false) },
 	}
 
 	for _, reloader := range reloaders {

+ 9 - 3
psiphon/server/tunnelServer.go

@@ -2630,7 +2630,7 @@ func (sshClient *sshClient) handleTCPPortForwards(
 	//
 	//    The manager enforces the concurrent TCP dial limit: when at the limit, the
 	//    manager blocks waiting for the number of dials to drop below the limit before
-	//    dispatching the request to handleTCPPortForward(), which will run in its own
+	//    dispatching the request to handleTCPChannel(), which will run in its own
 	//    goroutine and will dial and relay the port forward.
 	//
 	//    The block delays the current request and also halts dequeuing of subsequent
@@ -2643,7 +2643,7 @@ func (sshClient *sshClient) handleTCPPortForwards(
 	//    the dial timeout. If the dial timeout has expired before the dial begins, the
 	//    port forward is rejected and a stat is recorded.
 	//
-	// 3. handleTCPPortForward() performs the port forward dial and relaying.
+	// 3. handleTCPChannel() performs the port forward dial and relaying.
 	//
 	//     a. Dial the target, using the dial timeout remaining after queue and blocking
 	//        time is deducted.
@@ -3770,7 +3770,13 @@ func (sshClient *sshClient) newClientSeedPortForward(IPAddress net.IP) *osl.Clie
 		return nil
 	}
 
-	return sshClient.oslClientSeedState.NewClientSeedPortForward(IPAddress)
+	lookupASN := func(IP net.IP) string {
+		// TODO: there are potentially multiple identical geo IP lookups per new
+		// port forward and flow, cache and use result of first lookup.
+		return sshClient.sshServer.support.GeoIPService.LookupISPForIP(IP).ASN
+	}
+
+	return sshClient.oslClientSeedState.NewClientSeedPortForward(IPAddress, lookupASN)
 }
 
 // getOSLSeedPayload returns a payload containing all seeded SLOKs for

+ 24 - 0
vendor/github.com/Psiphon-Labs/consistent/.gitignore

@@ -0,0 +1,24 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Vim creates this
+*.swp
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+# GoLand creates this
+.idea/
+
+# OSX creates this
+.DS_Store
+

+ 1 - 0
vendor/github.com/Psiphon-Labs/consistent/.travis.yml

@@ -0,0 +1 @@
+language: go

+ 21 - 0
vendor/github.com/Psiphon-Labs/consistent/LICENSE

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2018-2021 Burak Sezer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 255 - 0
vendor/github.com/Psiphon-Labs/consistent/README.md

@@ -0,0 +1,255 @@
+consistent
+==========
+[![Go Reference](https://pkg.go.dev/badge/github.com/buraksezer/consistent.svg)](https://pkg.go.dev/github.com/buraksezer/consistent) ![Build Status](https://github.com/buraksezer/consistent/actions/workflows/tests.yml/badge.svg?branch=master) ![Linter](https://github.com/buraksezer/consistent/actions/workflows/lint.yml/badge.svg?branch=master) [![Coverage](http://gocover.io/_badge/github.com/buraksezer/consistent)](http://gocover.io/github.com/buraksezer/consistent) [![Go Report Card](https://goreportcard.com/badge/github.com/buraksezer/consistent)](https://goreportcard.com/report/github.com/buraksezer/consistent) [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) [![Mentioned in Awesome Go](https://awesome.re/mentioned-badge.svg)](https://github.com/avelino/awesome-go)  
+
+
+This library provides a consistent hashing function which simultaneously achieves both uniformity and consistency. 
+
+For detailed information about the concept, you should take a look at the following resources:
+
+* [Consistent Hashing with Bounded Loads on Google Research Blog](https://research.googleblog.com/2017/04/consistent-hashing-with-bounded-loads.html)
+* [Improving load balancing with a new consistent-hashing algorithm on Vimeo Engineering Blog](https://medium.com/vimeo-engineering-blog/improving-load-balancing-with-a-new-consistent-hashing-algorithm-9f1bd75709ed)
+* [Consistent Hashing with Bounded Loads paper on arXiv](https://arxiv.org/abs/1608.01350)
+
+Table of Content
+----------------
+
+- [Overview](#overview)
+- [Notable Users](#notable-users)
+- [Install](#install)
+- [Configuration](#configuration)
+- [Usage](#usage)
+- [Benchmarks](#benchmarks)
+- [Examples](#examples)
+
+Overview
+--------
+
+In this package's context, the keys are distributed among partitions and partitions are distributed among members as well. 
+
+When you create a new consistent instance or call `Add/Remove`:
+
+* The member's name is hashed and inserted into the hash ring,
+* Average load is calculated by the algorithm defined in the paper,
+* Partitions are distributed among members by hashing partition IDs and none of them exceed the average load.
+
+Average load cannot be exceeded. So if all members are loaded at the maximum while trying to add a new member, it panics.
+
+When you want to locate a key by calling `LocateKey`:
+
+* The key(byte slice) is hashed,
+* The result of the hash is mod by the number of partitions,
+* The result of this modulo - `MOD(hash result, partition count)` - is the partition in which the key will be located,
+* Owner of the partition is already determined before calling `LocateKey`. So it returns the partition owner immediately.
+
+No memory is allocated by `consistent` except hashing when you want to locate a key.
+
+Note that the number of partitions cannot be changed after creation. 
+
+Notable Users
+-------------
+
+[buraksezer/consistent](https://github.com/buraksezer/consistent) is used at production by the following projects:
+
+* [buraksezer/olric](https://github.com/buraksezer/olric): Embeddable, distributed data structures in Go.
+* [open-telemetry/opentelemetry-operator](https://github.com/open-telemetry/opentelemetry-operator): Kubernetes Operator for OpenTelemetry Collector.
+* [giantswarm/starboard-exporter](https://github.com/giantswarm/starboard-exporter): Exposes Prometheus metrics from [Starboard](https://github.com/aquasecurity/starboard)'s `VulnerabilityReport`, `ConfigAuditReport`, and other custom resources (CRs).
+* [megaease/easegress](https://github.com/megaease/easegress): A Cloud Native traffic orchestration system.
+* [chrislusf/seaweedfs](https://github.com/chrislusf/seaweedfs): SeaweedFS is a distributed storage system for blobs, objects, files, and data warehouse, to store and serve billions of files fast!.
+* [erda-project/erda](https://github.com/erda-project/erda): An enterprise-grade Cloud-Native application platform for Kubernetes.
+* [celo-org/celo-blockchain](https://github.com/celo-org/celo-blockchain): Global payments infrastructure built for mobile.
+* [koderover/zadig](https://github.com/koderover/zadig): Zadig is a cloud native, distributed, developer-oriented continuous delivery product.
+* [mason-leap-lab/infinicache](https://github.com/mason-leap-lab/infinicache): InfiniCache: A cost-effective memory cache that is built atop ephemeral serverless functions.
+* [opencord/voltha-lib-go](https://github.com/opencord/voltha-lib-go): Voltha common library code.
+* [kubeedge/edgemesh](https://github.com/kubeedge/edgemesh): Simplified network and services for edge applications.
+* [authorizer-tech/access-controller](https://github.com/authorizer-tech/access-controller) An implementation of a distributed access-control server that is based on Google Zanzibar - "Google's Consistent, Global Authorization System.
+* [Conflux-Chain/confura](https://github.com/Conflux-Chain/confura) Implementation of an Ethereum Infura equivalent public RPC service on Conflux Network.
+
+Install
+-------
+
+With a correctly configured Go environment:
+
+```
+go get github.com/buraksezer/consistent
+```
+
+You will find some useful usage samples in [examples](https://github.com/buraksezer/consistent/tree/master/_examples) folder.
+
+Configuration
+-------------
+
+```go
+type Config struct {
+	// Hasher is responsible for generating unsigned, 64 bit hash of provided byte slice.
+	Hasher Hasher
+
+	// Keys are distributed among partitions. Prime numbers are good to
+	// distribute keys uniformly. Select a big PartitionCount if you have
+	// too many keys.
+	PartitionCount int
+
+	// Members are replicated on the consistent hash ring. This number
+	// controls how many times each member is replicated on the ring.
+	ReplicationFactor int
+
+	// Load is used to calculate average load. See the code, the paper and Google's 
+	// blog post to learn about it.
+	Load float64
+}
+```
+
+Any hash algorithm can be used as hasher which implements Hasher interface. Please take a look at the *Sample* section for an example.
+
+Usage
+-----
+
+`LocateKey` function finds a member in the cluster for your key:
+```go
+// With a properly configured and initialized consistent instance
+key := []byte("my-key")
+member := c.LocateKey(key)
+```
+It returns a thread-safe copy of the member you added before.
+
+The second most frequently used function is `GetClosestN`. 
+
+```go
+// With a properly configured and initialized consistent instance
+
+key := []byte("my-key")
+members, err := c.GetClosestN(key, 2)
+```
+
+This may be useful to find backup nodes to store your key.
+
+Benchmarks
+----------
+On an early 2015 Macbook:
+
+```
+BenchmarkAddRemove-4     	  100000	     22006 ns/op
+BenchmarkLocateKey-4     	 5000000	       252 ns/op
+BenchmarkGetClosestN-4   	  500000	      2974 ns/op
+```
+
+Examples
+--------
+
+The most basic use of consistent package should be like this. For detailed list of functions, [visit godoc.org.](https://godoc.org/github.com/buraksezer/consistent)
+More sample code can be found under [_examples](https://github.com/buraksezer/consistent/tree/master/_examples).
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/buraksezer/consistent"
+	"github.com/cespare/xxhash"
+)
+
+// In your code, you probably have a custom data type 
+// for your cluster members. Just add a String function to implement 
+// consistent.Member interface.
+type myMember string
+
+func (m myMember) String() string {
+	return string(m)
+}
+
+// consistent package doesn't provide a default hashing function. 
+// You should provide a proper one to distribute keys/members uniformly.
+type hasher struct{}
+
+func (h hasher) Sum64(data []byte) uint64 {
+	// you should use a proper hash function for uniformity.
+	return xxhash.Sum64(data)
+}
+
+func main() {
+	// Create a new consistent instance
+	cfg := consistent.Config{
+		PartitionCount:    7,
+		ReplicationFactor: 20,
+		Load:              1.25,
+		Hasher:            hasher{},
+	}
+	c := consistent.New(nil, cfg)
+
+	// Add some members to the consistent hash table.
+	// Add function calculates average load and distributes partitions over members
+	node1 := myMember("node1.olric.com")
+	c.Add(node1)
+
+	node2 := myMember("node2.olric.com")
+	c.Add(node2)
+
+	key := []byte("my-key")
+	// calculates partition id for the given key
+	// partID := hash(key) % partitionCount
+	// the partitions are already distributed among members by Add function.
+	owner := c.LocateKey(key)
+	fmt.Println(owner.String())
+	// Prints node2.olric.com
+}
+```
+
+Another useful example is `_examples/relocation_percentage.go`. It creates a `consistent` object with 8 members and distributes partitions among them. Then adds 9th member, 
+here is the result with a proper configuration and hash function:
+
+```
+bloom:consistent burak$ go run _examples/relocation_percentage.go
+partID: 218 moved to node2.olric from node0.olric
+partID: 173 moved to node9.olric from node3.olric
+partID: 225 moved to node7.olric from node0.olric
+partID:  85 moved to node9.olric from node7.olric
+partID: 220 moved to node5.olric from node0.olric
+partID:  33 moved to node9.olric from node5.olric
+partID: 254 moved to node9.olric from node4.olric
+partID:  71 moved to node9.olric from node3.olric
+partID: 236 moved to node9.olric from node2.olric
+partID: 118 moved to node9.olric from node3.olric
+partID: 233 moved to node3.olric from node0.olric
+partID:  50 moved to node9.olric from node4.olric
+partID: 252 moved to node9.olric from node2.olric
+partID: 121 moved to node9.olric from node2.olric
+partID: 259 moved to node9.olric from node4.olric
+partID:  92 moved to node9.olric from node7.olric
+partID: 152 moved to node9.olric from node3.olric
+partID: 105 moved to node9.olric from node2.olric
+
+6% of the partitions are relocated
+```
+
+Moved partition count is highly dependent on your configuration and the quality of the hash function. You should modify the configuration to find an optimum set of configurations
+for your system.
+
+`_examples/load_distribution.go` is also useful to understand load distribution. It creates a `consistent` object with 8 members and locates 1M key. It also calculates average 
+load which cannot be exceeded by any member. Here is the result:
+
+```
+Maximum key count for a member should be around this:  147602
+member: node2.olric, key count: 100362
+member: node5.olric, key count: 99448
+member: node0.olric, key count: 147735
+member: node3.olric, key count: 103455
+member: node6.olric, key count: 147069
+member: node1.olric, key count: 121566
+member: node4.olric, key count: 147932
+member: node7.olric, key count: 132433
+```
+
+Average load can be calculated by using the following formula:
+
+```
+load := (consistent.AverageLoad() * float64(keyCount)) / float64(config.PartitionCount)
+```
+
+Contributions
+-------------
+Please don't hesitate to fork the project and send a pull request or just e-mail me to ask questions and share ideas.
+
+License
+-------
+MIT License, - see LICENSE for more details.

+ 397 - 0
vendor/github.com/Psiphon-Labs/consistent/consistent.go

@@ -0,0 +1,397 @@
+// Copyright (c) 2018-2022 Burak Sezer
+// All rights reserved.
+//
+// This code is licensed under the MIT License.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files(the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions :
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package consistent provides a consistent hashing function with bounded loads. This implementation also adds
+// partitioning logic on top of the original algorithm. For more information about the underlying algorithm,
+// please take a look at https://research.googleblog.com/2017/04/consistent-hashing-with-bounded-loads.html
+//
+// Example Use:
+//
+//	cfg := consistent.Config{
+//		PartitionCount:    71,
+//		ReplicationFactor: 20,
+//		Load:              1.25,
+//		Hasher:            hasher{},
+//	}
+//
+// Now you can create a new Consistent instance. This function can take a list of the members.
+//
+//	c := consistent.New(members, cfg)
+//
+// In the following sample, you add a new Member to the consistent hash ring. myMember is just a Go struct that
+// implements the Member interface. You should know that modifying the consistent hash ring distributes partitions among
+// members using the algorithm defined on Google Research Blog.
+//
+//	c.Add(myMember)
+//
+// Remove a member from the consistent hash ring:
+//
+//	c.Remove(member-name)
+//
+// LocateKey hashes the key and calculates partition ID with this modulo operation: MOD(hash result, partition count)
+// The owner of the partition is already calculated by New/Add/Remove. LocateKey just returns the member that is responsible
+// for the key.
+//
+//	key := []byte("my-key")
+//	member := c.LocateKey(key)
+package consistent
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"sort"
+	"sync"
+)
+
+const (
+	DefaultPartitionCount    int     = 271
+	DefaultReplicationFactor int     = 20
+	DefaultLoad              float64 = 1.25
+)
+
+// ErrInsufficientMemberCount represents an error which means there are not enough members to complete the task.
+var ErrInsufficientMemberCount = errors.New("insufficient member count")
+
+// Hasher is responsible for generating unsigned, 64-bit hash of provided byte slice.
+// Hasher should minimize collisions (generating same hash for different byte slice)
+// and while performance is also important fast functions are preferable (i.e.
+// you can use FarmHash family).
+type Hasher interface {
+	Sum64([]byte) uint64
+}
+
+// Member interface represents a member in consistent hash ring.
+type Member interface {
+	String() string
+}
+
+// Config represents a structure to control consistent package.
+type Config struct {
+	// Hasher is responsible for generating unsigned, 64-bit hash of provided byte slice.
+	Hasher Hasher
+
+	// Keys are distributed among partitions. Prime numbers are good to
+	// distribute keys uniformly. Select a big PartitionCount if you have
+	// too many keys.
+	PartitionCount int
+
+	// Members are replicated on the consistent hash ring. This number
+	// controls how many times each member is replicated on the ring.
+	ReplicationFactor int
+
+	// Load is used to calculate average load. See the code, the paper and Google's blog post to learn about it.
+	Load float64
+}
+
+// Consistent holds the information about the members of the consistent hash circle.
+type Consistent struct {
+	mu sync.RWMutex
+
+	config         Config
+	hasher         Hasher
+	sortedSet      []uint64
+	partitionCount uint64
+	loads          map[string]float64
+	members        map[string]*Member
+	partitions     map[int]*Member
+	ring           map[uint64]*Member
+}
+
+// New creates and returns a new Consistent object.
+func New(members []Member, config Config) *Consistent {
+	if config.Hasher == nil {
+		panic("Hasher cannot be nil")
+	}
+	if config.PartitionCount == 0 {
+		config.PartitionCount = DefaultPartitionCount
+	}
+	if config.ReplicationFactor == 0 {
+		config.ReplicationFactor = DefaultReplicationFactor
+	}
+	if config.Load == 0 {
+		config.Load = DefaultLoad
+	}
+
+	c := &Consistent{
+		config:         config,
+		members:        make(map[string]*Member),
+		partitionCount: uint64(config.PartitionCount),
+		ring:           make(map[uint64]*Member),
+	}
+
+	c.hasher = config.Hasher
+	for _, member := range members {
+		c.add(member)
+	}
+	if members != nil {
+		c.distributePartitions()
+	}
+	return c
+}
+
+// GetMembers returns a thread-safe copy of members. If there are no members, it returns an empty slice of Member.
+func (c *Consistent) GetMembers() []Member {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	// Create a thread-safe copy of member list.
+	members := make([]Member, 0, len(c.members))
+	for _, member := range c.members {
+		members = append(members, *member)
+	}
+	return members
+}
+
+// AverageLoad exposes the current average load.
+func (c *Consistent) AverageLoad() float64 {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	return c.averageLoad()
+}
+
+func (c *Consistent) averageLoad() float64 {
+	if len(c.members) == 0 {
+		return 0
+	}
+
+	avgLoad := float64(c.partitionCount/uint64(len(c.members))) * c.config.Load
+	return math.Ceil(avgLoad)
+}
+
+func (c *Consistent) distributeWithLoad(partID, idx int, partitions map[int]*Member, loads map[string]float64) {
+	avgLoad := c.averageLoad()
+	var count int
+	for {
+		count++
+		// [Psiphon]
+		// Fix: changed ">=" to ">"; otherwise tests showed that 1 member may
+		// be excluded when there is more than one member and that using a
+		// single member results in a crash.
+		if count > len(c.sortedSet) {
+			// User needs to decrease partition count, increase member count or increase load factor.
+			panic("not enough room to distribute partitions")
+		}
+		i := c.sortedSet[idx]
+		member := *c.ring[i]
+		load := loads[member.String()]
+		if load+1 <= avgLoad {
+			partitions[partID] = &member
+			loads[member.String()]++
+			return
+		}
+		idx++
+		if idx >= len(c.sortedSet) {
+			idx = 0
+		}
+	}
+}
+
+func (c *Consistent) distributePartitions() {
+	loads := make(map[string]float64)
+	partitions := make(map[int]*Member)
+
+	bs := make([]byte, 8)
+	for partID := uint64(0); partID < c.partitionCount; partID++ {
+		binary.LittleEndian.PutUint64(bs, partID)
+		key := c.hasher.Sum64(bs)
+		idx := sort.Search(len(c.sortedSet), func(i int) bool {
+			return c.sortedSet[i] >= key
+		})
+		if idx >= len(c.sortedSet) {
+			idx = 0
+		}
+		c.distributeWithLoad(int(partID), idx, partitions, loads)
+	}
+	c.partitions = partitions
+	c.loads = loads
+}
+
+func (c *Consistent) add(member Member) {
+	for i := 0; i < c.config.ReplicationFactor; i++ {
+		key := []byte(fmt.Sprintf("%s%d", member.String(), i))
+		h := c.hasher.Sum64(key)
+		c.ring[h] = &member
+		c.sortedSet = append(c.sortedSet, h)
+	}
+	// sort hashes ascendingly
+	sort.Slice(c.sortedSet, func(i int, j int) bool {
+		return c.sortedSet[i] < c.sortedSet[j]
+	})
+	// Storing member at this map is useful to find backup members of a partition.
+	c.members[member.String()] = &member
+}
+
+// Add adds a new member to the consistent hash circle.
+func (c *Consistent) Add(member Member) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if _, ok := c.members[member.String()]; ok {
+		// We already have this member. Quit immediately.
+		return
+	}
+	c.add(member)
+	c.distributePartitions()
+}
+
+func (c *Consistent) delSlice(val uint64) {
+	for i := 0; i < len(c.sortedSet); i++ {
+		if c.sortedSet[i] == val {
+			c.sortedSet = append(c.sortedSet[:i], c.sortedSet[i+1:]...)
+			break
+		}
+	}
+}
+
+// Remove removes a member from the consistent hash circle.
+func (c *Consistent) Remove(name string) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if _, ok := c.members[name]; !ok {
+		// There is no member with that name. Quit immediately.
+		return
+	}
+
+	for i := 0; i < c.config.ReplicationFactor; i++ {
+		key := []byte(fmt.Sprintf("%s%d", name, i))
+		h := c.hasher.Sum64(key)
+		delete(c.ring, h)
+		c.delSlice(h)
+	}
+	delete(c.members, name)
+	if len(c.members) == 0 {
+		// consistent hash ring is empty now. Reset the partition table.
+		c.partitions = make(map[int]*Member)
+		return
+	}
+	c.distributePartitions()
+}
+
+// LoadDistribution exposes load distribution of members.
+func (c *Consistent) LoadDistribution() map[string]float64 {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	// Create a thread-safe copy
+	res := make(map[string]float64)
+	for member, load := range c.loads {
+		res[member] = load
+	}
+	return res
+}
+
+// FindPartitionID returns partition id for given key.
+func (c *Consistent) FindPartitionID(key []byte) int {
+	hkey := c.hasher.Sum64(key)
+	return int(hkey % c.partitionCount)
+}
+
+// GetPartitionOwner returns the owner of the given partition.
+func (c *Consistent) GetPartitionOwner(partID int) Member {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	return c.getPartitionOwner(partID)
+}
+
+// getPartitionOwner returns the owner of the given partition. It's not thread-safe.
+func (c *Consistent) getPartitionOwner(partID int) Member {
+	member, ok := c.partitions[partID]
+	if !ok {
+		return nil
+	}
+	// Create a thread-safe copy of member and return it.
+	return *member
+}
+
+// LocateKey finds a home for given key
+func (c *Consistent) LocateKey(key []byte) Member {
+	partID := c.FindPartitionID(key)
+	return c.GetPartitionOwner(partID)
+}
+
+func (c *Consistent) getClosestN(partID, count int) ([]Member, error) {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	var res []Member
+	if count > len(c.members) {
+		return res, ErrInsufficientMemberCount
+	}
+
+	var ownerKey uint64
+	owner := c.getPartitionOwner(partID)
+	// Hash and sort all the names.
+	var keys []uint64
+	kmems := make(map[uint64]*Member)
+	for name, member := range c.members {
+		key := c.hasher.Sum64([]byte(name))
+		if name == owner.String() {
+			ownerKey = key
+		}
+		keys = append(keys, key)
+		kmems[key] = member
+	}
+	sort.Slice(keys, func(i, j int) bool {
+		return keys[i] < keys[j]
+	})
+
+	// Find the key owner
+	idx := 0
+	for idx < len(keys) {
+		if keys[idx] == ownerKey {
+			key := keys[idx]
+			res = append(res, *kmems[key])
+			break
+		}
+		idx++
+	}
+
+	// Find the closest(replica owners) members.
+	for len(res) < count {
+		idx++
+		if idx >= len(keys) {
+			idx = 0
+		}
+		key := keys[idx]
+		res = append(res, *kmems[key])
+	}
+	return res, nil
+}
+
+// GetClosestN returns the closest N member to a key in the hash ring.
+// This may be useful to find members for replication.
+func (c *Consistent) GetClosestN(key []byte, count int) ([]Member, error) {
+	partID := c.FindPartitionID(key)
+	return c.getClosestN(partID, count)
+}
+
+// GetClosestNForPartition returns the closest N member for given partition.
+// This may be useful to find members for replication.
+func (c *Consistent) GetClosestNForPartition(partID, count int) ([]Member, error) {
+	return c.getClosestN(partID, count)
+}

+ 22 - 0
vendor/github.com/cespare/xxhash/LICENSE.txt

@@ -0,0 +1,22 @@
+Copyright (c) 2016 Caleb Spare
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

+ 50 - 0
vendor/github.com/cespare/xxhash/README.md

@@ -0,0 +1,50 @@
+# xxhash
+
+[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
+
+xxhash is a Go implementation of the 64-bit
+[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+high-quality hashing algorithm that is much faster than anything in the Go
+standard library.
+
+The API is very small, taking its cue from the other hashing packages in the
+standard library:
+
+    $ go doc github.com/cespare/xxhash                                                                                                                                                                                              !
+    package xxhash // import "github.com/cespare/xxhash"
+
+    Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+    at http://cyan4973.github.io/xxHash/.
+
+    func New() hash.Hash64
+    func Sum64(b []byte) uint64
+    func Sum64String(s string) uint64
+
+This implementation provides a fast pure-Go implementation and an even faster
+assembly implementation for amd64.
+
+## Benchmarks
+
+Here are some quick benchmarks comparing the pure-Go and assembly
+implementations of Sum64 against another popular Go XXH64 implementation,
+[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash):
+
+| input size | OneOfOne | cespare (purego) | cespare |
+| --- | --- | --- | --- |
+| 5 B   |  416 MB/s | 720 MB/s |  872 MB/s  |
+| 100 B | 3980 MB/s | 5013 MB/s | 5252 MB/s  |
+| 4 KB  | 12727 MB/s | 12999 MB/s | 13026 MB/s |
+| 10 MB | 9879 MB/s | 10775 MB/s | 10913 MB/s  |
+
+These numbers were generated with:
+
+```
+$ go test -benchtime 10s -bench '/OneOfOne,'
+$ go test -tags purego -benchtime 10s -bench '/xxhash,'
+$ go test -benchtime 10s -bench '/xxhash,'
+```
+
+## Projects using this package
+
+- [InfluxDB](https://github.com/influxdata/influxdb)
+- [Prometheus](https://github.com/prometheus/prometheus)

+ 14 - 0
vendor/github.com/cespare/xxhash/rotate.go

@@ -0,0 +1,14 @@
+// +build !go1.9
+
+package xxhash
+
+// TODO(caleb): After Go 1.10 comes out, remove this fallback code.
+
+// Rotate-left helpers for pre-Go 1.9 toolchains, where math/bits is not
+// available (the go1.9 build in rotate19.go uses bits.RotateLeft64 instead).
+func rol1(x uint64) uint64  { return (x << 1) | (x >> (64 - 1)) }
+func rol7(x uint64) uint64  { return (x << 7) | (x >> (64 - 7)) }
+func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) }
+func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) }
+func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) }
+func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) }
+func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) }
+func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) }

+ 14 - 0
vendor/github.com/cespare/xxhash/rotate19.go

@@ -0,0 +1,14 @@
+// +build go1.9
+
+package xxhash
+
+import "math/bits"
+
+// Rotate-left helpers for Go 1.9+, expressed with math/bits so the compiler
+// can recognize them and emit a native rotate instruction.
+func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }

+ 168 - 0
vendor/github.com/cespare/xxhash/xxhash.go

@@ -0,0 +1,168 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+package xxhash
+
+import (
+	"encoding/binary"
+	"hash"
+)
+
+const (
+	// The five magic primes defined by the XXH64 specification.
+	prime1 uint64 = 11400714785074694791
+	prime2 uint64 = 14029467366897019727
+	prime3 uint64 = 1609587929392839161
+	prime4 uint64 = 9650029242287828579
+	prime5 uint64 = 2870177450012600261
+)
+
+// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
+// possible in the Go code is worth a small (but measurable) performance boost
+// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
+// convenience in the Go code in a few places where we need to intentionally
+// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
+// result overflows a uint64).
+var (
+	prime1v = prime1
+	prime2v = prime2
+	prime3v = prime3
+	prime4v = prime4
+	prime5v = prime5
+)
+
+// xxh holds the streaming state for the 64-bit xxHash algorithm; it backs the
+// hash.Hash64 returned by New.
+type xxh struct {
+	// v1..v4 are the four accumulator lanes, seeded by Reset.
+	v1    uint64
+	v2    uint64
+	v3    uint64
+	v4    uint64
+	// total counts all bytes written so far (used during finalization).
+	total int
+	// mem buffers a partial 32-byte block between Write calls.
+	mem   [32]byte
+	n     int // how much of mem is used
+}
+
+// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm.
+// The returned state is already Reset and ready for Write calls.
+func New() hash.Hash64 {
+	var x xxh
+	x.Reset()
+	return &x
+}
+
+// Reset clears the buffered block and byte count and re-seeds the four
+// accumulator lanes to their initial XXH64 values.
+func (x *xxh) Reset() {
+	x.n = 0
+	x.total = 0
+	x.v1 = prime1v + prime2
+	x.v2 = prime2
+	x.v3 = 0
+	x.v4 = -prime1v
+}
+
+// Size returns 8, the byte length of the XXH64 digest appended by Sum.
+func (x *xxh) Size() int      { return 8 }
+// BlockSize returns 32, the hash's internal block size in bytes.
+func (x *xxh) BlockSize() int { return 32 }
+
+// Write adds more data to x. It always returns len(b), nil.
+// Input is consumed in 32-byte blocks; any tail shorter than a block is
+// buffered in x.mem until the next Write or finalization.
+func (x *xxh) Write(b []byte) (n int, err error) {
+	n = len(b)
+	x.total += len(b)
+
+	if x.n+len(b) < 32 {
+		// This new data doesn't even fill the current block.
+		copy(x.mem[x.n:], b)
+		x.n += len(b)
+		return
+	}
+
+	if x.n > 0 {
+		// Finish off the partial block: top it up from b, run one round per
+		// 8-byte lane, and advance b past the bytes consumed.
+		copy(x.mem[x.n:], b)
+		x.v1 = round(x.v1, u64(x.mem[0:8]))
+		x.v2 = round(x.v2, u64(x.mem[8:16]))
+		x.v3 = round(x.v3, u64(x.mem[16:24]))
+		x.v4 = round(x.v4, u64(x.mem[24:32]))
+		b = b[32-x.n:]
+		x.n = 0
+	}
+
+	if len(b) >= 32 {
+		// One or more full blocks left.
+		b = writeBlocks(x, b)
+	}
+
+	// Store any remaining partial block.
+	copy(x.mem[:], b)
+	x.n = len(b)
+
+	return
+}
+
+// Sum appends the current 8-byte digest to b in big-endian order and returns
+// the extended slice. The hash state is not modified (Sum64 only reads it).
+func (x *xxh) Sum(b []byte) []byte {
+	s := x.Sum64()
+	return append(
+		b,
+		byte(s>>56),
+		byte(s>>48),
+		byte(s>>40),
+		byte(s>>32),
+		byte(s>>24),
+		byte(s>>16),
+		byte(s>>8),
+		byte(s),
+	)
+}
+
+// Sum64 finalizes and returns the current digest. It does not modify the
+// state, so more data may be written afterwards.
+func (x *xxh) Sum64() uint64 {
+	var h uint64
+
+	if x.total >= 32 {
+		// At least one full block was processed: merge the four lanes.
+		v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+		h = mergeRound(h, v1)
+		h = mergeRound(h, v2)
+		h = mergeRound(h, v3)
+		h = mergeRound(h, v4)
+	} else {
+		// Small input (< 32 bytes total): x.v3 is still 0 from Reset, so
+		// this is the standard prime5 seed for short messages.
+		h = x.v3 + prime5
+	}
+
+	h += uint64(x.total)
+
+	// Fold in the buffered tail: 8-byte words, then a 4-byte word, then
+	// single bytes.
+	i, end := 0, x.n
+	for ; i+8 <= end; i += 8 {
+		k1 := round(0, u64(x.mem[i:i+8]))
+		h ^= k1
+		h = rol27(h)*prime1 + prime4
+	}
+	if i+4 <= end {
+		h ^= uint64(u32(x.mem[i:i+4])) * prime1
+		h = rol23(h)*prime2 + prime3
+		i += 4
+	}
+	for i < end {
+		h ^= uint64(x.mem[i]) * prime5
+		h = rol11(h) * prime1
+		i++
+	}
+
+	// Final avalanche mixing.
+	h ^= h >> 33
+	h *= prime2
+	h ^= h >> 29
+	h *= prime3
+	h ^= h >> 32
+
+	return h
+}
+
+// u64 and u32 read little-endian integers from b; xxHash is defined over
+// little-endian input words.
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+// round folds one 8-byte input word into an accumulator lane.
+func round(acc, input uint64) uint64 {
+	acc += input * prime2
+	acc = rol31(acc)
+	acc *= prime1
+	return acc
+}
+
+// mergeRound mixes one lane accumulator into the final hash value during
+// finalization of inputs of 32 bytes or more.
+func mergeRound(acc, val uint64) uint64 {
+	val = round(0, val)
+	acc ^= val
+	acc = acc*prime1 + prime4
+	return acc
+}

+ 12 - 0
vendor/github.com/cespare/xxhash/xxhash_amd64.go

@@ -0,0 +1,12 @@
+// +build !appengine
+// +build gc
+// +build !purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+//
+// Implemented in assembly in xxhash_amd64.s.
+//
+//go:noescape
+func Sum64(b []byte) uint64
+
+// writeBlocks consumes all full 32-byte blocks of b into x's accumulators and
+// returns the unprocessed tail. Implemented in assembly in xxhash_amd64.s.
+func writeBlocks(x *xxh, b []byte) []byte

+ 233 - 0
vendor/github.com/cespare/xxhash/xxhash_amd64.s

@@ -0,0 +1,233 @@
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Register allocation:
+// AX	h
+// CX	pointer to advance through b
+// DX	n
+// BX	loop end
+// R8	v1, k1
+// R9	v2
+// R10	v3
+// R11	v4
+// R12	tmp
+// R13	prime1v
+// R14	prime2v
+// R15	prime4v
+
+// round reads from and advances the buffer pointer in CX.
+// It assumes that R13 has prime1v and R14 has prime2v.
+#define round(r) \
+	MOVQ  (CX), R12 \
+	ADDQ  $8, CX    \
+	IMULQ R14, R12  \
+	ADDQ  R12, r    \
+	ROLQ  $31, r    \
+	IMULQ R13, r
+
+// mergeRound applies a merge round on the two registers acc and val.
+// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
+// (These macros mirror round() and mergeRound() in xxhash.go.)
+#define mergeRound(acc, val) \
+	IMULQ R14, val \
+	ROLQ  $31, val \
+	IMULQ R13, val \
+	XORQ  val, acc \
+	IMULQ R13, acc \
+	ADDQ  R15, acc
+
+// func Sum64(b []byte) uint64
+// Assembly counterpart of the pure-Go Sum64 in xxhash_other.go; block loop,
+// tail handling, and finalization follow the same structure.
+TEXT ·Sum64(SB), NOSPLIT, $0-32
+	// Load fixed primes.
+	MOVQ ·prime1v(SB), R13
+	MOVQ ·prime2v(SB), R14
+	MOVQ ·prime4v(SB), R15
+
+	// Load slice.
+	MOVQ b_base+0(FP), CX
+	MOVQ b_len+8(FP), DX
+	LEAQ (CX)(DX*1), BX
+
+	// The first loop limit will be len(b)-32.
+	SUBQ $32, BX
+
+	// Check whether we have at least one block.
+	CMPQ DX, $32
+	JLT  noBlocks
+
+	// Set up initial state (v1, v2, v3, v4).
+	MOVQ R13, R8
+	ADDQ R14, R8
+	MOVQ R14, R9
+	XORQ R10, R10
+	XORQ R11, R11
+	SUBQ R13, R11
+
+	// Loop until CX > BX.
+blockLoop:
+	round(R8)
+	round(R9)
+	round(R10)
+	round(R11)
+
+	CMPQ CX, BX
+	JLE  blockLoop
+
+	// Combine the four lanes: h = rol1(v1)+rol7(v2)+rol12(v3)+rol18(v4).
+	MOVQ R8, AX
+	ROLQ $1, AX
+	MOVQ R9, R12
+	ROLQ $7, R12
+	ADDQ R12, AX
+	MOVQ R10, R12
+	ROLQ $12, R12
+	ADDQ R12, AX
+	MOVQ R11, R12
+	ROLQ $18, R12
+	ADDQ R12, AX
+
+	mergeRound(AX, R8)
+	mergeRound(AX, R9)
+	mergeRound(AX, R10)
+	mergeRound(AX, R11)
+
+	JMP afterBlocks
+
+noBlocks:
+	MOVQ ·prime5v(SB), AX
+
+afterBlocks:
+	ADDQ DX, AX
+
+	// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
+	ADDQ $24, BX
+
+	CMPQ CX, BX
+	JG   fourByte
+
+wordLoop:
+	// Calculate k1.
+	MOVQ  (CX), R8
+	ADDQ  $8, CX
+	IMULQ R14, R8
+	ROLQ  $31, R8
+	IMULQ R13, R8
+
+	XORQ  R8, AX
+	ROLQ  $27, AX
+	IMULQ R13, AX
+	ADDQ  R15, AX
+
+	CMPQ CX, BX
+	JLE  wordLoop
+
+fourByte:
+	ADDQ $4, BX
+	CMPQ CX, BX
+	JG   singles
+
+	MOVL  (CX), R8
+	ADDQ  $4, CX
+	IMULQ R13, R8
+	XORQ  R8, AX
+
+	ROLQ  $23, AX
+	IMULQ R14, AX
+	ADDQ  ·prime3v(SB), AX
+
+singles:
+	ADDQ $4, BX
+	CMPQ CX, BX
+	JGE  finalize
+
+singlesLoop:
+	MOVBQZX (CX), R12
+	ADDQ    $1, CX
+	IMULQ   ·prime5v(SB), R12
+	XORQ    R12, AX
+
+	ROLQ  $11, AX
+	IMULQ R13, AX
+
+	CMPQ CX, BX
+	JL   singlesLoop
+
+finalize:
+	// Final avalanche mixing of the digest.
+	MOVQ  AX, R12
+	SHRQ  $33, R12
+	XORQ  R12, AX
+	IMULQ R14, AX
+	MOVQ  AX, R12
+	SHRQ  $29, R12
+	XORQ  R12, AX
+	IMULQ ·prime3v(SB), AX
+	MOVQ  AX, R12
+	SHRQ  $32, R12
+	XORQ  R12, AX
+
+	MOVQ AX, ret+24(FP)
+	RET
+
+// writeBlocks uses the same registers as above except that it uses AX to store
+// the x pointer. It consumes all full 32-byte blocks of b into x's lane
+// accumulators and returns the unprocessed tail slice.
+
+// func writeBlocks(x *xxh, b []byte) []byte
+TEXT ·writeBlocks(SB), NOSPLIT, $0-56
+	// Load fixed primes needed for round.
+	MOVQ ·prime1v(SB), R13
+	MOVQ ·prime2v(SB), R14
+
+	// Load slice.
+	MOVQ b_base+8(FP), CX
+	MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below
+	MOVQ b_len+16(FP), DX
+	LEAQ (CX)(DX*1), BX
+	SUBQ $32, BX
+
+	// Load vN from x.
+	MOVQ x+0(FP), AX
+	MOVQ 0(AX), R8   // v1
+	MOVQ 8(AX), R9   // v2
+	MOVQ 16(AX), R10 // v3
+	MOVQ 24(AX), R11 // v4
+
+	// We don't need to check the loop condition here; this function is
+	// always called with at least one block of data to process.
+blockLoop:
+	round(R8)
+	round(R9)
+	round(R10)
+	round(R11)
+
+	CMPQ CX, BX
+	JLE  blockLoop
+
+	// Copy vN back to x.
+	MOVQ R8, 0(AX)
+	MOVQ R9, 8(AX)
+	MOVQ R10, 16(AX)
+	MOVQ R11, 24(AX)
+
+	// Construct return slice.
+	// NOTE: It's important that we don't construct a slice that has a base
+	// pointer off the end of the original slice, as in Go 1.7+ this will
+	// cause runtime crashes. (See discussion in, for example,
+	// https://github.com/golang/go/issues/16772.)
+	// Therefore, we calculate the length/cap first, and if they're zero, we
+	// keep the old base. This is what the compiler does as well if you
+	// write code like
+	//   b = b[len(b):]
+
+	// New length is 32 - (CX - BX) -> BX+32 - CX.
+	ADDQ $32, BX
+	SUBQ CX, BX
+	JZ   afterSetBase
+
+	MOVQ CX, ret_base+32(FP)
+
+afterSetBase:
+	MOVQ BX, ret_len+40(FP)
+	MOVQ BX, ret_cap+48(FP) // set cap == len
+
+	RET
+ 75 - 0
vendor/github.com/cespare/xxhash/xxhash_other.go

@@ -0,0 +1,75 @@
+// +build !amd64 appengine !gc purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+// Pure-Go fallback used when the amd64 assembly is unavailable (or the
+// purego/appengine build tags are set).
+func Sum64(b []byte) uint64 {
+	// A simpler version would be
+	//   x := New()
+	//   x.Write(b)
+	//   return x.Sum64()
+	// but this is faster, particularly for small inputs.
+
+	n := len(b)
+	var h uint64
+
+	if n >= 32 {
+		// Large input: run the four-lane block loop, then merge the lanes.
+		v1 := prime1v + prime2
+		v2 := prime2
+		v3 := uint64(0)
+		v4 := -prime1v
+		for len(b) >= 32 {
+			v1 = round(v1, u64(b[0:8:len(b)]))
+			v2 = round(v2, u64(b[8:16:len(b)]))
+			v3 = round(v3, u64(b[16:24:len(b)]))
+			v4 = round(v4, u64(b[24:32:len(b)]))
+			b = b[32:len(b):len(b)]
+		}
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+		h = mergeRound(h, v1)
+		h = mergeRound(h, v2)
+		h = mergeRound(h, v3)
+		h = mergeRound(h, v4)
+	} else {
+		// Short input: seed directly with prime5.
+		h = prime5
+	}
+
+	h += uint64(n)
+
+	// Fold in the tail: 8-byte words, one optional 4-byte word, then bytes.
+	i, end := 0, len(b)
+	for ; i+8 <= end; i += 8 {
+		k1 := round(0, u64(b[i:i+8:len(b)]))
+		h ^= k1
+		h = rol27(h)*prime1 + prime4
+	}
+	if i+4 <= end {
+		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+		h = rol23(h)*prime2 + prime3
+		i += 4
+	}
+	for ; i < end; i++ {
+		h ^= uint64(b[i]) * prime5
+		h = rol11(h) * prime1
+	}
+
+	// Final avalanche mixing.
+	h ^= h >> 33
+	h *= prime2
+	h ^= h >> 29
+	h *= prime3
+	h ^= h >> 32
+
+	return h
+}
+
+// writeBlocks consumes all full 32-byte blocks from b, updating x's lane
+// accumulators, and returns the remaining tail (len < 32).
+func writeBlocks(x *xxh, b []byte) []byte {
+	v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
+	for len(b) >= 32 {
+		v1 = round(v1, u64(b[0:8:len(b)]))
+		v2 = round(v2, u64(b[8:16:len(b)]))
+		v3 = round(v3, u64(b[16:24:len(b)]))
+		v4 = round(v4, u64(b[24:32:len(b)]))
+		b = b[32:len(b):len(b)]
+	}
+	x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4
+	return b
+}

+ 10 - 0
vendor/github.com/cespare/xxhash/xxhash_safe.go

@@ -0,0 +1,10 @@
+// +build appengine
+
+// This file contains the safe implementations of otherwise unsafe-using code.
+
+package xxhash
+
+// Sum64String computes the 64-bit xxHash digest of s.
+// Safe (appengine) variant: it copies s into a []byte; see xxhash_unsafe.go
+// for the zero-copy version used on other platforms.
+func Sum64String(s string) uint64 {
+	return Sum64([]byte(s))
+}

+ 30 - 0
vendor/github.com/cespare/xxhash/xxhash_unsafe.go

@@ -0,0 +1,30 @@
+// +build !appengine
+
+// This file encapsulates usage of unsafe.
+// xxhash_safe.go contains the safe implementations.
+
+package xxhash
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// Sum64String computes the 64-bit xxHash digest of s.
+// It may be faster than Sum64([]byte(s)) by avoiding a copy.
+//
+// TODO(caleb): Consider removing this if an optimization is ever added to make
+// it unnecessary: https://golang.org/issue/2205.
+//
+// TODO(caleb): We still have a function call; we could instead write Go/asm
+// copies of Sum64 for strings to squeeze out a bit more speed.
+//
+// NOTE(review): this SliceHeader/StringHeader aliasing predates
+// unsafe.String/StringData; it relies on s staying reachable while b is in
+// use — confirm behavior on newer toolchains before upgrading this vendor.
+func Sum64String(s string) uint64 {
+	// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
+	// for some discussion about this unsafe conversion.
+	var b []byte
+	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+	bh.Len = len(s)
+	bh.Cap = len(s)
+	return Sum64(b)
+}

+ 6 - 0
vendor/modules.txt

@@ -17,6 +17,9 @@ github.com/Psiphon-Inc/rotate-safe-writer
 # github.com/Psiphon-Labs/bolt v0.0.0-20200624191537-23cedaef7ad7
 ## explicit; go 1.12
 github.com/Psiphon-Labs/bolt
+# github.com/Psiphon-Labs/consistent v0.0.0-20240322131436-20aaa4e05737
+## explicit; go 1.9
+github.com/Psiphon-Labs/consistent
 # github.com/Psiphon-Labs/goptlib v0.0.0-20200406165125-c0e32a7a3464
 ## explicit
 github.com/Psiphon-Labs/goptlib
@@ -51,6 +54,9 @@ github.com/armon/go-proxyproto
 ## explicit
 github.com/bifurcation/mint
 github.com/bifurcation/mint/syntax
+# github.com/cespare/xxhash v1.1.0
+## explicit
+github.com/cespare/xxhash
 # github.com/cheekybits/genny v0.0.0-20170328200008-9127e812e1e9
 ## explicit
 github.com/cheekybits/genny/generic