Przeglądaj źródła

Merge remote-tracking branch 'upstream/master'

Adam Pritchard 9 lat temu
rodzic
commit
e0573950dd
53 zmienionych plików z 4919 dodań i 1051 usunięć
  1. 5 1
      .travis.yml
  2. 1 1
      ConsoleClient/Dockerfile
  3. 6 2
      ConsoleClient/main.go
  4. 1 1
      MobileLibrary/Android/Dockerfile
  5. 9 0
      MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java
  6. 3 2
      MobileLibrary/psi/psi.go
  7. 1 1
      Server/Dockerfile-binary-builder
  8. 2 2
      Server/main.go
  9. 151 0
      psiphon/common/authPackage.go
  10. 98 0
      psiphon/common/authPackage_test.go
  11. 29 1
      psiphon/common/net.go
  12. 4 3
      psiphon/common/net_test.go
  13. 4 6
      psiphon/common/networkInterface.go
  14. 25 27
      psiphon/common/obfuscatedSshConn.go
  15. 27 29
      psiphon/common/obfuscator.go
  16. 161 0
      psiphon/common/obfuscator_test.go
  17. 1301 0
      psiphon/common/osl/osl.go
  18. 557 0
      psiphon/common/osl/osl_test.go
  19. 20 1
      psiphon/common/protocol/protocol.go
  20. 6 8
      psiphon/common/protocol/serverEntry.go
  21. 3 3
      psiphon/common/protocol/serverEntry_test.go
  22. 35 38
      psiphon/common/reloader.go
  23. 12 26
      psiphon/common/reloader_test.go
  24. 152 0
      psiphon/common/subnet.go
  25. 126 0
      psiphon/common/subnet_test.go
  26. 47 6
      psiphon/config.go
  27. 97 56
      psiphon/controller.go
  28. 63 27
      psiphon/controller_test.go
  29. 232 117
      psiphon/dataStore.go
  30. 26 8
      psiphon/meekConn.go
  31. 6 2
      psiphon/migrateDataStore.go
  32. 6 5
      psiphon/migrateDataStore_windows.go
  33. 22 3
      psiphon/net.go
  34. 13 6
      psiphon/notice.go
  35. 0 80
      psiphon/package.go
  36. 295 48
      psiphon/remoteServerList.go
  37. 370 0
      psiphon/remoteServerList_test.go
  38. 72 17
      psiphon/server/api.go
  39. 25 22
      psiphon/server/config.go
  40. 8 10
      psiphon/server/dns.go
  41. 26 5
      psiphon/server/geoip.go
  42. 7 6
      psiphon/server/meek.go
  43. 21 27
      psiphon/server/psinet/psinet.go
  44. 199 40
      psiphon/server/server_test.go
  45. 28 7
      psiphon/server/services.go
  46. 2 8
      psiphon/server/trafficRules.go
  47. 382 135
      psiphon/server/tunnelServer.go
  48. 9 1
      psiphon/server/udp.go
  49. 12 9
      psiphon/server/webServer.go
  50. 120 38
      psiphon/serverApi.go
  51. 4 112
      psiphon/splitTunnel.go
  52. 0 38
      psiphon/splitTunnel_test.go
  53. 88 66
      psiphon/tunnel.go

+ 5 - 1
.travis.yml

@@ -1,6 +1,6 @@
 language: go
 go:
-- 1.6
+- 1.7.3
 addons:
   apt_packages:
     - libx11-dev
@@ -10,10 +10,14 @@ install:
 script:
 - cd psiphon
 - go test -race -v ./common
+- go test -race -v ./common/osl
+- go test -race -v ./common/protocol
 - go test -race -v ./transferstats
 - go test -race -v ./server
 - go test -race -v
 - go test -v -covermode=count -coverprofile=common.coverprofile ./common
+- go test -v -covermode=count -coverprofile=osl.coverprofile ./common/osl
+- go test -v -covermode=count -coverprofile=protocol.coverprofile ./common/protocol
 - go test -v -covermode=count -coverprofile=transferstats.coverprofile ./transferstats
 - go test -v -covermode=count -coverprofile=server.coverprofile ./server
 - go test -v -covermode=count -coverprofile=psiphon.coverprofile

+ 1 - 1
ConsoleClient/Dockerfile

@@ -22,7 +22,7 @@ RUN apt-get update -y && apt-get install -y --no-install-recommends \
   && rm -rf /var/lib/apt/lists/*
 
 # Install Go.
-ENV GOVERSION=go1.7.1 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1
+ENV GOVERSION=go1.7.3 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1
 
 RUN curl -L https://storage.googleapis.com/golang/$GOVERSION.linux-amd64.tar.gz -o /tmp/go.tar.gz \
    && tar -C /usr/local -xzf /tmp/go.tar.gz \

+ 6 - 2
ConsoleClient/main.go

@@ -30,6 +30,7 @@ import (
 
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 )
 
 func main() {
@@ -67,16 +68,19 @@ func main() {
 	// Handle required config file parameter
 
 	if configFilename == "" {
+		psiphon.SetEmitDiagnosticNotices(true)
 		psiphon.NoticeError("configuration file is required")
 		os.Exit(1)
 	}
 	configFileContents, err := ioutil.ReadFile(configFilename)
 	if err != nil {
+		psiphon.SetEmitDiagnosticNotices(true)
 		psiphon.NoticeError("error loading configuration file: %s", err)
 		os.Exit(1)
 	}
 	config, err := psiphon.LoadConfig(configFileContents)
 	if err != nil {
+		psiphon.SetEmitDiagnosticNotices(true)
 		psiphon.NoticeError("error processing configuration file: %s", err)
 		os.Exit(1)
 	}
@@ -138,10 +142,10 @@ func main() {
 				return
 			}
 			// TODO: stream embedded server list data? also, the cast makes an unnecessary copy of a large buffer?
-			serverEntries, err := psiphon.DecodeAndValidateServerEntryList(
+			serverEntries, err := protocol.DecodeAndValidateServerEntryList(
 				string(serverEntryList),
 				common.GetCurrentTimestamp(),
-				common.SERVER_ENTRY_SOURCE_EMBEDDED)
+				protocol.SERVER_ENTRY_SOURCE_EMBEDDED)
 			if err != nil {
 				psiphon.NoticeError("error decoding embedded server entry list file: %s", err)
 				return

+ 1 - 1
MobileLibrary/Android/Dockerfile

@@ -19,7 +19,7 @@ RUN apt-get update -y && apt-get install -y --no-install-recommends \
   && rm -rf /var/lib/apt/lists/*
 
 # Install Go.
-ENV GOVERSION=go1.7.1 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1
+ENV GOVERSION=go1.7.3 GOROOT=/usr/local/go GOPATH=/go PATH=$PATH:/usr/local/go/bin:/go/bin CGO_ENABLED=1
 
 RUN curl -L https://storage.googleapis.com/golang/$GOVERSION.linux-amd64.tar.gz -o /tmp/go.tar.gz \
   && tar -C /usr/local -xzf /tmp/go.tar.gz \

+ 9 - 0
MobileLibrary/Android/PsiphonTunnel/PsiphonTunnel.java

@@ -379,6 +379,15 @@ public class PsiphonTunnel extends Psi.PsiphonProvider.Stub {
             json.put("RemoteServerListDownloadFilename", remoteServerListDownload.getAbsolutePath());
         }
 
+        File oslDownloadDir = new File(context.getFilesDir(), "osl");
+        if (!oslDownloadDir.exists()
+                && !oslDownloadDir.mkdirs()) {
+            // Failed to create osl directory
+            // TODO: proceed anyway?
+            throw new IOException("failed to create OSL download directory");
+        }
+        json.put("ObfuscatedServerListDownloadDirectory", oslDownloadDir.getAbsolutePath());
+
         // Note: onConnecting/onConnected logic assumes 1 tunnel connection
         json.put("TunnelPoolSize", 1);
 

+ 3 - 2
MobileLibrary/psi/psi.go

@@ -30,6 +30,7 @@ import (
 
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 )
 
 type PsiphonProvider interface {
@@ -82,10 +83,10 @@ func Start(
 		return fmt.Errorf("error initializing datastore: %s", err)
 	}
 
-	serverEntries, err := psiphon.DecodeAndValidateServerEntryList(
+	serverEntries, err := protocol.DecodeAndValidateServerEntryList(
 		embeddedServerEntryList,
 		common.GetCurrentTimestamp(),
-		common.SERVER_ENTRY_SOURCE_EMBEDDED)
+		protocol.SERVER_ENTRY_SOURCE_EMBEDDED)
 	if err != nil {
 		return fmt.Errorf("error decoding embedded server entry list: %s", err)
 	}

+ 1 - 1
Server/Dockerfile-binary-builder

@@ -1,6 +1,6 @@
 FROM alpine:latest
 
-ENV GOLANG_VERSION 1.7.1
+ENV GOLANG_VERSION 1.7.3
 ENV GOLANG_SRC_URL https://golang.org/dl/go$GOLANG_VERSION.src.tar.gz
 
 RUN set -ex \

+ 2 - 2
Server/main.go

@@ -27,7 +27,7 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/server"
 )
 
@@ -111,7 +111,7 @@ func main() {
 
 		if generateServerNetworkInterface != "" {
 			var err error
-			serverIPaddress, err = psiphon.GetInterfaceIPAddress(generateServerNetworkInterface)
+			serverIPaddress, err = common.GetInterfaceIPAddress(generateServerNetworkInterface)
 			if err != nil {
 				fmt.Printf("generate failed: %s\n", err)
 				os.Exit(1)

+ 151 - 0
psiphon/common/authPackage.go

@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2016, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package common
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+)
+
+// AuthenticatedDataPackage is a JSON record containing some Psiphon data
+// payload, such as list of Psiphon server entries. As it may be downloaded
+// from various sources, it is digitally signed so that the data may be
+// authenticated.
+type AuthenticatedDataPackage struct {
+	Data                   string `json:"data"`
+	SigningPublicKeyDigest []byte `json:"signingPublicKeyDigest"`
+	Signature              []byte `json:"signature"`
+}
+
+// GenerateAuthenticatedDataPackageKeys generates a key pair
+// to be used to sign and verify AuthenticatedDataPackages.
+func GenerateAuthenticatedDataPackageKeys() (string, string, error) {
+
+	rsaKey, err := rsa.GenerateKey(rand.Reader, 4096)
+	if err != nil {
+		return "", "", ContextError(err)
+	}
+
+	publicKeyBytes, err := x509.MarshalPKIXPublicKey(rsaKey.Public())
+	if err != nil {
+		return "", "", ContextError(err)
+	}
+
+	privateKeyBytes := x509.MarshalPKCS1PrivateKey(rsaKey)
+
+	return base64.StdEncoding.EncodeToString(publicKeyBytes),
+		base64.StdEncoding.EncodeToString(privateKeyBytes),
+		nil
+}
+
+func sha256sum(data string) []byte {
+	hash := sha256.New()
+	hash.Write([]byte(data))
+	return hash.Sum(nil)
+}
+
+// WriteAuthenticatedDataPackage creates an AuthenticatedDataPackage
+// containing the specified data and signed by the given key. The output
+// conforms with the legacy format here:
+// https://bitbucket.org/psiphon/psiphon-circumvention-system/src/c25d080f6827b141fe637050ce0d5bd0ae2e9db5/Automation/psi_ops_crypto_tools.py
+func WriteAuthenticatedDataPackage(
+	data string, signingPublicKey, signingPrivateKey string) ([]byte, error) {
+
+	derEncodedPrivateKey, err := base64.StdEncoding.DecodeString(signingPrivateKey)
+	if err != nil {
+		return nil, ContextError(err)
+	}
+	rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(derEncodedPrivateKey)
+	if err != nil {
+		return nil, ContextError(err)
+	}
+
+	signature, err := rsa.SignPKCS1v15(
+		rand.Reader,
+		rsaPrivateKey,
+		crypto.SHA256,
+		sha256sum(data))
+	if err != nil {
+		return nil, ContextError(err)
+	}
+
+	packageJSON, err := json.Marshal(
+		&AuthenticatedDataPackage{
+			Data: data,
+			SigningPublicKeyDigest: sha256sum(signingPublicKey),
+			Signature:              signature,
+		})
+	if err != nil {
+		return nil, ContextError(err)
+	}
+
+	return packageJSON, nil
+}
+
+// ReadAuthenticatedDataPackage extracts and verifies authenticated
+// data from an AuthenticatedDataPackage. The package must have been
+// signed with the given key.
+func ReadAuthenticatedDataPackage(
+	packageJSON []byte, signingPublicKey string) (string, error) {
+
+	var authenticatedDataPackage *AuthenticatedDataPackage
+	err := json.Unmarshal(packageJSON, &authenticatedDataPackage)
+	if err != nil {
+		return "", ContextError(err)
+	}
+
+	derEncodedPublicKey, err := base64.StdEncoding.DecodeString(signingPublicKey)
+	if err != nil {
+		return "", ContextError(err)
+	}
+	publicKey, err := x509.ParsePKIXPublicKey(derEncodedPublicKey)
+	if err != nil {
+		return "", ContextError(err)
+	}
+	rsaPublicKey, ok := publicKey.(*rsa.PublicKey)
+	if !ok {
+		return "", ContextError(errors.New("unexpected signing public key type"))
+	}
+
+	if 0 != bytes.Compare(
+		authenticatedDataPackage.SigningPublicKeyDigest,
+		sha256sum(signingPublicKey)) {
+
+		return "", ContextError(errors.New("unexpected signing public key digest"))
+	}
+
+	err = rsa.VerifyPKCS1v15(
+		rsaPublicKey,
+		crypto.SHA256,
+		sha256sum(authenticatedDataPackage.Data),
+		authenticatedDataPackage.Signature)
+	if err != nil {
+		return "", ContextError(err)
+	}
+
+	return authenticatedDataPackage.Data, nil
+}

+ 98 - 0
psiphon/common/authPackage_test.go

@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2016, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package common
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestAuthenticatedPackage(t *testing.T) {
+
+	var signingPublicKey, signingPrivateKey string
+
+	t.Run("generate package keys", func(t *testing.T) {
+		var err error
+		signingPublicKey, signingPrivateKey, err = GenerateAuthenticatedDataPackageKeys()
+		if err != nil {
+			t.Fatalf("GenerateAuthenticatedDataPackageKeys failed: %s", err)
+		}
+	})
+
+	expectedContent := "TestAuthenticatedPackage"
+	var packagePayload []byte
+
+	t.Run("write package", func(t *testing.T) {
+		var err error
+		packagePayload, err = WriteAuthenticatedDataPackage(
+			expectedContent,
+			signingPublicKey,
+			signingPrivateKey)
+		if err != nil {
+			t.Fatalf("WriteAuthenticatedDataPackage failed: %s", err)
+		}
+	})
+
+	t.Run("read package: success", func(t *testing.T) {
+		content, err := ReadAuthenticatedDataPackage(
+			packagePayload, signingPublicKey)
+		if err != nil {
+			t.Fatalf("ReadAuthenticatedDataPackage failed: %s", err)
+		}
+		if content != expectedContent {
+			t.Fatalf(
+				"unexpected package content: expected %s got %s",
+				expectedContent, content)
+		}
+	})
+
+	t.Run("read package: wrong signing key", func(t *testing.T) {
+		wrongSigningPublicKey, _, err := GenerateAuthenticatedDataPackageKeys()
+		if err != nil {
+			t.Fatalf("GenerateAuthenticatedDataPackageKeys failed: %s", err)
+		}
+		_, err = ReadAuthenticatedDataPackage(
+			packagePayload, wrongSigningPublicKey)
+		if err == nil {
+			t.Fatalf("ReadAuthenticatedDataPackage unexpectedly succeeded")
+		}
+	})
+
+	t.Run("read package: tampered data", func(t *testing.T) {
+
+		var authDataPackage AuthenticatedDataPackage
+		err := json.Unmarshal(packagePayload, &authDataPackage)
+		if err != nil {
+			t.Fatalf("Unmarshal failed: %s", err)
+		}
+		authDataPackage.Data = "TamperedData"
+
+		tamperedPackagePayload, err := json.Marshal(&authDataPackage)
+		if err != nil {
+			t.Fatalf("Marshal failed: %s", err)
+		}
+
+		_, err = ReadAuthenticatedDataPackage(
+			tamperedPackagePayload, signingPublicKey)
+		if err == nil {
+			t.Fatalf("ReadAuthenticatedDataPackage unexpectedly succeeded")
+		}
+	})
+}

+ 29 - 1
psiphon/common/net.go

@@ -185,6 +185,11 @@ func (entry *LRUConnsEntry) Touch() {
 // When a LRUConnsEntry is specified, then the LRU entry is promoted on
 // either a successful read or write.
 //
+// When an ActivityUpdater is set, then its UpdateProgress method is
+// called on each read and write with the number of bytes transferred.
+// The durationNanoseconds, which is the time since the last read, is
+// reported only on reads.
+//
 type ActivityMonitoredConn struct {
 	// Note: 64-bit ints used with atomic operations are at placed
 	// at the start of struct to ensure 64-bit alignment.
@@ -195,13 +200,23 @@ type ActivityMonitoredConn struct {
 	net.Conn
 	inactivityTimeout time.Duration
 	activeOnWrite     bool
+	activityUpdater   ActivityUpdater
 	lruEntry          *LRUConnsEntry
 }
 
+// ActivityUpdater defines an interface for receiving updates for
+// ActivityMonitoredConn activity. Values passed to UpdateProgress are
+// bytes transferred and conn duration since the previous UpdateProgress.
+type ActivityUpdater interface {
+	UpdateProgress(bytesRead, bytesWritten int64, durationNanoseconds int64)
+}
+
+// NewActivityMonitoredConn creates a new ActivityMonitoredConn.
 func NewActivityMonitoredConn(
 	conn net.Conn,
 	inactivityTimeout time.Duration,
 	activeOnWrite bool,
+	activityUpdater ActivityUpdater,
 	lruEntry *LRUConnsEntry) (*ActivityMonitoredConn, error) {
 
 	if inactivityTimeout > 0 {
@@ -220,6 +235,7 @@ func NewActivityMonitoredConn(
 		realStartTime:        time.Now(),
 		monotonicStartTime:   now,
 		lastReadActivityTime: now,
+		activityUpdater:      activityUpdater,
 		lruEntry:             lruEntry,
 	}, nil
 }
@@ -252,11 +268,19 @@ func (conn *ActivityMonitoredConn) Read(buffer []byte) (int, error) {
 				return n, ContextError(err)
 			}
 		}
+
+		readActivityTime := int64(monotime.Now())
+
+		if conn.activityUpdater != nil {
+			conn.activityUpdater.UpdateProgress(
+				int64(n), 0, readActivityTime-atomic.LoadInt64(&conn.lastReadActivityTime))
+		}
+
 		if conn.lruEntry != nil {
 			conn.lruEntry.Touch()
 		}
 
-		atomic.StoreInt64(&conn.lastReadActivityTime, int64(monotime.Now()))
+		atomic.StoreInt64(&conn.lastReadActivityTime, readActivityTime)
 
 	}
 	// Note: no context error to preserve error type
@@ -274,6 +298,10 @@ func (conn *ActivityMonitoredConn) Write(buffer []byte) (int, error) {
 			}
 		}
 
+		if conn.activityUpdater != nil {
+			conn.activityUpdater.UpdateProgress(0, int64(n), 0)
+		}
+
 		if conn.lruEntry != nil {
 			conn.lruEntry.Touch()
 		}

+ 4 - 3
psiphon/common/net_test.go

@@ -106,6 +106,7 @@ func TestActivityMonitoredConn(t *testing.T) {
 		&dummyConn{},
 		200*time.Millisecond,
 		true,
+		nil,
 		nil)
 	if err != nil {
 		t.Fatalf("NewActivityMonitoredConn failed")
@@ -182,19 +183,19 @@ func TestActivityMonitoredLRUConns(t *testing.T) {
 	lruConns := NewLRUConns()
 
 	dummy1 := &dummyConn{}
-	conn1, err := NewActivityMonitoredConn(dummy1, 0, true, lruConns.Add(dummy1))
+	conn1, err := NewActivityMonitoredConn(dummy1, 0, true, nil, lruConns.Add(dummy1))
 	if err != nil {
 		t.Fatalf("NewActivityMonitoredConn failed")
 	}
 
 	dummy2 := &dummyConn{}
-	conn2, err := NewActivityMonitoredConn(dummy2, 0, true, lruConns.Add(dummy2))
+	conn2, err := NewActivityMonitoredConn(dummy2, 0, true, nil, lruConns.Add(dummy2))
 	if err != nil {
 		t.Fatalf("NewActivityMonitoredConn failed")
 	}
 
 	dummy3 := &dummyConn{}
-	conn3, err := NewActivityMonitoredConn(dummy3, 0, true, lruConns.Add(dummy3))
+	conn3, err := NewActivityMonitoredConn(dummy3, 0, true, nil, lruConns.Add(dummy3))
 	if err != nil {
 		t.Fatalf("NewActivityMonitoredConn failed")
 	}

+ 4 - 6
psiphon/networkInterface.go → psiphon/common/networkInterface.go

@@ -17,13 +17,11 @@
  *
  */
 
-package psiphon
+package common
 
 import (
 	"errors"
 	"net"
-
-	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
 )
 
 // Take in an interface name ("lo", "eth0", "any") passed from either
@@ -43,12 +41,12 @@ func GetInterfaceIPAddress(listenInterface string) (string, error) {
 	} else {
 		availableInterfaces, err := net.InterfaceByName(listenInterface)
 		if err != nil {
-			return "", common.ContextError(err)
+			return "", ContextError(err)
 		}
 
 		addrs, err := availableInterfaces.Addrs()
 		if err != nil {
-			return "", common.ContextError(err)
+			return "", ContextError(err)
 		}
 		for _, addr := range addrs {
 			iptype := addr.(*net.IPNet)
@@ -64,6 +62,6 @@ func GetInterfaceIPAddress(listenInterface string) (string, error) {
 		}
 	}
 
-	return "", common.ContextError(errors.New("Could not find IP address of specified interface"))
+	return "", ContextError(errors.New("Could not find IP address of specified interface"))
 
 }

+ 25 - 27
psiphon/obfuscatedSshConn.go → psiphon/common/obfuscatedSshConn.go

@@ -17,7 +17,7 @@
  *
  */
 
-package psiphon
+package common
 
 import (
 	"bytes"
@@ -25,8 +25,6 @@ import (
 	"errors"
 	"io"
 	"net"
-
-	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
 )
 
 const (
@@ -117,7 +115,7 @@ func NewObfuscatedSshConn(
 	if mode == OBFUSCATION_CONN_MODE_CLIENT {
 		obfuscator, err = NewClientObfuscator(&ObfuscatorConfig{Keyword: obfuscationKeyword})
 		if err != nil {
-			return nil, common.ContextError(err)
+			return nil, ContextError(err)
 		}
 		readDeobfuscate = obfuscator.ObfuscateServerToClient
 		writeObfuscate = obfuscator.ObfuscateClientToServer
@@ -128,7 +126,7 @@ func NewObfuscatedSshConn(
 			conn, &ObfuscatorConfig{Keyword: obfuscationKeyword})
 		if err != nil {
 			// TODO: readForever() equivalent
-			return nil, common.ContextError(err)
+			return nil, ContextError(err)
 		}
 		readDeobfuscate = obfuscator.ObfuscateClientToServer
 		writeObfuscate = obfuscator.ObfuscateServerToClient
@@ -163,7 +161,7 @@ func (conn *ObfuscatedSshConn) Write(buffer []byte) (n int, err error) {
 	}
 	err = conn.transformAndWrite(buffer)
 	if err != nil {
-		return 0, common.ContextError(err)
+		return 0, ContextError(err)
 	}
 	// Reports that we wrote all the bytes
 	// (although we may have buffered some or all)
@@ -220,7 +218,7 @@ func (conn *ObfuscatedSshConn) readAndTransform(buffer []byte) (n int, err error
 				conn.readBuffer, err = readSshIdentificationLine(
 					conn.Conn, conn.readDeobfuscate)
 				if err != nil {
-					return 0, common.ContextError(err)
+					return 0, ContextError(err)
 				}
 				if bytes.HasPrefix(conn.readBuffer, []byte("SSH-")) {
 					break
@@ -237,7 +235,7 @@ func (conn *ObfuscatedSshConn) readAndTransform(buffer []byte) (n int, err error
 			conn.readBuffer, isMsgNewKeys, err = readSshPacket(
 				conn.Conn, conn.readDeobfuscate)
 			if err != nil {
-				return 0, common.ContextError(err)
+				return 0, ContextError(err)
 			}
 
 			if isMsgNewKeys {
@@ -249,7 +247,7 @@ func (conn *ObfuscatedSshConn) readAndTransform(buffer []byte) (n int, err error
 		nextState = OBFUSCATION_READ_STATE_FINISHED
 
 	case OBFUSCATION_READ_STATE_FINISHED:
-		return 0, common.ContextError(errors.New("invalid read state"))
+		return 0, ContextError(errors.New("invalid read state"))
 	}
 
 	n = copy(buffer, conn.readBuffer)
@@ -308,18 +306,18 @@ func (conn *ObfuscatedSshConn) transformAndWrite(buffer []byte) (err error) {
 	if conn.writeState == OBFUSCATION_WRITE_STATE_CLIENT_SEND_SEED_MESSAGE {
 		_, err = conn.Conn.Write(conn.obfuscator.SendSeedMessage())
 		if err != nil {
-			return common.ContextError(err)
+			return ContextError(err)
 		}
 		conn.writeState = OBFUSCATION_WRITE_STATE_IDENTIFICATION_LINE
 	} else if conn.writeState == OBFUSCATION_WRITE_STATE_SERVER_SEND_IDENTIFICATION_LINE_PADDING {
 		padding, err := makeServerIdentificationLinePadding()
 		if err != nil {
-			return common.ContextError(err)
+			return ContextError(err)
 		}
 		conn.writeObfuscate(padding)
 		_, err = conn.Conn.Write(padding)
 		if err != nil {
-			return common.ContextError(err)
+			return ContextError(err)
 		}
 		conn.writeState = OBFUSCATION_WRITE_STATE_IDENTIFICATION_LINE
 	}
@@ -338,21 +336,21 @@ func (conn *ObfuscatedSshConn) transformAndWrite(buffer []byte) (err error) {
 		var hasMsgNewKeys bool
 		conn.writeBuffer, sendBuffer, hasMsgNewKeys, err = extractSshPackets(conn.writeBuffer)
 		if err != nil {
-			return common.ContextError(err)
+			return ContextError(err)
 		}
 		if hasMsgNewKeys {
 			conn.writeState = OBFUSCATION_WRITE_STATE_FINISHED
 		}
 
 	case OBFUSCATION_WRITE_STATE_FINISHED:
-		return common.ContextError(errors.New("invalid write state"))
+		return ContextError(errors.New("invalid write state"))
 	}
 
 	if sendBuffer != nil {
 		conn.writeObfuscate(sendBuffer)
 		_, err := conn.Conn.Write(sendBuffer)
 		if err != nil {
-			return common.ContextError(err)
+			return ContextError(err)
 		}
 	}
 
@@ -360,7 +358,7 @@ func (conn *ObfuscatedSshConn) transformAndWrite(buffer []byte) (err error) {
 		// After SSH_MSG_NEWKEYS, any remaining bytes are un-obfuscated
 		_, err := conn.Conn.Write(conn.writeBuffer)
 		if err != nil {
-			return common.ContextError(err)
+			return ContextError(err)
 		}
 		// The buffer memory is no longer used
 		conn.writeBuffer = nil
@@ -378,7 +376,7 @@ func readSshIdentificationLine(
 	for len(readBuffer) < SSH_MAX_SERVER_LINE_LENGTH {
 		_, err := io.ReadFull(conn, oneByte[:])
 		if err != nil {
-			return nil, common.ContextError(err)
+			return nil, ContextError(err)
 		}
 		deobfuscate(oneByte[:])
 		readBuffer = append(readBuffer, oneByte[0])
@@ -388,7 +386,7 @@ func readSshIdentificationLine(
 		}
 	}
 	if !validLine {
-		return nil, common.ContextError(errors.New("invalid identification line"))
+		return nil, ContextError(errors.New("invalid identification line"))
 	}
 	return readBuffer, nil
 }
@@ -399,18 +397,18 @@ func readSshPacket(
 	prefix := make([]byte, SSH_PACKET_PREFIX_LENGTH)
 	_, err := io.ReadFull(conn, prefix)
 	if err != nil {
-		return nil, false, common.ContextError(err)
+		return nil, false, ContextError(err)
 	}
 	deobfuscate(prefix)
 	packetLength, _, payloadLength, messageLength := getSshPacketPrefix(prefix)
 	if packetLength > SSH_MAX_PACKET_LENGTH {
-		return nil, false, common.ContextError(errors.New("ssh packet length too large"))
+		return nil, false, ContextError(errors.New("ssh packet length too large"))
 	}
 	readBuffer := make([]byte, messageLength)
 	copy(readBuffer, prefix)
 	_, err = io.ReadFull(conn, readBuffer[len(prefix):])
 	if err != nil {
-		return nil, false, common.ContextError(err)
+		return nil, false, ContextError(err)
 	}
 	deobfuscate(readBuffer[len(prefix):])
 	isMsgNewKeys := false
@@ -426,9 +424,9 @@ func readSshPacket(
 // From the original patch to sshd.c:
 // https://bitbucket.org/psiphon/psiphon-circumvention-system/commits/f40865ce624b680be840dc2432283c8137bd896d
 func makeServerIdentificationLinePadding() ([]byte, error) {
-	paddingLength, err := common.MakeSecureRandomInt(OBFUSCATE_MAX_PADDING - 2) // 2 = CRLF
+	paddingLength, err := MakeSecureRandomInt(OBFUSCATE_MAX_PADDING - 2) // 2 = CRLF
 	if err != nil {
-		return nil, common.ContextError(err)
+		return nil, ContextError(err)
 	}
 	paddingLength += 2
 	padding := make([]byte, paddingLength)
@@ -492,14 +490,14 @@ func extractSshPackets(writeBuffer []byte) ([]byte, []byte, bool, error) {
 		possiblePaddings := (SSH_MAX_PADDING_LENGTH - paddingLength) / SSH_PADDING_MULTIPLE
 		if possiblePaddings > 0 {
 			// selectedPadding is integer in range [0, possiblePaddings)
-			selectedPadding, err := common.MakeSecureRandomInt(possiblePaddings)
+			selectedPadding, err := MakeSecureRandomInt(possiblePaddings)
 			if err != nil {
-				return nil, nil, false, common.ContextError(err)
+				return nil, nil, false, ContextError(err)
 			}
 			extraPaddingLength := selectedPadding * SSH_PADDING_MULTIPLE
-			extraPadding, err := common.MakeSecureRandomBytes(extraPaddingLength)
+			extraPadding, err := MakeSecureRandomBytes(extraPaddingLength)
 			if err != nil {
-				return nil, nil, false, common.ContextError(err)
+				return nil, nil, false, ContextError(err)
 			}
 			setSshPacketPrefix(
 				packetBuffer, packetLength+extraPaddingLength, paddingLength+extraPaddingLength)

+ 27 - 29
psiphon/obfuscator.go → psiphon/common/obfuscator.go

@@ -17,7 +17,7 @@
  *
  */
 
-package psiphon
+package common
 
 import (
 	"bytes"
@@ -26,8 +26,6 @@ import (
 	"encoding/binary"
 	"errors"
 	"io"
-
-	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
 )
 
 const (
@@ -60,14 +58,14 @@ type ObfuscatorConfig struct {
 func NewClientObfuscator(
 	config *ObfuscatorConfig) (obfuscator *Obfuscator, err error) {
 
-	seed, err := common.MakeSecureRandomBytes(OBFUSCATE_SEED_LENGTH)
+	seed, err := MakeSecureRandomBytes(OBFUSCATE_SEED_LENGTH)
 	if err != nil {
-		return nil, common.ContextError(err)
+		return nil, ContextError(err)
 	}
 
 	clientToServerCipher, serverToClientCipher, err := initObfuscatorCiphers(seed, config)
 	if err != nil {
-		return nil, common.ContextError(err)
+		return nil, ContextError(err)
 	}
 
 	maxPadding := OBFUSCATE_MAX_PADDING
@@ -77,7 +75,7 @@ func NewClientObfuscator(
 
 	seedMessage, err := makeSeedMessage(maxPadding, seed, clientToServerCipher)
 	if err != nil {
-		return nil, common.ContextError(err)
+		return nil, ContextError(err)
 	}
 
 	return &Obfuscator{
@@ -94,7 +92,7 @@ func NewServerObfuscator(
 	clientToServerCipher, serverToClientCipher, err := readSeedMessage(
 		clientReader, config)
 	if err != nil {
-		return nil, common.ContextError(err)
+		return nil, ContextError(err)
 	}
 
 	return &Obfuscator{
@@ -125,22 +123,22 @@ func initObfuscatorCiphers(
 
 	clientToServerKey, err := deriveKey(seed, []byte(config.Keyword), []byte(OBFUSCATE_CLIENT_TO_SERVER_IV))
 	if err != nil {
-		return nil, nil, common.ContextError(err)
+		return nil, nil, ContextError(err)
 	}
 
 	serverToClientKey, err := deriveKey(seed, []byte(config.Keyword), []byte(OBFUSCATE_SERVER_TO_CLIENT_IV))
 	if err != nil {
-		return nil, nil, common.ContextError(err)
+		return nil, nil, ContextError(err)
 	}
 
 	clientToServerCipher, err := rc4.NewCipher(clientToServerKey)
 	if err != nil {
-		return nil, nil, common.ContextError(err)
+		return nil, nil, ContextError(err)
 	}
 
 	serverToClientCipher, err := rc4.NewCipher(serverToClientKey)
 	if err != nil {
-		return nil, nil, common.ContextError(err)
+		return nil, nil, ContextError(err)
 	}
 
 	return clientToServerCipher, serverToClientCipher, nil
@@ -158,37 +156,37 @@ func deriveKey(seed, keyword, iv []byte) ([]byte, error) {
 		digest = h.Sum(nil)
 	}
 	if len(digest) < OBFUSCATE_KEY_LENGTH {
-		return nil, common.ContextError(errors.New("insufficient bytes for obfuscation key"))
+		return nil, ContextError(errors.New("insufficient bytes for obfuscation key"))
 	}
 	return digest[0:OBFUSCATE_KEY_LENGTH], nil
 }
 
 func makeSeedMessage(maxPadding int, seed []byte, clientToServerCipher *rc4.Cipher) ([]byte, error) {
 	// paddingLength is integer in range [0, maxPadding]
-	paddingLength, err := common.MakeSecureRandomInt(maxPadding + 1)
+	paddingLength, err := MakeSecureRandomInt(maxPadding + 1)
 	if err != nil {
-		return nil, common.ContextError(err)
+		return nil, ContextError(err)
 	}
-	padding, err := common.MakeSecureRandomBytes(paddingLength)
+	padding, err := MakeSecureRandomBytes(paddingLength)
 	if err != nil {
-		return nil, common.ContextError(err)
+		return nil, ContextError(err)
 	}
 	buffer := new(bytes.Buffer)
 	err = binary.Write(buffer, binary.BigEndian, seed)
 	if err != nil {
-		return nil, common.ContextError(err)
+		return nil, ContextError(err)
 	}
 	err = binary.Write(buffer, binary.BigEndian, uint32(OBFUSCATE_MAGIC_VALUE))
 	if err != nil {
-		return nil, common.ContextError(err)
+		return nil, ContextError(err)
 	}
 	err = binary.Write(buffer, binary.BigEndian, uint32(paddingLength))
 	if err != nil {
-		return nil, common.ContextError(err)
+		return nil, ContextError(err)
 	}
 	err = binary.Write(buffer, binary.BigEndian, padding)
 	if err != nil {
-		return nil, common.ContextError(err)
+		return nil, ContextError(err)
 	}
 	seedMessage := buffer.Bytes()
 	clientToServerCipher.XORKeyStream(seedMessage[len(seed):], seedMessage[len(seed):])
@@ -201,18 +199,18 @@ func readSeedMessage(
 	seed := make([]byte, OBFUSCATE_SEED_LENGTH)
 	_, err := io.ReadFull(clientReader, seed)
 	if err != nil {
-		return nil, nil, common.ContextError(err)
+		return nil, nil, ContextError(err)
 	}
 
 	clientToServerCipher, serverToClientCipher, err := initObfuscatorCiphers(seed, config)
 	if err != nil {
-		return nil, nil, common.ContextError(err)
+		return nil, nil, ContextError(err)
 	}
 
 	fixedLengthFields := make([]byte, 8) // 4 bytes each for magic value and padding length
 	_, err = io.ReadFull(clientReader, fixedLengthFields)
 	if err != nil {
-		return nil, nil, common.ContextError(err)
+		return nil, nil, ContextError(err)
 	}
 
 	clientToServerCipher.XORKeyStream(fixedLengthFields, fixedLengthFields)
@@ -222,25 +220,25 @@ func readSeedMessage(
 	var magicValue, paddingLength int32
 	err = binary.Read(buffer, binary.BigEndian, &magicValue)
 	if err != nil {
-		return nil, nil, common.ContextError(err)
+		return nil, nil, ContextError(err)
 	}
 	err = binary.Read(buffer, binary.BigEndian, &paddingLength)
 	if err != nil {
-		return nil, nil, common.ContextError(err)
+		return nil, nil, ContextError(err)
 	}
 
 	if magicValue != OBFUSCATE_MAGIC_VALUE {
-		return nil, nil, common.ContextError(errors.New("invalid magic value"))
+		return nil, nil, ContextError(errors.New("invalid magic value"))
 	}
 
 	if paddingLength < 0 || paddingLength > OBFUSCATE_MAX_PADDING {
-		return nil, nil, common.ContextError(errors.New("invalid padding length"))
+		return nil, nil, ContextError(errors.New("invalid padding length"))
 	}
 
 	padding := make([]byte, paddingLength)
 	_, err = io.ReadFull(clientReader, padding)
 	if err != nil {
-		return nil, nil, common.ContextError(err)
+		return nil, nil, ContextError(err)
 	}
 
 	clientToServerCipher.XORKeyStream(padding, padding)

+ 161 - 0
psiphon/common/obfuscator_test.go

@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2016, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package common
+
+import (
+	"bytes"
+	"crypto/rand"
+	"crypto/rsa"
+	"errors"
+	"net"
+	"testing"
+	"time"
+
+	"github.com/Psiphon-Inc/crypto/ssh"
+)
+
+func TestObfuscator(t *testing.T) {
+
+	keyword, _ := MakeRandomStringHex(32)
+
+	config := &ObfuscatorConfig{
+		Keyword:    keyword,
+		MaxPadding: 256,
+	}
+
+	client, err := NewClientObfuscator(config)
+	if err != nil {
+		t.Fatalf("NewClientObfuscator failed: %s", err)
+	}
+
+	seedMessage := client.SendSeedMessage()
+
+	server, err := NewServerObfuscator(bytes.NewReader(seedMessage), config)
+	if err != nil {
+		t.Fatalf("NewServerObfuscator failed: %s", err)
+	}
+
+	clientMessage := []byte("client hello")
+
+	b := append([]byte(nil), clientMessage...)
+	client.ObfuscateClientToServer(b)
+	server.ObfuscateClientToServer(b)
+
+	if !bytes.Equal(clientMessage, b) {
+		t.Fatalf("unexpected client message")
+	}
+
+	serverMessage := []byte("server hello")
+
+	b = append([]byte(nil), serverMessage...)
+	client.ObfuscateServerToClient(b)
+	server.ObfuscateServerToClient(b)
+
+	if !bytes.Equal(serverMessage, b) {
+		t.Fatalf("unexpected client message")
+	}
+}
+
+func TestObfuscatedSSHConn(t *testing.T) {
+
+	keyword, _ := MakeRandomStringHex(32)
+
+	serverAddress := "127.0.0.1:2222"
+
+	listener, err := net.Listen("tcp", serverAddress)
+	if err != nil {
+		t.Fatalf("Listen failed: %s", err)
+	}
+
+	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
+	if err != nil {
+		t.Fatalf("GenerateKey failed: %s", err)
+	}
+
+	hostKey, err := ssh.NewSignerFromKey(rsaKey)
+	if err != nil {
+		t.Fatalf("NewSignerFromKey failed: %s", err)
+	}
+
+	sshCertChecker := &ssh.CertChecker{
+		HostKeyFallback: func(addr string, remote net.Addr, publicKey ssh.PublicKey) error {
+			if !bytes.Equal(hostKey.PublicKey().Marshal(), publicKey.Marshal()) {
+				return errors.New("unexpected host public key")
+			}
+			return nil
+		},
+	}
+
+	result := make(chan error, 1)
+
+	go func() {
+
+		conn, err := listener.Accept()
+
+		if err == nil {
+			conn, err = NewObfuscatedSshConn(
+				OBFUSCATION_CONN_MODE_SERVER, conn, keyword)
+		}
+
+		if err == nil {
+			config := &ssh.ServerConfig{
+				NoClientAuth: true,
+			}
+			config.AddHostKey(hostKey)
+
+			_, _, _, err = ssh.NewServerConn(conn, config)
+		}
+
+		if err != nil {
+			select {
+			case result <- err:
+			default:
+			}
+		}
+	}()
+
+	go func() {
+
+		conn, err := net.DialTimeout("tcp", serverAddress, 5*time.Second)
+
+		if err == nil {
+			conn, err = NewObfuscatedSshConn(
+				OBFUSCATION_CONN_MODE_CLIENT, conn, keyword)
+		}
+
+		if err == nil {
+			config := &ssh.ClientConfig{
+				HostKeyCallback: sshCertChecker.CheckHostKey,
+			}
+			_, _, _, err = ssh.NewClientConn(conn, "", config)
+		}
+
+		// Sends nil on success
+		select {
+		case result <- err:
+		default:
+		}
+	}()
+
+	err = <-result
+	if err != nil {
+		t.Fatalf("obfuscated SSH handshake failed: %s", err)
+	}
+}

+ 1301 - 0
psiphon/common/osl/osl.go

@@ -0,0 +1,1301 @@
+/*
+ * Copyright (c) 2016, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+// Package osl implements the Obfuscated Server List (OSL) mechanism. This
+// mechanism is a method of distributing server lists only to clients that
+// demonstrate certain behavioral traits. Clients are seeded with Server
+// List Obfuscation Keys (SLOKs) as they meet the configured criteria. These
+// keys are stored and later combined to assemble keys to decrypt out-of-band
+// distributed OSL files that contain server lists.
+//
+// This package contains the core routines used in psiphond (to track client
+// traits and issue SLOKs), clients (to manage SLOKs and decrypt OSLs), and
+// automation (to create OSLs for distribution).
+package osl
+
+import (
+	"bytes"
+	"compress/zlib"
+	"crypto/hmac"
+	"crypto/md5"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/url"
+	"path"
+	"path/filepath"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/Psiphon-Inc/crypto/nacl/secretbox"
+	"github.com/Psiphon-Inc/sss"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+)
+
+const (
+	KEY_LENGTH_BYTES    = 32
+	REGISTRY_FILENAME   = "osl-registry"
+	OSL_FILENAME_FORMAT = "osl-%s"
+)
+
+// Config is an OSL configuration, which consists of a list of schemes.
+// The Reload function supports hot reloading of rules data while the
+// process is running.
+type Config struct {
+	common.ReloadableFile
+
+	Schemes []*Scheme
+}
+
+// Scheme defines an OSL seeding and distribution strategy. SLOKs to
+// decrypt OSLs are issued based on client network activity -- defined
+// in the SeedSpecs -- and time. OSLs are created for periods of time
+// and can be decrypted by clients that are seeded with a sufficient
+// selection of SLOKs for that time period. Distribution of server
+// entries to OSLs is delegated to automation.
+type Scheme struct {
+
+	// Epoch is the start time of the scheme, the start time of the
+	// first OSL and when SLOKs will first be issued. It must be
+	// specified in UTC and must be a multiple of SeedPeriodNanoseconds.
+	Epoch string
+
+	// Regions is a list of client country codes this scheme applies to.
+	// If empty, the scheme applies to all regions.
+	Regions []string
+
+	// PropagationChannelIDs is a list of client propagation channel IDs
+	// this scheme applies to. Propagation channel IDs are an input
+	// to SLOK key derivation.
+	PropagationChannelIDs []string
+
+	// MasterKey is the base random key used for SLOK key derivation. It
+	// must be unique for each scheme. It must be 32 random bytes, base64
+	// encoded.
+	MasterKey []byte
+
+	// SeedSpecs is the set of different client network activity patterns
+	// that will result in issuing SLOKs. For a given time period, a distinct
+	// SLOK is issued for each SeedLevel in each SeedSpec.
+	// Duplicate subnets may appear in multiple SeedSpecs.
+	SeedSpecs []*SeedSpec
+
+	// SeedSpecThreshold is the threshold scheme for combining SLOKs to
+	// decrypt an OSL. For any fixed time period, at least K (threshold) of
+	// N (total) SLOKs from the N SeedSpecs must be seeded for a client to be
+	// able to reassemble the OSL key.
+	// Limitation: thresholds must be at least 2.
+	SeedSpecThreshold int
+
+	// SeedPeriodNanoseconds is the time period granularity of SLOKs.
+	// New SLOKs are issued every SeedPeriodNanoseconds. Client progress
+	// towards activity levels is reset at the end of each period.
+	SeedPeriodNanoseconds int64
+
+	// KeySplits is the time period threshold scheme layered on top of the
+	// SeedSpecThreshold scheme for combining SLOKs to decrypt an OSL.
+	// There must be at least one level. For one level, any K (threshold) of
+	// N (total) SeedSpec SLOK groups must be sufficiently seeded for a client
+	// to be able to reassemble the OSL key. When an additional level is
+	// specified, then K' of N' groups of N of K SeedSpec SLOK groups must be
+	// sufficiently seeded. And so on. The first level in the list is the
+	// lowest level. The time period for OSLs is determined by the totals in
+	// the KeySplits.
+	// Limitation: thresholds must be at least 2.
+	//
+	// Example:
+	//
+	//   SeedSpecs = <3 specs>
+	//   SeedSpecThreshold = 2
+	//   SeedPeriodNanoseconds = 100,000,000 = 100 milliseconds
+	//   SeedPeriodKeySplits = [{10, 7}, {60, 5}]
+	//
+	//   In this scheme, up to 3 distinct SLOKs, one per spec, are issued
+	//   every 100 milliseconds.
+	//
+	//   Distinct OSLs are paved for every minute (60 seconds). Each OSL
+	//   key is split such that, for those 60 seconds, a client must seed
+	//   2/3 spec SLOKs for 7 of 10 consecutive 100 ms. time periods within
+	//   a second, for any 5 of 60 seconds within the minute.
+	//
+	SeedPeriodKeySplits []KeySplit
+
+	// The following fields are ephemeral state.
+
+	epoch                 time.Time
+	subnetLookups         []common.SubnetLookup
+	derivedSLOKCacheMutex sync.RWMutex
+	derivedSLOKCache      map[slokReference]*SLOK
+}
+
+// SeedSpec defines a client traffic pattern that results in a seeded SLOK.
+// For each time period, a unique SLOK is issued to a client that meets the
+// traffic levels specified in Targets. All upstream port forward traffic to
+// UpstreamSubnets is counted towards the targets.
+//
+// ID is a SLOK key derivation component and must be 32 random bytes, base64
+// encoded. UpstreamSubnets is a list of CIDRs. Description is not used; it's
+// for JSON config file comments.
+type SeedSpec struct {
+	Description     string
+	ID              []byte
+	UpstreamSubnets []string
+	Targets         TrafficValues
+}
+
+// TrafficValues defines a client traffic level that seeds a SLOK.
+// BytesRead and BytesWritten are the minimum bytes transferred counts to
+// seed a SLOK. Both UDP and TCP data will be counted towards these totals.
+// PortForwardDurationNanoseconds is the duration that a TCP or UDP port
+// forward is active (not connected, in the UDP case). All threshold
+// settings must be met to seed a SLOK; any threshold may be set to 0 to
+// be trivially satisfied.
+type TrafficValues struct {
+	BytesRead                      int64
+	BytesWritten                   int64
+	PortForwardDurationNanoseconds int64
+}
+
+// KeySplit defines a secret key splitting scheme where the secret is split
+// into N (total) shares and any K (threshold) of N shares must be known
+// to reconstruct the split secret.
+type KeySplit struct {
+	Total     int
+	Threshold int
+}
+
+// ClientSeedState tracks the progress of a client towards seeding SLOKs.
+type ClientSeedState struct {
+	scheme               *Scheme
+	propagationChannelID string
+	signalIssueSLOKs     chan struct{}
+	progress             []*TrafficValues
+	progressSLOKTime     int64
+	mutex                sync.Mutex
+	issuedSLOKs          map[string]*SLOK
+	payloadSLOKs         []*SLOK
+}
+
+// ClientSeedPortForward maps a client port forward, which is relaying
+// traffic to a specific upstream address, to all seed state progress
+// counters for SeedSpecs with subnets containing the upstream address.
+// As traffic is relayed through the port forwards, the bytes transferred
+// and duration count towards the progress of these SeedSpecs and
+// associated SLOKs.
+type ClientSeedPortForward struct {
+	state           *ClientSeedState
+	progressIndexes []int
+}
+
+// slokReference uniquely identifies a SLOK by specifying all the fields
+// used to derive the SLOK secret key and ID.
+// Note: SeedSpecID is not a []byte as slokReference is used as a map key.
+type slokReference struct {
+	PropagationChannelID string
+	SeedSpecID           string
+	Time                 time.Time
+}
+
+// SLOK is a seeded SLOK issued to a client. The client will store the
+// SLOK in its local database; look it up by ID when checking which OSLs it
+// can reassemble keys for; and use the key material to reassemble OSL
+// file keys.
+type SLOK struct {
+	ID  []byte
+	Key []byte
+}
+
+// SeedPayload is the list of seeded SLOKs sent to a client.
+type SeedPayload struct {
+	SLOKs []*SLOK
+}
+
+// NewConfig initializes a Config with the settings in the specified
+// file.
+func NewConfig(filename string) (*Config, error) {
+
+	config := &Config{}
+
+	config.ReloadableFile = common.NewReloadableFile(
+		filename,
+		func(fileContent []byte) error {
+			newConfig, err := LoadConfig(fileContent)
+			if err != nil {
+				return common.ContextError(err)
+			}
+			// Modify actual traffic rules only after validation
+			config.Schemes = newConfig.Schemes
+			return nil
+		})
+
+	_, err := config.Reload()
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+
+	return config, nil
+}
+
+// LoadConfig loads, validates, and initializes a JSON encoded OSL
+// configuration.
+func LoadConfig(configJSON []byte) (*Config, error) {
+
+	var config Config
+	err := json.Unmarshal(configJSON, &config)
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+
+	var previousEpoch time.Time
+
+	for _, scheme := range config.Schemes {
+
+		epoch, err := time.Parse(time.RFC3339, scheme.Epoch)
+		if err != nil {
+			return nil, common.ContextError(fmt.Errorf("invalid epoch format: %s", err))
+		}
+
+		if epoch.UTC() != epoch {
+			return nil, common.ContextError(errors.New("invalid epoch timezone"))
+		}
+
+		if epoch.Round(time.Duration(scheme.SeedPeriodNanoseconds)) != epoch {
+			return nil, common.ContextError(errors.New("invalid epoch period"))
+		}
+
+		if epoch.Before(previousEpoch) {
+			return nil, common.ContextError(errors.New("invalid epoch order"))
+		}
+
+		previousEpoch = epoch
+
+		scheme.epoch = epoch
+		scheme.subnetLookups = make([]common.SubnetLookup, len(scheme.SeedSpecs))
+		scheme.derivedSLOKCache = make(map[slokReference]*SLOK)
+
+		if len(scheme.MasterKey) != KEY_LENGTH_BYTES {
+			return nil, common.ContextError(errors.New("invalid master key"))
+		}
+
+		for index, seedSpec := range scheme.SeedSpecs {
+			if len(seedSpec.ID) != KEY_LENGTH_BYTES {
+				return nil, common.ContextError(errors.New("invalid seed spec ID"))
+			}
+
+			// TODO: check that subnets do not overlap, as required by SubnetLookup
+			subnetLookup, err := common.NewSubnetLookup(seedSpec.UpstreamSubnets)
+			if err != nil {
+				return nil, common.ContextError(fmt.Errorf("invalid upstream subnets: %s", err))
+			}
+
+			scheme.subnetLookups[index] = subnetLookup
+		}
+
+		if !isValidShamirSplit(len(scheme.SeedSpecs), scheme.SeedSpecThreshold) {
+			return nil, common.ContextError(errors.New("invalid seed spec key split"))
+		}
+
+		if len(scheme.SeedPeriodKeySplits) < 1 {
+			return nil, common.ContextError(errors.New("invalid seed period key split count"))
+		}
+
+		for _, keySplit := range scheme.SeedPeriodKeySplits {
+			if !isValidShamirSplit(keySplit.Total, keySplit.Threshold) {
+				return nil, common.ContextError(errors.New("invalid seed period key split"))
+			}
+		}
+	}
+
+	return &config, nil
+}
+
+// NewClientSeedState creates a new client seed state to track
+// client progress towards seeding SLOKs. psiphond maintains one
+// ClientSeedState for each connected client.
+//
+// A signal is sent on signalIssueSLOKs when sufficient progress
+// has been made that a new SLOK *may* be issued. psiphond will
+// receive the signal and then call GetClientSeedPayload/IssueSLOKs
+// to issue SLOKs, generate payload, and send to the client. The
+// sender will not block sending to signalIssueSLOKs; the channel
+// should be appropriately buffered.
+func (config *Config) NewClientSeedState(
+	clientRegion, propagationChannelID string,
+	signalIssueSLOKs chan struct{}) *ClientSeedState {
+
+	config.ReloadableFile.RLock()
+	defer config.ReloadableFile.RUnlock()
+
+	for _, scheme := range config.Schemes {
+		// Only the first matching scheme is selected.
+		// Note: this implementation assumes a few simple schemes. For more
+		// schemes with many propagation channel IDs or region filters, use
+		// maps for more efficient lookup.
+		if scheme.epoch.Before(time.Now().UTC()) &&
+			common.Contains(scheme.PropagationChannelIDs, propagationChannelID) &&
+			(len(scheme.Regions) == 0 || common.Contains(scheme.Regions, clientRegion)) {
+
+			// Empty progress is initialized up front for all seed specs. Once
+			// created, the progress structure is read-only (the slice, not the
+			// TrafficValue fields); this permits lock-free operation.
+			progress := make([]*TrafficValues, len(scheme.SeedSpecs))
+			for index := 0; index < len(scheme.SeedSpecs); index++ {
+				progress[index] = &TrafficValues{}
+			}
+
+			return &ClientSeedState{
+				scheme:               scheme,
+				propagationChannelID: propagationChannelID,
+				signalIssueSLOKs:     signalIssueSLOKs,
+				progressSLOKTime:     getSLOKTime(scheme.SeedPeriodNanoseconds),
+				progress:             progress,
+				issuedSLOKs:          make(map[string]*SLOK),
+				payloadSLOKs:         nil,
+			}
+		}
+	}
+
+	return &ClientSeedState{}
+}
+
+// NewClientSeedPortForwardState creates a new client port forward
+// traffic progress tracker. Port forward progress reported to the
+// ClientSeedPortForward is added to seed state progress for all
+// seed specs containing upstreamIPAddress in their subnets.
+// The return value will be nil when activity for upstreamIPAddress
+// does not count towards any progress.
+// NewClientSeedPortForward may be invoked concurrently by many
+// psiphond port forward establishment goroutines.
+func (state *ClientSeedState) NewClientSeedPortForward(
+	upstreamIPAddress net.IP) *ClientSeedPortForward {
+
+	// Concurrency: access to ClientSeedState is unsynchronized
+	// but references only read-only fields.
+
+	if state.scheme == nil {
+		return nil
+	}
+
+	var progressIndexes []int
+
+	// Determine which seed spec subnets contain upstreamIPAddress
+	// and point to the progress for each. When progress is reported,
+	// it is added directly to all of these TrafficValues instances.
+	// Assumes state.progress entries correspond 1-to-1 with
+	// state.scheme.subnetLookups.
+	// Note: this implementation assumes a small number of seed specs.
+	// For larger numbers, instead of N SubnetLookups, create a single
+	// SubnetLookup which returns, for a given IP address, all matching
+	// subnets and associated seed specs.
+	for index, subnetLookup := range state.scheme.subnetLookups {
+		if subnetLookup.ContainsIPAddress(upstreamIPAddress) {
+			progressIndexes = append(progressIndexes, index)
+		}
+	}
+
+	if progressIndexes == nil {
+		return nil
+	}
+
+	return &ClientSeedPortForward{
+		state:           state,
+		progressIndexes: progressIndexes,
+	}
+}
+
+func (state *ClientSeedState) sendIssueSLOKsSignal() {
+	if state.signalIssueSLOKs != nil {
+		select {
+		case state.signalIssueSLOKs <- *new(struct{}):
+		default:
+		}
+	}
+}
+
+// UpdateProgress adds port forward bytes transferred and duration to
+// all seed spec progresses associated with the port forward.
+// If UpdateProgress is invoked after the SLOK time period has rolled
+// over, any pending seeded SLOKs are issued and all progress is reset.
+// UpdateProgress may be invoked concurrently by many psiphond port
+// relay goroutines. The implementation of UpdateProgress prioritizes
+// not blocking port forward relaying; a consequence of this lock-free
+// design is that progress reported at the exact time of SLOK time period
+// rollover may be dropped.
+func (portForward *ClientSeedPortForward) UpdateProgress(
+	bytesRead, bytesWritten int64, durationNanoseconds int64) {
+
+	// Concurrency: non-blocking -- access to ClientSeedState is unsynchronized
+	// to read-only fields, atomic, or channels, except in the case of a time
+	// period rollover, in which case a mutex is acquired.
+
+	slokTime := getSLOKTime(portForward.state.scheme.SeedPeriodNanoseconds)
+
+	// If the SLOK time period has changed since progress was last recorded,
+	// call issueSLOKs which will issue any SLOKs for that past time period
+	// and then clear all progress. Progress will then be recorded for the
+	// current time period.
+	// As it acquires the state mutex, issueSLOKs may stall other port
+	// forwards for this client. The delay is minimized by SLOK caching,
+	// which avoids redundant crypto operations.
+	if slokTime != atomic.LoadInt64(&portForward.state.progressSLOKTime) {
+		portForward.state.mutex.Lock()
+		portForward.state.issueSLOKs()
+		portForward.state.mutex.Unlock()
+
+		// Call to issueSLOKs may have issued new SLOKs. Note that
+		// this will only happen if the time period rolls over with
+		// sufficient progress pending while the signalIssueSLOKs
+		// receiver did not call IssueSLOKs soon enough.
+		portForward.state.sendIssueSLOKsSignal()
+	}
+
+	// Add directly to the permanent TrafficValues progress accumulators
+	// for the state's seed specs. Concurrently, other port forwards may
+	// be adding to the same accumulators. Also concurrently, another
+	// goroutine may be invoking issueSLOKs, which zeros all the accumulators.
+	// As a consequence, progress may be dropped at the exact time of
+	// time period rollover.
+	for _, progressIndex := range portForward.progressIndexes {
+
+		seedSpec := portForward.state.scheme.SeedSpecs[progressIndex]
+		progress := portForward.state.progress[progressIndex]
+
+		alreadyExceedsTargets := progress.exceeds(&seedSpec.Targets)
+
+		atomic.AddInt64(&progress.BytesRead, bytesRead)
+		atomic.AddInt64(&progress.BytesWritten, bytesWritten)
+		atomic.AddInt64(&progress.PortForwardDurationNanoseconds, durationNanoseconds)
+
+		// With the target newly met for a SeedSpec, a new
+		// SLOK *may* be issued.
+		if !alreadyExceedsTargets && progress.exceeds(&seedSpec.Targets) {
+			portForward.state.sendIssueSLOKsSignal()
+		}
+	}
+}
+
+func (lhs *TrafficValues) exceeds(rhs *TrafficValues) bool {
+	return atomic.LoadInt64(&lhs.BytesRead) >= atomic.LoadInt64(&rhs.BytesRead) &&
+		atomic.LoadInt64(&lhs.BytesWritten) >= atomic.LoadInt64(&rhs.BytesWritten) &&
+		atomic.LoadInt64(&lhs.PortForwardDurationNanoseconds) >=
+			atomic.LoadInt64(&rhs.PortForwardDurationNanoseconds)
+}
+
+// issueSLOKs checks client progress against each candidate seed spec
+// and seeds SLOKs when the client traffic levels are achieved. After
+// checking progress, and if the SLOK time period has changed since
+// progress was last recorded, progress is reset. Partial, insufficient
+// progress is intentionally dropped when the time period rolls over.
+// Derived SLOKs are cached to avoid redundant CPU intensive operations.
+// All issued SLOKs are retained in the client state for the duration
+// of the client's session.
+func (state *ClientSeedState) issueSLOKs() {
+
+	// Concurrency: the caller must lock state.mutex.
+
+	if state.scheme == nil {
+		return
+	}
+
+	progressSLOKTime := time.Unix(0, state.progressSLOKTime)
+
+	for index, progress := range state.progress {
+
+		seedSpec := state.scheme.SeedSpecs[index]
+
+		if progress.exceeds(&seedSpec.Targets) {
+
+			ref := &slokReference{
+				PropagationChannelID: state.propagationChannelID,
+				SeedSpecID:           string(seedSpec.ID),
+				Time:                 progressSLOKTime,
+			}
+
+			state.scheme.derivedSLOKCacheMutex.RLock()
+			slok, ok := state.scheme.derivedSLOKCache[*ref]
+			state.scheme.derivedSLOKCacheMutex.RUnlock()
+			if !ok {
+				slok = deriveSLOK(state.scheme, ref)
+				state.scheme.derivedSLOKCacheMutex.Lock()
+				state.scheme.derivedSLOKCache[*ref] = slok
+				state.scheme.derivedSLOKCacheMutex.Unlock()
+			}
+
+			// Previously issued SLOKs are not re-added to
+			// the payload.
+			if state.issuedSLOKs[string(slok.ID)] == nil {
+				state.issuedSLOKs[string(slok.ID)] = slok
+				state.payloadSLOKs = append(state.payloadSLOKs, slok)
+			}
+		}
+	}
+
+	slokTime := getSLOKTime(state.scheme.SeedPeriodNanoseconds)
+
+	if slokTime != atomic.LoadInt64(&state.progressSLOKTime) {
+		atomic.StoreInt64(&state.progressSLOKTime, slokTime)
+		// The progress map structure is not reset or modified; instead
+		// the mapped accumulator values are zeroed. Concurrently, port
+		// forward relay goroutines continue to add to these accumulators.
+		for _, progress := range state.progress {
+			atomic.StoreInt64(&progress.BytesRead, 0)
+			atomic.StoreInt64(&progress.BytesWritten, 0)
+			atomic.StoreInt64(&progress.PortForwardDurationNanoseconds, 0)
+		}
+	}
+}
+
+func getSLOKTime(seedPeriodNanoseconds int64) int64 {
+	return time.Now().UTC().Truncate(time.Duration(seedPeriodNanoseconds)).UnixNano()
+}
+
+// deriveSLOK produces SLOK secret keys and IDs using HKDF-Expand
+// defined in https://tools.ietf.org/html/rfc5869.
+func deriveSLOK(
+	scheme *Scheme, ref *slokReference) *SLOK {
+
+	timeBytes := make([]byte, 8)
+	binary.LittleEndian.PutUint64(timeBytes, uint64(ref.Time.UnixNano()))
+
+	key := deriveKeyHKDF(
+		scheme.MasterKey,
+		[]byte(ref.PropagationChannelID),
+		[]byte(ref.SeedSpecID),
+		timeBytes)
+
+	// TODO: is ID derivation cryptographically sound?
+	id := deriveKeyHKDF(
+		scheme.MasterKey,
+		key)
+
+	return &SLOK{
+		ID:  id,
+		Key: key,
+	}
+}
+
+// GetSeedPayload issues any pending SLOKs and returns the accumulated
+// SLOKs for a given client. psiphond will call this when it receives
+// signalIssueSLOKs which is the trigger to check for new SLOKs.
+// Note: caller must not modify the SLOKs in SeedPayload.SLOKs
+// as these are shared data.
+func (state *ClientSeedState) GetSeedPayload() *SeedPayload {
+
+	state.mutex.Lock()
+	defer state.mutex.Unlock()
+
+	if state.scheme == nil {
+		return &SeedPayload{}
+	}
+
+	state.issueSLOKs()
+
+	sloks := make([]*SLOK, len(state.payloadSLOKs))
+	for index, slok := range state.payloadSLOKs {
+		sloks[index] = slok
+	}
+
+	return &SeedPayload{
+		SLOKs: sloks,
+	}
+}
+
+// ClearSeedPayload resets the accumulated SLOK payload (but not SLOK
+// progress). psiphond calls this after the client has acknowledged
+// receipt of a payload.
+func (state *ClientSeedState) ClearSeedPayload() {
+
+	state.mutex.Lock()
+	defer state.mutex.Unlock()
+
+	state.payloadSLOKs = nil
+}
+
+// PaveFile describes an OSL data file to be paved to an out-of-band
+// distribution drop site. There are two types of files: a registry,
+// which describes how to assemble keys for OSLs, and the encrypted
+// OSL files.
+type PaveFile struct {
+	Name     string
+	Contents []byte
+}
+
+// Registry describes a set of OSL files.
+type Registry struct {
+	FileSpecs []*OSLFileSpec
+
+	// The following fields are ephemeral state.
+
+	oslIDLookup map[string]*OSLFileSpec
+}
+
+// An OSLFileSpec includes an ID which is used to reference the
+// OSL file and describes the key splits used to divide the OSL
+// file key along with the SLOKs required to reassemble those keys.
+//
+// The MD5Sum field is a checksum of the contents of the OSL file
+// to be used to skip redownloading previously downloaded files.
+// MD5 is not cryptogrpahically secure and this checksum is not
+// relied upon for OSL verification. MD5 is used for compatibility
+// with out-of-band distribution hosts.
+type OSLFileSpec struct {
+	ID        []byte
+	KeyShares *KeyShares
+	MD5Sum    []byte
+}
+
+// KeyShares is a tree data structure which describes the
+// key splits used to divide a secret key. BoxedShares are encrypted
+// shares of the key, and #Threshold amount of decrypted BoxedShares
+// are required to reconstruct the secret key. The keys for BoxedShares
+// are either SLOKs (referenced by SLOK ID) or random keys that are
+// themselves split as described in child KeyShares.
+type KeyShares struct {
+	Threshold   int
+	BoxedShares [][]byte
+	SLOKIDs     [][]byte
+	KeyShares   []*KeyShares
+}
+
+// Pave creates the full set of OSL files, for all schemes in the
+// configuration, to be dropped in an out-of-band distribution site.
+// Only OSLs for the propagation channel ID associated with the
+// distribution site are paved. This function is used by automation.
+//
+// The Name component of each file relates to the values returned by
+// the client functions GetRegistryURL and GetOSLFileURL.
+//
+// Pave returns a pave file for the entire registry of all OSLs from
+// epoch. It only returns pave files for OSLs referenced in
+// paveServerEntries. paveServerEntries is a list of maps, one for each
+// scheme, from the first SLOK time period identifying an OSL to a
+// payload to encrypt and pave.
+// The registry file spec MD5 checksum values are populated only for
+// OSLs referenced in paveServerEntries. To ensure a registry is fully
+// populated with hashes for skipping redownloading, all OSLs should
+// be paved.
+//
+// Automation is responsible for consistently distributing server entries
+// to OSLs in the case where OSLs are repaved in subsequent calls.
+func (config *Config) Pave(
+	endTime time.Time,
+	propagationChannelID string,
+	signingPublicKey string,
+	signingPrivateKey string,
+	paveServerEntries []map[time.Time]string) ([]*PaveFile, error) {
+
+	// Hold the read lock for the duration so a concurrent config reload
+	// cannot swap in a new scheme list mid-pave.
+	config.ReloadableFile.RLock()
+	defer config.ReloadableFile.RUnlock()
+
+	var paveFiles []*PaveFile
+
+	registry := &Registry{}
+
+	// paveServerEntries must supply exactly one map per scheme, in
+	// scheme order.
+	if len(paveServerEntries) != len(config.Schemes) {
+		return nil, common.ContextError(errors.New("invalid paveServerEntries"))
+	}
+
+	for schemeIndex, scheme := range config.Schemes {
+
+		// One OSL spans the product of all key-split Totals worth of
+		// SLOK time periods.
+		slokTimePeriodsPerOSL := 1
+		for _, keySplit := range scheme.SeedPeriodKeySplits {
+			slokTimePeriodsPerOSL *= keySplit.Total
+		}
+
+		if common.Contains(scheme.PropagationChannelIDs, propagationChannelID) {
+			// Walk OSL time periods from the scheme epoch up to endTime,
+			// inclusive.
+			oslTime := scheme.epoch
+			for !oslTime.After(endTime) {
+
+				firstSLOKTime := oslTime
+				fileKey, fileSpec, err := makeOSLFileSpec(
+					scheme, propagationChannelID, firstSLOKTime)
+				if err != nil {
+					return nil, common.ContextError(err)
+				}
+
+				// Every OSL appears in the registry, even when no payload
+				// is paved for it in this call.
+				registry.FileSpecs = append(registry.FileSpecs, fileSpec)
+
+				serverEntries, ok := paveServerEntries[schemeIndex][oslTime]
+				if ok {
+
+					signedServerEntries, err := common.WriteAuthenticatedDataPackage(
+						serverEntries,
+						signingPublicKey,
+						signingPrivateKey)
+					if err != nil {
+						return nil, common.ContextError(err)
+					}
+
+					boxedServerEntries, err := box(fileKey, compress(signedServerEntries))
+					if err != nil {
+						return nil, common.ContextError(err)
+					}
+
+					// fileSpec is a pointer already stored in the
+					// registry, so setting the checksum here updates the
+					// registry entry as well.
+					md5sum := md5.Sum(boxedServerEntries)
+					fileSpec.MD5Sum = md5sum[:]
+
+					fileName := fmt.Sprintf(
+						OSL_FILENAME_FORMAT, hex.EncodeToString(fileSpec.ID))
+
+					paveFiles = append(paveFiles, &PaveFile{
+						Name:     fileName,
+						Contents: boxedServerEntries,
+					})
+				}
+
+				oslTime = oslTime.Add(
+					time.Duration(
+						int64(slokTimePeriodsPerOSL) * scheme.SeedPeriodNanoseconds))
+			}
+		}
+	}
+
+	registryJSON, err := json.Marshal(registry)
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+
+	// The registry itself is signed so clients can authenticate it.
+	signedRegistry, err := common.WriteAuthenticatedDataPackage(
+		base64.StdEncoding.EncodeToString(registryJSON),
+		signingPublicKey,
+		signingPrivateKey)
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+
+	// The registry file is always the last entry in the returned list.
+	paveFiles = append(paveFiles, &PaveFile{
+		Name:     REGISTRY_FILENAME,
+		Contents: compress(signedRegistry),
+	})
+
+	return paveFiles, nil
+}
+
+// makeOSLFileSpec creates a random OSL file key, splits it according
+// to the scheme's key splits, and sets the OSL ID as its first SLOK
+// ID. The returned key is used to encrypt the OSL payload and then
+// discarded; the key may be reassembled using the data in the KeyShares
+// tree, given sufficient SLOKs.
+func makeOSLFileSpec(
+	scheme *Scheme,
+	propagationChannelID string,
+	firstSLOKTime time.Time) ([]byte, *OSLFileSpec, error) {
+
+	// The OSL ID is the ID of the SLOK derived for the scheme's first
+	// seed spec at the OSL's first time period.
+	ref := &slokReference{
+		PropagationChannelID: propagationChannelID,
+		SeedSpecID:           string(scheme.SeedSpecs[0].ID),
+		Time:                 firstSLOKTime,
+	}
+	firstSLOK := deriveSLOK(scheme, ref)
+	oslID := firstSLOK.ID
+
+	fileKey, err := common.MakeSecureRandomBytes(KEY_LENGTH_BYTES)
+	if err != nil {
+		return nil, nil, common.ContextError(err)
+	}
+
+	// divideKey advances the SLOK time through the pointer as it
+	// recurses across time periods; firstSLOKTime is a local copy, so
+	// the caller's value is unaffected.
+	keyShares, err := divideKey(
+		scheme,
+		fileKey,
+		scheme.SeedPeriodKeySplits,
+		propagationChannelID,
+		&firstSLOKTime)
+	if err != nil {
+		return nil, nil, common.ContextError(err)
+	}
+
+	fileSpec := &OSLFileSpec{
+		ID:        oslID,
+		KeyShares: keyShares,
+	}
+
+	return fileKey, fileSpec, nil
+}
+
+// divideKey recursively constructs a KeyShares tree. At each level,
+// the key is Shamir-split per the last entry in keySplits; each share
+// is encrypted with a fresh random key which is in turn divided by the
+// next level down. At the lowest level, share keys are divided across
+// the scheme's seed specs and encrypted with derived SLOK keys
+// (divideKeyWithSeedSpecSLOKs), and the SLOK time period is advanced
+// after each leaf share.
+func divideKey(
+	scheme *Scheme,
+	key []byte,
+	keySplits []KeySplit,
+	propagationChannelID string,
+	nextSLOKTime *time.Time) (*KeyShares, error) {
+
+	// Consume key splits from the end of the list; the recursion passes
+	// the remaining prefix down.
+	keySplitIndex := len(keySplits) - 1
+	keySplit := keySplits[keySplitIndex]
+
+	shares, err := shamirSplit(key, keySplit.Total, keySplit.Threshold)
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+
+	var boxedShares [][]byte
+	var keyShares []*KeyShares
+
+	for _, share := range shares {
+		// Each share is boxed with its own random key, and that key is
+		// what gets divided at the next level.
+		shareKey, err := common.MakeSecureRandomBytes(KEY_LENGTH_BYTES)
+		if err != nil {
+			return nil, common.ContextError(err)
+		}
+		if keySplitIndex > 0 {
+			keyShare, err := divideKey(
+				scheme,
+				shareKey,
+				keySplits[0:keySplitIndex],
+				propagationChannelID,
+				nextSLOKTime)
+			if err != nil {
+				return nil, common.ContextError(err)
+			}
+			keyShares = append(keyShares, keyShare)
+		} else {
+			// Leaf level: divide this share's key across seed spec
+			// SLOKs, then advance to the next SLOK time period.
+			keyShare, err := divideKeyWithSeedSpecSLOKs(
+				scheme,
+				shareKey,
+				propagationChannelID,
+				nextSLOKTime)
+			if err != nil {
+				return nil, common.ContextError(err)
+			}
+			keyShares = append(keyShares, keyShare)
+
+			*nextSLOKTime = nextSLOKTime.Add(time.Duration(scheme.SeedPeriodNanoseconds))
+		}
+		boxedShare, err := box(shareKey, share)
+		if err != nil {
+			return nil, common.ContextError(err)
+		}
+		boxedShares = append(boxedShares, boxedShare)
+	}
+
+	return &KeyShares{
+		Threshold:   keySplit.Threshold,
+		BoxedShares: boxedShares,
+		SLOKIDs:     nil,
+		KeyShares:   keyShares,
+	}, nil
+}
+
+// divideKeyWithSeedSpecSLOKs Shamir-splits key across the scheme's
+// seed specs, encrypting each share with the SLOK key derived for the
+// corresponding seed spec at the given time period. This forms the
+// leaf level of the KeyShares tree: SLOKIDs is populated and KeyShares
+// is nil.
+func divideKeyWithSeedSpecSLOKs(
+	scheme *Scheme,
+	key []byte,
+	propagationChannelID string,
+	nextSLOKTime *time.Time) (*KeyShares, error) {
+
+	var boxedShares [][]byte
+	var slokIDs [][]byte
+
+	// One share per seed spec; SeedSpecThreshold of them are required
+	// to reassemble key.
+	shares, err := shamirSplit(
+		key, len(scheme.SeedSpecs), scheme.SeedSpecThreshold)
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+
+	for index, seedSpec := range scheme.SeedSpecs {
+
+		ref := &slokReference{
+			PropagationChannelID: propagationChannelID,
+			SeedSpecID:           string(seedSpec.ID),
+			Time:                 *nextSLOKTime,
+		}
+		slok := deriveSLOK(scheme, ref)
+
+		// The share is encrypted with the SLOK key; only a client
+		// seeded with this SLOK can recover the share.
+		boxedShare, err := box(slok.Key, shares[index])
+		if err != nil {
+			return nil, common.ContextError(err)
+		}
+		boxedShares = append(boxedShares, boxedShare)
+
+		slokIDs = append(slokIDs, slok.ID)
+	}
+
+	return &KeyShares{
+		Threshold:   scheme.SeedSpecThreshold,
+		BoxedShares: boxedShares,
+		SLOKIDs:     slokIDs,
+		KeyShares:   nil,
+	}, nil
+}
+
+// GetOSLRegistryURL returns the URL for an OSL registry, formed by
+// appending the registry file name to baseURL. Clients call this when
+// fetching the registry from out-of-band distribution sites.
+// Clients are responsible for tracking whether the remote file has
+// changed or not before downloading.
+// An unparsable baseURL yields an empty string.
+func GetOSLRegistryURL(baseURL string) string {
+	registryURL, err := url.Parse(baseURL)
+	if err != nil {
+		return ""
+	}
+	registryURL.Path = path.Join(registryURL.Path, REGISTRY_FILENAME)
+	return registryURL.String()
+}
+
+// GetOSLRegistryFilename returns an appropriate filename for
+// the resumable download destination for the OSL registry.
+// The name component matches the Name of the registry PaveFile
+// produced by Pave.
+func GetOSLRegistryFilename(baseDirectory string) string {
+	return filepath.Join(baseDirectory, REGISTRY_FILENAME)
+}
+
+// GetOSLFileURL returns the URL for an OSL file, formed by appending
+// the OSL file name for oslID to baseURL. Once the client has
+// determined, from GetSeededOSLIDs, which OSLs it has sufficiently
+// seeded, it calls this to fetch the OSLs for download and decryption.
+// Clients are responsible for tracking whether the remote file has
+// changed or not before downloading.
+// An unparsable baseURL yields an empty string.
+func GetOSLFileURL(baseURL string, oslID []byte) string {
+	fileURL, err := url.Parse(baseURL)
+	if err != nil {
+		return ""
+	}
+	fileName := fmt.Sprintf(OSL_FILENAME_FORMAT, hex.EncodeToString(oslID))
+	fileURL.Path = path.Join(fileURL.Path, fileName)
+	return fileURL.String()
+}
+
+// GetOSLFilename returns an appropriate filename for the resumable
+// download destination for the OSL file.
+// The name component matches the Name of the OSL PaveFile produced
+// by Pave for the same oslID.
+func GetOSLFilename(baseDirectory string, oslID []byte) string {
+	return filepath.Join(
+		baseDirectory, fmt.Sprintf(OSL_FILENAME_FORMAT, hex.EncodeToString(oslID)))
+}
+
+// UnpackRegistry decompresses, validates, and loads a
+// JSON encoded OSL registry. It reverses the packaging performed by
+// Pave: zlib decompression, authenticated-package signature check
+// against signingPublicKey, base64 decode, then JSON load.
+// It returns the loaded registry and the raw registry JSON.
+func UnpackRegistry(
+	compressedRegistry []byte, signingPublicKey string) (*Registry, []byte, error) {
+
+	packagedRegistry, err := uncompress(compressedRegistry)
+	if err != nil {
+		return nil, nil, common.ContextError(err)
+	}
+
+	// Verifies the package signature and extracts the payload.
+	encodedRegistry, err := common.ReadAuthenticatedDataPackage(
+		packagedRegistry, signingPublicKey)
+	if err != nil {
+		return nil, nil, common.ContextError(err)
+	}
+
+	registryJSON, err := base64.StdEncoding.DecodeString(encodedRegistry)
+	if err != nil {
+		return nil, nil, common.ContextError(err)
+	}
+
+	registry, err := LoadRegistry(registryJSON)
+	return registry, registryJSON, err
+}
+
+// LoadRegistry loads a JSON encoded OSL registry and builds the
+// OSL-ID-to-file-spec index used by GetOSLMD5Sum and UnpackOSL.
+// Clients call this to process downloaded registry files.
+func LoadRegistry(registryJSON []byte) (*Registry, error) {
+
+	var registry Registry
+	if err := json.Unmarshal(registryJSON, &registry); err != nil {
+		return nil, common.ContextError(err)
+	}
+
+	lookup := make(map[string]*OSLFileSpec, len(registry.FileSpecs))
+	for _, fileSpec := range registry.FileSpecs {
+		lookup[string(fileSpec.ID)] = fileSpec
+	}
+	registry.oslIDLookup = lookup
+
+	return &registry, nil
+}
+
+// SLOKLookup is a callback to lookup SLOK keys by ID. It returns the
+// key for the given SLOK ID, or nil when the SLOK is not seeded.
+type SLOKLookup func([]byte) []byte
+
+// GetSeededOSLIDs examines each OSL in the registry and returns a list for
+// which the client has sufficient SLOKs to reassemble the OSL key and
+// decrypt. This function simply does SLOK ID lookups and threshold counting
+// and does not derive keys for every OSL.
+// The client is responsible for using the resulting list of OSL IDs to fetch
+// the OSL files and process.
+//
+// The client's propagation channel ID is used implicitly: it determines the
+// base URL used to download the registry and OSL files. If the client has
+// seeded SLOKs from a propagation channel ID different than the one associated
+// with its present base URL, they will not appear in the registry and not
+// be used.
+//
+// SLOKLookup is called to determine which SLOKs are seeded with the client.
+// errorLogger is a callback to log errors; GetSeededOSLIDs will continue to
+// process each candidate OSL even in the case of an error processing a
+// particular one.
+func (registry *Registry) GetSeededOSLIDs(lookup SLOKLookup, errorLogger func(error)) [][]byte {
+
+	var OSLIDs [][]byte
+	for _, fileSpec := range registry.FileSpecs {
+		// unboxKey=false: only count available SLOKs against the
+		// thresholds; no key material is derived or decrypted.
+		ok, _, err := fileSpec.KeyShares.reassembleKey(lookup, false)
+		if err != nil {
+			errorLogger(err)
+			continue
+		}
+		if ok {
+			OSLIDs = append(OSLIDs, fileSpec.ID)
+		}
+	}
+
+	return OSLIDs
+}
+
+// GetOSLMD5Sum returns the MD5 checksum recorded in the registry for
+// the specified OSL, or an error for an unknown OSL ID.
+func (registry *Registry) GetOSLMD5Sum(oslID []byte) ([]byte, error) {
+
+	fileSpec, found := registry.oslIDLookup[string(oslID)]
+	if !found {
+		return nil, common.ContextError(errors.New("unknown OSL ID"))
+	}
+	return fileSpec.MD5Sum, nil
+}
+
+// reassembleKey recursively traverses a KeyShares tree, determining
+// whether there exists sufficient SLOKs to reassemble the root key and
+// performing the key assembly as required.
+// When unboxKey is false, only SLOK availability is checked against the
+// thresholds and no decryption is performed; the returned key is nil.
+func (keyShares *KeyShares) reassembleKey(lookup SLOKLookup, unboxKey bool) (bool, []byte, error) {
+
+	// A node has either SLOKIDs (leaf) or child KeyShares (interior),
+	// never both; and whichever is present must match BoxedShares in
+	// length.
+	if (len(keyShares.SLOKIDs) > 0 && len(keyShares.KeyShares) > 0) ||
+		(len(keyShares.SLOKIDs) > 0 && len(keyShares.SLOKIDs) != len(keyShares.BoxedShares)) ||
+		(len(keyShares.KeyShares) > 0 && len(keyShares.KeyShares) != len(keyShares.BoxedShares)) {
+		return false, nil, common.ContextError(errors.New("unexpected KeyShares format"))
+	}
+
+	shareCount := 0
+	var shares [][]byte
+	if unboxKey {
+		// Note: shamirCombine infers share indices from slice offset, so
+		// the full number of share slots are allocated and missing
+		// shares are left nil.
+		shares = make([][]byte, len(keyShares.BoxedShares))
+	}
+	if len(keyShares.SLOKIDs) > 0 {
+		// Leaf: count (and optionally unbox) shares for seeded SLOKs,
+		// stopping as soon as the threshold is met.
+		for i := 0; i < len(keyShares.SLOKIDs) && shareCount < keyShares.Threshold; i++ {
+			slokKey := lookup(keyShares.SLOKIDs[i])
+			if slokKey == nil {
+				continue
+			}
+			shareCount += 1
+			if unboxKey {
+				share, err := unbox(slokKey, keyShares.BoxedShares[i])
+				if err != nil {
+					return false, nil, common.ContextError(err)
+				}
+				shares[i] = share
+			}
+		}
+	} else {
+		// Interior: recurse into children; each reassembled child key
+		// unboxes the corresponding share at this level.
+		for i := 0; i < len(keyShares.KeyShares) && shareCount < keyShares.Threshold; i++ {
+			ok, key, err := keyShares.KeyShares[i].reassembleKey(lookup, unboxKey)
+			if err != nil {
+				return false, nil, common.ContextError(err)
+			}
+			if !ok {
+				continue
+			}
+			shareCount += 1
+			if unboxKey {
+				share, err := unbox(key, keyShares.BoxedShares[i])
+				if err != nil {
+					return false, nil, common.ContextError(err)
+				}
+				shares[i] = share
+			}
+		}
+	}
+
+	if shareCount < keyShares.Threshold {
+		return false, nil, nil
+	}
+
+	if !unboxKey {
+		return true, nil, nil
+	}
+
+	joinedKey := shamirCombine(shares)
+
+	return true, joinedKey, nil
+}
+
+// UnpackOSL reassembles the key for the OSL specified by oslID and uses
+// that key to decrypt oslFileContents, uncompress the contents, validate
+// the authenticated package, and extract the payload.
+// Clients will call UnpackOSL for OSLs indicated by GetSeededOSLIDs along
+// with their downloaded content.
+// SLOKLookup is called to determine which SLOKs are seeded with the client.
+func (registry *Registry) UnpackOSL(
+	lookup SLOKLookup,
+	oslID []byte,
+	oslFileContents []byte,
+	signingPublicKey string) (string, error) {
+
+	fileSpec, ok := registry.oslIDLookup[string(oslID)]
+	if !ok {
+		return "", common.ContextError(errors.New("unknown OSL ID"))
+	}
+
+	// unboxKey=true: actually derive and assemble the OSL file key.
+	ok, fileKey, err := fileSpec.KeyShares.reassembleKey(lookup, true)
+	if err != nil {
+		return "", common.ContextError(err)
+	}
+	if !ok {
+		return "", common.ContextError(errors.New("unseeded OSL"))
+	}
+
+	// Reverse the packaging performed by Pave: decrypt, decompress,
+	// then verify the authenticated package signature.
+	decryptedContents, err := unbox(fileKey, oslFileContents)
+	if err != nil {
+		return "", common.ContextError(err)
+	}
+
+	dataPackage, err := uncompress(decryptedContents)
+	if err != nil {
+		return "", common.ContextError(err)
+	}
+
+	oslPayload, err := common.ReadAuthenticatedDataPackage(
+		dataPackage, signingPublicKey)
+	if err != nil {
+		return "", common.ContextError(err)
+	}
+
+	return oslPayload, nil
+}
+
+// deriveKeyHKDF implements HKDF-Expand as defined in https://tools.ietf.org/html/rfc5869
+// where masterKey = PRK, context = info, and L = 32; SHA-256 is used so HashLen = 32.
+// With L = HashLen, the first HMAC block T(1) is the entire output, so
+// no iteration is needed; the trailing 0x01 is the RFC 5869 block counter.
+func deriveKeyHKDF(masterKey []byte, context ...[]byte) []byte {
+	mac := hmac.New(sha256.New, masterKey)
+	for _, item := range context {
+		// item is already a []byte; no conversion is needed.
+		mac.Write(item)
+	}
+	mac.Write([]byte{0x01})
+	return mac.Sum(nil)
+}
+
+// isValidShamirSplit checks sss.Split constraints: total and threshold
+// must each fit in a one-byte share index (1..254), and threshold
+// cannot exceed total.
+func isValidShamirSplit(total, threshold int) bool {
+	return total >= 1 && total <= 254 && threshold >= 1 && threshold <= total
+}
+
+// shamirSplit is a helper wrapper for sss.Split. It returns total
+// shares of secret, any threshold of which suffice to reconstruct it.
+// Shares are returned as a dense slice ordered by share index.
+func shamirSplit(secret []byte, total, threshold int) ([][]byte, error) {
+	if !isValidShamirSplit(total, threshold) {
+		return nil, common.ContextError(errors.New("invalid parameters"))
+	}
+
+	if threshold == 1 {
+		// Special case: each share is simply the secret.
+		// Note: all slots alias the same secret slice; callers treat
+		// shares as read-only.
+		shares := make([][]byte, total)
+		for i := 0; i < total; i++ {
+			shares[i] = secret
+		}
+		return shares, nil
+	}
+
+	shareMap, err := sss.Split(byte(total), byte(threshold), secret)
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+
+	// Flatten the map into a slice; slice offset i holds share index
+	// i+1, the inverse of the mapping applied in shamirCombine.
+	shares := make([][]byte, total)
+	for i := 0; i < total; i++ {
+		// Note: sss.Combine index starts at 1
+		shares[i] = shareMap[byte(i)+1]
+	}
+
+	return shares, nil
+}
+
+// shamirCombine is a helper wrapper for sss.Combine. The input is a
+// possibly-sparse slice of shares, as produced by reassembleKey: slice
+// offset implies share index, and missing shares are nil. At least the
+// split's threshold of non-nil shares must be present.
+func shamirCombine(shares [][]byte) []byte {
+
+	if len(shares) == 1 {
+		// Special case: each share is simply the secret
+		return shares[0]
+	}
+
+	// Convert a sparse list into a map
+	shareMap := make(map[byte][]byte)
+	for index, share := range shares {
+		if share != nil {
+			// Note: sss.Combine index starts at 1
+			shareMap[byte(index)+1] = share
+		}
+	}
+
+	return sss.Combine(shareMap)
+}
+
+// box is a helper wrapper for secretbox.Seal.
+// A constant (all-zero) nonce is used, which is secure so long as
+// each key is used to encrypt only one message.
+// key must be exactly 32 bytes.
+func box(key, plaintext []byte) ([]byte, error) {
+	if len(key) != 32 {
+		return nil, common.ContextError(errors.New("invalid key length"))
+	}
+	// Zero-value nonce, per the single-use-key scheme described above.
+	var nonce [24]byte
+	var secretboxKey [32]byte
+	copy(secretboxKey[:], key)
+	box := secretbox.Seal(nil, plaintext, &nonce, &secretboxKey)
+	return box, nil
+}
+
+// unbox is a helper wrapper for secretbox.Open. As with box, a
+// constant all-zero nonce is used, which requires that each key
+// encrypt at most one message. key must be exactly 32 bytes.
+func unbox(key, box []byte) ([]byte, error) {
+	if len(key) != 32 {
+		return nil, common.ContextError(errors.New("invalid key length"))
+	}
+	var (
+		nonce        [24]byte
+		secretboxKey [32]byte
+	)
+	copy(secretboxKey[:], key)
+	plaintext, ok := secretbox.Open(nil, box, &nonce, &secretboxKey)
+	if !ok {
+		return nil, common.ContextError(errors.New("unbox failed"))
+	}
+	return plaintext, nil
+}
+
+// compress returns data zlib-compressed. Write and Close errors are
+// not checked: the writer targets an in-memory bytes.Buffer, whose
+// writes do not fail.
+func compress(data []byte) []byte {
+	var buffer bytes.Buffer
+	writer := zlib.NewWriter(&buffer)
+	writer.Write(data)
+	writer.Close()
+	return buffer.Bytes()
+}
+
+// uncompress zlib-decompresses data, returning an error when data is
+// not a valid zlib stream.
+func uncompress(data []byte) ([]byte, error) {
+	reader, err := zlib.NewReader(bytes.NewReader(data))
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+	// Close before checking the read error; the Close error itself is
+	// intentionally ignored, as the reader wraps an in-memory source.
+	uncompressedData, err := ioutil.ReadAll(reader)
+	reader.Close()
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+	return uncompressedData, nil
+}

+ 557 - 0
psiphon/common/osl/osl_test.go

@@ -0,0 +1,557 @@
+/*
+ * Copyright (c) 2016, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package osl
+
+import (
+	"encoding/base64"
+	"fmt"
+	"net"
+	"testing"
+	"time"
+
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+)
+
+// TestOSL exercises SLOK seeding, OSL paving, registry unpacking, and
+// OSL key reassembly end-to-end against two test schemes with distinct
+// key-split configurations.
+func TestOSL(t *testing.T) {
+
+	configJSONTemplate := `
+{
+  "Schemes" : [
+    {
+      "Epoch" : "%s",
+
+      "Regions" : ["US", "CA"],
+
+      "PropagationChannelIDs" : ["2995DB0C968C59C4F23E87988D9C0D41", "E742C25A6D8BA8C17F37E725FA628569"],
+
+      "MasterKey" : "wFuSbqU/pJ/35vRmoM8T9ys1PgDa8uzJps1Y+FNKa5U=",
+
+      "SeedSpecs" : [
+        {
+          "Description": "spec1",
+          "ID" : "IXHWfVgWFkEKvgqsjmnJuN3FpaGuCzQMETya+DSQvsk=",
+          "UpstreamSubnets" : ["192.168.0.0/16", "172.16.0.0/12"],
+          "Targets" :
+          {
+              "BytesRead" : 1,
+              "BytesWritten" : 1,
+              "PortForwardDurationNanoseconds" : 1
+          }
+        },
+        {
+          "Description": "spec2",
+          "ID" : "qvpIcORLE2Pi5TZmqRtVkEp+OKov0MhfsYPLNV7FYtI=",
+          "UpstreamSubnets" : ["192.168.0.0/16", "10.0.0.0/8"],
+          "Targets" :
+          {
+              "BytesRead" : 10,
+              "BytesWritten" : 10,
+              "PortForwardDurationNanoseconds" : 10
+          }
+        },
+        {
+          "Description": "spec3",
+          "ID" : "ts5LInjFHbVKX+/C5/bSJqUh+cLT5kJy92TZGLvAtPU=",
+          "UpstreamSubnets" : ["100.64.0.0/10"],
+          "Targets" :
+          {
+              "BytesRead" : 100,
+              "BytesWritten" : 100,
+              "PortForwardDurationNanoseconds" : 100
+          }
+        }
+      ],
+
+      "SeedSpecThreshold" : 2,
+
+      "SeedPeriodNanoseconds" : 1000000,
+
+      "SeedPeriodKeySplits": [
+        {
+          "Total": 10,
+          "Threshold": 5
+        },
+        {
+          "Total": 10,
+          "Threshold": 5
+        }
+      ]
+    },
+    {
+      "Epoch" : "%s",
+
+      "Regions" : ["US", "CA"],
+
+      "PropagationChannelIDs" : ["36F1CF2DF1250BF0C7BA0629CE3DC657"],
+
+      "MasterKey" : "fcyQy8JSxLXHt/Iom9Qj9wMnSjrsccTiiSPEsJicet4=",
+
+      "SeedSpecs" : [
+        {
+          "Description": "spec1",
+          "ID" : "NXY0/4lqMxx5XIszIhMbwHobH/qb2Gl0Bw/OGndc1vM=",
+          "UpstreamSubnets" : ["192.168.0.0/16", "172.16.0.0/12"],
+          "Targets" :
+          {
+              "BytesRead" : 1,
+              "BytesWritten" : 1,
+              "PortForwardDurationNanoseconds" : 1
+          }
+        },
+        {
+          "Description": "spec2",
+          "ID" : "o78G6muv3idtbQKXoU05tF6gTlQj1LHmNe0eUWkZGxs=",
+          "UpstreamSubnets" : ["192.168.0.0/16", "10.0.0.0/8"],
+          "Targets" :
+          {
+              "BytesRead" : 10,
+              "BytesWritten" : 10,
+              "PortForwardDurationNanoseconds" : 10
+          }
+        },
+        {
+          "Description": "spec3",
+          "ID" : "1DlAvJYpoSEfcqMXYBV7bDEtYu3LCQO39ISD5tmi8Uo=",
+          "UpstreamSubnets" : ["100.64.0.0/10"],
+          "Targets" :
+          {
+              "BytesRead" : 0,
+              "BytesWritten" : 0,
+              "PortForwardDurationNanoseconds" : 0
+          }
+        }
+      ],
+
+      "SeedSpecThreshold" : 2,
+
+      "SeedPeriodNanoseconds" : 1000000,
+
+      "SeedPeriodKeySplits": [
+        {
+          "Total": 100,
+          "Threshold": 25
+        }
+      ]
+    }
+  ]
+}
+`
+	now := time.Now().UTC()
+	epoch := now.Truncate(1 * time.Millisecond)
+	epochStr := epoch.Format(time.RFC3339Nano)
+	configJSON := fmt.Sprintf(configJSONTemplate, epochStr, epochStr)
+
+	// The first scheme requires sufficient activity within 5/10 1 millisecond
+	// periods and 5/10 10 millisecond longer periods. The second scheme requires
+	// sufficient activity within 25/100 1 millisecond periods.
+
+	config, err := LoadConfig([]byte(configJSON))
+	if err != nil {
+		t.Fatalf("LoadConfig failed: %s", err)
+	}
+
+	t.Run("ineligible client, sufficient transfer", func(t *testing.T) {
+
+		// Propagation channel ID not present in any scheme.
+		clientSeedState := config.NewClientSeedState("US", "C5E8D2EDFD093B50D8D65CF59D0263CA", nil)
+
+		seedPortForward := clientSeedState.NewClientSeedPortForward(net.ParseIP("192.168.0.1"))
+
+		if seedPortForward != nil {
+			t.Fatalf("expected nil client seed port forward")
+		}
+	})
+
+	// This clientSeedState is used across multiple tests.
+	signalIssueSLOKs := make(chan struct{}, 1)
+	clientSeedState := config.NewClientSeedState("US", "2995DB0C968C59C4F23E87988D9C0D41", signalIssueSLOKs)
+
+	t.Run("eligible client, no transfer", func(t *testing.T) {
+
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 0 {
+			t.Fatalf("expected 0 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+	})
+
+	t.Run("eligible client, insufficient transfer", func(t *testing.T) {
+
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1")).UpdateProgress(5, 5, 5)
+
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 0 {
+			t.Fatalf("expected 0 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+	})
+
+	rolloverToNextSLOKTime := func() {
+		// Rollover to the next SLOK time, so accrued data transfer will be reset.
+		now := time.Now().UTC()
+		time.Sleep(now.Add(1 * time.Millisecond).Truncate(1 * time.Millisecond).Sub(now))
+	}
+
+	t.Run("eligible client, insufficient transfer after rollover", func(t *testing.T) {
+
+		rolloverToNextSLOKTime()
+
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1")).UpdateProgress(5, 5, 5)
+
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 0 {
+			t.Fatalf("expected 0 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+	})
+
+	t.Run("eligible client, sufficient transfer, one port forward", func(t *testing.T) {
+
+		rolloverToNextSLOKTime()
+
+		clientSeedPortForward := clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1"))
+
+		clientSeedPortForward.UpdateProgress(5, 5, 5)
+
+		clientSeedPortForward.UpdateProgress(5, 5, 5)
+
+		select {
+		case <-signalIssueSLOKs:
+		default:
+			t.Fatalf("expected issue SLOKs signal")
+		}
+
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 1 {
+			t.Fatalf("expected 1 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+	})
+
+	t.Run("eligible client, sufficient transfer, multiple port forwards", func(t *testing.T) {
+
+		rolloverToNextSLOKTime()
+
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1")).UpdateProgress(5, 5, 5)
+
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1")).UpdateProgress(5, 5, 5)
+
+		select {
+		case <-signalIssueSLOKs:
+		default:
+			t.Fatalf("expected issue SLOKs signal")
+		}
+
+		// Expect 2 SLOKS: 1 new, and 1 remaining in payload.
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 2 {
+			t.Fatalf("expected 2 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+	})
+
+	t.Run("eligible client, sufficient transfer multiple SLOKs", func(t *testing.T) {
+
+		rolloverToNextSLOKTime()
+
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("192.168.0.1")).UpdateProgress(5, 5, 5)
+
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1")).UpdateProgress(5, 5, 5)
+
+		select {
+		case <-signalIssueSLOKs:
+		default:
+			t.Fatalf("expected issue SLOKs signal")
+		}
+
+		// Expect 4 SLOKS: 2 new, and 2 remaining in payload.
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 4 {
+			t.Fatalf("expected 4 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+	})
+
+	t.Run("clear payload", func(t *testing.T) {
+		clientSeedState.ClearSeedPayload()
+
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 0 {
+			t.Fatalf("expected 0 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+	})
+
+	t.Run("no transfer required", func(t *testing.T) {
+
+		rolloverToNextSLOKTime()
+
+		// Scheme 2's spec3 has all-zero targets, so a SLOK is issued
+		// immediately on state creation.
+		clientSeedState := config.NewClientSeedState("US", "36F1CF2DF1250BF0C7BA0629CE3DC657", nil)
+
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 1 {
+			t.Fatalf("expected 1 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+	})
+
+	signingPublicKey, signingPrivateKey, err := common.GenerateAuthenticatedDataPackageKeys()
+	if err != nil {
+		t.Fatalf("GenerateAuthenticatedDataPackageKeys failed: %s", err)
+	}
+
+	pavedRegistries := make(map[string][]byte)
+	pavedOSLFileContents := make(map[string]map[string][]byte)
+
+	t.Run("pave OSLs", func(t *testing.T) {
+
+		// Pave sufficient OSLs to cover simulated elapsed time of all test cases.
+		endTime := epoch.Add(1000 * time.Millisecond)
+
+		// In actual deployment, paved files for each propagation channel ID
+		// are dropped in distinct distribution sites.
+		for _, propagationChannelID := range []string{
+			"2995DB0C968C59C4F23E87988D9C0D41",
+			"E742C25A6D8BA8C17F37E725FA628569",
+			"36F1CF2DF1250BF0C7BA0629CE3DC657"} {
+
+			// Dummy server entry payloads will be the OSL ID, which the following
+			// tests use to verify that the correct OSL file decrypts successfully.
+			paveServerEntries := make([]map[time.Time]string, len(config.Schemes))
+			for schemeIndex, scheme := range config.Schemes {
+
+				paveServerEntries[schemeIndex] = make(map[time.Time]string)
+
+				slokTimePeriodsPerOSL := 1
+				for _, keySplit := range scheme.SeedPeriodKeySplits {
+					slokTimePeriodsPerOSL *= keySplit.Total
+				}
+
+				oslTime := scheme.epoch
+				for oslTime.Before(endTime) {
+					firstSLOKRef := &slokReference{
+						PropagationChannelID: propagationChannelID,
+						SeedSpecID:           string(scheme.SeedSpecs[0].ID),
+						Time:                 oslTime,
+					}
+					firstSLOK := deriveSLOK(scheme, firstSLOKRef)
+					oslID := firstSLOK.ID
+					paveServerEntries[schemeIndex][oslTime] =
+						base64.StdEncoding.EncodeToString(oslID)
+
+					oslTime = oslTime.Add(
+						time.Duration(
+							int64(slokTimePeriodsPerOSL) * scheme.SeedPeriodNanoseconds))
+				}
+			}
+
+			paveFiles, err := config.Pave(
+				endTime,
+				propagationChannelID,
+				signingPublicKey,
+				signingPrivateKey,
+				paveServerEntries)
+			if err != nil {
+				t.Fatalf("Pave failed: %s", err)
+			}
+
+			// Check that the paved file name matches the name the client will look for.
+			if len(paveFiles) < 1 || paveFiles[len(paveFiles)-1].Name != GetOSLRegistryURL("") {
+				t.Fatalf("invalid registry pave file")
+			}
+
+			pavedRegistries[propagationChannelID] = paveFiles[len(paveFiles)-1].Contents
+
+			pavedOSLFileContents[propagationChannelID] = make(map[string][]byte)
+			// NOTE(review): paveFiles[0:len(paveFiles)] is all of paveFiles,
+			// so the registry file is also included in this map; presumably
+			// paveFiles[0:len(paveFiles)-1] was intended. Harmless here, as
+			// later lookups use OSL file names only.
+			for _, paveFile := range paveFiles[0:len(paveFiles)] {
+				pavedOSLFileContents[propagationChannelID][paveFile.Name] = paveFile.Contents
+			}
+		}
+	})
+
+	if len(pavedRegistries) != 3 {
+		// Previous subtest failed. Following tests cannot be completed, so abort.
+		t.Fatalf("pave failed")
+	}
+
+	// To ensure SLOKs are issued at precise time periods, the following tests
+	// bypass ClientSeedState and derive SLOKs directly.
+
+	expandRanges := func(ranges ...[2]int) []int {
+		a := make([]int, 0)
+		for _, r := range ranges {
+			for n := r[0]; n <= r[1]; n++ {
+				a = append(a, n)
+			}
+		}
+		return a
+	}
+
+	singleSplitPropagationChannelID := "36F1CF2DF1250BF0C7BA0629CE3DC657"
+	singleSplitScheme := config.Schemes[1]
+
+	doubleSplitPropagationChannelID := "2995DB0C968C59C4F23E87988D9C0D41"
+	doubleSplitScheme := config.Schemes[0]
+
+	keySplitTestCases := []struct {
+		description              string
+		propagationChannelID     string
+		scheme                   *Scheme
+		issueSLOKTimePeriods     []int
+		issueSLOKSeedSpecIndexes []int
+		expectedOSLCount         int
+	}{
+		{
+			"single split scheme: insufficient SLOK periods",
+			singleSplitPropagationChannelID,
+			singleSplitScheme,
+			expandRanges([2]int{0, 23}),
+			[]int{0, 1},
+			0,
+		},
+		{
+			"single split scheme: insufficient SLOK seed specs",
+			singleSplitPropagationChannelID,
+			singleSplitScheme,
+			expandRanges([2]int{0, 23}),
+			[]int{0},
+			0,
+		},
+		{
+			"single split scheme: sufficient SLOKs",
+			singleSplitPropagationChannelID,
+			singleSplitScheme,
+			expandRanges([2]int{0, 24}),
+			[]int{0, 1},
+			1,
+		},
+		{
+			"single split scheme: sufficient SLOKs (alternative seed specs)",
+			singleSplitPropagationChannelID,
+			singleSplitScheme,
+			expandRanges([2]int{0, 24}),
+			[]int{1, 2},
+			1,
+		},
+		{
+			"single split scheme: more than sufficient SLOKs",
+			singleSplitPropagationChannelID,
+			singleSplitScheme,
+			expandRanges([2]int{0, 49}),
+			[]int{0, 1},
+			1,
+		},
+		{
+			"double split scheme: insufficient SLOK periods",
+			doubleSplitPropagationChannelID,
+			doubleSplitScheme,
+			expandRanges([2]int{0, 4}, [2]int{10, 14}, [2]int{20, 24}, [2]int{30, 34}, [2]int{40, 43}),
+			[]int{0, 1},
+			0,
+		},
+		{
+			"double split scheme: insufficient SLOK period spread",
+			doubleSplitPropagationChannelID,
+			doubleSplitScheme,
+			expandRanges([2]int{0, 25}),
+			[]int{0, 1},
+			0,
+		},
+		{
+			"double split scheme: insufficient SLOK seed specs",
+			doubleSplitPropagationChannelID,
+			doubleSplitScheme,
+			expandRanges([2]int{0, 4}, [2]int{10, 14}, [2]int{20, 24}, [2]int{30, 34}, [2]int{40, 44}),
+			[]int{0},
+			0,
+		},
+		{
+			"double split scheme: sufficient SLOKs",
+			doubleSplitPropagationChannelID,
+			doubleSplitScheme,
+			expandRanges([2]int{0, 4}, [2]int{10, 14}, [2]int{20, 24}, [2]int{30, 34}, [2]int{40, 44}),
+			[]int{0, 1},
+			1,
+		},
+		{
+			"double split scheme: sufficient SLOKs (alternative seed specs)",
+			doubleSplitPropagationChannelID,
+			doubleSplitScheme,
+			expandRanges([2]int{0, 4}, [2]int{10, 14}, [2]int{20, 24}, [2]int{30, 34}, [2]int{40, 44}),
+			[]int{1, 2},
+			1,
+		},
+	}
+
+	for _, testCase := range keySplitTestCases {
+		t.Run(testCase.description, func(t *testing.T) {
+
+			slokMap := make(map[string][]byte)
+
+			for _, timePeriod := range testCase.issueSLOKTimePeriods {
+				for _, seedSpecIndex := range testCase.issueSLOKSeedSpecIndexes {
+
+					slok := deriveSLOK(
+						testCase.scheme,
+						&slokReference{
+							PropagationChannelID: testCase.propagationChannelID,
+							SeedSpecID:           string(testCase.scheme.SeedSpecs[seedSpecIndex].ID),
+							Time:                 epoch.Add(time.Duration(timePeriod) * time.Millisecond),
+						})
+
+					slokMap[string(slok.ID)] = slok.Key
+
+				}
+			}
+
+			t.Logf("SLOK count: %d", len(slokMap))
+
+			slokLookup := func(slokID []byte) []byte {
+				return slokMap[string(slokID)]
+			}
+
+			checkRegistryStartTime := time.Now()
+
+			registry, _, err := UnpackRegistry(
+				pavedRegistries[testCase.propagationChannelID], signingPublicKey)
+			if err != nil {
+				t.Fatalf("UnpackRegistry failed: %s", err)
+			}
+
+			t.Logf("registry size: %d", len(pavedRegistries[testCase.propagationChannelID]))
+			t.Logf("registry OSL count: %d", len(registry.FileSpecs))
+
+			oslIDs := registry.GetSeededOSLIDs(
+				slokLookup,
+				func(err error) {
+					// Actual client will treat errors as warnings.
+					t.Fatalf("GetSeededOSLIDs failed: %s", err)
+				})
+
+			t.Logf("check registry elapsed time: %s", time.Since(checkRegistryStartTime))
+
+			if len(oslIDs) != testCase.expectedOSLCount {
+				t.Fatalf("expected %d OSLs got %d", testCase.expectedOSLCount, len(oslIDs))
+			}
+
+			for _, oslID := range oslIDs {
+				oslFileContents, ok :=
+					pavedOSLFileContents[testCase.propagationChannelID][GetOSLFileURL("", oslID)]
+				if !ok {
+					t.Fatalf("unknown OSL file name")
+				}
+
+				plaintextOSL, err := registry.UnpackOSL(
+					slokLookup, oslID, oslFileContents, signingPublicKey)
+				if err != nil {
+					// NOTE(review): message says DecryptOSL, but the call is UnpackOSL.
+					t.Fatalf("DecryptOSL failed: %s", err)
+				}
+
+				// The decrypted OSL should contain its own ID.
+				if plaintextOSL != base64.StdEncoding.EncodeToString(oslID) {
+					t.Fatalf("unexpected OSL file contents")
+				}
+			}
+		})
+	}
+}

+ 20 - 1
psiphon/common/protocol.go → psiphon/common/protocol/protocol.go

@@ -17,7 +17,11 @@
  *
  */
 
-package common
+package protocol
+
+import (
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/osl"
+)
 
 const (
 	TUNNEL_PROTOCOL_SSH                  = "SSH"
@@ -35,10 +39,13 @@ const (
 	CAPABILITY_SSH_API_REQUESTS            = "ssh-api-requests"
 	CAPABILITY_UNTUNNELED_WEB_API_REQUESTS = "handshake"
 
+	CLIENT_CAPABILITY_SERVER_REQUESTS = "server-requests"
+
 	PSIPHON_API_HANDSHAKE_REQUEST_NAME           = "psiphon-handshake"
 	PSIPHON_API_CONNECTED_REQUEST_NAME           = "psiphon-connected"
 	PSIPHON_API_STATUS_REQUEST_NAME              = "psiphon-status"
 	PSIPHON_API_CLIENT_VERIFICATION_REQUEST_NAME = "psiphon-client-verification"
+	PSIPHON_API_OSL_REQUEST_NAME                 = "psiphon-osl"
 
 	PSIPHON_API_CLIENT_SESSION_ID_LENGTH = 16
 
@@ -81,6 +88,7 @@ func TunnelProtocolUsesMeekHTTPS(protocol string) bool {
 }
 
 type HandshakeResponse struct {
+	SSHSessionID         string              `json:"ssh_session_id"`
 	Homepages            []string            `json:"homepages"`
 	UpgradeClientVersion string              `json:"upgrade_client_version"`
 	PageViewRegexes      []map[string]string `json:"page_view_regexes"`
@@ -93,3 +101,14 @@ type HandshakeResponse struct {
 type ConnectedResponse struct {
 	ConnectedTimestamp string `json:"connected_timestamp"`
 }
+
+// OSLRequest is the JSON payload of the OSL API request
+// (PSIPHON_API_OSL_REQUEST_NAME), carrying an OSL seed payload and a
+// flag indicating whether the client should clear locally stored SLOKs.
+type OSLRequest struct {
+	ClearLocalSLOKs bool             `json:"clear_local_sloks"`
+	SeedPayload     *osl.SeedPayload `json:"seed_payload"`
+}
+
+// SSHPasswordPayload is the JSON document transmitted in the SSH
+// password field during the Psiphon SSH authentication handshake.
+// NOTE(review): field tags intentionally use CamelCase JSON keys --
+// presumably to match an existing server-side format; confirm before
+// normalizing.
+type SSHPasswordPayload struct {
+	SessionId          string   `json:"SessionId"`
+	SshPassword        string   `json:"SshPassword"`
+	ClientCapabilities []string `json:"ClientCapabilities"`
+}

+ 6 - 8
psiphon/serverEntry.go → psiphon/common/protocol/serverEntry.go

@@ -17,7 +17,7 @@
  *
  */
 
-package psiphon
+package protocol
 
 import (
 	"bytes"
@@ -82,7 +82,7 @@ func (serverEntry *ServerEntry) SupportsProtocol(protocol string) bool {
 // by the ServerEntry's capabilities.
 func (serverEntry *ServerEntry) GetSupportedProtocols() []string {
 	supportedProtocols := make([]string, 0)
-	for _, protocol := range common.SupportedTunnelProtocols {
+	for _, protocol := range SupportedTunnelProtocols {
 		if serverEntry.SupportsProtocol(protocol) {
 			supportedProtocols = append(supportedProtocols, protocol)
 		}
@@ -114,16 +114,16 @@ func (serverEntry *ServerEntry) DisableImpairedProtocols(impairedProtocols []str
 // SupportsSSHAPIRequests returns true when the server supports
 // SSH API requests.
 func (serverEntry *ServerEntry) SupportsSSHAPIRequests() bool {
-	return common.Contains(serverEntry.Capabilities, common.CAPABILITY_SSH_API_REQUESTS)
+	return common.Contains(serverEntry.Capabilities, CAPABILITY_SSH_API_REQUESTS)
 }
 
 func (serverEntry *ServerEntry) GetUntunneledWebRequestPorts() []string {
 	ports := make([]string, 0)
-	if common.Contains(serverEntry.Capabilities, common.CAPABILITY_UNTUNNELED_WEB_API_REQUESTS) {
+	if common.Contains(serverEntry.Capabilities, CAPABILITY_UNTUNNELED_WEB_API_REQUESTS) {
 		// Server-side configuration quirk: there's a port forward from
 		// port 443 to the web server, which we can try, except on servers
 		// running FRONTED_MEEK, which listens on port 443.
-		if !serverEntry.SupportsProtocol(common.TUNNEL_PROTOCOL_FRONTED_MEEK) {
+		if !serverEntry.SupportsProtocol(TUNNEL_PROTOCOL_FRONTED_MEEK) {
 			ports = append(ports, "443")
 		}
 		ports = append(ports, serverEntry.WebServerPort)
@@ -195,9 +195,6 @@ func ValidateServerEntry(serverEntry *ServerEntry) error {
 	ipAddr := net.ParseIP(serverEntry.IpAddress)
 	if ipAddr == nil {
 		errMsg := fmt.Sprintf("server entry has invalid IpAddress: '%s'", serverEntry.IpAddress)
-		// Some callers skip invalid server entries without propagating
-		// the error mesage, so issue a notice.
-		NoticeAlert(errMsg)
 		return common.ContextError(errors.New(errMsg))
 	}
 	return nil
@@ -225,6 +222,7 @@ func DecodeAndValidateServerEntryList(
 
 		if ValidateServerEntry(serverEntry) != nil {
 			// Skip this entry and continue with the next one
+			// TODO: invoke a logging callback
 			continue
 		}
 

+ 3 - 3
psiphon/serverEntry_test.go → psiphon/common/protocol/serverEntry_test.go

@@ -17,7 +17,7 @@
  *
  */
 
-package psiphon
+package protocol
 
 import (
 	"encoding/hex"
@@ -43,7 +43,7 @@ func TestDecodeAndValidateServerEntryList(t *testing.T) {
 		hex.EncodeToString([]byte(_INVALID_MALFORMED_IP_ADDRESS_SERVER_ENTRY))
 
 	serverEntries, err := DecodeAndValidateServerEntryList(
-		testEncodedServerEntryList, common.GetCurrentTimestamp(), common.SERVER_ENTRY_SOURCE_EMBEDDED)
+		testEncodedServerEntryList, common.GetCurrentTimestamp(), SERVER_ENTRY_SOURCE_EMBEDDED)
 	if err != nil {
 		t.Error(err.Error())
 		t.FailNow()
@@ -66,7 +66,7 @@ func TestInvalidServerEntries(t *testing.T) {
 	for _, testCase := range testCases {
 		encodedServerEntry := hex.EncodeToString([]byte(testCase))
 		serverEntry, err := DecodeServerEntry(
-			encodedServerEntry, common.GetCurrentTimestamp(), common.SERVER_ENTRY_SOURCE_EMBEDDED)
+			encodedServerEntry, common.GetCurrentTimestamp(), SERVER_ENTRY_SOURCE_EMBEDDED)
 		if err != nil {
 			t.Error(err.Error())
 		}

+ 35 - 38
psiphon/common/reloader.go

@@ -20,33 +20,11 @@
 package common
 
 import (
-	"os"
+	"hash/crc64"
+	"io/ioutil"
 	"sync"
 )
 
-// IsFileChanged uses os.Stat to check if the name, size, or last mod time of the
-// file has changed (which is a heuristic, but sufficiently robust for users of this
-// function). Returns nil if file has not changed; otherwise, returns a changed
-// os.FileInfo which may be used to check for subsequent changes.
-func IsFileChanged(path string, previousFileInfo os.FileInfo) (os.FileInfo, error) {
-
-	fileInfo, err := os.Stat(path)
-	if err != nil {
-		return nil, ContextError(err)
-	}
-
-	changed := previousFileInfo == nil ||
-		fileInfo.Name() != previousFileInfo.Name() ||
-		fileInfo.Size() != previousFileInfo.Size() ||
-		fileInfo.ModTime() != previousFileInfo.ModTime()
-
-	if !changed {
-		return nil, nil
-	}
-
-	return fileInfo, nil
-}
-
 // Reloader represents a read-only, in-memory reloadable data object. For example,
 // a JSON data file that is loaded into memory and accessed for read-only lookups;
 // and from time to time may be reloaded from the same file, updating the memory
@@ -71,10 +49,11 @@ type Reloader interface {
 // in other types that add the actual reloadable data structures.
 //
 // ReloadableFile has a multi-reader mutex for synchronization. Its Reload() function
-// will obtain a write lock before reloading the data structures. Actually reloading
-// action is to be provided via the reloadAction callback (for example, read the contents
-// of the file and unmarshall the contents into data structures). All read access to
-// the data structures should be guarded by RLocks on the ReloadableFile mutex.
+// will obtain a write lock before reloading the data structures. The actual reloading
+// action is to be provided via the reloadAction callback, which receives the content
+// of reloaded files and must process the new data (for example, unmarshall the contents
+// into data structures). All read access to the data structures should be guarded by
+// RLocks on the ReloadableFile mutex.
 //
 // reloadAction must ensure that data structures revert to their previous state when
 // a reload fails.
@@ -82,14 +61,14 @@ type Reloader interface {
 type ReloadableFile struct {
 	sync.RWMutex
 	fileName     string
-	fileInfo     os.FileInfo
-	reloadAction func(string) error
+	checksum     uint64
+	reloadAction func([]byte) error
 }
 
 // NewReloadableFile initializes a new ReloadableFile
 func NewReloadableFile(
 	fileName string,
-	reloadAction func(string) error) ReloadableFile {
+	reloadAction func([]byte) error) ReloadableFile {
 
 	return ReloadableFile{
 		fileName:     fileName,
@@ -103,10 +82,23 @@ func (reloadable *ReloadableFile) WillReload() bool {
 	return reloadable.fileName != ""
 }
 
-// Reload checks if the underlying file has changed (using IsFileChanged semantics, which
-// are heuristics) and, when changed, invokes the reloadAction callback which should
-// reload, from the file, the in-memory data structures.
+var crc64table = crc64.MakeTable(crc64.ISO)
+
+// Reload checks if the underlying file has changed and, when changed, invokes
+// the reloadAction callback which should reload the in-memory data structures.
+//
+// In some cases (e.g., traffic rules and OSL), there are penalties associated
+// with proceeding with reload, so care is taken to not invoke the reload action
+// unless the contents have changed.
+//
+// The file content is loaded and a checksum is taken to determine whether it
+// has changed. Neither file size (may not change when content changes) nor
+// modified date (may change when identical file is repaved) is a sufficient
+// indicator.
+//
 // All data structure readers should be blocked by the ReloadableFile mutex.
+//
+// Reload must not be called from multiple concurrent goroutines.
 func (reloadable *ReloadableFile) Reload() (bool, error) {
 
 	if !reloadable.WillReload() {
@@ -116,13 +108,18 @@ func (reloadable *ReloadableFile) Reload() (bool, error) {
 	// Check whether the file has changed _before_ blocking readers
 
 	reloadable.RLock()
-	changedFileInfo, err := IsFileChanged(reloadable.fileName, reloadable.fileInfo)
+	fileName := reloadable.fileName
+	previousChecksum := reloadable.checksum
 	reloadable.RUnlock()
+
+	content, err := ioutil.ReadFile(fileName)
 	if err != nil {
 		return false, ContextError(err)
 	}
 
-	if changedFileInfo == nil {
+	checksum := crc64.Checksum(content, crc64table)
+
+	if checksum == previousChecksum {
 		return false, nil
 	}
 
@@ -131,12 +128,12 @@ func (reloadable *ReloadableFile) Reload() (bool, error) {
 	reloadable.Lock()
 	defer reloadable.Unlock()
 
-	err = reloadable.reloadAction(reloadable.fileName)
+	err = reloadable.reloadAction(content)
 	if err != nil {
 		return false, ContextError(err)
 	}
 
-	reloadable.fileInfo = changedFileInfo
+	reloadable.checksum = checksum
 
 	return true, nil
 }

+ 12 - 26
psiphon/common/reloader_test.go

@@ -23,13 +23,20 @@ import (
 	"bytes"
 	"io/ioutil"
 	"os"
+	"path/filepath"
 	"testing"
-	"time"
 )
 
 func TestReloader(t *testing.T) {
 
-	fileName := "reloader_test.dat"
+	dirname, err := ioutil.TempDir("", "psiphon-reloader-test")
+	if err != nil {
+		t.Fatalf("TempDir failed: %s", err)
+	}
+	defer os.RemoveAll(dirname)
+
+	fileName := filepath.Join(dirname, "reloader_test.dat")
+
 	initialContents := []byte("contents1\n")
 	modifiedContents := []byte("contents2\n")
 
@@ -40,29 +47,18 @@ func TestReloader(t *testing.T) {
 
 	file.ReloadableFile = NewReloadableFile(
 		fileName,
-		func(filename string) error {
-			contents, err := ioutil.ReadFile(filename)
-			if err != nil {
-				return err
-			}
-			file.contents = contents
+		func(fileContent []byte) error {
+			file.contents = fileContent
 			return nil
 		})
 
 	// Test: initial load
 
-	err := ioutil.WriteFile(fileName, initialContents, 0600)
+	err = ioutil.WriteFile(fileName, initialContents, 0600)
 	if err != nil {
 		t.Fatalf("WriteFile failed: %s", err)
 	}
 
-	time.Sleep(2 * time.Second)
-	fileInfo, err := os.Stat(fileName)
-	if err != nil {
-		t.Fatalf("Stat failed: %s", err)
-	}
-	t.Logf("ModTime: %s", fileInfo.ModTime())
-
 	reloaded, err := file.Reload()
 	if err != nil {
 		t.Fatalf("Reload failed: %s", err)
@@ -98,16 +94,6 @@ func TestReloader(t *testing.T) {
 		t.Fatalf("WriteFile failed: %s", err)
 	}
 
-	// TODO: without the sleeps, the os.Stat ModTime doesn't
-	// change and IsFileChanged fails to detect the modification.
-
-	time.Sleep(2 * time.Second)
-	fileInfo, err = os.Stat(fileName)
-	if err != nil {
-		t.Fatalf("Stat failed: %s", err)
-	}
-	t.Logf("ModTime: %s", fileInfo.ModTime())
-
 	reloaded, err = file.Reload()
 	if err != nil {
 		t.Fatalf("Reload failed: %s", err)

+ 152 - 0
psiphon/common/subnet.go

@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2016, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package common
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"net"
+	"sort"
+	"strings"
+)
+
+// SubnetLookup provides an efficient lookup for individual
+// IP addresses within a list of subnets.
+// The slice is kept sorted ascending by network IP (see Less) so that
+// ContainsIPAddress can use a binary search.
+type SubnetLookup []net.IPNet
+
+// NewSubnetLookup creates a SubnetLookup from a list of
+// subnet CIDRs. An error is returned if any CIDR fails to parse.
+// NOTE(review): ordering and lookup read network IPs as 4-byte
+// big-endian values, so IPv4 CIDRs are assumed -- confirm IPv6
+// inputs never occur.
+func NewSubnetLookup(CIDRs []string) (SubnetLookup, error) {
+
+	subnets := make([]net.IPNet, len(CIDRs))
+
+	for i, CIDR := range CIDRs {
+		_, network, err := net.ParseCIDR(CIDR)
+		if err != nil {
+			return nil, ContextError(err)
+		}
+		subnets[i] = *network
+	}
+
+	// Sort ascending by network IP to enable the binary search
+	// performed in ContainsIPAddress.
+	lookup := SubnetLookup(subnets)
+	sort.Sort(lookup)
+
+	return lookup, nil
+}
+
+// NewSubnetLookupFromRoutes creates a SubnetLookup from text routes data.
+// The input format is expected to be text lines where each line
+// is, e.g., "1.2.3.0\t255.255.255.0\n".
+// Lines that are malformed, or whose fields do not parse as an IPv4
+// address/netmask pair, are silently skipped; an error is returned only
+// when no valid network is found at all.
+func NewSubnetLookupFromRoutes(routesData []byte) (SubnetLookup, error) {
+
+	// Parse text routes data
+	var subnets []net.IPNet
+	scanner := bufio.NewScanner(bytes.NewReader(routesData))
+	scanner.Split(bufio.ScanLines)
+	for scanner.Scan() {
+		s := strings.Split(scanner.Text(), "\t")
+		if len(s) != 2 {
+			continue
+		}
+
+		ip := parseIPv4(s[0])
+		mask := parseIPv4Mask(s[1])
+		if ip == nil || mask == nil {
+			continue
+		}
+
+		// Normalize by storing the masked network address.
+		subnets = append(subnets, net.IPNet{IP: ip.Mask(mask), Mask: mask})
+	}
+	// NOTE(review): scanner.Err is not checked; safe here since the
+	// underlying bytes.Reader cannot fail mid-scan.
+	if len(subnets) == 0 {
+		return nil, ContextError(errors.New("Routes data contains no networks"))
+	}
+
+	// Sort ascending by network IP to enable the binary search
+	// performed in ContainsIPAddress.
+	lookup := SubnetLookup(subnets)
+	sort.Sort(lookup)
+
+	return lookup, nil
+}
+
+// parseIPv4 parses s as an IP address and returns its 4-byte IPv4
+// representation, or nil when s is not a valid IPv4 address.
+func parseIPv4(s string) net.IP {
+	ip := net.ParseIP(s)
+	if ip == nil {
+		return nil
+	}
+	// To4 yields nil for addresses that are not IPv4.
+	return ip.To4()
+}
+
+// parseIPv4Mask parses s as a dotted-quad netmask (e.g. "255.240.0.0"),
+// returning nil when s is not a valid, canonical IPv4 mask.
+func parseIPv4Mask(s string) net.IPMask {
+	ip := parseIPv4(s)
+	if ip == nil {
+		return nil
+	}
+	mask := net.IPMask(ip)
+	// Size returns 0, 0 for a non-contiguous (non-canonical) mask;
+	// checking bits == 0 also rejects the all-zero mask.
+	if bits, size := mask.Size(); bits == 0 || size == 0 {
+		return nil
+	}
+	return mask
+}
+
+// Len implements sort.Interface
+func (lookup SubnetLookup) Len() int {
+	return len(lookup)
+}
+
+// Swap implements sort.Interface
+func (lookup SubnetLookup) Swap(i, j int) {
+	lookup[i], lookup[j] = lookup[j], lookup[i]
+}
+
+// Less implements sort.Interface. Networks are ordered by their IP
+// interpreted as a big-endian uint32, which assumes 4-byte (IPv4)
+// network addresses.
+func (lookup SubnetLookup) Less(i, j int) bool {
+	return binary.BigEndian.Uint32(lookup[i].IP) < binary.BigEndian.Uint32(lookup[j].IP)
+}
+
+// ContainsIPAddress performs a binary search on the sorted subnet
+// list to find a network containing the candidate IP address.
+// NOTE(review): addr is assumed to be IPv4; addr.To4() returning nil
+// (e.g., for an IPv6 address) would panic in the Uint32 call below --
+// confirm callers only pass IPv4 addresses.
+func (lookup SubnetLookup) ContainsIPAddress(addr net.IP) bool {
+
+	// Search criteria
+	//
+	// The following conditions are satisfied when address_IP is in the network:
+	// 1. address_IP ^ network_mask == network_IP ^ network_mask
+	// 2. address_IP >= network_IP.
+	// We are also assuming that network ranges do not overlap.
+	//
+	// For an ascending array of networks, the sort.Search returns the smallest
+	// index idx for which condition network_IP > address_IP is satisfied, so we
+	// are checking whether or not address_IP belongs to the network[idx-1].
+
+	// Edge conditions check
+	//
+	// idx == 0 means that address_IP is less than the first (smallest) network_IP
+	// thus never satisfies search condition 2.
+	// idx == array_length means that address_IP is larger than the last (largest)
+	// network_IP so we need to check the last element for condition 1.
+
+	addrValue := binary.BigEndian.Uint32(addr.To4())
+	index := sort.Search(len(lookup), func(i int) bool {
+		networkValue := binary.BigEndian.Uint32(lookup[i].IP)
+		return networkValue > addrValue
+	})
+	return index > 0 && lookup[index-1].IP.Equal(addr.Mask(lookup[index-1].Mask))
+}

+ 126 - 0
psiphon/common/subnet_test.go

@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2016, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package common
+
+import (
+	"encoding/binary"
+	"io/ioutil"
+	"math/rand"
+	"net"
+	"testing"
+)
+
+// TestSubnetLookup verifies that lookups constructed from CIDR strings
+// and from tab-separated routes data agree on membership queries,
+// including a prefix-collision case (172.15.x.x vs. 172.16.0.0/12).
+func TestSubnetLookup(t *testing.T) {
+	CIDRs := []string{
+		"192.168.0.0/16",
+		"10.0.0.0/8",
+		"172.16.0.0/12",
+		"100.64.0.0/10"}
+
+	// The same four networks expressed in routes-file format.
+	routes := []byte("192.168.0.0\t255.255.0.0\n10.0.0.0\t255.0.0.0\n" +
+		"172.16.0.0\t255.240.0.0\n100.64.0.0\t255.192.0.0\n")
+
+	var subnetLookup SubnetLookup
+
+	t.Run("new subnet lookup", func(t *testing.T) {
+
+		var err error
+		subnetLookup, err = NewSubnetLookup(CIDRs)
+		if err != nil {
+			t.Fatalf("NewSubnetLookup failed: %s", err)
+		}
+	})
+
+	var subnetLookupRoutes SubnetLookup
+
+	t.Run("new subnet lookup (routes case)", func(t *testing.T) {
+
+		var err error
+		subnetLookupRoutes, err = NewSubnetLookupFromRoutes(routes)
+		if err != nil {
+			t.Fatalf("NewSubnetLookupFromRoutes failed: %s", err)
+		}
+	})
+
+	if subnetLookup == nil || subnetLookupRoutes == nil {
+		t.Fatalf("new subnet list failed")
+	}
+
+	testCases := []struct {
+		description    string
+		ipAddress      net.IP
+		expectedResult bool
+	}{
+		{"IP address in subnet", net.ParseIP("172.17.3.2"), true},
+		{"IP address not in subnet", net.ParseIP("169.254.1.1"), false},
+		{"IP address not in subnet (prefix case)", net.ParseIP("172.15.3.2"), false},
+	}
+
+	// Both lookup variants must return identical results for each case.
+	for _, testCase := range testCases {
+		t.Run(testCase.description, func(t *testing.T) {
+
+			result := subnetLookup.ContainsIPAddress(testCase.ipAddress)
+			if result != testCase.expectedResult {
+				t.Fatalf(
+					"ContainsIPAddress returned %+v expected %+v",
+					result, testCase.expectedResult)
+			}
+
+			result = subnetLookupRoutes.ContainsIPAddress(testCase.ipAddress)
+			if result != testCase.expectedResult {
+				t.Fatalf(
+					"ContainsIPAddress (routes case) returned %+v expected %+v",
+					result, testCase.expectedResult)
+			}
+		})
+	}
+}
+
+// BenchmarkSubnetLookup measures routes-file parsing and random IP
+// lookups. It is skipped when the optional fixture "test_routes.dat"
+// is not present in the working directory.
+func BenchmarkSubnetLookup(b *testing.B) {
+
+	var subnetLookup SubnetLookup
+
+	b.Run("load routes file", func(b *testing.B) {
+
+		routesData, err := ioutil.ReadFile("test_routes.dat")
+		if err != nil {
+			b.Skipf("can't load test routes file: %s", err)
+		}
+
+		for n := 0; n < b.N; n++ {
+			subnetLookup, err = NewSubnetLookupFromRoutes(routesData)
+			if err != nil {
+				b.Fatalf("NewSubnetLookup failed: %s", err)
+			}
+		}
+	})
+
+	if subnetLookup == nil {
+		b.Skipf("no test routes file")
+	}
+
+	b.Run("lookup random IP address", func(b *testing.B) {
+		// Random (unseeded, deterministic across runs) IPv4 addresses.
+		for n := 0; n < b.N; n++ {
+			ip := make([]byte, 4)
+			binary.BigEndian.PutUint32(ip, rand.Uint32())
+			_ = subnetLookup.ContainsIPAddress(net.IP(ip))
+		}
+	})
+}

+ 47 - 6
psiphon/config.go

@@ -29,6 +29,7 @@ import (
 	"time"
 
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 )
 
 // TODO: allow all params to be configured
@@ -66,7 +67,7 @@ const (
 	PSIPHON_API_STATUS_REQUEST_PADDING_MAX_BYTES         = 256
 	PSIPHON_API_CONNECTED_REQUEST_PERIOD                 = 24 * time.Hour
 	PSIPHON_API_CONNECTED_REQUEST_RETRY_PERIOD           = 5 * time.Second
-	PSIPHON_API_TUNNEL_STATS_MAX_COUNT                   = 100
+	PSIPHON_API_PERSISTENT_STATS_MAX_COUNT               = 100
 	PSIPHON_API_CLIENT_VERIFICATION_REQUEST_RETRY_PERIOD = 5 * time.Second
 	PSIPHON_API_CLIENT_VERIFICATION_REQUEST_MAX_RETRIES  = 10
 	FETCH_ROUTES_TIMEOUT_SECONDS                         = 60
@@ -122,9 +123,7 @@ type Config struct {
 	// RemoteServerListDownloadFilename specifies a target filename for
 	// storing the remote server list download. Data is stored in co-located
 	// files (RemoteServerListDownloadFilename.part*) to allow for resumable
-	// downloading. If not specified, the default is to use the
-	// remote object name as the filename, stored in the current working
-	// directory.
+	// downloading.
 	RemoteServerListDownloadFilename string
 
 	// RemoteServerListSignaturePublicKey specifies a public key that's
@@ -133,6 +132,18 @@ type Config struct {
 	// typically embedded in the client binary.
 	RemoteServerListSignaturePublicKey string
 
+	// ObfuscatedServerListRootURL is a URL which specifies the root location
+	// from which to fetch obfuscated server list files.
+	// This value is supplied by and depends on the Psiphon Network, and is
+	// typically embedded in the client binary.
+	ObfuscatedServerListRootURL string
+
+	// ObfuscatedServerListDownloadDirectory specifies a target directory for
+	// storing the obfuscated remote server list downloads. Data is stored in
+	// co-located files (<OSL filename>.part*) to allow for resumable
+	// downloading.
+	ObfuscatedServerListDownloadDirectory string
+
 	// ClientVersion is the client version number that the client reports
 	// to the server. The version number refers to the host client application,
 	// not the core tunnel library. One purpose of this value is to enable
@@ -393,6 +404,11 @@ type Config struct {
 
 	// RateLimits specify throttling configuration for the tunnel.
 	RateLimits common.RateLimits
+
+	// EmitSLOKs indicates whether to emit notices for each seeded SLOK. As this
+	// could reveal user browsing activity, it's intended for debugging and testing
+	// only.
+	EmitSLOKs bool
 }
 
 // LoadConfig parses and validates a JSON format Psiphon config JSON
@@ -437,7 +453,7 @@ func LoadConfig(configJson []byte) (*Config, error) {
 	}
 
 	if config.TunnelProtocol != "" {
-		if !common.Contains(common.SupportedTunnelProtocols, config.TunnelProtocol) {
+		if !common.Contains(protocol.SupportedTunnelProtocols, config.TunnelProtocol) {
 			return nil, common.ContextError(
 				errors.New("invalid tunnel protocol"))
 		}
@@ -477,7 +493,7 @@ func LoadConfig(configJson []byte) (*Config, error) {
 	}
 
 	if !common.Contains(
-		[]string{"", common.PSIPHON_SSH_API_PROTOCOL, common.PSIPHON_WEB_API_PROTOCOL},
+		[]string{"", protocol.PSIPHON_SSH_API_PROTOCOL, protocol.PSIPHON_WEB_API_PROTOCOL},
 		config.TargetApiProtocol) {
 
 		return nil, common.ContextError(
@@ -490,6 +506,31 @@ func LoadConfig(configJson []byte) (*Config, error) {
 			"UpgradeDownloadUrl requires UpgradeDownloadClientVersionHeader and UpgradeDownloadFilename"))
 	}
 
+	if !config.DisableRemoteServerListFetcher {
+
+		if config.RemoteServerListUrl != "" {
+
+			if config.RemoteServerListSignaturePublicKey == "" {
+				return nil, common.ContextError(errors.New("missing RemoteServerListSignaturePublicKey"))
+			}
+
+			if config.RemoteServerListDownloadFilename == "" {
+				return nil, common.ContextError(errors.New("missing RemoteServerListDownloadFilename"))
+			}
+		}
+
+		if config.ObfuscatedServerListRootURL != "" {
+
+			if config.RemoteServerListSignaturePublicKey == "" {
+				return nil, common.ContextError(errors.New("missing RemoteServerListSignaturePublicKey"))
+			}
+
+			if config.ObfuscatedServerListDownloadDirectory == "" {
+				return nil, common.ContextError(errors.New("missing ObfuscatedServerListDownloadDirectory"))
+			}
+		}
+	}
+
 	if config.TunnelConnectTimeoutSeconds == nil {
 		defaultTunnelConnectTimeoutSeconds := TUNNEL_CONNECT_TIMEOUT_SECONDS
 		config.TunnelConnectTimeoutSeconds = &defaultTunnelConnectTimeoutSeconds

+ 97 - 56
psiphon/controller.go

@@ -32,42 +32,44 @@ import (
 
 	"github.com/Psiphon-Inc/goarista/monotime"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 )
 
 // Controller is a tunnel lifecycle coordinator. It manages lists of servers to
 // connect to; establishes and monitors tunnels; and runs local proxies which
 // route traffic through the tunnels.
 type Controller struct {
-	config                         *Config
-	sessionId                      string
-	componentFailureSignal         chan struct{}
-	shutdownBroadcast              chan struct{}
-	runWaitGroup                   *sync.WaitGroup
-	establishedTunnels             chan *Tunnel
-	failedTunnels                  chan *Tunnel
-	tunnelMutex                    sync.Mutex
-	establishedOnce                bool
-	tunnels                        []*Tunnel
-	nextTunnel                     int
-	startedConnectedReporter       bool
-	isEstablishing                 bool
-	establishWaitGroup             *sync.WaitGroup
-	stopEstablishingBroadcast      chan struct{}
-	candidateServerEntries         chan *candidateServerEntry
-	establishPendingConns          *common.Conns
-	untunneledPendingConns         *common.Conns
-	untunneledDialConfig           *DialConfig
-	splitTunnelClassifier          *SplitTunnelClassifier
-	signalFetchRemoteServerList    chan struct{}
-	signalDownloadUpgrade          chan string
-	impairedProtocolClassification map[string]int
-	signalReportConnected          chan struct{}
-	serverAffinityDoneBroadcast    chan struct{}
-	newClientVerificationPayload   chan string
+	config                            *Config
+	sessionId                         string
+	componentFailureSignal            chan struct{}
+	shutdownBroadcast                 chan struct{}
+	runWaitGroup                      *sync.WaitGroup
+	establishedTunnels                chan *Tunnel
+	failedTunnels                     chan *Tunnel
+	tunnelMutex                       sync.Mutex
+	establishedOnce                   bool
+	tunnels                           []*Tunnel
+	nextTunnel                        int
+	startedConnectedReporter          bool
+	isEstablishing                    bool
+	establishWaitGroup                *sync.WaitGroup
+	stopEstablishingBroadcast         chan struct{}
+	candidateServerEntries            chan *candidateServerEntry
+	establishPendingConns             *common.Conns
+	untunneledPendingConns            *common.Conns
+	untunneledDialConfig              *DialConfig
+	splitTunnelClassifier             *SplitTunnelClassifier
+	signalFetchCommonRemoteServerList chan struct{}
+	signalFetchObfuscatedServerLists  chan struct{}
+	signalDownloadUpgrade             chan string
+	impairedProtocolClassification    map[string]int
+	signalReportConnected             chan struct{}
+	serverAffinityDoneBroadcast       chan struct{}
+	newClientVerificationPayload      chan string
 }
 
 type candidateServerEntry struct {
-	serverEntry                *ServerEntry
+	serverEntry                *protocol.ServerEntry
 	isServerAffinityCandidate  bool
 	adjustedEstablishStartTime monotime.Time
 }
@@ -132,9 +134,10 @@ func NewController(config *Config) (controller *Controller, err error) {
 		// TODO: Add a buffer of 1 so we don't miss a signal while receiver is
 		// starting? Trade-off is potential back-to-back fetch remotes. As-is,
 		// establish will eventually signal another fetch remote.
-		signalFetchRemoteServerList: make(chan struct{}),
-		signalDownloadUpgrade:       make(chan string),
-		signalReportConnected:       make(chan struct{}),
+		signalFetchCommonRemoteServerList: make(chan struct{}),
+		signalFetchObfuscatedServerLists:  make(chan struct{}),
+		signalDownloadUpgrade:             make(chan string),
+		signalReportConnected:             make(chan struct{}),
 		// Buffer allows SetClientVerificationPayloadForActiveTunnels to submit one
 		// new payload without blocking or dropping it.
 		newClientVerificationPayload: make(chan string, 1),
@@ -155,11 +158,12 @@ func NewController(config *Config) (controller *Controller, err error) {
 // - a local SOCKS proxy that port forwards through the pool of tunnels
 // - a local HTTP proxy that port forwards through the pool of tunnels
 func (controller *Controller) Run(shutdownBroadcast <-chan struct{}) {
+
 	ReportAvailableRegions()
 
 	// Start components
 
-	listenIP, err := GetInterfaceIPAddress(controller.config.ListenInterface)
+	listenIP, err := common.GetInterfaceIPAddress(controller.config.ListenInterface)
 	if err != nil {
 		NoticeError("error getting listener IP: %s", err)
 		return
@@ -181,8 +185,29 @@ func (controller *Controller) Run(shutdownBroadcast <-chan struct{}) {
 	defer httpProxy.Close()
 
 	if !controller.config.DisableRemoteServerListFetcher {
-		controller.runWaitGroup.Add(1)
-		go controller.remoteServerListFetcher()
+
+		retryPeriod := time.Duration(
+			*controller.config.FetchRemoteServerListRetryPeriodSeconds) * time.Second
+
+		if controller.config.RemoteServerListUrl != "" {
+			controller.runWaitGroup.Add(1)
+			go controller.remoteServerListFetcher(
+				"common",
+				FetchCommonRemoteServerList,
+				controller.signalFetchCommonRemoteServerList,
+				retryPeriod,
+				FETCH_REMOTE_SERVER_LIST_STALE_PERIOD)
+		}
+
+		if controller.config.ObfuscatedServerListRootURL != "" {
+			controller.runWaitGroup.Add(1)
+			go controller.remoteServerListFetcher(
+				"obfuscated",
+				FetchObfuscatedServerLists,
+				controller.signalFetchObfuscatedServerLists,
+				retryPeriod,
+				FETCH_REMOTE_SERVER_LIST_STALE_PERIOD)
+		}
 	}
 
 	if controller.config.UpgradeDownloadUrl != "" &&
@@ -277,17 +302,13 @@ func (controller *Controller) SetClientVerificationPayloadForActiveTunnels(clien
 // remoteServerListFetcher fetches an out-of-band list of server entries
 // for more tunnel candidates. It fetches when signalled, with retries
 // on failure.
-func (controller *Controller) remoteServerListFetcher() {
-	defer controller.runWaitGroup.Done()
+func (controller *Controller) remoteServerListFetcher(
+	name string,
+	fetcher RemoteServerListFetcher,
+	signal <-chan struct{},
+	retryPeriod, stalePeriod time.Duration) {
 
-	if controller.config.RemoteServerListUrl == "" {
-		NoticeAlert("remote server list URL is blank")
-		return
-	}
-	if controller.config.RemoteServerListSignaturePublicKey == "" {
-		NoticeAlert("remote server list signature public key blank")
-		return
-	}
+	defer controller.runWaitGroup.Done()
 
 	var lastFetchTime monotime.Time
 
@@ -295,7 +316,7 @@ fetcherLoop:
 	for {
 		// Wait for a signal before fetching
 		select {
-		case <-controller.signalFetchRemoteServerList:
+		case <-signal:
 		case <-controller.shutdownBroadcast:
 			break fetcherLoop
 		}
@@ -303,7 +324,7 @@ fetcherLoop:
 		// Skip fetch entirely (i.e., send no request at all, even when ETag would save
 		// on response size) when a recent fetch was successful
 		if lastFetchTime != 0 &&
-			lastFetchTime.Add(FETCH_REMOTE_SERVER_LIST_STALE_PERIOD).After(monotime.Now()) {
+			lastFetchTime.Add(stalePeriod).After(monotime.Now()) {
 			continue
 		}
 
@@ -321,7 +342,7 @@ fetcherLoop:
 			// no active tunnel, the untunneledDialConfig will be used.
 			tunnel := controller.getNextActiveTunnel()
 
-			err := FetchRemoteServerList(
+			err := fetcher(
 				controller.config,
 				tunnel,
 				controller.untunneledDialConfig)
@@ -331,10 +352,9 @@ fetcherLoop:
 				break retryLoop
 			}
 
-			NoticeAlert("failed to fetch remote server list: %s", err)
+			NoticeAlert("failed to fetch %s remote server list: %s", name, err)
 
-			timeout := time.After(
-				time.Duration(*controller.config.FetchRemoteServerListRetryPeriodSeconds) * time.Second)
+			timeout := time.After(retryPeriod)
 			select {
 			case <-timeout:
 			case <-controller.shutdownBroadcast:
@@ -343,7 +363,7 @@ fetcherLoop:
 		}
 	}
 
-	NoticeInfo("exiting remote server list fetcher")
+	NoticeInfo("exiting %s remote server list fetcher", name)
 }
 
 // establishTunnelWatcher terminates the controller if a tunnel
@@ -677,7 +697,7 @@ func (controller *Controller) classifyImpairedProtocol(failedTunnel *Tunnel) {
 	} else {
 		controller.impairedProtocolClassification[failedTunnel.protocol] = 0
 	}
-	if len(controller.getImpairedProtocols()) == len(common.SupportedTunnelProtocols) {
+	if len(controller.getImpairedProtocols()) == len(protocol.SupportedTunnelProtocols) {
 		// Reset classification if all protocols are classified as impaired as
 		// the network situation (or attack) may not be protocol-specific.
 		// TODO: compare against count of distinct supported protocols for
@@ -709,6 +729,17 @@ func (controller *Controller) isImpairedProtocol(protocol string) bool {
 	return ok && count >= IMPAIRED_PROTOCOL_CLASSIFICATION_THRESHOLD
 }
 
+// SignalSeededNewSLOK implements the TunnelOwner interface. This function
+// is called by Tunnel.operateTunnel when the tunnel has received a new,
+// previously unknown SLOK from the server. The Controller triggers an OSL
+// fetch, as the new SLOK may be sufficient to access new OSLs.
+func (controller *Controller) SignalSeededNewSLOK() {
+	select {
+	case controller.signalFetchObfuscatedServerLists <- *new(struct{}):
+	default:
+	}
+}
+
 // SignalTunnelFailure implements the TunnelOwner interface. This function
 // is called by Tunnel.operateTunnel when the tunnel has detected that it
 // has failed. The Controller will signal runTunnels to create a new
@@ -845,7 +876,9 @@ func (controller *Controller) getNextActiveTunnel() (tunnel *Tunnel) {
 
 // isActiveTunnelServerEntry is used to check if there's already
 // an existing tunnel to a candidate server.
-func (controller *Controller) isActiveTunnelServerEntry(serverEntry *ServerEntry) bool {
+func (controller *Controller) isActiveTunnelServerEntry(
+	serverEntry *protocol.ServerEntry) bool {
+
 	controller.tunnelMutex.Lock()
 	defer controller.tunnelMutex.Unlock()
 	for _, activeTunnel := range controller.tunnels {
@@ -1045,7 +1078,7 @@ loop:
 				break
 			}
 
-			if controller.config.TargetApiProtocol == common.PSIPHON_SSH_API_PROTOCOL &&
+			if controller.config.TargetApiProtocol == protocol.PSIPHON_SSH_API_PROTOCOL &&
 				!serverEntry.SupportsSSHAPIRequests() {
 				continue
 			}
@@ -1103,15 +1136,23 @@ loop:
 		// Free up resources now, but don't reset until after the pause.
 		iterator.Close()
 
-		// Trigger a fetch remote server list, since we may have failed to
-		// connect with all known servers. Don't block sending signal, since
+		// Trigger a common remote server list fetch, since we may have failed
+		// to connect with all known servers. Don't block sending signal, since
 		// this signal may have already been sent.
 		// Don't wait for fetch remote to succeed, since it may fail and
 		// enter a retry loop and we're better off trying more known servers.
 		// TODO: synchronize the fetch response, so it can be incorporated
 		// into the server entry iterator as soon as available.
 		select {
-		case controller.signalFetchRemoteServerList <- *new(struct{}):
+		case controller.signalFetchCommonRemoteServerList <- *new(struct{}):
+		default:
+		}
+
 +		// Trigger an OSL fetch in parallel. Both fetches are run in parallel
 +		// so that if one out of the common RLS and OSL set is large, it
 +		// doesn't entirely block fetching the other.
+		select {
+		case controller.signalFetchObfuscatedServerLists <- *new(struct{}):
 		default:
 		}
 

+ 63 - 27
psiphon/controller_test.go

@@ -20,6 +20,7 @@
 package psiphon
 
 import (
+	"encoding/json"
 	"flag"
 	"fmt"
 	"io"
@@ -28,6 +29,7 @@ import (
 	"net/http"
 	"net/url"
 	"os"
+	"path/filepath"
 	"strings"
 	"sync"
 	"sync/atomic"
@@ -37,15 +39,30 @@ import (
 	"github.com/Psiphon-Inc/goarista/monotime"
 	socks "github.com/Psiphon-Inc/goptlib"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 	"github.com/elazarl/goproxy"
 )
 
+var testDataDirName string
+
 func TestMain(m *testing.M) {
 	flag.Parse()
-	os.Remove(DATA_STORE_FILENAME)
+
+	var err error
+	testDataDirName, err = ioutil.TempDir("", "psiphon-controller-test")
+	if err != nil {
+		fmt.Printf("TempDir failed: %s", err)
+		os.Exit(1)
+	}
+	defer os.RemoveAll(testDataDirName)
+
+	os.Remove(filepath.Join(testDataDirName, DATA_STORE_FILENAME))
+
+	SetEmitDiagnosticNotices(true)
+
 	initDisruptor()
 	initUpstreamProxy()
-	SetEmitDiagnosticNotices(true)
+
 	os.Exit(m.Run())
 }
 
@@ -124,7 +141,7 @@ func TestUntunneledUpgradeClientIsLatestVersion(t *testing.T) {
 		})
 }
 
-func TestUntunneledResumableFetchRemoveServerList(t *testing.T) {
+func TestUntunneledResumableFetchRemoteServerList(t *testing.T) {
 	controllerRun(t,
 		&controllerRunConfig{
 			expectNoServerEntries:    true,
@@ -187,7 +204,7 @@ func TestSSH(t *testing.T) {
 	controllerRun(t,
 		&controllerRunConfig{
 			expectNoServerEntries:    false,
-			protocol:                 common.TUNNEL_PROTOCOL_SSH,
+			protocol:                 protocol.TUNNEL_PROTOCOL_SSH,
 			clientIsLatestVersion:    false,
 			disableUntunneledUpgrade: true,
 			disableEstablishing:      false,
@@ -204,7 +221,7 @@ func TestObfuscatedSSH(t *testing.T) {
 	controllerRun(t,
 		&controllerRunConfig{
 			expectNoServerEntries:    false,
-			protocol:                 common.TUNNEL_PROTOCOL_OBFUSCATED_SSH,
+			protocol:                 protocol.TUNNEL_PROTOCOL_OBFUSCATED_SSH,
 			clientIsLatestVersion:    false,
 			disableUntunneledUpgrade: true,
 			disableEstablishing:      false,
@@ -221,7 +238,7 @@ func TestUnfrontedMeek(t *testing.T) {
 	controllerRun(t,
 		&controllerRunConfig{
 			expectNoServerEntries:    false,
-			protocol:                 common.TUNNEL_PROTOCOL_UNFRONTED_MEEK,
+			protocol:                 protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK,
 			clientIsLatestVersion:    false,
 			disableUntunneledUpgrade: true,
 			disableEstablishing:      false,
@@ -238,7 +255,7 @@ func TestUnfrontedMeekWithTransformer(t *testing.T) {
 	controllerRun(t,
 		&controllerRunConfig{
 			expectNoServerEntries:    false,
-			protocol:                 common.TUNNEL_PROTOCOL_UNFRONTED_MEEK,
+			protocol:                 protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK,
 			clientIsLatestVersion:    true,
 			disableUntunneledUpgrade: true,
 			disableEstablishing:      false,
@@ -255,7 +272,7 @@ func TestFrontedMeek(t *testing.T) {
 	controllerRun(t,
 		&controllerRunConfig{
 			expectNoServerEntries:    false,
-			protocol:                 common.TUNNEL_PROTOCOL_FRONTED_MEEK,
+			protocol:                 protocol.TUNNEL_PROTOCOL_FRONTED_MEEK,
 			clientIsLatestVersion:    false,
 			disableUntunneledUpgrade: true,
 			disableEstablishing:      false,
@@ -272,7 +289,7 @@ func TestFrontedMeekWithTransformer(t *testing.T) {
 	controllerRun(t,
 		&controllerRunConfig{
 			expectNoServerEntries:    false,
-			protocol:                 common.TUNNEL_PROTOCOL_FRONTED_MEEK,
+			protocol:                 protocol.TUNNEL_PROTOCOL_FRONTED_MEEK,
 			clientIsLatestVersion:    true,
 			disableUntunneledUpgrade: true,
 			disableEstablishing:      false,
@@ -289,7 +306,7 @@ func TestFrontedMeekHTTP(t *testing.T) {
 	controllerRun(t,
 		&controllerRunConfig{
 			expectNoServerEntries:    false,
-			protocol:                 common.TUNNEL_PROTOCOL_FRONTED_MEEK_HTTP,
+			protocol:                 protocol.TUNNEL_PROTOCOL_FRONTED_MEEK_HTTP,
 			clientIsLatestVersion:    true,
 			disableUntunneledUpgrade: true,
 			disableEstablishing:      false,
@@ -306,7 +323,7 @@ func TestUnfrontedMeekHTTPS(t *testing.T) {
 	controllerRun(t,
 		&controllerRunConfig{
 			expectNoServerEntries:    false,
-			protocol:                 common.TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS,
+			protocol:                 protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS,
 			clientIsLatestVersion:    false,
 			disableUntunneledUpgrade: true,
 			disableEstablishing:      false,
@@ -323,7 +340,7 @@ func TestUnfrontedMeekHTTPSWithTransformer(t *testing.T) {
 	controllerRun(t,
 		&controllerRunConfig{
 			expectNoServerEntries:    false,
-			protocol:                 common.TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS,
+			protocol:                 protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS,
 			clientIsLatestVersion:    true,
 			disableUntunneledUpgrade: true,
 			disableEstablishing:      false,
@@ -357,7 +374,7 @@ func TestObfuscatedSSHWithUpstreamProxy(t *testing.T) {
 	controllerRun(t,
 		&controllerRunConfig{
 			expectNoServerEntries:    false,
-			protocol:                 common.TUNNEL_PROTOCOL_OBFUSCATED_SSH,
+			protocol:                 protocol.TUNNEL_PROTOCOL_OBFUSCATED_SSH,
 			clientIsLatestVersion:    false,
 			disableUntunneledUpgrade: true,
 			disableEstablishing:      false,
@@ -374,7 +391,7 @@ func TestUnfrontedMeekWithUpstreamProxy(t *testing.T) {
 	controllerRun(t,
 		&controllerRunConfig{
 			expectNoServerEntries:    false,
-			protocol:                 common.TUNNEL_PROTOCOL_UNFRONTED_MEEK,
+			protocol:                 protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK,
 			clientIsLatestVersion:    false,
 			disableUntunneledUpgrade: true,
 			disableEstablishing:      false,
@@ -391,7 +408,7 @@ func TestUnfrontedMeekHTTPSWithUpstreamProxy(t *testing.T) {
 	controllerRun(t,
 		&controllerRunConfig{
 			expectNoServerEntries:    false,
-			protocol:                 common.TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS,
+			protocol:                 protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS,
 			clientIsLatestVersion:    false,
 			disableUntunneledUpgrade: true,
 			disableEstablishing:      false,
@@ -420,12 +437,23 @@ type controllerRunConfig struct {
 
 func controllerRun(t *testing.T, runConfig *controllerRunConfig) {
 
-	configFileContents, err := ioutil.ReadFile("controller_test.config")
+	configJSON, err := ioutil.ReadFile("controller_test.config")
 	if err != nil {
 		// Skip, don't fail, if config file is not present
 		t.Skipf("error loading configuration file: %s", err)
 	}
-	config, err := LoadConfig(configFileContents)
+
+	// These fields must be filled in before calling LoadConfig
+	var modifyConfig map[string]interface{}
+	json.Unmarshal(configJSON, &modifyConfig)
+	modifyConfig["DataStoreDirectory"] = testDataDirName
+	modifyConfig["RemoteServerListDownloadFilename"] = filepath.Join(testDataDirName, "server_list_compressed")
+	modifyConfig["ObfuscatedServerListDownloadDirectory"] = testDataDirName
+	modifyConfig["ObfuscatedServerListRootURL"] = "http://127.0.0.1/osl" // will fail
+	modifyConfig["UpgradeDownloadFilename"] = filepath.Join(testDataDirName, "upgrade")
+	configJSON, _ = json.Marshal(modifyConfig)
+
+	config, err := LoadConfig(configJSON)
 	if err != nil {
 		t.Fatalf("error processing configuration file: %s", err)
 	}
@@ -437,7 +465,7 @@ func controllerRun(t *testing.T, runConfig *controllerRunConfig) {
 	if runConfig.disableEstablishing {
 		// Clear remote server list so tunnel cannot be established.
 		// TODO: also delete all server entries in the datastore.
-		config.RemoteServerListUrl = ""
+		config.DisableRemoteServerListFetcher = true
 	}
 
 	if runConfig.disableApi {
@@ -474,10 +502,11 @@ func controllerRun(t *testing.T, runConfig *controllerRunConfig) {
 	establishTunnelPausePeriodSeconds := 1
 	config.EstablishTunnelPausePeriodSeconds = &establishTunnelPausePeriodSeconds
 
-	os.Remove(config.UpgradeDownloadFilename)
-
 	config.TunnelProtocol = runConfig.protocol
 
+	os.Remove(config.UpgradeDownloadFilename)
+	os.Remove(config.RemoteServerListDownloadFilename)
+
 	err = InitDataStore(config)
 	if err != nil {
 		t.Fatalf("error initializing datastore: %s", err)
@@ -572,16 +601,23 @@ func controllerRun(t *testing.T, runConfig *controllerRunConfig) {
 				default:
 				}
 
-			case "RemoteServerListDownloadedBytes":
+			case "RemoteServerListResourceDownloadedBytes":
 
-				atomic.AddInt32(&remoteServerListDownloadedBytesCount, 1)
-				t.Logf("RemoteServerListDownloadedBytes: %d", int(payload["bytes"].(float64)))
+				url := payload["url"].(string)
+				if url == config.RemoteServerListUrl {
+					t.Logf("RemoteServerListResourceDownloadedBytes: %d", int(payload["bytes"].(float64)))
+					atomic.AddInt32(&remoteServerListDownloadedBytesCount, 1)
+				}
 
-			case "RemoteServerListDownloaded":
+			case "RemoteServerListResourceDownloaded":
 
-				select {
-				case remoteServerListDownloaded <- *new(struct{}):
-				default:
+				url := payload["url"].(string)
+				if url == config.RemoteServerListUrl {
+					t.Logf("RemoteServerListResourceDownloaded")
+					select {
+					case remoteServerListDownloaded <- *new(struct{}):
+					default:
+					}
 				}
 
 			case "ImpairedProtocolClassification":

+ 232 - 117
psiphon/dataStore.go

@@ -33,6 +33,7 @@ import (
 
 	"github.com/Psiphon-Inc/bolt"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 )
 
 // The BoltDB dataStore implementation is an alternative to the sqlite3-based
@@ -57,9 +58,18 @@ const (
 	urlETagsBucket              = "urlETags"
 	keyValueBucket              = "keyValues"
 	tunnelStatsBucket           = "tunnelStats"
+	remoteServerListStatsBucket = "remoteServerListStats"
+	slokBucket                  = "SLOKs"
 	rankedServerEntryCount      = 100
 )
 
+const (
+	DATA_STORE_LAST_CONNECTED_KEY           = "lastConnected"
+	DATA_STORE_OSL_REGISTRY_KEY             = "OSLRegistry"
+	PERSISTENT_STAT_TYPE_TUNNEL             = tunnelStatsBucket
+	PERSISTENT_STAT_TYPE_REMOTE_SERVER_LIST = remoteServerListStatsBucket
+)
+
 var singleton dataStore
 
 // InitDataStore initializes the singleton instance of dataStore. This
@@ -103,6 +113,8 @@ func InitDataStore(config *Config) (err error) {
 				urlETagsBucket,
 				keyValueBucket,
 				tunnelStatsBucket,
+				remoteServerListStatsBucket,
+				slokBucket,
 			}
 			for _, bucket := range requiredBuckets {
 				_, err := tx.CreateBucketIfNotExists([]byte(bucket))
@@ -136,7 +148,7 @@ func InitDataStore(config *Config) (err error) {
 			migrateEntries(migratableServerEntries, filepath.Join(config.DataStoreDirectory, LEGACY_DATA_STORE_FILENAME))
 		}
 
-		resetAllTunnelStatsToUnreported()
+		resetAllPersistentStatsToUnreported()
 	})
 
 	return err
@@ -157,12 +169,12 @@ func checkInitDataStore() {
 // overwritten; otherwise, the existing record is unchanged.
 // If the server entry data is malformed, an alert notice is issued and
 // the entry is skipped; no error is returned.
-func StoreServerEntry(serverEntry *ServerEntry, replaceIfExists bool) error {
+func StoreServerEntry(serverEntry *protocol.ServerEntry, replaceIfExists bool) error {
 	checkInitDataStore()
 
 	// Server entries should already be validated before this point,
 	// so instead of skipping we fail with an error.
-	err := ValidateServerEntry(serverEntry)
+	err := protocol.ValidateServerEntry(serverEntry)
 	if err != nil {
 		return common.ContextError(errors.New("invalid server entry"))
 	}
@@ -184,7 +196,7 @@ func StoreServerEntry(serverEntry *ServerEntry, replaceIfExists bool) error {
 		existingServerEntryValid := false
 		existingData := serverEntries.Get([]byte(serverEntry.IpAddress))
 		if existingData != nil {
-			existingServerEntry := new(ServerEntry)
+			existingServerEntry := new(protocol.ServerEntry)
 			if json.Unmarshal(existingData, existingServerEntry) == nil {
 				existingServerEntryValid = true
 			}
@@ -227,7 +239,7 @@ func StoreServerEntry(serverEntry *ServerEntry, replaceIfExists bool) error {
 // Shuffling is performed on imported server entrues as part of client-side
 // load balancing.
 // There is an independent transaction for each entry insert/update.
-func StoreServerEntries(serverEntries []*ServerEntry, replaceIfExists bool) error {
+func StoreServerEntries(serverEntries []*protocol.ServerEntry, replaceIfExists bool) error {
 	checkInitDataStore()
 
 	for index := len(serverEntries) - 1; index > 0; index-- {
@@ -353,7 +365,7 @@ func insertRankedServerEntry(tx *bolt.Tx, serverEntryId string, position int) er
 	return nil
 }
 
-func serverEntrySupportsProtocol(serverEntry *ServerEntry, protocol string) bool {
+func serverEntrySupportsProtocol(serverEntry *protocol.ServerEntry, protocol string) bool {
 	// Note: for meek, the capabilities are FRONTED-MEEK and UNFRONTED-MEEK
 	// and the additonal OSSH service is assumed to be available internally.
 	requiredCapability := strings.TrimSuffix(protocol, "-OSSH")
@@ -370,7 +382,7 @@ type ServerEntryIterator struct {
 	serverEntryIndex            int
 	isTargetServerEntryIterator bool
 	hasNextTargetServerEntry    bool
-	targetServerEntry           *ServerEntry
+	targetServerEntry           *protocol.ServerEntry
 }
 
 // NewServerEntryIterator creates a new ServerEntryIterator
@@ -397,8 +409,8 @@ func NewServerEntryIterator(config *Config) (iterator *ServerEntryIterator, err
 
 // newTargetServerEntryIterator is a helper for initializing the TargetServerEntry case
 func newTargetServerEntryIterator(config *Config) (iterator *ServerEntryIterator, err error) {
-	serverEntry, err := DecodeServerEntry(
-		config.TargetServerEntry, common.GetCurrentTimestamp(), common.SERVER_ENTRY_SOURCE_TARGET)
+	serverEntry, err := protocol.DecodeServerEntry(
+		config.TargetServerEntry, common.GetCurrentTimestamp(), protocol.SERVER_ENTRY_SOURCE_TARGET)
 	if err != nil {
 		return nil, err
 	}
@@ -501,7 +513,7 @@ func (iterator *ServerEntryIterator) Close() {
 
 // Next returns the next server entry, by rank, for a ServerEntryIterator.
 // Returns nil with no error when there is no next item.
-func (iterator *ServerEntryIterator) Next() (serverEntry *ServerEntry, err error) {
+func (iterator *ServerEntryIterator) Next() (serverEntry *protocol.ServerEntry, err error) {
 	defer func() {
 		if err != nil {
 			iterator.Close()
@@ -550,7 +562,7 @@ func (iterator *ServerEntryIterator) Next() (serverEntry *ServerEntry, err error
 			continue
 		}
 
-		serverEntry = new(ServerEntry)
+		serverEntry = new(protocol.ServerEntry)
 		err = json.Unmarshal(data, serverEntry)
 		if err != nil {
 			// In case of data corruption or a bug causing this condition,
@@ -574,7 +586,7 @@ func (iterator *ServerEntryIterator) Next() (serverEntry *ServerEntry, err error
 // which have a single meekFrontingDomain and not a meekFrontingAddresses array.
 // By copying this one meekFrontingDomain into meekFrontingAddresses, this client effectively
 // uses that single value as legacy clients do.
-func MakeCompatibleServerEntry(serverEntry *ServerEntry) *ServerEntry {
+func MakeCompatibleServerEntry(serverEntry *protocol.ServerEntry) *protocol.ServerEntry {
 	if len(serverEntry.MeekFrontingAddresses) == 0 && serverEntry.MeekFrontingDomain != "" {
 		serverEntry.MeekFrontingAddresses =
 			append(serverEntry.MeekFrontingAddresses, serverEntry.MeekFrontingDomain)
@@ -583,13 +595,13 @@ func MakeCompatibleServerEntry(serverEntry *ServerEntry) *ServerEntry {
 	return serverEntry
 }
 
-func scanServerEntries(scanner func(*ServerEntry)) error {
+func scanServerEntries(scanner func(*protocol.ServerEntry)) error {
 	err := singleton.db.View(func(tx *bolt.Tx) error {
 		bucket := tx.Bucket([]byte(serverEntriesBucket))
 		cursor := bucket.Cursor()
 
 		for key, value := cursor.First(); key != nil; key, value = cursor.Next() {
-			serverEntry := new(ServerEntry)
+			serverEntry := new(protocol.ServerEntry)
 			err := json.Unmarshal(value, serverEntry)
 			if err != nil {
 				// In case of data corruption or a bug causing this condition,
@@ -612,13 +624,13 @@ func scanServerEntries(scanner func(*ServerEntry)) error {
 
 // CountServerEntries returns a count of stored servers for the
 // specified region and protocol.
-func CountServerEntries(region, protocol string) int {
+func CountServerEntries(region, tunnelProtocol string) int {
 	checkInitDataStore()
 
 	count := 0
-	err := scanServerEntries(func(serverEntry *ServerEntry) {
+	err := scanServerEntries(func(serverEntry *protocol.ServerEntry) {
 		if (region == "" || serverEntry.Region == region) &&
-			(protocol == "" || serverEntrySupportsProtocol(serverEntry, protocol)) {
+			(tunnelProtocol == "" || serverEntrySupportsProtocol(serverEntry, tunnelProtocol)) {
 			count += 1
 		}
 	})
@@ -637,7 +649,7 @@ func ReportAvailableRegions() {
 	checkInitDataStore()
 
 	regions := make(map[string]bool)
-	err := scanServerEntries(func(serverEntry *ServerEntry) {
+	err := scanServerEntries(func(serverEntry *protocol.ServerEntry) {
 		regions[serverEntry.Region] = true
 	})
 
@@ -664,7 +676,7 @@ func GetServerEntryIpAddresses() (ipAddresses []string, err error) {
 	checkInitDataStore()
 
 	ipAddresses = make([]string, 0)
-	err = scanServerEntries(func(serverEntry *ServerEntry) {
+	err = scanServerEntries(func(serverEntry *protocol.ServerEntry) {
 		ipAddresses = append(ipAddresses, serverEntry.IpAddress)
 	})
 
@@ -803,195 +815,298 @@ func GetKeyValue(key string) (value string, err error) {
 	return value, nil
 }
 
-// Tunnel stats records in the tunnelStatsStateUnreported
+// Persistent stat records in the persistentStatStateUnreported
 // state are available for take out.
-// Records in the tunnelStatsStateReporting have been
-// taken out and are pending either deleting (for a
-// successful request) or change to StateUnreported (for
-// a failed request).
-// All tunnel stats records are reverted to StateUnreported
+//
+// Records in the persistentStatStateReporting have been taken
+// out and are pending either deletion (for a successful request)
+// or change to StateUnreported (for a failed request).
+//
+// All persistent stat records are reverted to StateUnreported
 // when the datastore is initialized at start up.
 
-var tunnelStatsStateUnreported = []byte("0")
-var tunnelStatsStateReporting = []byte("1")
+var persistentStatStateUnreported = []byte("0")
+var persistentStatStateReporting = []byte("1")
+
+var persistentStatTypes = []string{
+	PERSISTENT_STAT_TYPE_REMOTE_SERVER_LIST,
+	PERSISTENT_STAT_TYPE_TUNNEL,
+}
 
-// StoreTunnelStats adds a new tunnel stats record, which is
-// set to StateUnreported and is an immediate candidate for
+// StorePersistentStats adds a new persistent stat record, which
+// is set to StateUnreported and is an immediate candidate for
 // reporting.
-// tunnelStats is a JSON byte array containing fields as
-// required by the Psiphon server API (see RecordTunnelStats).
-// It's assumed that the JSON value contains enough unique
-// information for the value to function as a key in the
-// key/value datastore. This assumption is currently satisfied
-// by the fields sessionId + tunnelNumber.
-func StoreTunnelStats(tunnelStats []byte) error {
+//
+// The stat is a JSON byte array containing fields as
+// required by the Psiphon server API. It's assumed that the
+// JSON value contains enough unique information for the value to
+// function as a key in the key/value datastore. This assumption
+// is currently satisfied by the fields sessionId + tunnelNumber
+// for tunnel stats, and URL + ETag for remote server list stats.
+func StorePersistentStat(statType string, stat []byte) error {
 	checkInitDataStore()
 
+	if !common.Contains(persistentStatTypes, statType) {
+		return common.ContextError(fmt.Errorf("invalid persistent stat type: %s", statType))
+	}
+
 	err := singleton.db.Update(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(tunnelStatsBucket))
-		err := bucket.Put(tunnelStats, tunnelStatsStateUnreported)
+		bucket := tx.Bucket([]byte(statType))
+		err := bucket.Put(stat, persistentStatStateUnreported)
 		return err
 	})
 
 	if err != nil {
 		return common.ContextError(err)
 	}
+
 	return nil
 }
 
-// CountUnreportedTunnelStats returns the number of tunnel
-// stats records in StateUnreported.
-func CountUnreportedTunnelStats() int {
+// CountUnreportedPersistentStats returns the number of persistent
+// stat records in StateUnreported.
+func CountUnreportedPersistentStats() int {
 	checkInitDataStore()
 
 	unreported := 0
 
 	err := singleton.db.Update(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(tunnelStatsBucket))
-		cursor := bucket.Cursor()
-		for key, value := cursor.First(); key != nil; key, value = cursor.Next() {
-			if 0 == bytes.Compare(value, tunnelStatsStateUnreported) {
-				unreported++
-				break
+
+		for _, statType := range persistentStatTypes {
+
+			bucket := tx.Bucket([]byte(statType))
+			cursor := bucket.Cursor()
+			for key, value := cursor.First(); key != nil; key, value = cursor.Next() {
+				if 0 == bytes.Compare(value, persistentStatStateUnreported) {
+					unreported++
+					break
+				}
 			}
 		}
 		return nil
 	})
 
 	if err != nil {
-		NoticeAlert("CountUnreportedTunnelStats failed: %s", err)
+		NoticeAlert("CountUnreportedPersistentStats failed: %s", err)
 		return 0
 	}
 
 	return unreported
 }
 
-// TakeOutUnreportedTunnelStats returns up to maxCount tunnel
-// stats records that are in StateUnreported. The records are set
-// to StateReporting. If the records are successfully reported,
-// clear them with ClearReportedTunnelStats. If the records are
-// not successfully reported, restore them with
-// PutBackUnreportedTunnelStats.
-func TakeOutUnreportedTunnelStats(maxCount int) ([][]byte, error) {
+// TakeOutUnreportedPersistentStats returns up to maxCount persistent
+// stats records that are in StateUnreported. The records are set to
+// StateReporting. If the records are successfully reported, clear them
+// with ClearReportedPersistentStats. If the records are not successfully
+// reported, restore them with PutBackUnreportedPersistentStats.
+func TakeOutUnreportedPersistentStats(maxCount int) (map[string][][]byte, error) {
 	checkInitDataStore()
 
-	tunnelStats := make([][]byte, 0)
+	stats := make(map[string][][]byte)
 
 	err := singleton.db.Update(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(tunnelStatsBucket))
-		cursor := bucket.Cursor()
-		for key, value := cursor.First(); key != nil; key, value = cursor.Next() {
 
-			// Perform a test JSON unmarshaling. In case of data corruption or a bug,
-			// skip the record.
-			var jsonData interface{}
-			err := json.Unmarshal(key, &jsonData)
-			if err != nil {
-				NoticeAlert(
-					"Invalid key in TakeOutUnreportedTunnelStats: %s: %s",
-					string(key), err)
-				continue
-			}
+		count := 0
 
-			if 0 == bytes.Compare(value, tunnelStatsStateUnreported) {
-				// Must make a copy as slice is only valid within transaction.
-				data := make([]byte, len(key))
-				copy(data, key)
-				tunnelStats = append(tunnelStats, data)
-				if len(tunnelStats) >= maxCount {
+		for _, statType := range persistentStatTypes {
+
+			stats[statType] = make([][]byte, 0)
+
+			bucket := tx.Bucket([]byte(statType))
+			cursor := bucket.Cursor()
+			for key, value := cursor.First(); key != nil; key, value = cursor.Next() {
+
+				if count >= maxCount {
 					break
 				}
+
+				// Perform a test JSON unmarshaling. In case of data corruption or a bug,
+				// skip the record.
+				var jsonData interface{}
+				err := json.Unmarshal(key, &jsonData)
+				if err != nil {
+					NoticeAlert(
+						"Invalid key in TakeOutUnreportedPersistentStats: %s: %s",
+						string(key), err)
+					continue
+				}
+
+				if 0 == bytes.Compare(value, persistentStatStateUnreported) {
+					// Must make a copy as slice is only valid within transaction.
+					data := make([]byte, len(key))
+					copy(data, key)
+					stats[statType] = append(stats[statType], data)
+					count += 1
+				}
 			}
-		}
-		for _, key := range tunnelStats {
-			err := bucket.Put(key, tunnelStatsStateReporting)
-			if err != nil {
-				return err
+
+			for _, key := range stats[statType] {
+				err := bucket.Put(key, persistentStatStateReporting)
+				if err != nil {
+					return err
+				}
 			}
-		}
 
+		}
 		return nil
 	})
 
 	if err != nil {
 		return nil, common.ContextError(err)
 	}
-	return tunnelStats, nil
+
+	return stats, nil
 }
 
-// PutBackUnreportedTunnelStats restores a list of tunnel
-// stats records to StateUnreported.
-func PutBackUnreportedTunnelStats(tunnelStats [][]byte) error {
+// PutBackUnreportedPersistentStats restores a list of persistent
+// stat records to StateUnreported.
+func PutBackUnreportedPersistentStats(stats map[string][][]byte) error {
 	checkInitDataStore()
 
 	err := singleton.db.Update(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(tunnelStatsBucket))
-		for _, key := range tunnelStats {
-			err := bucket.Put(key, tunnelStatsStateUnreported)
-			if err != nil {
-				return err
+
+		for _, statType := range persistentStatTypes {
+
+			bucket := tx.Bucket([]byte(statType))
+			for _, key := range stats[statType] {
+				err := bucket.Put(key, persistentStatStateUnreported)
+				if err != nil {
+					return err
+				}
 			}
 		}
+
 		return nil
 	})
 
 	if err != nil {
 		return common.ContextError(err)
 	}
+
 	return nil
 }
 
-// ClearReportedTunnelStats deletes a list of tunnel
-// stats records that were succesdfully reported.
-func ClearReportedTunnelStats(tunnelStats [][]byte) error {
+// ClearReportedPersistentStats deletes a list of persistent
+// stat records that were successfully reported.
+func ClearReportedPersistentStats(stats map[string][][]byte) error {
 	checkInitDataStore()
 
 	err := singleton.db.Update(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(tunnelStatsBucket))
-		for _, key := range tunnelStats {
-			err := bucket.Delete(key)
-			if err != nil {
-				return err
+
+		for _, statType := range persistentStatTypes {
+
+			bucket := tx.Bucket([]byte(statType))
+			for _, key := range stats[statType] {
+				err := bucket.Delete(key)
+				if err != nil {
+					return err
+				}
 			}
 		}
+
 		return nil
 	})
 
 	if err != nil {
 		return common.ContextError(err)
 	}
+
 	return nil
 }
 
-// resetAllTunnelStatsToUnreported sets all tunnel
-// stats records to StateUnreported. This reset is called
-// when the datastore is initialized at start up, as we do
-// not know if tunnel records in StateReporting were reported
-// or not.
-func resetAllTunnelStatsToUnreported() error {
+// resetAllPersistentStatsToUnreported sets all persistent stat
+// records to StateUnreported. This reset is called when the
+// datastore is initialized at start up, as we do not know if
+// persistent records in StateReporting were reported or not.
+func resetAllPersistentStatsToUnreported() error {
 	checkInitDataStore()
 
 	err := singleton.db.Update(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(tunnelStatsBucket))
-		resetKeys := make([][]byte, 0)
-		cursor := bucket.Cursor()
-		for key, _ := cursor.First(); key != nil; key, _ = cursor.Next() {
-			resetKeys = append(resetKeys, key)
-		}
-		// TODO: data mutation is done outside cursor. Is this
-		// strictly necessary in this case?
-		// https://godoc.org/github.com/boltdb/bolt#Cursor
-		for _, key := range resetKeys {
-			err := bucket.Put(key, tunnelStatsStateUnreported)
-			if err != nil {
-				return err
+
+		for _, statType := range persistentStatTypes {
+
+			bucket := tx.Bucket([]byte(statType))
+			resetKeys := make([][]byte, 0)
+			cursor := bucket.Cursor()
+			for key, _ := cursor.First(); key != nil; key, _ = cursor.Next() {
+				resetKeys = append(resetKeys, key)
+			}
+			// TODO: data mutation is done outside cursor. Is this
+			// strictly necessary in this case? As is, this means
+			// all stats need to be loaded into memory at once.
+			// https://godoc.org/github.com/boltdb/bolt#Cursor
+			for _, key := range resetKeys {
+				err := bucket.Put(key, persistentStatStateUnreported)
+				if err != nil {
+					return err
+				}
 			}
 		}
+
 		return nil
 	})
 
 	if err != nil {
 		return common.ContextError(err)
 	}
+
 	return nil
 }
+
+// DeleteSLOKs deletes all SLOK records.
+func DeleteSLOKs() error {
+	checkInitDataStore()
+
+	err := singleton.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(slokBucket))
+		return bucket.ForEach(
+			func(id, _ []byte) error {
+				return bucket.Delete(id)
+			})
+	})
+
+	if err != nil {
+		return common.ContextError(err)
+	}
+
+	return nil
+}
+
+// SetSLOK stores a SLOK key, referenced by its ID. The bool
+// return value indicates whether the SLOK was already stored.
+func SetSLOK(id, key []byte) (bool, error) {
+	checkInitDataStore()
+
+	var duplicate bool
+
+	err := singleton.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(slokBucket))
+		duplicate = bucket.Get(id) != nil
+		err := bucket.Put([]byte(id), []byte(key))
+		return err
+	})
+
+	if err != nil {
+		return false, common.ContextError(err)
+	}
+
+	return duplicate, nil
+}
+
+// GetSLOK returns a SLOK key for the specified ID. The return
+// value is nil if the SLOK is not found.
+func GetSLOK(id []byte) (key []byte, err error) {
+	checkInitDataStore()
+
+	err = singleton.db.View(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(slokBucket))
+		key = bucket.Get(id)
+		return nil
+	})
+
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+
+	return key, nil
+}

+ 26 - 8
psiphon/meekConn.go

@@ -105,7 +105,7 @@ type MeekConfig struct {
 // through a CDN.
 type MeekConn struct {
 	url                  *url.URL
-	additionalHeaders    map[string]string
+	additionalHeaders    http.Header
 	cookie               *http.Cookie
 	pendingConns         *common.Conns
 	transport            transporter
@@ -153,6 +153,8 @@ func DialMeek(
 	meekDialConfig.PendingConns = pendingConns
 
 	var transport transporter
+	var additionalHeaders http.Header
+	var proxyUrl func(*http.Request) (*url.URL, error)
 
 	if meekConfig.UseHTTPS {
 		// Custom TLS dialer:
@@ -216,7 +218,6 @@ func DialMeek(
 		// http.Transport will put the HTTP server address in the HTTP
 		// request line. In this one case, we can use an HTTP proxy that does
 		// not offer CONNECT support.
-		var proxyUrl func(*http.Request) (*url.URL, error)
 		if strings.HasPrefix(meekDialConfig.UpstreamProxyUrl, "http://") &&
 			(meekConfig.DialAddress == meekConfig.HostHeader ||
 				meekConfig.DialAddress == meekConfig.HostHeader+":80") {
@@ -257,14 +258,17 @@ func DialMeek(
 		Path:   "/",
 	}
 
-	var additionalHeaders map[string]string
 	if meekConfig.UseHTTPS {
 		host, _, err := net.SplitHostPort(meekConfig.DialAddress)
 		if err != nil {
 			return nil, common.ContextError(err)
 		}
-		additionalHeaders = map[string]string{
-			"X-Psiphon-Fronting-Address": host,
+		additionalHeaders = map[string][]string{
+			"X-Psiphon-Fronting-Address": {host},
+		}
+	} else {
+		if proxyUrl == nil {
+			additionalHeaders = meekDialConfig.UpstreamProxyCustomHeaders
 		}
 	}
 
@@ -574,8 +578,22 @@ func (meek *MeekConn) roundTrip(sendPayload []byte) (io.ReadCloser, error) {
 
 		request.Header.Set("Content-Type", "application/octet-stream")
 
+		// Set additional headers to the HTTP request using the same method we use for adding
+		// custom headers to HTTP proxy requests
 		for name, value := range meek.additionalHeaders {
-			request.Header.Set(name, value)
+			// hack around special case of "Host" header
+			// https://golang.org/src/net/http/request.go#L474
+			// using URL.Opaque, see URL.RequestURI() https://golang.org/src/net/url/url.go#L915
+			if name == "Host" {
+				if len(value) > 0 {
+					if request.URL.Opaque == "" {
+						request.URL.Opaque = request.URL.Scheme + "://" + request.Host + request.URL.RequestURI()
+					}
+					request.Host = value[0]
+				}
+			} else {
+				request.Header[name] = value
+			}
 		}
 
 		request.AddCookie(meek.cookie)
@@ -687,8 +705,8 @@ func makeMeekCookie(meekConfig *MeekConfig) (cookie *http.Cookie, err error) {
 	copy(encryptedCookie[32:], box)
 
 	// Obfuscate the encrypted data
-	obfuscator, err := NewClientObfuscator(
-		&ObfuscatorConfig{Keyword: meekConfig.MeekObfuscatedKey, MaxPadding: MEEK_COOKIE_MAX_PADDING})
+	obfuscator, err := common.NewClientObfuscator(
+		&common.ObfuscatorConfig{Keyword: meekConfig.MeekObfuscatedKey, MaxPadding: MEEK_COOKIE_MAX_PADDING})
 	if err != nil {
 		return nil, common.ContextError(err)
 	}

+ 6 - 2
psiphon/migrateDataStore.go

@@ -21,11 +21,15 @@
 
 package psiphon
 
+import (
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
+)
+
 // Stub function to return an empty list for non-Windows builds
-func prepareMigrationEntries(config *Config) []*ServerEntry {
+func prepareMigrationEntries(config *Config) []*protocol.ServerEntry {
 	return nil
 }
 
 // Stub function to return immediately for non-Windows builds
-func migrateEntries(serverEntries []*ServerEntry, legacyDataStoreFilename string) {
+func migrateEntries(serverEntries []*protocol.ServerEntry, legacyDataStoreFilename string) {
 }

+ 6 - 5
psiphon/migrateDataStore_windows.go

@@ -28,12 +28,13 @@ import (
 
 	_ "github.com/Psiphon-Inc/go-sqlite3"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 )
 
 var legacyDb *sql.DB
 
-func prepareMigrationEntries(config *Config) []*ServerEntry {
-	var migratableServerEntries []*ServerEntry
+func prepareMigrationEntries(config *Config) []*protocol.ServerEntry {
+	var migratableServerEntries []*protocol.ServerEntry
 
 	// If DATA_STORE_FILENAME does not exist on disk
 	if _, err := os.Stat(filepath.Join(config.DataStoreDirectory, DATA_STORE_FILENAME)); os.IsNotExist(err) {
@@ -84,7 +85,7 @@ func prepareMigrationEntries(config *Config) []*ServerEntry {
 // migrateEntries calls the BoltDB data store method to shuffle
 // and store an array of server entries (StoreServerEntries)
 // Failing to migrate entries, or delete the legacy file is never fatal
-func migrateEntries(serverEntries []*ServerEntry, legacyDataStoreFilename string) {
+func migrateEntries(serverEntries []*protocol.ServerEntry, legacyDataStoreFilename string) {
 	checkInitDataStore()
 
 	err := StoreServerEntries(serverEntries, false)
@@ -149,7 +150,7 @@ func (iterator *legacyServerEntryIterator) Close() {
 
 // Next returns the next server entry, by rank, for a legacyServerEntryIterator.
 // Returns nil with no error when there is no next item.
-func (iterator *legacyServerEntryIterator) Next() (serverEntry *ServerEntry, err error) {
+func (iterator *legacyServerEntryIterator) Next() (serverEntry *protocol.ServerEntry, err error) {
 	defer func() {
 		if err != nil {
 			iterator.Close()
@@ -170,7 +171,7 @@ func (iterator *legacyServerEntryIterator) Next() (serverEntry *ServerEntry, err
 	if err != nil {
 		return nil, common.ContextError(err)
 	}
-	serverEntry = new(ServerEntry)
+	serverEntry = new(protocol.ServerEntry)
 	err = json.Unmarshal(data, serverEntry)
 	if err != nil {
 		return nil, common.ContextError(err)

+ 22 - 3
psiphon/net.go

@@ -241,10 +241,14 @@ func ResolveIP(host string, conn net.Conn) (addrs []net.IP, ttls []time.Duration
 // UseIndistinguishableTLS, etc. -- for a specific HTTPS request URL.
 // If verifyLegacyCertificate is not nil, it's used for certificate
 // verification.
+//
 // Because UseIndistinguishableTLS requires a hack to work with
 // net/http, MakeUntunneledHttpClient may return a modified request URL
 // to be used. Callers should always use this return value to make
 // requests, not the input value.
+//
+// MakeUntunneledHttpsClient ignores the input requestUrl scheme,
+// which may be "http" or "https", and always performs HTTPS requests.
 func MakeUntunneledHttpsClient(
 	dialConfig *DialConfig,
 	verifyLegacyCertificate *x509.Certificate,
@@ -352,16 +356,31 @@ func MakeDownloadHttpClient(
 	var err error
 
 	if tunnel != nil {
+		// MakeTunneledHttpClient works with both "http" and "https" schemes
 		httpClient, err = MakeTunneledHttpClient(config, tunnel, requestTimeout)
 		if err != nil {
 			return nil, "", common.ContextError(err)
 		}
 	} else {
-		httpClient, requestUrl, err = MakeUntunneledHttpsClient(
-			untunneledDialConfig, nil, requestUrl, requestTimeout)
+		urlComponents, err := url.Parse(requestUrl)
 		if err != nil {
 			return nil, "", common.ContextError(err)
 		}
+		// MakeUntunneledHttpsClient works only with "https" schemes
+		if urlComponents.Scheme == "https" {
+			httpClient, requestUrl, err = MakeUntunneledHttpsClient(
+				untunneledDialConfig, nil, requestUrl, requestTimeout)
+			if err != nil {
+				return nil, "", common.ContextError(err)
+			}
+		} else {
+			httpClient = &http.Client{
+				Timeout: requestTimeout,
+				Transport: &http.Transport{
+					Dial: NewTCPDialer(untunneledDialConfig),
+				},
+			}
+		}
 	}
 
 	return httpClient, requestUrl, nil
@@ -373,7 +392,7 @@ func MakeDownloadHttpClient(
 // downloadFilename.part and downloadFilename.part.etag.
 // Any existing downloadFilename file will be overwritten.
 //
-// In the case where the remote object has change while a partial download
+// In the case where the remote object has changed while a partial download
 // is to be resumed, the partial state is reset and resumeDownload fails.
 // The caller must restart the download.
 //

+ 13 - 6
psiphon/notice.go

@@ -364,21 +364,28 @@ func NoticeExiting() {
 	outputNotice("Exiting", 0)
 }
 
-// NoticeRemoteServerListDownloadedBytes reports remote server list download progress.
-func NoticeRemoteServerListDownloadedBytes(bytes int64) {
-	outputNotice("RemoteServerListDownloadedBytes", noticeIsDiagnostic, "bytes", bytes)
+// NoticeRemoteServerListResourceDownloadedBytes reports remote server list download progress.
+func NoticeRemoteServerListResourceDownloadedBytes(url string, bytes int64) {
+	outputNotice("RemoteServerListResourceDownloadedBytes", noticeIsDiagnostic, "url", url, "bytes", bytes)
 }
 
-// NoticeRemoteServerListDownloaded indicates that a remote server list download
+// NoticeRemoteServerListResourceDownloaded indicates that a remote server list download
 // completed successfully.
-func NoticeRemoteServerListDownloaded(filename string) {
-	outputNotice("RemoteServerListDownloaded", noticeIsDiagnostic, "filename", filename)
+func NoticeRemoteServerListResourceDownloaded(url string) {
+	outputNotice("RemoteServerListResourceDownloaded", noticeIsDiagnostic, "url", url)
 }
 
 func NoticeClientVerificationRequestCompleted(ipAddress string) {
+	// TODO: remove "Notice" prefix
 	outputNotice("NoticeClientVerificationRequestCompleted", noticeIsDiagnostic, "ipAddress", ipAddress)
 }
 
+// NoticeSLOKSeeded indicates that the SLOK with the specified ID was received from
+// the Psiphon server. The "duplicate" flag indicates whether the SLOK was previously known.
+func NoticeSLOKSeeded(slokID string, duplicate bool) {
+	outputNotice("SLOKSeeded", noticeIsDiagnostic, "slokID", slokID, "duplicate", duplicate)
+}
+
 type repetitiveNoticeState struct {
 	message string
 	repeats int

+ 0 - 80
psiphon/package.go

@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2015, Psiphon Inc.
- * All rights reserved.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-package psiphon
-
-import (
-	"crypto"
-	"crypto/rsa"
-	"crypto/sha256"
-	"crypto/x509"
-	"encoding/base64"
-	"encoding/json"
-	"errors"
-
-	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
-)
-
-// AuthenticatedDataPackage is a JSON record containing some Psiphon data
-// payload, such as list of Psiphon server entries. As it may be downloaded
-// from various sources, it is digitally signed so that the data may be
-// authenticated.
-type AuthenticatedDataPackage struct {
-	Data                   string `json:"data"`
-	SigningPublicKeyDigest string `json:"signingPublicKeyDigest"`
-	Signature              string `json:"signature"`
-}
-
-func ReadAuthenticatedDataPackage(
-	rawPackage []byte, signingPublicKey string) (data string, err error) {
-
-	var authenticatedDataPackage *AuthenticatedDataPackage
-	err = json.Unmarshal(rawPackage, &authenticatedDataPackage)
-	if err != nil {
-		return "", common.ContextError(err)
-	}
-
-	derEncodedPublicKey, err := base64.StdEncoding.DecodeString(signingPublicKey)
-	if err != nil {
-		return "", common.ContextError(err)
-	}
-	publicKey, err := x509.ParsePKIXPublicKey(derEncodedPublicKey)
-	if err != nil {
-		return "", common.ContextError(err)
-	}
-	rsaPublicKey, ok := publicKey.(*rsa.PublicKey)
-	if !ok {
-		return "", common.ContextError(errors.New("unexpected signing public key type"))
-	}
-	signature, err := base64.StdEncoding.DecodeString(authenticatedDataPackage.Signature)
-	if err != nil {
-		return "", common.ContextError(err)
-	}
-	// TODO: can distinguish signed-with-different-key from other errors:
-	// match digest(publicKey) against authenticatedDataPackage.SigningPublicKeyDigest
-	hash := sha256.New()
-	hash.Write([]byte(authenticatedDataPackage.Data))
-	digest := hash.Sum(nil)
-	err = rsa.VerifyPKCS1v15(rsaPublicKey, crypto.SHA256, digest, signature)
-	if err != nil {
-		return "", common.ContextError(err)
-	}
-
-	return authenticatedDataPackage.Data, nil
-}

+ 295 - 48
psiphon/remoteServerList.go

@@ -21,115 +21,362 @@ package psiphon
 
 import (
 	"compress/zlib"
+	"encoding/hex"
+	"errors"
+	"fmt"
 	"io/ioutil"
 	"os"
-	"strings"
 	"time"
 
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/osl"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 )
 
-// FetchRemoteServerList downloads a remote server list JSON record from
-// config.RemoteServerListUrl; validates its digital signature using the
-// public key config.RemoteServerListSignaturePublicKey; and parses the
+type RemoteServerListFetcher func(
+	config *Config, tunnel *Tunnel, untunneledDialConfig *DialConfig) error
+
+// FetchCommonRemoteServerList downloads the common remote server list from
+// config.RemoteServerListUrl. It validates its digital signature using the
+// public key config.RemoteServerListSignaturePublicKey and parses the
 // data field into ServerEntry records.
-func FetchRemoteServerList(
+// config.RemoteServerListDownloadFilename is the location to store the
+// download. As the download is resumed after failure, this filename must
+// be unique and persistent.
+func FetchCommonRemoteServerList(
 	config *Config,
 	tunnel *Tunnel,
 	untunneledDialConfig *DialConfig) error {
 
-	NoticeInfo("fetching remote server list")
-
-	// Select tunneled or untunneled configuration
+	NoticeInfo("fetching common remote server list")
 
-	httpClient, requestUrl, err := MakeDownloadHttpClient(
+	newETag, err := downloadRemoteServerListFile(
 		config,
 		tunnel,
 		untunneledDialConfig,
 		config.RemoteServerListUrl,
-		time.Duration(*config.FetchRemoteServerListTimeoutSeconds)*time.Second)
+		"",
+		config.RemoteServerListDownloadFilename)
 	if err != nil {
-		return common.ContextError(err)
+		return fmt.Errorf("failed to download common remote server list: %s", common.ContextError(err))
 	}
 
-	// Proceed with download
+	// When the resource is unchanged, skip.
+	if newETag == "" {
+		return nil
+	}
 
-	downloadFilename := config.RemoteServerListDownloadFilename
-	if downloadFilename == "" {
-		splitPath := strings.Split(config.RemoteServerListUrl, "/")
-		downloadFilename = splitPath[len(splitPath)-1]
+	serverListPayload, err := unpackRemoteServerListFile(config, config.RemoteServerListDownloadFilename)
+	if err != nil {
+		return fmt.Errorf("failed to unpack common remote server list: %s", common.ContextError(err))
 	}
 
-	lastETag, err := GetUrlETag(config.RemoteServerListUrl)
+	err = storeServerEntries(serverListPayload)
 	if err != nil {
-		return common.ContextError(err)
+		return fmt.Errorf("failed to store common remote server list: %s", common.ContextError(err))
+	}
+
+	// Now that the server entries are successfully imported, store the response
+	// ETag so we won't re-download this same data again.
+	err = SetUrlETag(config.RemoteServerListUrl, newETag)
+	if err != nil {
+		NoticeAlert("failed to set ETag for common remote server list: %s", common.ContextError(err))
+		// This fetch is still reported as a success, even if we can't store the etag
+	}
+
+	return nil
+}
+
+// FetchObfuscatedServerLists downloads the obfuscated remote server lists
+// from config.ObfuscatedServerListRootURL.
+// It first downloads the OSL registry, and then downloads each seeded OSL
+// advertised in the registry. All downloads are resumable, ETags are used
+// to skip both an unchanged registry or unchanged OSL files, and when an
+// individual download fails, the fetch proceeds if it can.
+// Authenticated package digital signatures are validated using the
+// public key config.RemoteServerListSignaturePublicKey.
+// config.ObfuscatedServerListDownloadDirectory is the location to store the
+// downloaded files. As downloads are resumed after failure, this directory
+// must be unique and persistent.
+func FetchObfuscatedServerLists(
+	config *Config,
+	tunnel *Tunnel,
+	untunneledDialConfig *DialConfig) error {
+
+	NoticeInfo("fetching obfuscated remote server lists")
+
+	downloadFilename := osl.GetOSLRegistryFilename(config.ObfuscatedServerListDownloadDirectory)
+	downloadURL := osl.GetOSLRegistryURL(config.ObfuscatedServerListRootURL)
+
+	// failed is set if any operation fails and should trigger a retry. When the OSL registry
+	// fails to download, any cached registry is used instead; when any single OSL fails
+	// to download, the overall operation proceeds. So this flag records whether to report
+	// failure at the end when downloading has proceeded after a failure.
+	// TODO: should disk-full conditions not trigger retries?
+	var failed bool
+
+	var oslRegistry *osl.Registry
+
+	newETag, err := downloadRemoteServerListFile(
+		config,
+		tunnel,
+		untunneledDialConfig,
+		downloadURL,
+		"",
+		downloadFilename)
+	if err != nil {
+		failed = true
+		NoticeAlert("failed to download obfuscated server list registry: %s", common.ContextError(err))
+	} else if newETag != "" {
+
+		fileContent, err := ioutil.ReadFile(downloadFilename)
+		if err != nil {
+			failed = true
+			NoticeAlert("failed to read obfuscated server list registry: %s", common.ContextError(err))
+		}
+
+		var oslRegistryJSON []byte
+		if err == nil {
+			oslRegistry, oslRegistryJSON, err = osl.UnpackRegistry(
+				fileContent, config.RemoteServerListSignaturePublicKey)
+			if err != nil {
+				failed = true
+				NoticeAlert("failed to unpack obfuscated server list registry: %s", common.ContextError(err))
+			}
+		}
+
+		if err == nil {
+			err = SetKeyValue(DATA_STORE_OSL_REGISTRY_KEY, string(oslRegistryJSON))
+			if err != nil {
+				failed = true
+				NoticeAlert("failed to set cached obfuscated server list registry: %s", common.ContextError(err))
+			}
+		}
+	}
+
+	if failed || newETag == "" {
+		// Proceed with the cached OSL registry.
+		oslRegistryJSON, err := GetKeyValue(DATA_STORE_OSL_REGISTRY_KEY)
+		if err == nil && oslRegistryJSON == "" {
+			err = errors.New("not found")
+		}
+		if err != nil {
+			return fmt.Errorf("failed to get cached obfuscated server list registry: %s", common.ContextError(err))
+		}
+
+		oslRegistry, err = osl.LoadRegistry([]byte(oslRegistryJSON))
+		if err != nil {
+			return fmt.Errorf("failed to load obfuscated server list registry: %s", common.ContextError(err))
+		}
+	}
+
+	// When a new registry is downloaded, validated, and parsed, store the
+	// response ETag so we won't re-download this same data again.
+	if !failed && newETag != "" {
+		err = SetUrlETag(downloadURL, newETag)
+		if err != nil {
+			NoticeAlert("failed to set ETag for obfuscated server list registry: %s", common.ContextError(err))
+			// This fetch is still reported as a success, even if we can't store the etag
+		}
+	}
+
+	// Note: we proceed to check individual OSLs even if the directory is unchanged,
+	// as the set of local SLOKs may have changed.
+
+	lookupSLOKs := func(slokID []byte) []byte {
+		// Lookup SLOKs in local datastore
+		key, err := GetSLOK(slokID)
+		if err != nil {
+			NoticeAlert("GetSLOK failed: %s", err)
+		}
+		return key
+	}
+
+	oslIDs := oslRegistry.GetSeededOSLIDs(
+		lookupSLOKs,
+		func(err error) {
+			NoticeAlert("GetSeededOSLIDs failed: %s", err)
+		})
+
+	for _, oslID := range oslIDs {
+		downloadFilename := osl.GetOSLFilename(config.ObfuscatedServerListDownloadDirectory, oslID)
+		downloadURL := osl.GetOSLFileURL(config.ObfuscatedServerListRootURL, oslID)
+		hexID := hex.EncodeToString(oslID)
+
+		// Note: the MD5 checksum step assumes the remote server list host's ETag uses MD5
+		// with a hex encoding. If this is not the case, the remoteETag should be left blank.
+		remoteETag := ""
+		md5sum, err := oslRegistry.GetOSLMD5Sum(oslID)
+		if err == nil {
+			remoteETag = hex.EncodeToString(md5sum)
+		}
+
+		// TODO: store ETags in OSL registry to enable skipping requests entirely
+
+		newETag, err := downloadRemoteServerListFile(
+			config,
+			tunnel,
+			untunneledDialConfig,
+			downloadURL,
+			remoteETag,
+			downloadFilename)
+		if err != nil {
+			failed = true
+			NoticeAlert("failed to download obfuscated server list file (%s): %s", hexID, common.ContextError(err))
+			continue
+		}
+
+		// When the resource is unchanged, skip.
+		if newETag == "" {
+			continue
+		}
+
+		fileContent, err := ioutil.ReadFile(downloadFilename)
+		if err != nil {
+			failed = true
+			NoticeAlert("failed to read obfuscated server list file (%s): %s", hexID, common.ContextError(err))
+			continue
+		}
+
+		serverListPayload, err := oslRegistry.UnpackOSL(
+			lookupSLOKs, oslID, fileContent, config.RemoteServerListSignaturePublicKey)
+		if err != nil {
+			failed = true
+			NoticeAlert("failed to unpack obfuscated server list file (%s): %s", hexID, common.ContextError(err))
+			continue
+		}
+
+		err = storeServerEntries(serverListPayload)
+		if err != nil {
+			failed = true
+			NoticeAlert("failed to store obfuscated server list file (%s): %s", hexID, common.ContextError(err))
+			continue
+		}
+
+		// Now that the server entries are successfully imported, store the response
+		// ETag so we won't re-download this same data again.
+		err = SetUrlETag(downloadURL, newETag)
+		if err != nil {
+			failed = true
+			NoticeAlert("failed to set Etag for obfuscated server list file (%s): %s", hexID, common.ContextError(err))
+			continue
+			// This fetch is still reported as a success, even if we can't store the etag
+		}
+	}
+
+	if failed {
+		return errors.New("one or more operations failed")
+	}
+	return nil
+}
+
+// downloadRemoteServerListFile downloads the source URL to
+// the destination file, performing a resumable download. When
+// the download completes and the file content has changed, the
+// new resource ETag is returned. Otherwise, blank is returned.
+// The caller is responsible for calling SetUrlETag once the file
+// content has been validated.
+func downloadRemoteServerListFile(
+	config *Config,
+	tunnel *Tunnel,
+	untunneledDialConfig *DialConfig,
+	sourceURL, sourceETag, destinationFilename string) (string, error) {
+
+	lastETag, err := GetUrlETag(sourceURL)
+	if err != nil {
+		return "", common.ContextError(err)
+	}
+
+	// sourceETag, when specified, is prior knowledge of the
+	// remote ETag that can be used to skip the request entirely.
+	// This will be set in the case of OSL files, from the MD5Sum
+	// values stored in the registry.
+	if lastETag != "" && sourceETag == lastETag {
+		// TODO: notice?
+		return "", nil
+	}
+
+	// MakeDownloadHttpClient will select either a tunneled
+	// or untunneled configuration.
+
+	httpClient, requestURL, err := MakeDownloadHttpClient(
+		config,
+		tunnel,
+		untunneledDialConfig,
+		sourceURL,
+		time.Duration(*config.FetchRemoteServerListTimeoutSeconds)*time.Second)
+	if err != nil {
+		return "", common.ContextError(err)
 	}
 
 	n, responseETag, err := ResumeDownload(
-		httpClient, requestUrl, downloadFilename, lastETag)
+		httpClient, requestURL, destinationFilename, lastETag)
 
-	NoticeRemoteServerListDownloadedBytes(n)
+	NoticeRemoteServerListResourceDownloadedBytes(sourceURL, n)
 
 	if err != nil {
-		return common.ContextError(err)
+		return "", common.ContextError(err)
 	}
 
 	if responseETag == lastETag {
-		// The remote server list is unchanged and no data was downloaded
-		return nil
+		return "", nil
 	}
 
-	NoticeRemoteServerListDownloaded(downloadFilename)
+	NoticeRemoteServerListResourceDownloaded(sourceURL)
 
-	// The downloaded content is a zlib compressed authenticated
-	// data package containing a list of encoded server entries.
+	RecordRemoteServerListStat(sourceURL, responseETag)
 
-	downloadContent, err := os.Open(downloadFilename)
+	return responseETag, nil
+}
+
+// unpackRemoteServerListFile reads a file that contains a
+// zlib compressed authenticated data package, validates
+// the package, and returns the payload.
+func unpackRemoteServerListFile(
+	config *Config, filename string) (string, error) {
+
+	fileReader, err := os.Open(filename)
 	if err != nil {
-		return common.ContextError(err)
+		return "", common.ContextError(err)
 	}
-	defer downloadContent.Close()
+	defer fileReader.Close()
 
-	zlibReader, err := zlib.NewReader(downloadContent)
+	zlibReader, err := zlib.NewReader(fileReader)
 	if err != nil {
-		return common.ContextError(err)
+		return "", common.ContextError(err)
 	}
 
 	dataPackage, err := ioutil.ReadAll(zlibReader)
 	zlibReader.Close()
 	if err != nil {
-		return common.ContextError(err)
+		return "", common.ContextError(err)
 	}
 
-	remoteServerList, err := ReadAuthenticatedDataPackage(
+	payload, err := common.ReadAuthenticatedDataPackage(
 		dataPackage, config.RemoteServerListSignaturePublicKey)
 	if err != nil {
-		return common.ContextError(err)
+		return "", common.ContextError(err)
 	}
 
-	serverEntries, err := DecodeAndValidateServerEntryList(
-		remoteServerList,
+	return payload, nil
+}
+
+func storeServerEntries(serverList string) error {
+
+	serverEntries, err := protocol.DecodeAndValidateServerEntryList(
+		serverList,
 		common.GetCurrentTimestamp(),
-		common.SERVER_ENTRY_SOURCE_REMOTE)
+		protocol.SERVER_ENTRY_SOURCE_REMOTE)
 	if err != nil {
 		return common.ContextError(err)
 	}
 
+	// TODO: record stats for newly discovered servers
+
 	err = StoreServerEntries(serverEntries, true)
 	if err != nil {
 		return common.ContextError(err)
 	}
 
-	// Now that the server entries are successfully imported, store the response
-	// ETag so we won't re-download this same data again.
-
-	if responseETag != "" {
-		err := SetUrlETag(config.RemoteServerListUrl, responseETag)
-		if err != nil {
-			NoticeAlert("failed to set remote server list ETag: %s", common.ContextError(err))
-			// This fetch is still reported as a success, even if we can't store the etag
-		}
-	}
-
 	return nil
 }

+ 370 - 0
psiphon/remoteServerList_test.go

@@ -0,0 +1,370 @@
+/*
+ * Copyright (c) 2016, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package psiphon
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+	"path/filepath"
+	"sync"
+	"testing"
+	"time"
+
+	socks "github.com/Psiphon-Inc/goptlib"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/osl"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/server"
+)
+
+// TODO: TestCommonRemoteServerList (this is currently covered by controller_test.go)
+
+func TestObfuscatedRemoteServerLists(t *testing.T) {
+
+	testDataDirName, err := ioutil.TempDir("", "psiphon-remote-server-list-test")
+	if err != nil {
+		t.Fatalf("TempDir failed: %s", err)
+	}
+	defer os.RemoveAll(testDataDirName)
+
+	//
+	// create a server
+	//
+
+	serverIPaddress := ""
+	for _, interfaceName := range []string{"eth0", "en0"} {
+		serverIPaddress, err = common.GetInterfaceIPAddress(interfaceName)
+		if err == nil {
+			break
+		}
+	}
+	if err != nil {
+		t.Fatalf("error getting server IP address: %s", err)
+	}
+
+	serverConfigJSON, _, encodedServerEntry, err := server.GenerateConfig(
+		&server.GenerateConfigParams{
+			ServerIPAddress:      serverIPaddress,
+			EnableSSHAPIRequests: true,
+			WebServerPort:        8001,
+			TunnelProtocolPorts:  map[string]int{"OSSH": 4001},
+		})
+	if err != nil {
+		t.Fatalf("error generating server config: %s", err)
+	}
+
+	//
+	// pave OSLs
+	//
+
+	oslConfigJSONTemplate := `
+    {
+      "Schemes" : [
+        {
+          "Epoch" : "%s",
+          "Regions" : [],
+          "PropagationChannelIDs" : ["%s"],
+          "MasterKey" : "vwab2WY3eNyMBpyFVPtsivMxF4MOpNHM/T7rHJIXctg=",
+          "SeedSpecs" : [
+            {
+              "ID" : "KuP2V6gLcROIFzb/27fUVu4SxtEfm2omUoISlrWv1mA=",
+              "UpstreamSubnets" : ["0.0.0.0/0"],
+              "Targets" :
+              {
+                  "BytesRead" : 1,
+                  "BytesWritten" : 1,
+                  "PortForwardDurationNanoseconds" : 1
+              }
+            }
+          ],
+          "SeedSpecThreshold" : 1,
+          "SeedPeriodNanoseconds" : %d,
+          "SeedPeriodKeySplits": [
+            {
+              "Total": 1,
+              "Threshold": 1
+            }
+          ]
+        }
+      ]
+    }`
+
+	now := time.Now().UTC()
+	seedPeriod := 24 * time.Hour
+	epoch := now.Truncate(seedPeriod)
+	epochStr := epoch.Format(time.RFC3339Nano)
+
+	propagationChannelID, _ := common.MakeRandomStringHex(8)
+
+	oslConfigJSON := fmt.Sprintf(
+		oslConfigJSONTemplate,
+		epochStr,
+		propagationChannelID,
+		seedPeriod)
+
+	oslConfig, err := osl.LoadConfig([]byte(oslConfigJSON))
+	if err != nil {
+		t.Fatalf("error loading OSL config: %s", err)
+	}
+
+	signingPublicKey, signingPrivateKey, err := common.GenerateAuthenticatedDataPackageKeys()
+	if err != nil {
+		t.Fatalf("error generating package keys: %s", err)
+	}
+
+	paveFiles, err := oslConfig.Pave(
+		epoch,
+		propagationChannelID,
+		signingPublicKey,
+		signingPrivateKey,
+		[]map[time.Time]string{
+			map[time.Time]string{
+				epoch: string(encodedServerEntry),
+			},
+		})
+	if err != nil {
+		t.Fatalf("error paving OSL files: %s", err)
+	}
+
+	//
+	// mock seeding SLOKs
+	//
+
+	singleton = dataStore{}
+	os.Remove(filepath.Join(testDataDirName, DATA_STORE_FILENAME))
+
+	err = InitDataStore(&Config{DataStoreDirectory: testDataDirName})
+	if err != nil {
+		t.Fatalf("error initializing client datastore: %s", err)
+	}
+
+	if CountServerEntries("", "") > 0 {
+		t.Fatalf("unexpected server entries")
+	}
+
+	seedState := oslConfig.NewClientSeedState("", propagationChannelID, nil)
+	seedPortForward := seedState.NewClientSeedPortForward(net.ParseIP("0.0.0.0"))
+	seedPortForward.UpdateProgress(1, 1, 1)
+	payload := seedState.GetSeedPayload()
+	if len(payload.SLOKs) != 1 {
+		t.Fatalf("expected 1 SLOKs, got %d", len(payload.SLOKs))
+	}
+
+	SetSLOK(payload.SLOKs[0].ID, payload.SLOKs[0].Key)
+
+	//
+	// run mock remote server list host
+	//
+
+	remoteServerListHostAddress := net.JoinHostPort(serverIPaddress, "8081")
+
+	// The common remote server list fetches will 404
+	remoteServerListURL := fmt.Sprintf("http://%s/server_list_compressed", remoteServerListHostAddress)
+	remoteServerListDownloadFilename := filepath.Join(testDataDirName, "server_list_compressed")
+
+	obfuscatedServerListRootURL := fmt.Sprintf("http://%s/", remoteServerListHostAddress)
+	obfuscatedServerListDownloadDirectory := testDataDirName
+
+	go func() {
+		startTime := time.Now()
+		serveMux := http.NewServeMux()
+		for _, paveFile := range paveFiles {
+			file := paveFile
+			serveMux.HandleFunc("/"+file.Name, func(w http.ResponseWriter, req *http.Request) {
+				md5sum := md5.Sum(file.Contents)
+				w.Header().Add("Content-Type", "application/octet-stream")
+				w.Header().Add("ETag", hex.EncodeToString(md5sum[:]))
+				http.ServeContent(w, req, file.Name, startTime, bytes.NewReader(file.Contents))
+			})
+		}
+		httpServer := &http.Server{
+			Addr:    remoteServerListHostAddress,
+			Handler: serveMux,
+		}
+		err := httpServer.ListenAndServe()
+		if err != nil {
+			// TODO: t.Fatalf calls FailNow, which must run in the test goroutine, not here
+			t.Fatalf("error running remote server list host: %s", err)
+
+		}
+	}()
+
+	//
+	// run Psiphon server
+	//
+
+	go func() {
+		err := server.RunServices(serverConfigJSON)
+		if err != nil {
+			// TODO: t.Fatalf calls FailNow, which must run in the test goroutine, not here
+			t.Fatalf("error running server: %s", err)
+		}
+	}()
+
+	//
+	// disrupt remote server list downloads
+	//
+
+	disruptorProxyAddress := "127.0.0.1:2162"
+	disruptorProxyURL := "socks4a://" + disruptorProxyAddress
+
+	go func() {
+		listener, err := socks.ListenSocks("tcp", disruptorProxyAddress)
+		if err != nil {
+			fmt.Errorf("disruptor proxy listen error: %s", err)
+			return
+		}
+		for {
+			localConn, err := listener.AcceptSocks()
+			if err != nil {
+				fmt.Errorf("disruptor proxy accept error: %s", err)
+				return
+			}
+			go func() {
+				remoteConn, err := net.Dial("tcp", localConn.Req.Target)
+				if err != nil {
+					fmt.Errorf("disruptor proxy dial error: %s", err)
+					return
+				}
+				err = localConn.Grant(&net.TCPAddr{IP: net.ParseIP("0.0.0.0"), Port: 0})
+				if err != nil {
+					fmt.Errorf("disruptor proxy grant error: %s", err)
+					return
+				}
+
+				waitGroup := new(sync.WaitGroup)
+				waitGroup.Add(1)
+				go func() {
+					defer waitGroup.Done()
+					io.Copy(remoteConn, localConn)
+				}()
+				if localConn.Req.Target == remoteServerListHostAddress {
+					io.CopyN(localConn, remoteConn, 500)
+				} else {
+					io.Copy(localConn, remoteConn)
+				}
+				localConn.Close()
+				remoteConn.Close()
+				waitGroup.Wait()
+			}()
+		}
+	}()
+
+	//
+	// connect to Psiphon server with Psiphon client
+	//
+
+	SetEmitDiagnosticNotices(true)
+
+	// Note: calling LoadConfig ensures all *int config fields are initialized
+	clientConfigJSONTemplate := `
+    {
+        "ClientPlatform" : "",
+        "ClientVersion" : "0",
+        "SponsorId" : "0",
+        "PropagationChannelId" : "0",
+        "ConnectionPoolSize" : 1,
+        "EstablishTunnelPausePeriodSeconds" : 1,
+        "FetchRemoteServerListRetryPeriodSeconds" : 1,
+		"RemoteServerListSignaturePublicKey" : "%s",
+		"RemoteServerListUrl" : "%s",
+		"RemoteServerListDownloadFilename" : "%s",
+		"ObfuscatedServerListRootURL" : "%s",
+		"ObfuscatedServerListDownloadDirectory" : "%s",
+		"UpstreamProxyUrl" : "%s"
+    }`
+
+	clientConfigJSON := fmt.Sprintf(
+		clientConfigJSONTemplate,
+		signingPublicKey,
+		remoteServerListURL,
+		remoteServerListDownloadFilename,
+		obfuscatedServerListRootURL,
+		obfuscatedServerListDownloadDirectory,
+		disruptorProxyURL)
+
+	clientConfig, _ := LoadConfig([]byte(clientConfigJSON))
+
+	controller, err := NewController(clientConfig)
+	if err != nil {
+		t.Fatalf("error creating client controller: %s", err)
+	}
+
+	tunnelEstablished := make(chan struct{}, 1)
+
+	SetNoticeOutput(NewNoticeReceiver(
+		func(notice []byte) {
+
+			noticeType, payload, err := GetNotice(notice)
+			if err != nil {
+				return
+			}
+
+			printNotice := false
+
+			switch noticeType {
+			case "Tunnels":
+				printNotice = true
+				count := int(payload["count"].(float64))
+				if count == 1 {
+					tunnelEstablished <- *new(struct{})
+				}
+			case "RemoteServerListResourceDownloadedBytes":
+				// TODO: check for resumed download for each URL
+				//url := payload["url"].(string)
+				printNotice = true
+			case "RemoteServerListResourceDownloaded":
+				printNotice = true
+			}
+
+			if printNotice {
+				fmt.Printf("%s\n", string(notice))
+			}
+		}))
+
+	go func() {
+		controller.Run(make(chan struct{}))
+	}()
+
+	establishTimeout := time.NewTimer(30 * time.Second)
+	select {
+	case <-tunnelEstablished:
+	case <-establishTimeout.C:
+		t.Fatalf("tunnel establish timeout exceeded")
+	}
+
+	for _, paveFile := range paveFiles {
+		u, _ := url.Parse(obfuscatedServerListRootURL)
+		u.Path = path.Join(u.Path, paveFile.Name)
+		etag, _ := GetUrlETag(u.String())
+		md5sum := md5.Sum(paveFile.Contents)
+		if etag != hex.EncodeToString(md5sum[:]) {
+			t.Fatalf("unexpected ETag for %s", u)
+		}
+	}
+}

+ 72 - 17
psiphon/server/api.go

@@ -31,18 +31,20 @@ import (
 	"unicode"
 
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 )
 
 const (
 	MAX_API_PARAMS_SIZE = 256 * 1024 // 256KB
 
-	CLIENT_VERIFICATION_REQUIRED    = true
 	CLIENT_VERIFICATION_TTL_SECONDS = 60 * 60 * 24 * 7 // 7 days
 
 	CLIENT_PLATFORM_ANDROID = "Android"
 	CLIENT_PLATFORM_WINDOWS = "Windows"
 )
 
+var CLIENT_VERIFICATION_REQUIRED = false
+
 type requestJSONObject map[string]interface{}
 
 // sshAPIRequestHandler routes Psiphon API requests transported as
@@ -74,7 +76,7 @@ func sshAPIRequestHandler(
 
 	return dispatchAPIRequestHandler(
 		support,
-		common.PSIPHON_SSH_API_PROTOCOL,
+		protocol.PSIPHON_SSH_API_PROTOCOL,
 		geoIPData,
 		name,
 		params)
@@ -102,13 +104,13 @@ func dispatchAPIRequestHandler(
 	}()
 
 	switch name {
-	case common.PSIPHON_API_HANDSHAKE_REQUEST_NAME:
+	case protocol.PSIPHON_API_HANDSHAKE_REQUEST_NAME:
 		return handshakeAPIRequestHandler(support, apiProtocol, geoIPData, params)
-	case common.PSIPHON_API_CONNECTED_REQUEST_NAME:
+	case protocol.PSIPHON_API_CONNECTED_REQUEST_NAME:
 		return connectedAPIRequestHandler(support, geoIPData, params)
-	case common.PSIPHON_API_STATUS_REQUEST_NAME:
+	case protocol.PSIPHON_API_STATUS_REQUEST_NAME:
 		return statusAPIRequestHandler(support, geoIPData, params)
-	case common.PSIPHON_API_CLIENT_VERIFICATION_REQUEST_NAME:
+	case protocol.PSIPHON_API_CLIENT_VERIFICATION_REQUEST_NAME:
 		return clientVerificationAPIRequestHandler(support, geoIPData, params)
 	}
 
@@ -168,9 +170,11 @@ func handshakeAPIRequestHandler(
 
 	// Note: no guarantee that PsinetDatabase won't reload between database calls
 	db := support.PsinetDatabase
-	handshakeResponse := common.HandshakeResponse{
+	handshakeResponse := protocol.HandshakeResponse{
+		SSHSessionID:         sessionID,
 		Homepages:            db.GetRandomHomepage(sponsorID, geoIPData.Country, isMobile),
 		UpgradeClientVersion: db.GetUpgradeClientVersion(clientVersion, normalizedPlatform),
+		PageViewRegexes:      make([]map[string]string, 0),
 		HttpsRequestRegexes:  db.GetHttpsRequestRegexes(sponsorID),
 		EncodedServerList:    db.DiscoverServers(geoIPData.DiscoveryValue),
 		ClientRegion:         geoIPData.Country,
@@ -214,7 +218,7 @@ func connectedAPIRequestHandler(
 			params,
 			connectedRequestParams))
 
-	connectedResponse := common.ConnectedResponse{
+	connectedResponse := protocol.ConnectedResponse{
 		ConnectedTimestamp: common.TruncateTimestampToHour(common.GetCurrentTimestamp()),
 	}
 
@@ -253,6 +257,13 @@ func statusAPIRequestHandler(
 		return nil, common.ContextError(err)
 	}
 
+	// Logs are queued until the input is fully validated. Otherwise, stats
+	// could be double counted if the client has a bug in its request
+	// formatting: partial stats would be logged (counted), the request would
+	// fail, and clients would then resend all the same stats again.
+
+	logQueue := make([]LogFields, 0)
+
 	// Overall bytes transferred stats
 
 	bytesTransferred, err := getInt64RequestParam(statusData, "bytes_transferred")
@@ -262,7 +273,7 @@ func statusAPIRequestHandler(
 	bytesTransferredFields := getRequestLogFields(
 		support, "bytes_transferred", geoIPData, params, statusRequestParams)
 	bytesTransferredFields["bytes"] = bytesTransferred
-	log.LogRawFieldsWithTimestamp(bytesTransferredFields)
+	logQueue = append(logQueue, bytesTransferredFields)
 
 	// Domain bytes transferred stats
 	// Older clients may not submit this data
@@ -273,12 +284,15 @@ func statusAPIRequestHandler(
 		if err != nil {
 			return nil, common.ContextError(err)
 		}
-		domainBytesFields := getRequestLogFields(
-			support, "domain_bytes", geoIPData, params, statusRequestParams)
 		for domain, bytes := range hostBytes {
+
+			domainBytesFields := getRequestLogFields(
+				support, "domain_bytes", geoIPData, params, statusRequestParams)
+
 			domainBytesFields["domain"] = domain
 			domainBytesFields["bytes"] = bytes
-			log.LogRawFieldsWithTimestamp(domainBytesFields)
+
+			logQueue = append(logQueue, domainBytesFields)
 		}
 	}
 
@@ -291,10 +305,11 @@ func statusAPIRequestHandler(
 		if err != nil {
 			return nil, common.ContextError(err)
 		}
-		sessionFields := getRequestLogFields(
-			support, "session", geoIPData, params, statusRequestParams)
 		for _, tunnelStat := range tunnelStats {
 
+			sessionFields := getRequestLogFields(
+				support, "session", geoIPData, params, statusRequestParams)
+
 			sessionID, err := getStringRequestParam(tunnelStat, "session_id")
 			if err != nil {
 				return nil, common.ContextError(err)
@@ -357,10 +372,50 @@ func statusAPIRequestHandler(
 			}
 			sessionFields["total_bytes_received"] = totalBytesReceived
 
-			log.LogRawFieldsWithTimestamp(sessionFields)
+			logQueue = append(logQueue, sessionFields)
+		}
+	}
+
+	// Remote server list download stats
+	// Older clients may not submit this data
+
+	if statusData["remote_server_list_stats"] != nil {
+
+		remoteServerListStats, err := getJSONObjectArrayRequestParam(statusData, "remote_server_list_stats")
+		if err != nil {
+			return nil, common.ContextError(err)
+		}
+		for _, remoteServerListStat := range remoteServerListStats {
+
+			remoteServerListFields := getRequestLogFields(
+				support, "remote_server_list", geoIPData, params, statusRequestParams)
+
+			clientDownloadTimestamp, err := getStringRequestParam(remoteServerListStat, "client_download_timestamp")
+			if err != nil {
+				return nil, common.ContextError(err)
+			}
+			remoteServerListFields["client_download_timestamp"] = clientDownloadTimestamp
+
+			url, err := getStringRequestParam(remoteServerListStat, "url")
+			if err != nil {
+				return nil, common.ContextError(err)
+			}
+			remoteServerListFields["url"] = url
+
+			etag, err := getStringRequestParam(remoteServerListStat, "etag")
+			if err != nil {
+				return nil, common.ContextError(err)
+			}
+			remoteServerListFields["etag"] = etag
+
+			logQueue = append(logQueue, remoteServerListFields)
 		}
 	}
 
+	for _, logItem := range logQueue {
+		log.LogRawFieldsWithTimestamp(logItem)
+	}
+
 	return make([]byte, 0), nil
 }
 
@@ -773,7 +828,7 @@ func isClientPlatform(_ *SupportServices, value string) bool {
 }
 
 func isRelayProtocol(_ *SupportServices, value string) bool {
-	return common.Contains(common.SupportedTunnelProtocols, value)
+	return common.Contains(protocol.SupportedTunnelProtocols, value)
 }
 
 func isBooleanFlag(_ *SupportServices, value string) bool {
@@ -855,7 +910,7 @@ func isHostHeader(support *SupportServices, value string) bool {
 }
 
 func isServerEntrySource(_ *SupportServices, value string) bool {
-	return common.Contains(common.SupportedServerEntrySources, value)
+	return common.Contains(protocol.SupportedServerEntrySources, value)
 }
 
 var isISO8601DateRegex = regexp.MustCompile(

+ 25 - 22
psiphon/server/config.go

@@ -34,8 +34,8 @@ import (
 
 	"github.com/Psiphon-Inc/crypto/nacl/box"
 	"github.com/Psiphon-Inc/crypto/ssh"
-	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 )
 
 const (
@@ -67,7 +67,7 @@ type Config struct {
 	// used to determine a unique discovery strategy.
 	DiscoveryValueHMACKey string
 
-	// GeoIPDatabaseFilenames ares paths of GeoIP2/GeoLite2
+	// GeoIPDatabaseFilenames are paths of GeoIP2/GeoLite2
 	// MaxMind database files. When empty, no GeoIP lookups are
 	// performed. Each file is queried, in order, for the
 	// logged fields: country code, city, and ISP. Multiple
@@ -221,10 +221,13 @@ type Config struct {
 	// CPU profiling. For the default, 0, no CPU profile is taken.
 	ProcessCPUProfileDurationSeconds int
 
-	// TrafficRulesFilename is the path of a file containing a
-	// JSON-encoded TrafficRulesSet, the traffic rules to apply to
-	// Psiphon client tunnels.
+	// TrafficRulesFilename is the path of a file containing a JSON-encoded
+	// TrafficRulesSet, the traffic rules to apply to Psiphon client tunnels.
 	TrafficRulesFilename string
+
+	// OSLConfigFilename is the path of a file containing a JSON-encoded
+	// OSL Config, the OSL schemes to apply to Psiphon client tunnels.
+	OSLConfigFilename string
 }
 
 // RunWebServer indicates whether to run a web server component.
@@ -276,11 +279,11 @@ func LoadConfig(configJSON []byte) (*Config, error) {
 	}
 
 	for tunnelProtocol, _ := range config.TunnelProtocolPorts {
-		if !common.Contains(common.SupportedTunnelProtocols, tunnelProtocol) {
+		if !common.Contains(protocol.SupportedTunnelProtocols, tunnelProtocol) {
 			return nil, fmt.Errorf("Unsupported tunnel protocol: %s", tunnelProtocol)
 		}
-		if common.TunnelProtocolUsesSSH(tunnelProtocol) ||
-			common.TunnelProtocolUsesObfuscatedSSH(tunnelProtocol) {
+		if protocol.TunnelProtocolUsesSSH(tunnelProtocol) ||
+			protocol.TunnelProtocolUsesObfuscatedSSH(tunnelProtocol) {
 			if config.SSHPrivateKey == "" || config.SSHServerVersion == "" ||
 				config.SSHUserName == "" || config.SSHPassword == "" {
 				return nil, fmt.Errorf(
@@ -288,22 +291,22 @@ func LoadConfig(configJSON []byte) (*Config, error) {
 					tunnelProtocol)
 			}
 		}
-		if common.TunnelProtocolUsesObfuscatedSSH(tunnelProtocol) {
+		if protocol.TunnelProtocolUsesObfuscatedSSH(tunnelProtocol) {
 			if config.ObfuscatedSSHKey == "" {
 				return nil, fmt.Errorf(
 					"Tunnel protocol %s requires ObfuscatedSSHKey",
 					tunnelProtocol)
 			}
 		}
-		if common.TunnelProtocolUsesMeekHTTP(tunnelProtocol) ||
-			common.TunnelProtocolUsesMeekHTTPS(tunnelProtocol) {
+		if protocol.TunnelProtocolUsesMeekHTTP(tunnelProtocol) ||
+			protocol.TunnelProtocolUsesMeekHTTPS(tunnelProtocol) {
 			if config.MeekCookieEncryptionPrivateKey == "" || config.MeekObfuscatedKey == "" {
 				return nil, fmt.Errorf(
 					"Tunnel protocol %s requires MeekCookieEncryptionPrivateKey, MeekObfuscatedKey",
 					tunnelProtocol)
 			}
 		}
-		if common.TunnelProtocolUsesMeekHTTPS(tunnelProtocol) {
+		if protocol.TunnelProtocolUsesMeekHTTPS(tunnelProtocol) {
 			if config.MeekCertificateCommonName == "" {
 				return nil, fmt.Errorf(
 					"Tunnel protocol %s requires MeekCertificateCommonName",
@@ -382,9 +385,9 @@ func GenerateConfig(params *GenerateConfigParams) ([]byte, []byte, []byte, error
 
 	usingMeek := false
 
-	for protocol, port := range params.TunnelProtocolPorts {
+	for tunnelProtocol, port := range params.TunnelProtocolPorts {
 
-		if !common.Contains(common.SupportedTunnelProtocols, protocol) {
+		if !common.Contains(protocol.SupportedTunnelProtocols, tunnelProtocol) {
 			return nil, nil, nil, common.ContextError(errors.New("invalid tunnel protocol"))
 		}
 
@@ -393,8 +396,8 @@ func GenerateConfig(params *GenerateConfigParams) ([]byte, []byte, []byte, error
 		}
 		usedPort[port] = true
 
-		if common.TunnelProtocolUsesMeekHTTP(protocol) ||
-			common.TunnelProtocolUsesMeekHTTPS(protocol) {
+		if protocol.TunnelProtocolUsesMeekHTTP(tunnelProtocol) ||
+			protocol.TunnelProtocolUsesMeekHTTPS(tunnelProtocol) {
 			usingMeek = true
 		}
 	}
@@ -559,15 +562,15 @@ func GenerateConfig(params *GenerateConfigParams) ([]byte, []byte, []byte, error
 	capabilities := []string{}
 
 	if params.EnableSSHAPIRequests {
-		capabilities = append(capabilities, common.CAPABILITY_SSH_API_REQUESTS)
+		capabilities = append(capabilities, protocol.CAPABILITY_SSH_API_REQUESTS)
 	}
 
 	if params.WebServerPort != 0 {
-		capabilities = append(capabilities, common.CAPABILITY_UNTUNNELED_WEB_API_REQUESTS)
+		capabilities = append(capabilities, protocol.CAPABILITY_UNTUNNELED_WEB_API_REQUESTS)
 	}
 
-	for protocol, _ := range params.TunnelProtocolPorts {
-		capabilities = append(capabilities, psiphon.GetCapability(protocol))
+	for tunnelProtocol, _ := range params.TunnelProtocolPorts {
+		capabilities = append(capabilities, protocol.GetCapability(tunnelProtocol))
 	}
 
 	sshPort := params.TunnelProtocolPorts["SSH"]
@@ -596,7 +599,7 @@ func GenerateConfig(params *GenerateConfigParams) ([]byte, []byte, []byte, error
 		strippedWebServerCertificate = strings.Join(lines[1:len(lines)-2], "")
 	}
 
-	serverEntry := &psiphon.ServerEntry{
+	serverEntry := &protocol.ServerEntry{
 		IpAddress:                     params.ServerIPAddress,
 		WebServerPort:                 serverEntryWebServerPort,
 		WebServerSecret:               webServerSecret,
@@ -617,7 +620,7 @@ func GenerateConfig(params *GenerateConfigParams) ([]byte, []byte, []byte, error
 		MeekFrontingDisableSNI:        false,
 	}
 
-	encodedServerEntry, err := psiphon.EncodeServerEntry(serverEntry)
+	encodedServerEntry, err := protocol.EncodeServerEntry(serverEntry)
 	if err != nil {
 		return nil, nil, nil, common.ContextError(err)
 	}

+ 8 - 10
psiphon/server/dns.go

@@ -21,9 +21,9 @@ package server
 
 import (
 	"bufio"
+	"bytes"
 	"errors"
 	"net"
-	"os"
 	"strings"
 	"sync/atomic"
 	"time"
@@ -77,9 +77,9 @@ func NewDNSResolver(defaultResolver string) (*DNSResolver, error) {
 
 	dns.ReloadableFile = common.NewReloadableFile(
 		DNS_SYSTEM_CONFIG_FILENAME,
-		func(filename string) error {
+		func(fileContent []byte) error {
 
-			resolver, err := parseResolveConf(filename)
+			resolver, err := parseResolveConf(fileContent)
 			if err != nil {
 				// On error, state remains the same
 				return common.ContextError(err)
@@ -161,14 +161,10 @@ func (dns *DNSResolver) Get() net.IP {
 	return dns.resolver
 }
 
-func parseResolveConf(filename string) (net.IP, error) {
-	file, err := os.Open(filename)
-	if err != nil {
-		return nil, common.ContextError(err)
-	}
-	defer file.Close()
+func parseResolveConf(fileContent []byte) (net.IP, error) {
+
+	scanner := bufio.NewScanner(bytes.NewReader(fileContent))
 
-	scanner := bufio.NewScanner(file)
 	for scanner.Scan() {
 		line := scanner.Text()
 		if strings.HasPrefix(line, ";") || strings.HasPrefix(line, "#") {
@@ -182,9 +178,11 @@ func parseResolveConf(filename string) (net.IP, error) {
 			return parseResolver(fields[1])
 		}
 	}
+
 	if err := scanner.Err(); err != nil {
 		return nil, common.ContextError(err)
 	}
+
 	return nil, common.ContextError(errors.New("nameserver not found"))
 }
 

+ 26 - 5
psiphon/server/geoip.go

@@ -89,8 +89,8 @@ func NewGeoIPService(
 		database := &geoIPDatabase{}
 		database.ReloadableFile = common.NewReloadableFile(
 			filename,
-			func(filename string) error {
-				maxMindReader, err := maxminddb.Open(filename)
+			func(fileContent []byte) error {
+				maxMindReader, err := maxminddb.FromBytes(fileContent)
 				if err != nil {
 					// On error, database state remains the same
 					return common.ContextError(err)
@@ -175,12 +175,33 @@ func (geoIP *GeoIPService) Lookup(ipAddress string) GeoIPData {
 	return result
 }
 
+// SetSessionCache adds the sessionID/geoIPData pair to the
+// session cache. This value will not expire; the caller must
+// call MarkSessionCacheToExpire to initiate expiry.
+// Calling SetSessionCache for an existing sessionID will
+// replace the previous value and reset any expiry.
 func (geoIP *GeoIPService) SetSessionCache(sessionID string, geoIPData GeoIPData) {
-	geoIP.sessionCache.Set(sessionID, geoIPData, cache.DefaultExpiration)
+	geoIP.sessionCache.Set(sessionID, geoIPData, cache.NoExpiration)
 }
 
-func (geoIP *GeoIPService) GetSessionCache(
-	sessionID string) GeoIPData {
+// MarkSessionCacheToExpire initiates expiry for an existing
+// session cache entry, if the session ID is found in the cache.
+// Concurrency note: SetSessionCache and MarkSessionCacheToExpire
+// should not be called concurrently for a single session ID.
+func (geoIP *GeoIPService) MarkSessionCacheToExpire(sessionID string) {
+	geoIPData, found := geoIP.sessionCache.Get(sessionID)
+	// Note: potential race condition between Get and Set. In practice,
+	// the tunnel server won't clobber a SetSessionCache value by calling
+	// MarkSessionCacheToExpire concurrently.
+	if found {
+		geoIP.sessionCache.Set(sessionID, geoIPData, cache.DefaultExpiration)
+	}
+}
+
+// GetSessionCache returns the cached GeoIPData for the
+// specified session ID; a blank GeoIPData is returned
+// if the session ID is not found in the cache.
+func (geoIP *GeoIPService) GetSessionCache(sessionID string) GeoIPData {
 	geoIPData, found := geoIP.sessionCache.Get(sessionID)
 	if !found {
 		return NewGeoIPData()

+ 7 - 6
psiphon/server/meek.go

@@ -35,7 +35,6 @@ import (
 
 	"github.com/Psiphon-Inc/crypto/nacl/box"
 	"github.com/Psiphon-Inc/goarista/monotime"
-	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
 )
 
@@ -329,7 +328,9 @@ func (server *MeekServer) getSession(
 				// list of IPs (each proxy in a chain). The first IP should be
 				// the client IP.
 				proxyClientIP := strings.Split(value, ",")[0]
-				if net.ParseIP(proxyClientIP) != nil {
+				if net.ParseIP(proxyClientIP) != nil &&
+					server.support.GeoIPService.Lookup(proxyClientIP).Country != GEOIP_UNKNOWN_VALUE {
+
 					clientIP = proxyClientIP
 					break
 				}
@@ -529,9 +530,9 @@ func getMeekCookiePayload(support *SupportServices, cookieValue string) ([]byte,
 
 	reader := bytes.NewReader(decodedValue[:])
 
-	obfuscator, err := psiphon.NewServerObfuscator(
+	obfuscator, err := common.NewServerObfuscator(
 		reader,
-		&psiphon.ObfuscatorConfig{Keyword: support.Config.MeekObfuscatedKey})
+		&common.ObfuscatorConfig{Keyword: support.Config.MeekObfuscatedKey})
 	if err != nil {
 		return nil, common.ContextError(err)
 	}
@@ -709,8 +710,8 @@ func (conn *meekConn) pumpWrites(writer io.Writer) error {
 				return err
 			}
 
-			if conn.protocolVersion < MEEK_PROTOCOL_VERSION_2 {
-				// Protocol v1 clients expect at most
+			if conn.protocolVersion < MEEK_PROTOCOL_VERSION_1 {
+				// Pre-protocol version 1 clients expect at most
 				// MEEK_MAX_PAYLOAD_LENGTH response bodies
 				return nil
 			}

+ 21 - 27
psiphon/server/psinet/psinet.go

@@ -27,7 +27,6 @@ import (
 	"encoding/hex"
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"math"
 	"math/rand"
 	"strconv"
@@ -127,13 +126,8 @@ func NewDatabase(filename string) (*Database, error) {
 
 	database.ReloadableFile = common.NewReloadableFile(
 		filename,
-		func(filename string) error {
-			psinetJSON, err := ioutil.ReadFile(filename)
-			if err != nil {
-				// On error, state remains the same
-				return common.ContextError(err)
-			}
-			err = json.Unmarshal(psinetJSON, &database)
+		func(fileContent []byte) error {
+			err := json.Unmarshal(fileContent, &database)
 			if err != nil {
 				// On error, state remains the same
 				// (Unmarshal first validates the provided
@@ -157,9 +151,9 @@ func (db *Database) GetRandomHomepage(sponsorID, clientRegion string, isMobilePl
 	homepages := db.GetHomepages(sponsorID, clientRegion, isMobilePlatform)
 	if len(homepages) > 0 {
 		index := rand.Intn(len(homepages))
-		return homepages[index:index+1]
+		return homepages[index : index+1]
 	}
-	return nil
+	return homepages
 }
 
 // GetHomepages returns a list of home pages for the specified sponsor,
@@ -173,7 +167,7 @@ func (db *Database) GetHomepages(sponsorID, clientRegion string, isMobilePlatfor
 	// Sponsor id does not exist: fail gracefully
 	sponsor, ok := db.Sponsors[sponsorID]
 	if !ok {
-		return nil
+		return sponsorHomePages
 	}
 
 	homePages := sponsor.HomePages
@@ -408,21 +402,21 @@ func (db *Database) getEncodedServerEntry(server Server) string {
 
 	// Extended (new) entry fields are in a JSON string
 	var extendedConfig struct {
-		IpAddress                     string
-		WebServerPort                 string
-		WebServerSecret               string
-		WebServerCertificate          string
-		SshPort                       int
-		SshUsername                   string
-		SshPassword                   string
-		SshHostKey                    string
-		SshObfuscatedPort             int
-		SshObfuscatedKey              string
-		Region                        string
-		MeekCookieEncryptionPublicKey string
-		MeekObfuscatedKey             string
-		MeekServerPort                int
-		capabilities                  []string
+		IpAddress                     string   `json:"ipAddress"`
+		WebServerPort                 string   `json:"webServerPort"` // not an int
+		WebServerSecret               string   `json:"webServerSecret"`
+		WebServerCertificate          string   `json:"webServerCertificate"`
+		SshPort                       int      `json:"sshPort"`
+		SshUsername                   string   `json:"sshUsername"`
+		SshPassword                   string   `json:"sshPassword"`
+		SshHostKey                    string   `json:"sshHostKey"`
+		SshObfuscatedPort             int      `json:"sshObfuscatedPort"`
+		SshObfuscatedKey              string   `json:"sshObfuscatedKey"`
+		Capabilities                  []string `json:"capabilities"`
+		Region                        string   `json:"region"`
+		MeekServerPort                int      `json:"meekServerPort"`
+		MeekCookieEncryptionPublicKey string   `json:"meekCookieEncryptionPublicKey"`
+		MeekObfuscatedKey             string   `json:"meekObfuscatedKey"`
 	}
 
 	// NOTE: also putting original values in extended config for easier parsing by new clients
@@ -476,7 +470,7 @@ func (db *Database) getEncodedServerEntry(server Server) string {
 
 	for capability, enabled := range serverCapabilities {
 		if enabled == true {
-			extendedConfig.capabilities = append(extendedConfig.capabilities, capability)
+			extendedConfig.Capabilities = append(extendedConfig.Capabilities, capability)
 		}
 	}
 

+ 199 - 40
psiphon/server/server_test.go

@@ -29,6 +29,7 @@ import (
 	"net/http"
 	"net/url"
 	"os"
+	"path/filepath"
 	"strconv"
 	"sync"
 	"syscall"
@@ -40,10 +41,25 @@ import (
 	"golang.org/x/net/proxy"
 )
 
+var testDataDirName string
+
 func TestMain(m *testing.M) {
 	flag.Parse()
-	os.Remove(psiphon.DATA_STORE_FILENAME)
+
+	var err error
+	testDataDirName, err = ioutil.TempDir("", "psiphon-server-test")
+	if err != nil {
+		fmt.Printf("TempDir failed: %s", err)
+		os.Exit(1)
+	}
+	defer os.RemoveAll(testDataDirName)
+
+	os.Remove(filepath.Join(testDataDirName, psiphon.DATA_STORE_FILENAME))
+
 	psiphon.SetEmitDiagnosticNotices(true)
+
+	CLIENT_VERIFICATION_REQUIRED = true
+
 	os.Exit(m.Run())
 }
 
@@ -57,6 +73,9 @@ func TestSSH(t *testing.T) {
 			enableSSHAPIRequests: true,
 			doHotReload:          false,
 			denyTrafficRules:     false,
+			doClientVerification: true,
+			doTunneledWebRequest: true,
+			doTunneledNTPRequest: true,
 		})
 }
 
@@ -67,6 +86,9 @@ func TestOSSH(t *testing.T) {
 			enableSSHAPIRequests: true,
 			doHotReload:          false,
 			denyTrafficRules:     false,
+			doClientVerification: false,
+			doTunneledWebRequest: true,
+			doTunneledNTPRequest: true,
 		})
 }
 
@@ -77,6 +99,9 @@ func TestUnfrontedMeek(t *testing.T) {
 			enableSSHAPIRequests: true,
 			doHotReload:          false,
 			denyTrafficRules:     false,
+			doClientVerification: false,
+			doTunneledWebRequest: true,
+			doTunneledNTPRequest: true,
 		})
 }
 
@@ -87,6 +112,9 @@ func TestUnfrontedMeekHTTPS(t *testing.T) {
 			enableSSHAPIRequests: true,
 			doHotReload:          false,
 			denyTrafficRules:     false,
+			doClientVerification: false,
+			doTunneledWebRequest: true,
+			doTunneledNTPRequest: true,
 		})
 }
 
@@ -97,6 +125,9 @@ func TestWebTransportAPIRequests(t *testing.T) {
 			enableSSHAPIRequests: false,
 			doHotReload:          false,
 			denyTrafficRules:     false,
+			doClientVerification: true,
+			doTunneledWebRequest: true,
+			doTunneledNTPRequest: true,
 		})
 }
 
@@ -107,6 +138,9 @@ func TestHotReload(t *testing.T) {
 			enableSSHAPIRequests: true,
 			doHotReload:          true,
 			denyTrafficRules:     false,
+			doClientVerification: false,
+			doTunneledWebRequest: true,
+			doTunneledNTPRequest: true,
 		})
 }
 
@@ -117,6 +151,35 @@ func TestDenyTrafficRules(t *testing.T) {
 			enableSSHAPIRequests: true,
 			doHotReload:          true,
 			denyTrafficRules:     true,
+			doClientVerification: false,
+			doTunneledWebRequest: true,
+			doTunneledNTPRequest: true,
+		})
+}
+
+func TestTCPOnlySLOK(t *testing.T) {
+	runServer(t,
+		&runServerConfig{
+			tunnelProtocol:       "OSSH",
+			enableSSHAPIRequests: true,
+			doHotReload:          false,
+			denyTrafficRules:     false,
+			doClientVerification: false,
+			doTunneledWebRequest: true,
+			doTunneledNTPRequest: false,
+		})
+}
+
+func TestUDPOnlySLOK(t *testing.T) {
+	runServer(t,
+		&runServerConfig{
+			tunnelProtocol:       "OSSH",
+			enableSSHAPIRequests: true,
+			doHotReload:          false,
+			denyTrafficRules:     false,
+			doClientVerification: false,
+			doTunneledWebRequest: false,
+			doTunneledNTPRequest: true,
 		})
 }
 
@@ -125,6 +188,9 @@ type runServerConfig struct {
 	enableSSHAPIRequests bool
 	doHotReload          bool
 	denyTrafficRules     bool
+	doClientVerification bool
+	doTunneledWebRequest bool
+	doTunneledNTPRequest bool
 }
 
 func sendNotificationReceived(c chan<- struct{}) {
@@ -155,7 +221,7 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 	var err error
 	serverIPaddress := ""
 	for _, interfaceName := range []string{"eth0", "en0"} {
-		serverIPaddress, err = psiphon.GetInterfaceIPAddress(interfaceName)
+		serverIPaddress, err = common.GetInterfaceIPAddress(interfaceName)
 		if err == nil {
 			break
 		}
@@ -178,25 +244,25 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 	// customize server config
 
 	// Pave psinet with random values to test handshake homepages.
-	psinetFilename := "psinet.json"
+	psinetFilename := filepath.Join(testDataDirName, "psinet.json")
 	sponsorID, expectedHomepageURL := pavePsinetDatabaseFile(t, psinetFilename)
 
 	// Pave traffic rules file which exercises handshake parameter filtering. Client
 	// must handshake with specified sponsor ID in order to allow ports for tunneled
 	// requests.
-	trafficRulesFilename := "traffic_rules.json"
+	trafficRulesFilename := filepath.Join(testDataDirName, "traffic_rules.json")
 	paveTrafficRulesFile(t, trafficRulesFilename, sponsorID, runConfig.denyTrafficRules)
 
-	var serverConfig interface{}
-	json.Unmarshal(serverConfigJSON, &serverConfig)
-	serverConfig.(map[string]interface{})["GeoIPDatabaseFilename"] = ""
-	serverConfig.(map[string]interface{})["PsinetDatabaseFilename"] = psinetFilename
-	serverConfig.(map[string]interface{})["TrafficRulesFilename"] = trafficRulesFilename
-	serverConfig.(map[string]interface{})["LogLevel"] = "debug"
+	oslConfigFilename := filepath.Join(testDataDirName, "osl_config.json")
+	propagationChannelID := paveOSLConfigFile(t, oslConfigFilename)
 
-	// 1 second is the minimum period; should be small enough to emit a log during the
-	// test run, but not guaranteed
-	serverConfig.(map[string]interface{})["LoadMonitorPeriodSeconds"] = 1
+	var serverConfig map[string]interface{}
+	json.Unmarshal(serverConfigJSON, &serverConfig)
+	serverConfig["GeoIPDatabaseFilename"] = ""
+	serverConfig["PsinetDatabaseFilename"] = psinetFilename
+	serverConfig["TrafficRulesFilename"] = trafficRulesFilename
+	serverConfig["OSLConfigFilename"] = oslConfigFilename
+	serverConfig["LogLevel"] = "error"
 
 	serverConfigJSON, _ = json.Marshal(serverConfig)
 
@@ -234,11 +300,12 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 		}
 	}()
 
+	// TODO: monitor logs for more robust wait-until-loaded
+	time.Sleep(1 * time.Second)
+
 	// Test: hot reload (of psinet and traffic rules)
 
 	if runConfig.doHotReload {
-		// TODO: monitor logs for more robust wait-until-loaded
-		time.Sleep(1 * time.Second)
 
 		// Pave a new psinet and traffic rules with different random values.
 		sponsorID, expectedHomepageURL = pavePsinetDatabaseFile(t, psinetFilename)
@@ -255,6 +322,10 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 		// handler below.
 	}
 
+	// Exercise server_load logging
+	p, _ := os.FindProcess(os.Getpid())
+	p.Signal(syscall.SIGUSR2)
+
 	// connect to server with client
 
 	// TODO: currently, TargetServerEntry only works with one tunnel
@@ -266,23 +337,30 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 	// Note: calling LoadConfig ensures all *int config fields are initialized
 	clientConfigJSON := `
     {
-        "ClientPlatform" : "Android",
+        "ClientPlatform" : "Windows",
         "ClientVersion" : "0",
         "SponsorId" : "0",
-        "PropagationChannelId" : "0"
+        "PropagationChannelId" : "0",
+        "DisableRemoteServerListFetcher" : true
     }`
 	clientConfig, _ := psiphon.LoadConfig([]byte(clientConfigJSON))
 
 	clientConfig.SponsorId = sponsorID
+	clientConfig.PropagationChannelId = propagationChannelID
 	clientConfig.ConnectionWorkerPoolSize = numTunnels
 	clientConfig.TunnelPoolSize = numTunnels
-	clientConfig.DisableRemoteServerListFetcher = true
 	clientConfig.EstablishTunnelPausePeriodSeconds = &establishTunnelPausePeriodSeconds
 	clientConfig.TargetServerEntry = string(encodedServerEntry)
 	clientConfig.TunnelProtocol = runConfig.tunnelProtocol
 	clientConfig.LocalSocksProxyPort = localSOCKSProxyPort
 	clientConfig.LocalHttpProxyPort = localHTTPProxyPort
+	clientConfig.EmitSLOKs = true
+
+	if runConfig.doClientVerification {
+		clientConfig.ClientPlatform = "Android"
+	}
 
+	clientConfig.DataStoreDirectory = testDataDirName
 	err = psiphon.InitDataStore(clientConfig)
 	if err != nil {
 		t.Fatalf("error initializing client datastore: %s", err)
@@ -295,6 +373,7 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 
 	tunnelsEstablished := make(chan struct{}, 1)
 	homepageReceived := make(chan struct{}, 1)
+	slokSeeded := make(chan struct{}, 1)
 	verificationRequired := make(chan struct{}, 1)
 	verificationCompleted := make(chan struct{}, 1)
 
@@ -324,6 +403,8 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 					t.Fatalf("unexpected homepage: %s", homepageURL)
 				}
 				sendNotificationReceived(homepageReceived)
+			case "SLOKSeeded":
+				sendNotificationReceived(slokSeeded)
 			case "ClientVerificationRequired":
 				sendNotificationReceived(verificationRequired)
 				controller.SetClientVerificationPayloadForActiveTunnels(dummyClientVerificationPayload)
@@ -369,38 +450,54 @@ func runServer(t *testing.T, runConfig *runServerConfig) {
 
 	waitOnNotification(t, tunnelsEstablished, timeoutSignal, "tunnel establish timeout exceeded")
 	waitOnNotification(t, homepageReceived, timeoutSignal, "homepage received timeout exceeded")
-	waitOnNotification(t, verificationRequired, timeoutSignal, "verification required timeout exceeded")
-	waitOnNotification(t, verificationCompleted, timeoutSignal, "verification completed timeout exceeded")
 
-	// Test: tunneled web site fetch
+	if runConfig.doClientVerification {
+		waitOnNotification(t, verificationRequired, timeoutSignal, "verification required timeout exceeded")
+		waitOnNotification(t, verificationCompleted, timeoutSignal, "verification completed timeout exceeded")
+	}
 
-	err = makeTunneledWebRequest(t, localHTTPProxyPort)
+	if runConfig.doTunneledWebRequest {
 
-	if err == nil {
-		if runConfig.denyTrafficRules {
-			t.Fatalf("unexpected tunneled web request success")
-		}
-	} else {
-		if !runConfig.denyTrafficRules {
-			t.Fatalf("tunneled web request failed: %s", err)
+		// Test: tunneled web site fetch
+
+		err = makeTunneledWebRequest(t, localHTTPProxyPort)
+
+		if err == nil {
+			if runConfig.denyTrafficRules {
+				t.Fatalf("unexpected tunneled web request success")
+			}
+		} else {
+			if !runConfig.denyTrafficRules {
+				t.Fatalf("tunneled web request failed: %s", err)
+			}
 		}
 	}
 
-	// Test: tunneled UDP packets
+	if runConfig.doTunneledNTPRequest {
 
-	udpgwServerAddress := serverConfig.(map[string]interface{})["UDPInterceptUdpgwServerAddress"].(string)
+		// Test: tunneled UDP packets
 
-	err = makeTunneledNTPRequest(t, localSOCKSProxyPort, udpgwServerAddress)
+		udpgwServerAddress := serverConfig["UDPInterceptUdpgwServerAddress"].(string)
 
-	if err == nil {
-		if runConfig.denyTrafficRules {
-			t.Fatalf("unexpected tunneled NTP request success")
-		}
-	} else {
-		if !runConfig.denyTrafficRules {
-			t.Fatalf("tunneled NTP request failed: %s", err)
+		err = makeTunneledNTPRequest(t, localSOCKSProxyPort, udpgwServerAddress)
+
+		if err == nil {
+			if runConfig.denyTrafficRules {
+				t.Fatalf("unexpected tunneled NTP request success")
+			}
+		} else {
+			if !runConfig.denyTrafficRules {
+				t.Fatalf("tunneled NTP request failed: %s", err)
+			}
 		}
 	}
+
+	// Test: await SLOK payload
+
+	if !runConfig.denyTrafficRules {
+		time.Sleep(1 * time.Second)
+		waitOnNotification(t, slokSeeded, timeoutSignal, "SLOK seeded timeout exceeded")
+	}
 }
 
 func makeTunneledWebRequest(t *testing.T, localHTTPProxyPort int) error {
@@ -437,7 +534,7 @@ func makeTunneledWebRequest(t *testing.T, localHTTPProxyPort int) error {
 func makeTunneledNTPRequest(t *testing.T, localSOCKSProxyPort int, udpgwServerAddress string) error {
 
 	testHostname := "pool.ntp.org"
-	timeout := 10 * time.Second
+	timeout := 20 * time.Second
 
 	localUDPProxyAddress, err := net.ResolveUDPAddr("udp", "127.0.0.1:7301")
 	if err != nil {
@@ -693,3 +790,65 @@ func paveTrafficRulesFile(t *testing.T, trafficRulesFilename, sponsorID string,
 		t.Fatalf("error paving traffic rules file: %s", err)
 	}
 }
+
+func paveOSLConfigFile(t *testing.T, oslConfigFilename string) string {
+
+	oslConfigJSONFormat := `
+    {
+      "Schemes" : [
+        {
+          "Epoch" : "%s",
+          "Regions" : [],
+          "PropagationChannelIDs" : ["%s"],
+          "MasterKey" : "wFuSbqU/pJ/35vRmoM8T9ys1PgDa8uzJps1Y+FNKa5U=",
+          "SeedSpecs" : [
+            {
+              "ID" : "IXHWfVgWFkEKvgqsjmnJuN3FpaGuCzQMETya+DSQvsk=",
+              "UpstreamSubnets" : ["0.0.0.0/0"],
+              "Targets" :
+              {
+                  "BytesRead" : 1,
+                  "BytesWritten" : 1,
+                  "PortForwardDurationNanoseconds" : 1
+              }
+            },
+            {
+              "ID" : "qvpIcORLE2Pi5TZmqRtVkEp+OKov0MhfsYPLNV7FYtI=",
+              "UpstreamSubnets" : ["0.0.0.0/0"],
+              "Targets" :
+              {
+                  "BytesRead" : 1,
+                  "BytesWritten" : 1,
+                  "PortForwardDurationNanoseconds" : 1
+              }
+            }
+          ],
+          "SeedSpecThreshold" : 2,
+          "SeedPeriodNanoseconds" : 10000000000,
+          "SeedPeriodKeySplits": [
+            {
+              "Total": 2,
+              "Threshold": 2
+            }
+          ]
+        }
+      ]
+    }
+    `
+
+	propagationChannelID, _ := common.MakeRandomStringHex(8)
+
+	now := time.Now().UTC()
+	epoch := now.Truncate(10 * time.Second)
+	epochStr := epoch.Format(time.RFC3339Nano)
+
+	oslConfigJSON := fmt.Sprintf(
+		oslConfigJSONFormat, epochStr, propagationChannelID)
+
+	err := ioutil.WriteFile(oslConfigFilename, []byte(oslConfigJSON), 0600)
+	if err != nil {
+		t.Fatalf("error paving osl config file: %s", err)
+	}
+
+	return propagationChannelID
+}

+ 28 - 7
psiphon/server/services.go

@@ -35,6 +35,7 @@ import (
 	"time"
 
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/osl"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/server/psinet"
 )
 
@@ -166,9 +167,6 @@ loop:
 
 		case <-reloadSupportServicesSignal:
 			supportServices.Reload()
-			// Reset traffic rules for established clients to reflect reloaded config
-			// TODO: only update when traffic rules config has changed
-			tunnelServer.ResetAllClientTrafficRules()
 
 		case <-logServerLoadSignal:
 			// Signal profiles writes first to ensure some diagnostics are
@@ -318,6 +316,7 @@ func logServerLoad(server *TunnelServer) {
 type SupportServices struct {
 	Config          *Config
 	TrafficRulesSet *TrafficRulesSet
+	OSLConfig       *osl.Config
 	PsinetDatabase  *psinet.Database
 	GeoIPService    *GeoIPService
 	DNSResolver     *DNSResolver
@@ -326,11 +325,17 @@ type SupportServices struct {
 
 // NewSupportServices initializes a new SupportServices.
 func NewSupportServices(config *Config) (*SupportServices, error) {
+
 	trafficRulesSet, err := NewTrafficRulesSet(config.TrafficRulesFilename)
 	if err != nil {
 		return nil, common.ContextError(err)
 	}
 
+	oslConfig, err := osl.NewConfig(config.OSLConfigFilename)
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+
 	psinetDatabase, err := psinet.NewDatabase(config.PsinetDatabaseFilename)
 	if err != nil {
 		return nil, common.ContextError(err)
@@ -350,6 +355,7 @@ func NewSupportServices(config *Config) (*SupportServices, error) {
 	return &SupportServices{
 		Config:          config,
 		TrafficRulesSet: trafficRulesSet,
+		OSLConfig:       oslConfig,
 		PsinetDatabase:  psinetDatabase,
 		GeoIPService:    geoIPService,
 		DNSResolver:     dnsResolver,
@@ -359,15 +365,23 @@ func NewSupportServices(config *Config) (*SupportServices, error) {
 // Reload reinitializes traffic rules, psinet database, and geo IP database
 // components. If any component fails to reload, an error is logged and
 // Reload proceeds, using the previous state of the component.
-//
-// Limitation: reload of traffic rules currently doesn't apply to existing,
-// established clients.
 func (support *SupportServices) Reload() {
 
 	reloaders := append(
-		[]common.Reloader{support.TrafficRulesSet, support.PsinetDatabase},
+		[]common.Reloader{
+			support.TrafficRulesSet,
+			support.OSLConfig,
+			support.PsinetDatabase},
 		support.GeoIPService.Reloaders()...)
 
+	// Take these actions only after the corresponding Reloader has reloaded.
+	// In both the traffic rules and OSL cases, there is some impact from state
+	// reset, so the reset should be avoided where possible.
+	reloadPostActions := map[common.Reloader]func(){
+		support.TrafficRulesSet: func() { support.TunnelServer.ResetAllClientTrafficRules() },
+		support.OSLConfig:       func() { support.TunnelServer.ResetAllClientOSLConfigs() },
+	}
+
 	for _, reloader := range reloaders {
 
 		if !reloader.WillReload() {
@@ -377,6 +391,13 @@ func (support *SupportServices) Reload() {
 
 		// "reloaded" flag indicates if file was actually reloaded or ignored
 		reloaded, err := reloader.Reload()
+
+		if reloaded {
+			if action, ok := reloadPostActions[reloader]; ok {
+				action()
+			}
+		}
+
 		if err != nil {
 			log.WithContextFields(
 				LogFields{

+ 2 - 8
psiphon/server/trafficRules.go

@@ -22,7 +22,6 @@ package server
 import (
 	"encoding/json"
 	"fmt"
-	"io/ioutil"
 	"net"
 
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
@@ -171,14 +170,9 @@ func NewTrafficRulesSet(filename string) (*TrafficRulesSet, error) {
 
 	set.ReloadableFile = common.NewReloadableFile(
 		filename,
-		func(filename string) error {
-			configJSON, err := ioutil.ReadFile(filename)
-			if err != nil {
-				// On error, state remains the same
-				return common.ContextError(err)
-			}
+		func(fileContent []byte) error {
 			var newSet TrafficRulesSet
-			err = json.Unmarshal(configJSON, &newSet)
+			err := json.Unmarshal(fileContent, &newSet)
 			if err != nil {
 				return common.ContextError(err)
 			}

+ 382 - 135
psiphon/server/tunnelServer.go

@@ -33,22 +33,21 @@ import (
 
 	"github.com/Psiphon-Inc/crypto/ssh"
 	"github.com/Psiphon-Inc/goarista/monotime"
-	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/osl"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 )
 
 const (
-	SSH_HANDSHAKE_TIMEOUT                 = 30 * time.Second
-	SSH_CONNECTION_READ_DEADLINE          = 5 * time.Minute
-	SSH_TCP_PORT_FORWARD_DIAL_TIMEOUT     = 30 * time.Second
-	SSH_TCP_PORT_FORWARD_COPY_BUFFER_SIZE = 8192
+	SSH_HANDSHAKE_TIMEOUT                  = 30 * time.Second
+	SSH_CONNECTION_READ_DEADLINE           = 5 * time.Minute
+	SSH_TCP_PORT_FORWARD_IP_LOOKUP_TIMEOUT = 30 * time.Second
+	SSH_TCP_PORT_FORWARD_DIAL_TIMEOUT      = 30 * time.Second
+	SSH_TCP_PORT_FORWARD_COPY_BUFFER_SIZE  = 8192
+	SSH_SEND_OSL_INITIAL_RETRY_DELAY       = 30 * time.Second
+	SSH_SEND_OSL_RETRY_FACTOR              = 2
 )
 
-// Disallowed port forward hosts is a failsafe. The server should
-// be run on a host with correctly configured firewall rules, or
-// containerization, or both.
-var SSH_DISALLOWED_PORT_FORWARD_HOSTS = []string{"localhost", "127.0.0.1"}
-
 // TunnelServer is the main server that accepts Psiphon client
 // connections, via various obfuscation protocols, and provides
 // port forwarding (TCP and UDP) services to the Psiphon client.
@@ -194,11 +193,19 @@ func (server *TunnelServer) GetLoadStats() map[string]map[string]int64 {
 }
 
 // ResetAllClientTrafficRules resets all established client traffic rules
-// to use the latest server config and client state.
+// to use the latest config and client properties. Any existing traffic
+// rule state is lost, including throttling state.
 func (server *TunnelServer) ResetAllClientTrafficRules() {
 	server.sshServer.resetAllClientTrafficRules()
 }
 
+// ResetAllClientOSLConfigs resets all established client OSL state to use
+// the latest OSL config. Any existing OSL state is lost, including partial
+// progress towards SLOKs.
+func (server *TunnelServer) ResetAllClientOSLConfigs() {
+	server.sshServer.resetAllClientOSLConfigs()
+}
+
 // SetClientHandshakeState sets the handshake state -- that it completed and
 // what parameters were passed -- in sshClient. This state is used for allowing
 // port forwards and for future traffic rule selection. SetClientHandshakeState
@@ -310,13 +317,13 @@ func (sshServer *sshServer) runListener(
 	// TunnelServer.Run will properly shut down instead of remaining
 	// running.
 
-	if common.TunnelProtocolUsesMeekHTTP(tunnelProtocol) ||
-		common.TunnelProtocolUsesMeekHTTPS(tunnelProtocol) {
+	if protocol.TunnelProtocolUsesMeekHTTP(tunnelProtocol) ||
+		protocol.TunnelProtocolUsesMeekHTTPS(tunnelProtocol) {
 
 		meekServer, err := NewMeekServer(
 			sshServer.support,
 			listener,
-			common.TunnelProtocolUsesMeekHTTPS(tunnelProtocol),
+			protocol.TunnelProtocolUsesMeekHTTPS(tunnelProtocol),
 			handleClient,
 			sshServer.shutdownBroadcast)
 		if err != nil {
@@ -482,17 +489,24 @@ func (sshServer *sshServer) getLoadStats() map[string]map[string]int64 {
 	// than further down the stats stack. Also useful for glancing at log files.
 
 	allProtocolsStats := make(map[string]int64)
+	allProtocolsStats["accepted_clients"] = 0
+	allProtocolsStats["established_clients"] = 0
+	allProtocolsStats["tcp_port_forwards"] = 0
+	allProtocolsStats["total_tcp_port_forwards"] = 0
+	allProtocolsStats["udp_port_forwards"] = 0
+	allProtocolsStats["total_udp_port_forwards"] = 0
+	allProtocolsStats["tcp_port_forward_dialed_count"] = aggregatedQualityMetrics.tcpPortForwardDialedCount
+	allProtocolsStats["tcp_port_forward_dialed_duration"] = int64(aggregatedQualityMetrics.tcpPortForwardDialedDuration)
+	allProtocolsStats["tcp_port_forward_failed_count"] = aggregatedQualityMetrics.tcpPortForwardFailedCount
+	allProtocolsStats["tcp_port_forward_failed_duration"] = int64(aggregatedQualityMetrics.tcpPortForwardFailedDuration)
+
 	for _, stats := range loadStats {
 		for name, value := range stats {
 			allProtocolsStats[name] += value
 		}
 	}
-	loadStats["ALL"] = allProtocolsStats
 
-	loadStats["ALL"]["tcp_port_forward_dialed_count"] = aggregatedQualityMetrics.tcpPortForwardDialedCount
-	loadStats["ALL"]["tcp_port_forward_dialed_duration"] = int64(aggregatedQualityMetrics.tcpPortForwardDialedDuration)
-	loadStats["ALL"]["tcp_port_forward_failed_count"] = aggregatedQualityMetrics.tcpPortForwardFailedCount
-	loadStats["ALL"]["tcp_port_forward_failed_duration"] = int64(aggregatedQualityMetrics.tcpPortForwardFailedDuration)
+	loadStats["ALL"] = allProtocolsStats
 
 	return loadStats
 }
@@ -511,6 +525,20 @@ func (sshServer *sshServer) resetAllClientTrafficRules() {
 	}
 }
 
+func (sshServer *sshServer) resetAllClientOSLConfigs() {
+
+	sshServer.clientsMutex.Lock()
+	clients := make(map[string]*sshClient)
+	for sessionID, client := range sshServer.clients {
+		clients[sessionID] = client
+	}
+	sshServer.clientsMutex.Unlock()
+
+	for _, client := range clients {
+		client.setOSLConfig()
+	}
+}
+
 func (sshServer *sshServer) setClientHandshakeState(
 	sessionID string, state handshakeState) error {
 
@@ -527,8 +555,6 @@ func (sshServer *sshServer) setClientHandshakeState(
 		return common.ContextError(err)
 	}
 
-	client.setTrafficRules()
-
 	return nil
 }
 
@@ -555,6 +581,73 @@ func (sshServer *sshServer) handleClient(tunnelProtocol string, clientConn net.C
 
 	sshClient := newSshClient(sshServer, tunnelProtocol, geoIPData)
 
+	sshClient.run(clientConn)
+}
+
+type sshClient struct {
+	sync.Mutex
+	sshServer               *sshServer
+	tunnelProtocol          string
+	sshConn                 ssh.Conn
+	activityConn            *common.ActivityMonitoredConn
+	throttledConn           *common.ThrottledConn
+	geoIPData               GeoIPData
+	sessionID               string
+	supportsServerRequests  bool
+	handshakeState          handshakeState
+	udpChannel              ssh.Channel
+	trafficRules            TrafficRules
+	tcpTrafficState         trafficState
+	udpTrafficState         trafficState
+	qualityMetrics          qualityMetrics
+	channelHandlerWaitGroup *sync.WaitGroup
+	tcpPortForwardLRU       *common.LRUConns
+	oslClientSeedState      *osl.ClientSeedState
+	signalIssueSLOKs        chan struct{}
+	stopBroadcast           chan struct{}
+}
+
+type trafficState struct {
+	bytesUp                        int64
+	bytesDown                      int64
+	concurrentPortForwardCount     int64
+	peakConcurrentPortForwardCount int64
+	totalPortForwardCount          int64
+}
+
+// qualityMetrics records upstream TCP dial attempts and
+// elapsed time. Elapsed time includes the full TCP handshake
+// and, in aggregate, is a measure of the quality of the
+// upstream link. These stats are recorded by each sshClient
+// and then reported and reset in sshServer.getLoadStats().
+type qualityMetrics struct {
+	tcpPortForwardDialedCount    int64
+	tcpPortForwardDialedDuration time.Duration
+	tcpPortForwardFailedCount    int64
+	tcpPortForwardFailedDuration time.Duration
+}
+
+type handshakeState struct {
+	completed   bool
+	apiProtocol string
+	apiParams   requestJSONObject
+}
+
+func newSshClient(
+	sshServer *sshServer, tunnelProtocol string, geoIPData GeoIPData) *sshClient {
+	return &sshClient{
+		sshServer:               sshServer,
+		tunnelProtocol:          tunnelProtocol,
+		geoIPData:               geoIPData,
+		channelHandlerWaitGroup: new(sync.WaitGroup),
+		tcpPortForwardLRU:       common.NewLRUConns(),
+		signalIssueSLOKs:        make(chan struct{}, 1),
+		stopBroadcast:           make(chan struct{}),
+	}
+}
+
+func (sshClient *sshClient) run(clientConn net.Conn) {
+
 	// Set initial traffic rules, pre-handshake, based on currently known info.
 	sshClient.setTrafficRules()
 
@@ -569,6 +662,7 @@ func (sshServer *sshServer) handleClient(tunnelProtocol string, clientConn net.C
 		clientConn,
 		SSH_CONNECTION_READ_DEADLINE,
 		false,
+		nil,
 		nil)
 	if err != nil {
 		clientConn.Close()
@@ -607,21 +701,21 @@ func (sshServer *sshServer) handleClient(tunnelProtocol string, clientConn net.C
 		sshServerConfig := &ssh.ServerConfig{
 			PasswordCallback: sshClient.passwordCallback,
 			AuthLogCallback:  sshClient.authLogCallback,
-			ServerVersion:    sshServer.support.Config.SSHServerVersion,
+			ServerVersion:    sshClient.sshServer.support.Config.SSHServerVersion,
 		}
-		sshServerConfig.AddHostKey(sshServer.sshHostKey)
+		sshServerConfig.AddHostKey(sshClient.sshServer.sshHostKey)
 
 		result := &sshNewServerConnResult{}
 
 		// Wrap the connection in an SSH deobfuscator when required.
 
-		if common.TunnelProtocolUsesObfuscatedSSH(tunnelProtocol) {
+		if protocol.TunnelProtocolUsesObfuscatedSSH(sshClient.tunnelProtocol) {
 			// Note: NewObfuscatedSshConn blocks on network I/O
 			// TODO: ensure this won't block shutdown
-			conn, result.err = psiphon.NewObfuscatedSshConn(
-				psiphon.OBFUSCATION_CONN_MODE_SERVER,
+			conn, result.err = common.NewObfuscatedSshConn(
+				common.OBFUSCATION_CONN_MODE_SERVER,
 				conn,
-				sshServer.support.Config.ObfuscatedSSHKey)
+				sshClient.sshServer.support.Config.ObfuscatedSSHKey)
 			if result.err != nil {
 				result.err = common.ContextError(result.err)
 			}
@@ -639,7 +733,7 @@ func (sshServer *sshServer) handleClient(tunnelProtocol string, clientConn net.C
 	var result *sshNewServerConnResult
 	select {
 	case result = <-resultChannel:
-	case <-sshServer.shutdownBroadcast:
+	case <-sshClient.sshServer.shutdownBroadcast:
 		// Close() will interrupt an ongoing handshake
 		// TODO: wait for goroutine to exit before returning?
 		clientConn.Close()
@@ -661,86 +755,25 @@ func (sshServer *sshServer) handleClient(tunnelProtocol string, clientConn net.C
 	sshClient.throttledConn = throttledConn
 	sshClient.Unlock()
 
-	if !sshServer.registerEstablishedClient(sshClient) {
+	if !sshClient.sshServer.registerEstablishedClient(sshClient) {
 		clientConn.Close()
 		log.WithContext().Warning("register failed")
 		return
 	}
-	defer sshServer.unregisterEstablishedClient(sshClient.sessionID)
+	defer sshClient.sshServer.unregisterEstablishedClient(sshClient.sessionID)
 
-	sshClient.runClient(result.channels, result.requests)
+	sshClient.runTunnel(result.channels, result.requests)
 
-	// Note: sshServer.unregisterClient calls sshClient.Close(),
+	// Note: sshServer.unregisterEstablishedClient calls sshClient.stop(),
 	// which also closes underlying transport Conn.
 }
 
-type sshClient struct {
-	sync.Mutex
-	sshServer               *sshServer
-	tunnelProtocol          string
-	sshConn                 ssh.Conn
-	activityConn            *common.ActivityMonitoredConn
-	throttledConn           *common.ThrottledConn
-	geoIPData               GeoIPData
-	sessionID               string
-	handshakeState          handshakeState
-	udpChannel              ssh.Channel
-	trafficRules            TrafficRules
-	tcpTrafficState         trafficState
-	udpTrafficState         trafficState
-	qualityMetrics          qualityMetrics
-	channelHandlerWaitGroup *sync.WaitGroup
-	tcpPortForwardLRU       *common.LRUConns
-	stopBroadcast           chan struct{}
-}
-
-type trafficState struct {
-	bytesUp                        int64
-	bytesDown                      int64
-	concurrentPortForwardCount     int64
-	peakConcurrentPortForwardCount int64
-	totalPortForwardCount          int64
-}
-
-// qualityMetrics records upstream TCP dial attempts and
-// elapsed time. Elapsed time includes the full TCP handshake
-// and, in aggregate, is a measure of the quality of the
-// upstream link. These stats are recorded by each sshClient
-// and then reported and reset in sshServer.getLoadStats().
-type qualityMetrics struct {
-	tcpPortForwardDialedCount    int64
-	tcpPortForwardDialedDuration time.Duration
-	tcpPortForwardFailedCount    int64
-	tcpPortForwardFailedDuration time.Duration
-}
-
-type handshakeState struct {
-	completed   bool
-	apiProtocol string
-	apiParams   requestJSONObject
-}
-
-func newSshClient(
-	sshServer *sshServer, tunnelProtocol string, geoIPData GeoIPData) *sshClient {
-	return &sshClient{
-		sshServer:               sshServer,
-		tunnelProtocol:          tunnelProtocol,
-		geoIPData:               geoIPData,
-		channelHandlerWaitGroup: new(sync.WaitGroup),
-		tcpPortForwardLRU:       common.NewLRUConns(),
-		stopBroadcast:           make(chan struct{}),
-	}
-}
-
 func (sshClient *sshClient) passwordCallback(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {
 
-	expectedSessionIDLength := 2 * common.PSIPHON_API_CLIENT_SESSION_ID_LENGTH
+	expectedSessionIDLength := 2 * protocol.PSIPHON_API_CLIENT_SESSION_ID_LENGTH
 	expectedSSHPasswordLength := 2 * SSH_PASSWORD_BYTE_LENGTH
 
-	var sshPasswordPayload struct {
-		SessionId   string `json:"SessionId"`
-		SshPassword string `json:"SshPassword"`
-	}
+	var sshPasswordPayload protocol.SSHPasswordPayload
 	err := json.Unmarshal(password, &sshPasswordPayload)
 	if err != nil {
 
@@ -773,15 +806,21 @@ func (sshClient *sshClient) passwordCallback(conn ssh.ConnMetadata, password []b
 
 	sessionID := sshPasswordPayload.SessionId
 
+	supportsServerRequests := common.Contains(
+		sshPasswordPayload.ClientCapabilities, protocol.CLIENT_CAPABILITY_SERVER_REQUESTS)
+
 	sshClient.Lock()
 	sshClient.sessionID = sessionID
+	sshClient.supportsServerRequests = supportsServerRequests
 	geoIPData := sshClient.geoIPData
 	sshClient.Unlock()
 
-	// Store the GeoIP data associated with the session ID. This makes the GeoIP data
-	// available to the web server for web transport Psiphon API requests. To allow for
-	// post-tunnel final status requests, the lifetime of cached GeoIP records exceeds
-	// the lifetime of the sshClient, and that's why this distinct session cache exists.
+	// Store the GeoIP data associated with the session ID. This makes
+	// the GeoIP data available to the web server for web API requests.
+	// A cache that's distinct from the sshClient record is used to allow
+	// for post-tunnel final status requests.
+	// If the client is reconnecting with the same session ID, this call
+	// will undo the expiry set by MarkSessionCacheToExpire.
 	sshClient.sshServer.support.GeoIPService.SetSessionCache(sessionID, geoIPData)
 
 	return nil, nil
@@ -862,18 +901,28 @@ func (sshClient *sshClient) stop() {
 	logFields["peak_concurrent_port_forward_count_udp"] = sshClient.udpTrafficState.peakConcurrentPortForwardCount
 	logFields["total_port_forward_count_udp"] = sshClient.udpTrafficState.totalPortForwardCount
 
+	sessionID := sshClient.sessionID
+
 	sshClient.Unlock()
 
+	// Initiate cleanup of the GeoIP session cache. To allow for post-tunnel
+	// final status requests, the lifetime of cached GeoIP records exceeds the
+	// lifetime of the sshClient.
+	sshClient.sshServer.support.GeoIPService.MarkSessionCacheToExpire(sessionID)
+
 	log.LogRawFieldsWithTimestamp(logFields)
 }
 
-// runClient handles/dispatches new channel and new requests from the client.
+// runTunnel handles/dispatches new channel and new requests from the client.
 // When the SSH client connection closes, both the channels and requests channels
 // will close and runTunnel will exit.
-func (sshClient *sshClient) runClient(
+func (sshClient *sshClient) runTunnel(
 	channels <-chan ssh.NewChannel, requests <-chan *ssh.Request) {
 
+	stopBroadcast := make(chan struct{})
+
 	requestsWaitGroup := new(sync.WaitGroup)
+
 	requestsWaitGroup.Add(1)
 	go func() {
 		defer requestsWaitGroup.Done()
@@ -909,6 +958,14 @@ func (sshClient *sshClient) runClient(
 		}
 	}()
 
+	if sshClient.supportsServerRequests {
+		requestsWaitGroup.Add(1)
+		go func() {
+			defer requestsWaitGroup.Done()
+			sshClient.runOSLSender(stopBroadcast)
+		}()
+	}
+
 	for newChannel := range channels {
 
 		if newChannel.ChannelType() != "direct-tcpip" {
@@ -921,9 +978,83 @@ func (sshClient *sshClient) runClient(
 		go sshClient.handleNewPortForwardChannel(newChannel)
 	}
 
+	close(stopBroadcast)
+
 	requestsWaitGroup.Wait()
 }
 
+func (sshClient *sshClient) runOSLSender(stopBroadcast <-chan struct{}) {
+
+	for {
+		// Await a signal that there are SLOKs to send
+		// TODO: use reflect.SelectCase, and optionally await timer here?
+		select {
+		case <-sshClient.signalIssueSLOKs:
+		case <-stopBroadcast:
+			return
+		}
+
+		retryDelay := SSH_SEND_OSL_INITIAL_RETRY_DELAY
+		for {
+			err := sshClient.sendOSLRequest()
+			if err == nil {
+				break
+			}
+			log.WithContextFields(LogFields{"error": err}).Warning("sendOSLRequest failed")
+
+			// If the request failed, retry after a delay (with exponential backoff)
+			// or when signaled that there are additional SLOKs to send
+			retryTimer := time.NewTimer(retryDelay)
+			select {
+			case <-retryTimer.C:
+			case <-sshClient.signalIssueSLOKs:
+			case <-stopBroadcast:
+				retryTimer.Stop()
+				return
+			}
+			retryTimer.Stop()
+			retryDelay *= SSH_SEND_OSL_RETRY_FACTOR
+		}
+	}
+}
+
+// sendOSLRequest will invoke osl.GetSeedPayload to issue SLOKs and
+// generate a payload, and send an OSL request to the client when
+// there are new SLOKs in the payload.
+func (sshClient *sshClient) sendOSLRequest() error {
+
+	seedPayload := sshClient.getOSLSeedPayload()
+
+	// Don't send when no SLOKs. This will happen when signalIssueSLOKs
+	// is received but no new SLOKs are issued.
+	if len(seedPayload.SLOKs) == 0 {
+		return nil
+	}
+
+	oslRequest := protocol.OSLRequest{
+		SeedPayload: seedPayload,
+	}
+	requestPayload, err := json.Marshal(oslRequest)
+	if err != nil {
+		return common.ContextError(err)
+	}
+
+	ok, _, err := sshClient.sshConn.SendRequest(
+		protocol.PSIPHON_API_OSL_REQUEST_NAME,
+		true,
+		requestPayload)
+	if err != nil {
+		return common.ContextError(err)
+	}
+	if !ok {
+		return common.ContextError(errors.New("client rejected request"))
+	}
+
+	sshClient.clearOSLSeedPayload()
+
+	return nil
+}
+
 func (sshClient *sshClient) rejectNewChannel(newChannel ssh.NewChannel, reason ssh.RejectionReason, logMessage string) {
 
 	// Note: Debug level, as logMessage may contain user traffic destination address information
@@ -975,22 +1106,28 @@ func (sshClient *sshClient) handleNewPortForwardChannel(newChannel ssh.NewChanne
 // handshake parameters are included in the session summary log recorded in
 // sshClient.stop().
 func (sshClient *sshClient) setHandshakeState(state handshakeState) error {
+
 	sshClient.Lock()
-	defer sshClient.Unlock()
+	completed := sshClient.handshakeState.completed
+	if !completed {
+		sshClient.handshakeState = state
+	}
+	sshClient.Unlock()
 
 	// Client must only perform one handshake
-	if sshClient.handshakeState.completed {
+	if completed {
 		return common.ContextError(errors.New("handshake already completed"))
 	}
 
-	sshClient.handshakeState = state
+	sshClient.setTrafficRules()
+	sshClient.setOSLConfig()
 
 	return nil
 }
 
 // setTrafficRules resets the client's traffic rules based on the latest server config
-// and client state. As sshClient.trafficRules may be reset by a concurrent goroutine,
-// trafficRules must only be accessed within the sshClient mutex.
+// and client properties. As sshClient.trafficRules may be reset by a concurrent
+// goroutine, trafficRules must only be accessed within the sshClient mutex.
 func (sshClient *sshClient) setTrafficRules() {
 	sshClient.Lock()
 	defer sshClient.Unlock()
@@ -999,11 +1136,76 @@ func (sshClient *sshClient) setTrafficRules() {
 		sshClient.tunnelProtocol, sshClient.geoIPData, sshClient.handshakeState)
 
 	if sshClient.throttledConn != nil {
+		// Any existing throttling state is reset.
 		sshClient.throttledConn.SetLimits(
 			sshClient.trafficRules.RateLimits.CommonRateLimits())
 	}
 }
 
+// setOSLConfig resets the client's OSL seed state based on the latest OSL config
+// As sshClient.oslClientSeedState may be reset by a concurrent goroutine,
+// oslClientSeedState must only be accessed within the sshClient mutex.
+func (sshClient *sshClient) setOSLConfig() {
+	sshClient.Lock()
+	defer sshClient.Unlock()
+
+	propagationChannelID, err := getStringRequestParam(
+		sshClient.handshakeState.apiParams, "propagation_channel_id")
+	if err != nil {
+		// This should not fail as long as client has sent valid handshake
+		return
+	}
+
+	// Two limitations when setOSLConfig() is invoked due to an
+	// OSL config hot reload:
+	//
+	// 1. any partial progress towards SLOKs is lost.
+	//
+	// 2. all existing osl.ClientSeedPortForwards for existing
+	//    port forwards will not send progress to the new client
+	//    seed state.
+
+	sshClient.oslClientSeedState = sshClient.sshServer.support.OSLConfig.NewClientSeedState(
+		sshClient.geoIPData.Country,
+		propagationChannelID,
+		sshClient.signalIssueSLOKs)
+}
+
+// newClientSeedPortForward will return nil when no seeding is
+// associated with the specified ipAddress.
+func (sshClient *sshClient) newClientSeedPortForward(ipAddress net.IP) *osl.ClientSeedPortForward {
+	sshClient.Lock()
+	defer sshClient.Unlock()
+
+	// Will not be initialized before handshake.
+	if sshClient.oslClientSeedState == nil {
+		return nil
+	}
+
+	return sshClient.oslClientSeedState.NewClientSeedPortForward(ipAddress)
+}
+
+// getOSLSeedPayload returns a payload containing all seeded SLOKs for
+// this client's session.
+func (sshClient *sshClient) getOSLSeedPayload() *osl.SeedPayload {
+	sshClient.Lock()
+	defer sshClient.Unlock()
+
+	// Will not be initialized before handshake.
+	if sshClient.oslClientSeedState == nil {
+		return &osl.SeedPayload{SLOKs: make([]*osl.SLOK, 0)}
+	}
+
+	return sshClient.oslClientSeedState.GetSeedPayload()
+}
+
+func (sshClient *sshClient) clearOSLSeedPayload() {
+	sshClient.Lock()
+	defer sshClient.Unlock()
+
+	sshClient.oslClientSeedState.ClearSeedPayload()
+}
+
 func (sshClient *sshClient) rateLimits() common.RateLimits {
 	sshClient.Lock()
 	defer sshClient.Unlock()
@@ -1032,7 +1234,7 @@ const (
 )
 
 func (sshClient *sshClient) isPortForwardPermitted(
-	portForwardType int, host string, port int) bool {
+	portForwardType int, remoteIP net.IP, port int) bool {
 
 	sshClient.Lock()
 	defer sshClient.Unlock()
@@ -1041,7 +1243,9 @@ func (sshClient *sshClient) isPortForwardPermitted(
 		return false
 	}
 
-	if common.Contains(SSH_DISALLOWED_PORT_FORWARD_HOSTS, host) {
+	// Disallow connection to loopback. This is a failsafe. The server
+	// should be run on a host with correctly configured firewall rules.
+	if remoteIP.IsLoopback() {
 		return false
 	}
 
@@ -1065,17 +1269,11 @@ func (sshClient *sshClient) isPortForwardPermitted(
 		}
 	}
 
-	// TODO: AllowSubnets won't match when host is a domain.
-	// Callers should resolve domain host before checking
-	// isPortForwardPermitted.
-
-	if ip := net.ParseIP(host); ip != nil {
-		for _, subnet := range sshClient.trafficRules.AllowSubnets {
-			// Note: ignoring error as config has been validated
-			_, network, _ := net.ParseCIDR(subnet)
-			if network.Contains(ip) {
-				return true
-			}
+	for _, subnet := range sshClient.trafficRules.AllowSubnets {
+		// Note: ignoring error as config has been validated
+		_, network, _ := net.ParseCIDR(subnet)
+		if network.Contains(remoteIP) {
+			return true
 		}
 	}
 
@@ -1179,8 +1377,48 @@ func (sshClient *sshClient) handleTCPChannel(
 		}
 	}
 
-	if !isWebServerPortForward && !sshClient.isPortForwardPermitted(
-		portForwardTypeTCP, hostToConnect, portToConnect) {
+	type lookupIPResult struct {
+		IP  net.IP
+		err error
+	}
+	lookupResultChannel := make(chan *lookupIPResult, 1)
+
+	go func() {
+		// TODO: explicit timeout for DNS resolution?
+		IPs, err := net.LookupIP(hostToConnect)
+		// TODO: shuffle list to try other IPs
+		// TODO: IPv6 support
+		var IP net.IP
+		for _, ip := range IPs {
+			if ip.To4() != nil {
+				IP = ip
+			}
+		}
+		if err == nil && IP == nil {
+			err = errors.New("no IP address")
+		}
+		lookupResultChannel <- &lookupIPResult{IP, err}
+	}()
+
+	var lookupResult *lookupIPResult
+	select {
+	case lookupResult = <-lookupResultChannel:
+	case <-sshClient.stopBroadcast:
+		// Note: may leave LookupIP in progress
+		return
+	}
+
+	if lookupResult.err != nil {
+		sshClient.rejectNewChannel(
+			newChannel, ssh.ConnectionFailed, fmt.Sprintf("LookupIP failed: %s", lookupResult.err))
+		return
+	}
+
+	if !isWebServerPortForward &&
+		!sshClient.isPortForwardPermitted(
+			portForwardTypeTCP,
+			lookupResult.IP,
+			portToConnect) {
 
 		sshClient.rejectNewChannel(
 			newChannel, ssh.Prohibited, "port forward not permitted")
@@ -1239,46 +1477,47 @@ func (sshClient *sshClient) handleTCPChannel(
 	// Dial the target remote address. This is done in a goroutine to
 	// ensure the shutdown signal is handled immediately.
 
-	remoteAddr := fmt.Sprintf("%s:%d", hostToConnect, portToConnect)
+	remoteAddr := net.JoinHostPort(lookupResult.IP.String(), strconv.Itoa(portToConnect))
 
 	log.WithContextFields(LogFields{"remoteAddr": remoteAddr}).Debug("dialing")
 
-	type dialTcpResult struct {
+	type dialTCPResult struct {
 		conn net.Conn
 		err  error
 	}
+	dialResultChannel := make(chan *dialTCPResult, 1)
 
-	resultChannel := make(chan *dialTcpResult, 1)
 	dialStartTime := monotime.Now()
 
 	go func() {
 		// TODO: on EADDRNOTAVAIL, temporarily suspend new clients
-		// TODO: IPv6 support
 		conn, err := net.DialTimeout(
-			"tcp4", remoteAddr, SSH_TCP_PORT_FORWARD_DIAL_TIMEOUT)
-		resultChannel <- &dialTcpResult{conn, err}
+			"tcp", remoteAddr, SSH_TCP_PORT_FORWARD_DIAL_TIMEOUT)
+		dialResultChannel <- &dialTCPResult{conn, err}
 	}()
 
-	var result *dialTcpResult
+	var dialResult *dialTCPResult
 	select {
-	case result = <-resultChannel:
+	case dialResult = <-dialResultChannel:
 	case <-sshClient.stopBroadcast:
-		// Note: may leave dial in progress (TODO: use DialContext to cancel)
+		// Note: may leave Dial in progress
+		// TODO: use net.Dialer.DialContext to be able to cancel
 		return
 	}
 
 	sshClient.updateQualityMetrics(
-		result.err == nil, monotime.Since(dialStartTime))
+		dialResult.err == nil, monotime.Since(dialStartTime))
 
-	if result.err != nil {
-		sshClient.rejectNewChannel(newChannel, ssh.ConnectionFailed, result.err.Error())
+	if dialResult.err != nil {
+		sshClient.rejectNewChannel(
+			newChannel, ssh.ConnectionFailed, fmt.Sprintf("DialTimeout failed: %s", dialResult.err))
 		return
 	}
 
 	// The upstream TCP port forward connection has been established. Schedule
 	// some cleanup and notify the SSH client that the channel is accepted.
 
-	fwdConn := result.conn
+	fwdConn := dialResult.conn
 	defer fwdConn.Close()
 
 	fwdChannel, requests, err := newChannel.Accept()
@@ -1297,12 +1536,20 @@ func (sshClient *sshClient) handleTCPChannel(
 	lruEntry := sshClient.tcpPortForwardLRU.Add(fwdConn)
 	defer lruEntry.Remove()
 
+	// Ensure nil interface if newClientSeedPortForward returns nil
+	var updater common.ActivityUpdater
+	seedUpdater := sshClient.newClientSeedPortForward(lookupResult.IP)
+	if seedUpdater != nil {
+		updater = seedUpdater
+	}
+
 	fwdConn, err = common.NewActivityMonitoredConn(
 		fwdConn,
 		sshClient.idleTCPPortForwardTimeout(),
 		true,
+		updater,
 		lruEntry)
-	if result.err != nil {
+	if err != nil {
 		log.WithContextFields(LogFields{"error": err}).Error("NewActivityMonitoredConn failed")
 		return
 	}

+ 9 - 1
psiphon/server/udp.go

@@ -163,7 +163,7 @@ func (mux *udpPortForwardMultiplexer) run() {
 			}
 
 			if !mux.sshClient.isPortForwardPermitted(
-				portForwardTypeUDP, dialIP.String(), int(message.remotePort)) {
+				portForwardTypeUDP, dialIP, int(message.remotePort)) {
 				// The udpgw protocol has no error response, so
 				// we just discard the message and read another.
 				continue
@@ -211,10 +211,18 @@ func (mux *udpPortForwardMultiplexer) run() {
 
 			lruEntry := mux.portForwardLRU.Add(udpConn)
 
+			// Ensure nil interface if newClientSeedPortForward returns nil
+			var updater common.ActivityUpdater
+			seedUpdater := mux.sshClient.newClientSeedPortForward(dialIP)
+			if seedUpdater != nil {
+				updater = seedUpdater
+			}
+
 			conn, err := common.NewActivityMonitoredConn(
 				udpConn,
 				mux.sshClient.idleUDPPortForwardTimeout(),
 				true,
+				updater,
 				lruEntry)
 			if err != nil {
 				lruEntry.Remove()

+ 12 - 9
psiphon/server/webServer.go

@@ -31,6 +31,7 @@ import (
 	"time"
 
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 )
 
 const WEB_SERVER_IO_TIMEOUT = 10 * time.Second
@@ -234,9 +235,9 @@ func (webServer *webServer) handshakeHandler(w http.ResponseWriter, r *http.Requ
 	if err == nil {
 		responsePayload, err = dispatchAPIRequestHandler(
 			webServer.support,
-			common.PSIPHON_WEB_API_PROTOCOL,
+			protocol.PSIPHON_WEB_API_PROTOCOL,
 			webServer.lookupGeoIPData(params),
-			common.PSIPHON_API_HANDSHAKE_REQUEST_NAME,
+			protocol.PSIPHON_API_HANDSHAKE_REQUEST_NAME,
 			params)
 	}
 
@@ -264,9 +265,9 @@ func (webServer *webServer) connectedHandler(w http.ResponseWriter, r *http.Requ
 	if err == nil {
 		responsePayload, err = dispatchAPIRequestHandler(
 			webServer.support,
-			common.PSIPHON_WEB_API_PROTOCOL,
+			protocol.PSIPHON_WEB_API_PROTOCOL,
 			webServer.lookupGeoIPData(params),
-			common.PSIPHON_API_CONNECTED_REQUEST_NAME,
+			protocol.PSIPHON_API_CONNECTED_REQUEST_NAME,
 			params)
 	}
 
@@ -284,12 +285,13 @@ func (webServer *webServer) statusHandler(w http.ResponseWriter, r *http.Request
 
 	params, err := convertHTTPRequestToAPIRequest(w, r, "statusData")
 
+	var responsePayload []byte
 	if err == nil {
-		_, err = dispatchAPIRequestHandler(
+		responsePayload, err = dispatchAPIRequestHandler(
 			webServer.support,
-			common.PSIPHON_WEB_API_PROTOCOL,
+			protocol.PSIPHON_WEB_API_PROTOCOL,
 			webServer.lookupGeoIPData(params),
-			common.PSIPHON_API_STATUS_REQUEST_NAME,
+			protocol.PSIPHON_API_STATUS_REQUEST_NAME,
 			params)
 	}
 
@@ -300,6 +302,7 @@ func (webServer *webServer) statusHandler(w http.ResponseWriter, r *http.Request
 	}
 
 	w.WriteHeader(http.StatusOK)
+	w.Write(responsePayload)
 }
 
 func (webServer *webServer) clientVerificationHandler(w http.ResponseWriter, r *http.Request) {
@@ -310,9 +313,9 @@ func (webServer *webServer) clientVerificationHandler(w http.ResponseWriter, r *
 	if err == nil {
 		responsePayload, err = dispatchAPIRequestHandler(
 			webServer.support,
-			common.PSIPHON_WEB_API_PROTOCOL,
+			protocol.PSIPHON_WEB_API_PROTOCOL,
 			webServer.lookupGeoIPData(params),
-			common.PSIPHON_API_CLIENT_VERIFICATION_REQUEST_NAME,
+			protocol.PSIPHON_API_CLIENT_VERIFICATION_REQUEST_NAME,
 			params)
 	}
 

+ 120 - 38
psiphon/serverApi.go

@@ -36,6 +36,7 @@ import (
 	"time"
 
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/transferstats"
 )
 
@@ -72,7 +73,7 @@ var nextTunnelNumber int64
 // Controller (e.g., the user's commanded start and stop) and we measure this
 // duration as well as the duration of each tunnel within the session.
 func MakeSessionId() (sessionId string, err error) {
-	randomId, err := common.MakeSecureRandomBytes(common.PSIPHON_API_CLIENT_SESSION_ID_LENGTH)
+	randomId, err := common.MakeSecureRandomBytes(protocol.PSIPHON_API_CLIENT_SESSION_ID_LENGTH)
 	if err != nil {
 		return "", common.ContextError(err)
 	}
@@ -88,7 +89,7 @@ func NewServerContext(tunnel *Tunnel, sessionId string) (*ServerContext, error)
 	// accessing the Psiphon API via the web service.
 	var psiphonHttpsClient *http.Client
 	if !tunnel.serverEntry.SupportsSSHAPIRequests() ||
-		tunnel.config.TargetApiProtocol == common.PSIPHON_WEB_API_PROTOCOL {
+		tunnel.config.TargetApiProtocol == protocol.PSIPHON_WEB_API_PROTOCOL {
 
 		var err error
 		psiphonHttpsClient, err = makePsiphonHttpsClient(tunnel)
@@ -142,7 +143,7 @@ func (serverContext *ServerContext) doHandshakeRequest() error {
 		}
 
 		response, err = serverContext.tunnel.SendAPIRequest(
-			common.PSIPHON_API_HANDSHAKE_REQUEST_NAME, request)
+			protocol.PSIPHON_API_HANDSHAKE_REQUEST_NAME, request)
 		if err != nil {
 			return common.ContextError(err)
 		}
@@ -173,7 +174,7 @@ func (serverContext *ServerContext) doHandshakeRequest() error {
 	// - 'preemptive_reconnect_lifetime_milliseconds' is unused and ignored
 	// - 'ssh_session_id' is ignored; client session ID is used instead
 
-	var handshakeResponse common.HandshakeResponse
+	var handshakeResponse protocol.HandshakeResponse
 	err := json.Unmarshal(response, &handshakeResponse)
 	if err != nil {
 		return common.ContextError(err)
@@ -182,24 +183,25 @@ func (serverContext *ServerContext) doHandshakeRequest() error {
 	serverContext.clientRegion = handshakeResponse.ClientRegion
 	NoticeClientRegion(serverContext.clientRegion)
 
-	var decodedServerEntries []*ServerEntry
+	var decodedServerEntries []*protocol.ServerEntry
 
 	// Store discovered server entries
 	// We use the server's time, as it's available here, for the server entry
 	// timestamp since this is more reliable than the client time.
 	for _, encodedServerEntry := range handshakeResponse.EncodedServerList {
 
-		serverEntry, err := DecodeServerEntry(
+		serverEntry, err := protocol.DecodeServerEntry(
 			encodedServerEntry,
 			common.TruncateTimestampToHour(handshakeResponse.ServerTimestamp),
-			common.SERVER_ENTRY_SOURCE_DISCOVERY)
+			protocol.SERVER_ENTRY_SOURCE_DISCOVERY)
 		if err != nil {
 			return common.ContextError(err)
 		}
 
-		err = ValidateServerEntry(serverEntry)
+		err = protocol.ValidateServerEntry(serverEntry)
 		if err != nil {
 			// Skip this entry and continue with the next one
+			NoticeAlert("invalid server entry: %s", err)
 			continue
 		}
 
@@ -251,7 +253,6 @@ func (serverContext *ServerContext) DoConnectedRequest() error {
 
 	params := serverContext.getBaseParams()
 
-	const DATA_STORE_LAST_CONNECTED_KEY = "lastConnected"
 	lastConnected, err := GetKeyValue(DATA_STORE_LAST_CONNECTED_KEY)
 	if err != nil {
 		return common.ContextError(err)
@@ -271,7 +272,7 @@ func (serverContext *ServerContext) DoConnectedRequest() error {
 		}
 
 		response, err = serverContext.tunnel.SendAPIRequest(
-			common.PSIPHON_API_CONNECTED_REQUEST_NAME, request)
+			protocol.PSIPHON_API_CONNECTED_REQUEST_NAME, request)
 		if err != nil {
 			return common.ContextError(err)
 		}
@@ -287,7 +288,7 @@ func (serverContext *ServerContext) DoConnectedRequest() error {
 		}
 	}
 
-	var connectedResponse common.ConnectedResponse
+	var connectedResponse protocol.ConnectedResponse
 	err = json.Unmarshal(response, &connectedResponse)
 	if err != nil {
 		return common.ContextError(err)
@@ -298,6 +299,7 @@ func (serverContext *ServerContext) DoConnectedRequest() error {
 	if err != nil {
 		return common.ContextError(err)
 	}
+
 	return nil
 }
 
@@ -330,7 +332,7 @@ func (serverContext *ServerContext) DoStatusRequest(tunnel *Tunnel) error {
 
 		if err == nil {
 			_, err = serverContext.tunnel.SendAPIRequest(
-				common.PSIPHON_API_STATUS_REQUEST_NAME, request)
+				protocol.PSIPHON_API_STATUS_REQUEST_NAME, request)
 		}
 
 	} else {
@@ -368,7 +370,7 @@ func (serverContext *ServerContext) getStatusParams(isTunneled bool) requestJSON
 
 	randomPadding, err := common.MakeSecureRandomPadding(0, PSIPHON_API_STATUS_REQUEST_PADDING_MAX_BYTES)
 	if err != nil {
-		NoticeAlert("MakeSecureRandomPadding failed: %s", err)
+		NoticeAlert("MakeSecureRandomPadding failed: %s", common.ContextError(err))
 		// Proceed without random padding
 		randomPadding = make([]byte, 0)
 	}
@@ -395,25 +397,25 @@ func (serverContext *ServerContext) getStatusParams(isTunneled bool) requestJSON
 // either "clear" or "put back" status request payload data depending
 // on whether or not the request succeeded.
 type statusRequestPayloadInfo struct {
-	serverId      string
-	transferStats *transferstats.AccumulatedStats
-	tunnelStats   [][]byte
+	serverId        string
+	transferStats   *transferstats.AccumulatedStats
+	persistentStats map[string][][]byte
 }
 
 func makeStatusRequestPayload(
 	serverId string) ([]byte, *statusRequestPayloadInfo, error) {
 
 	transferStats := transferstats.TakeOutStatsForServer(serverId)
-	tunnelStats, err := TakeOutUnreportedTunnelStats(
-		PSIPHON_API_TUNNEL_STATS_MAX_COUNT)
+	persistentStats, err := TakeOutUnreportedPersistentStats(
+		PSIPHON_API_PERSISTENT_STATS_MAX_COUNT)
 	if err != nil {
 		NoticeAlert(
-			"TakeOutUnreportedTunnelStats failed: %s", common.ContextError(err))
-		tunnelStats = nil
+			"TakeOutUnreportedPersistentStats failed: %s", common.ContextError(err))
+		persistentStats = nil
 		// Proceed with transferStats only
 	}
 	payloadInfo := &statusRequestPayloadInfo{
-		serverId, transferStats, tunnelStats}
+		serverId, transferStats, persistentStats}
 
 	payload := make(map[string]interface{})
 
@@ -425,12 +427,19 @@ func makeStatusRequestPayload(
 	payload["page_views"] = make([]string, 0)
 	payload["https_requests"] = make([]string, 0)
 
-	// Tunnel stats records are already in JSON format
-	jsonTunnelStats := make([]json.RawMessage, len(tunnelStats))
-	for i, tunnelStatsRecord := range tunnelStats {
-		jsonTunnelStats[i] = json.RawMessage(tunnelStatsRecord)
+	persistentStatPayloadNames := make(map[string]string)
+	persistentStatPayloadNames[PERSISTENT_STAT_TYPE_TUNNEL] = "tunnel_stats"
+	persistentStatPayloadNames[PERSISTENT_STAT_TYPE_REMOTE_SERVER_LIST] = "remote_server_list_stats"
+
+	for statType, stats := range persistentStats {
+
+		// Persistent stats records are already in JSON format
+		jsonStats := make([]json.RawMessage, len(stats))
+		for i, stat := range stats {
+			jsonStats[i] = json.RawMessage(stat)
+		}
+		payload[persistentStatPayloadNames[statType]] = jsonStats
 	}
-	payload["tunnel_stats"] = jsonTunnelStats
 
 	jsonPayload, err := json.Marshal(payload)
 	if err != nil {
@@ -447,21 +456,21 @@ func makeStatusRequestPayload(
 func putBackStatusRequestPayload(payloadInfo *statusRequestPayloadInfo) {
 	transferstats.PutBackStatsForServer(
 		payloadInfo.serverId, payloadInfo.transferStats)
-	err := PutBackUnreportedTunnelStats(payloadInfo.tunnelStats)
+	err := PutBackUnreportedPersistentStats(payloadInfo.persistentStats)
 	if err != nil {
-		// These tunnel stats records won't be resent under after a
+		// These persistent stats records won't be resent until after a
 		// datastore re-initialization.
 		NoticeAlert(
-			"PutBackUnreportedTunnelStats failed: %s", common.ContextError(err))
+			"PutBackUnreportedPersistentStats failed: %s", common.ContextError(err))
 	}
 }
 
 func confirmStatusRequestPayload(payloadInfo *statusRequestPayloadInfo) {
-	err := ClearReportedTunnelStats(payloadInfo.tunnelStats)
+	err := ClearReportedPersistentStats(payloadInfo.persistentStats)
 	if err != nil {
-		// These tunnel stats records may be resent.
+		// These persistent stats records may be resent.
 		NoticeAlert(
-			"ClearReportedTunnelStats failed: %s", common.ContextError(err))
+			"ClearReportedPersistentStats failed: %s", common.ContextError(err))
 	}
 }
 
@@ -550,7 +559,7 @@ func (serverContext *ServerContext) doUntunneledStatusRequest(
 	return nil
 }
 
-// RecordTunnelStats records a tunnel duration and bytes
+// RecordTunnelStat records a tunnel duration and bytes
 // sent and received for subsequent reporting and quality
 // analysis.
 //
@@ -593,7 +602,7 @@ func (serverContext *ServerContext) doUntunneledStatusRequest(
 // Duplicate reporting may also occur when a server receives and
 // processes a status request but the client fails to receive
 // the response.
-func RecordTunnelStats(
+func RecordTunnelStat(
 	sessionId string,
 	tunnelNumber int64,
 	tunnelServerIpAddress string,
@@ -603,7 +612,7 @@ func RecordTunnelStats(
 	totalBytesSent int64,
 	totalBytesReceived int64) error {
 
-	tunnelStats := struct {
+	tunnelStat := struct {
 		SessionId                string `json:"session_id"`
 		TunnelNumber             int64  `json:"tunnel_number"`
 		TunnelServerIpAddress    string `json:"tunnel_server_ip_address"`
@@ -623,12 +632,38 @@ func RecordTunnelStats(
 		totalBytesReceived,
 	}
 
-	tunnelStatsJson, err := json.Marshal(tunnelStats)
+	tunnelStatJson, err := json.Marshal(tunnelStat)
+	if err != nil {
+		return common.ContextError(err)
+	}
+
+	return StorePersistentStat(
+		PERSISTENT_STAT_TYPE_TUNNEL, tunnelStatJson)
+}
+
+// RecordRemoteServerListStat records a completed common or OSL
+// remote server list resource download. These stats use the same
+// persist-until-reported mechanism described in RecordTunnelStats.
+func RecordRemoteServerListStat(
+	url, etag string) error {
+
+	remoteServerListStat := struct {
+		ClientDownloadTimestamp string `json:"client_download_timestamp"`
+		URL                     string `json:"url"`
+		ETag                    string `json:"etag"`
+	}{
+		common.TruncateTimestampToHour(common.GetCurrentTimestamp()),
+		url,
+		etag,
+	}
+
+	remoteServerListStatJson, err := json.Marshal(remoteServerListStat)
 	if err != nil {
 		return common.ContextError(err)
 	}
 
-	return StoreTunnelStats(tunnelStatsJson)
+	return StorePersistentStat(
+		PERSISTENT_STAT_TYPE_REMOTE_SERVER_LIST, remoteServerListStatJson)
 }
 
 // DoClientVerificationRequest performs the "client_verification" API
@@ -660,7 +695,7 @@ func (serverContext *ServerContext) DoClientVerificationRequest(
 		}
 
 		response, err = serverContext.tunnel.SendAPIRequest(
-			common.PSIPHON_API_CLIENT_VERIFICATION_REQUEST_NAME, request)
+			protocol.PSIPHON_API_CLIENT_VERIFICATION_REQUEST_NAME, request)
 		if err != nil {
 			return common.ContextError(err)
 		}
@@ -903,3 +938,50 @@ func makePsiphonHttpsClient(tunnel *Tunnel) (httpsClient *http.Client, err error
 		Timeout:   timeout,
 	}, nil
 }
+
+func HandleServerRequest(
+	tunnelOwner TunnelOwner, tunnel *Tunnel, name string, payload []byte) error {
+
+	switch name {
+	case protocol.PSIPHON_API_OSL_REQUEST_NAME:
+		return HandleOSLRequest(tunnelOwner, tunnel, payload)
+	}
+
+	return common.ContextError(fmt.Errorf("invalid request name: %s", name))
+}
+
+func HandleOSLRequest(
+	tunnelOwner TunnelOwner, tunnel *Tunnel, payload []byte) error {
+
+	var oslRequest protocol.OSLRequest
+	err := json.Unmarshal(payload, &oslRequest)
+	if err != nil {
+		return common.ContextError(err)
+	}
+
+	if oslRequest.ClearLocalSLOKs {
+		DeleteSLOKs()
+	}
+
+	seededNewSLOK := false
+
+	for _, slok := range oslRequest.SeedPayload.SLOKs {
+		duplicate, err := SetSLOK(slok.ID, slok.Key)
+		if err != nil {
+			// TODO: return error to trigger retry?
+			NoticeAlert("SetSLOK failed: %s", common.ContextError(err))
+		} else if !duplicate {
+			seededNewSLOK = true
+		}
+
+		if tunnel.config.EmitSLOKs {
+			NoticeSLOKSeeded(base64.StdEncoding.EncodeToString(slok.ID), duplicate)
+		}
+	}
+
+	if seededNewSLOK {
+		tunnelOwner.SignalSeededNewSLOK()
+	}
+
+	return nil
+}

+ 4 - 112
psiphon/splitTunnel.go

@@ -20,18 +20,14 @@
 package psiphon
 
 import (
-	"bufio"
 	"bytes"
 	"compress/zlib"
 	"encoding/base64"
-	"encoding/binary"
 	"errors"
 	"fmt"
 	"io/ioutil"
 	"net"
 	"net/http"
-	"sort"
-	"strings"
 	"sync"
 	"time"
 
@@ -79,7 +75,7 @@ type SplitTunnelClassifier struct {
 	fetchRoutesWaitGroup     *sync.WaitGroup
 	isRoutesSet              bool
 	cache                    map[string]*classification
-	routes                   networkList
+	routes                   common.SubnetLookup
 }
 
 type classification struct {
@@ -280,7 +276,7 @@ func (classifier *SplitTunnelClassifier) getRoutes(tunnel *Tunnel) (routesData [
 
 	var encodedRoutesData string
 	if !useCachedRoutes {
-		encodedRoutesData, err = ReadAuthenticatedDataPackage(
+		encodedRoutesData, err = common.ReadAuthenticatedDataPackage(
 			routesDataPackage, classifier.routesSignaturePublicKey)
 		if err != nil {
 			NoticeAlert("failed to read split tunnel routes package: %s", common.ContextError(err))
@@ -347,7 +343,7 @@ func (classifier *SplitTunnelClassifier) installRoutes(routesData []byte) (err e
 	classifier.mutex.Lock()
 	defer classifier.mutex.Unlock()
 
-	classifier.routes, err = NewNetworkList(routesData)
+	classifier.routes, err = common.NewSubnetLookupFromRoutes(routesData)
 	if err != nil {
 		return common.ContextError(err)
 	}
@@ -362,111 +358,7 @@ func (classifier *SplitTunnelClassifier) ipAddressInRoutes(ipAddr net.IP) bool {
 	classifier.mutex.RLock()
 	defer classifier.mutex.RUnlock()
 
-	return classifier.routes.ContainsIpAddress(ipAddr)
-}
-
-// networkList is a sorted list of network ranges. It's used to
-// lookup candidate IP addresses for split tunnel classification.
-// networkList implements Sort.Interface.
-type networkList []net.IPNet
-
-// NewNetworkList parses text routes data and produces a networkList
-// for fast ContainsIpAddress lookup.
-// The input format is expected to be text lines where each line
-// is, e.g., "1.2.3.0\t255.255.255.0\n"
-func NewNetworkList(routesData []byte) (networkList, error) {
-
-	// Parse text routes data
-	var list networkList
-	scanner := bufio.NewScanner(bytes.NewReader(routesData))
-	scanner.Split(bufio.ScanLines)
-	for scanner.Scan() {
-		s := strings.Split(scanner.Text(), "\t")
-		if len(s) != 2 {
-			continue
-		}
-
-		ip := parseIPv4(s[0])
-		mask := parseIPv4Mask(s[1])
-		if ip == nil || mask == nil {
-			continue
-		}
-
-		list = append(list, net.IPNet{IP: ip.Mask(mask), Mask: mask})
-	}
-	if len(list) == 0 {
-		return nil, common.ContextError(errors.New("Routes data contains no networks"))
-	}
-
-	// Sort data for fast lookup
-	sort.Sort(list)
-
-	return list, nil
-}
-
-func parseIPv4(s string) net.IP {
-	ip := net.ParseIP(s)
-	if ip == nil {
-		return nil
-	}
-	return ip.To4()
-}
-
-func parseIPv4Mask(s string) net.IPMask {
-	ip := parseIPv4(s)
-	if ip == nil {
-		return nil
-	}
-	mask := net.IPMask(ip)
-	if bits, size := mask.Size(); bits == 0 || size == 0 {
-		return nil
-	}
-	return mask
-}
-
-// Len implementes Sort.Interface
-func (list networkList) Len() int {
-	return len(list)
-}
-
-// Swap implementes Sort.Interface
-func (list networkList) Swap(i, j int) {
-	list[i], list[j] = list[j], list[i]
-}
-
-// Less implementes Sort.Interface
-func (list networkList) Less(i, j int) bool {
-	return binary.BigEndian.Uint32(list[i].IP) < binary.BigEndian.Uint32(list[j].IP)
-}
-
-// ContainsIpAddress performs a binary search on the networkList to
-// find a network containing the candidate IP address.
-func (list networkList) ContainsIpAddress(addr net.IP) bool {
-
-	// Search criteria
-	//
-	// The following conditions are satisfied when address_IP is in the network:
-	// 1. address_IP ^ network_mask == network_IP ^ network_mask
-	// 2. address_IP >= network_IP.
-	// We are also assuming that network ranges do not overlap.
-	//
-	// For an ascending array of networks, the sort.Search returns the smallest
-	// index idx for which condition network_IP > address_IP is satisfied, so we
-	// are checking whether or not adrress_IP belongs to the network[idx-1].
-
-	// Edge conditions check
-	//
-	// idx == 0 means that address_IP is  lesser than the first (smallest) network_IP
-	// thus never satisfies search condition 2.
-	// idx == array_length means that address_IP is larger than the last (largest)
-	// network_IP so we need to check the last element for condition 1.
-
-	addrValue := binary.BigEndian.Uint32(addr.To4())
-	index := sort.Search(len(list), func(i int) bool {
-		networkValue := binary.BigEndian.Uint32(list[i].IP)
-		return networkValue > addrValue
-	})
-	return index > 0 && list[index-1].IP.Equal(addr.Mask(list[index-1].Mask))
+	return classifier.routes.ContainsIPAddress(ipAddr)
 }
 
 // tunneledLookupIP resolves a split tunnel candidate hostname with a tunneled

+ 0 - 38
psiphon/splitTunnel_test.go

@@ -1,38 +0,0 @@
-package psiphon
-
-import (
-	"encoding/binary"
-	"io/ioutil"
-	"math/rand"
-	"net"
-	"testing"
-)
-
-var netList networkList
-var isLocalAddr bool
-
-func Benchmark_NewNetworkList(b *testing.B) {
-
-	routesData, err := ioutil.ReadFile("test_routes.dat")
-	if err != nil {
-		b.Skipf("can't load test routes file: %s", err)
-	}
-
-	for n := 0; n < b.N; n++ {
-		netList, _ = NewNetworkList(routesData)
-	}
-}
-
-func Benchmark_containsRandomAddr(b *testing.B) {
-
-	if netList == nil {
-		b.Skipf("no test routes file")
-	}
-
-	rand.Seed(0)
-	for n := 0; n < b.N; n++ {
-		ip := make([]byte, 4)
-		binary.BigEndian.PutUint32(ip, rand.Uint32())
-		isLocalAddr = netList.ContainsIpAddress(net.IP(ip))
-	}
-}

+ 88 - 66
psiphon/tunnel.go

@@ -36,6 +36,7 @@ import (
 	"github.com/Psiphon-Inc/goarista/monotime"
 	regen "github.com/Psiphon-Inc/goregen"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/transferstats"
 )
 
@@ -59,6 +60,7 @@ type Tunneler interface {
 // owner when it has failed. The owner may, as in the case of the Controller,
 // remove the tunnel from its list of active tunnels.
 type TunnelOwner interface {
+	SignalSeededNewSLOK()
 	SignalTunnelFailure(tunnel *Tunnel)
 }
 
@@ -71,11 +73,12 @@ type Tunnel struct {
 	untunneledDialConfig         *DialConfig
 	isDiscarded                  bool
 	isClosed                     bool
-	serverEntry                  *ServerEntry
+	serverEntry                  *protocol.ServerEntry
 	serverContext                *ServerContext
 	protocol                     string
 	conn                         *common.ActivityMonitoredConn
 	sshClient                    *ssh.Client
+	sshServerRequests            <-chan *ssh.Request
 	operateWaitGroup             *sync.WaitGroup
 	shutdownOperateBroadcast     chan struct{}
 	signalPortForwardFailure     chan struct{}
@@ -117,7 +120,7 @@ func EstablishTunnel(
 	untunneledDialConfig *DialConfig,
 	sessionId string,
 	pendingConns *common.Conns,
-	serverEntry *ServerEntry,
+	serverEntry *protocol.ServerEntry,
 	adjustedEstablishStartTime monotime.Time,
 	tunnelOwner TunnelOwner) (tunnel *Tunnel, err error) {
 
@@ -128,7 +131,7 @@ func EstablishTunnel(
 
 	// Build transport layers and establish SSH connection. Note that
 	// dialConn and monitoredConn are the same network connection.
-	dialConn, monitoredConn, sshClient, dialStats, err := dialSsh(
+	dialResult, err := dialSsh(
 		config, pendingConns, serverEntry, selectedProtocol, sessionId)
 	if err != nil {
 		return nil, common.ContextError(err)
@@ -137,9 +140,9 @@ func EstablishTunnel(
 	// Cleanup on error
 	defer func() {
 		if err != nil {
-			sshClient.Close()
-			monitoredConn.Close()
-			pendingConns.Remove(dialConn)
+			dialResult.sshClient.Close()
+			dialResult.monitoredConn.Close()
+			pendingConns.Remove(dialResult.dialConn)
 		}
 	}()
 
@@ -151,14 +154,15 @@ func EstablishTunnel(
 		isClosed:                 false,
 		serverEntry:              serverEntry,
 		protocol:                 selectedProtocol,
-		conn:                     monitoredConn,
-		sshClient:                sshClient,
+		conn:                     dialResult.monitoredConn,
+		sshClient:                dialResult.sshClient,
+		sshServerRequests:        dialResult.sshRequests,
 		operateWaitGroup:         new(sync.WaitGroup),
 		shutdownOperateBroadcast: make(chan struct{}),
 		// A buffer allows at least one signal to be sent even when the receiver is
 		// not listening. Senders should not block.
 		signalPortForwardFailure: make(chan struct{}, 1),
-		dialStats:                dialStats,
+		dialStats:                dialResult.dialStats,
 		// Buffer allows SetClientVerificationPayload to submit one new payload
 		// without blocking or dropping it.
 		newClientVerificationPayload: make(chan string, 1),
@@ -190,7 +194,7 @@ func EstablishTunnel(
 	tunnel.establishedTime = monotime.Now()
 
 	// Now that network operations are complete, cancel interruptibility
-	pendingConns.Remove(dialConn)
+	pendingConns.Remove(dialResult.dialConn)
 
 	// Spawn the operateTunnel goroutine, which monitors the tunnel and handles periodic stats updates.
 	tunnel.operateWaitGroup.Add(1)
@@ -201,7 +205,7 @@ func EstablishTunnel(
 
 // Close stops operating the tunnel and closes the underlying connection.
 // Supports multiple and/or concurrent calls to Close().
-// When isDicarded is set, operateTunnel will not attempt to send final
+// When isDiscarded is set, operateTunnel will not attempt to send final
 // status requests.
 func (tunnel *Tunnel) Close(isDiscarded bool) {
 
@@ -230,13 +234,6 @@ func (tunnel *Tunnel) Close(isDiscarded bool) {
 	}
 }
 
-// IsClosed returns the tunnel's closed status.
-func (tunnel *Tunnel) IsClosed() bool {
-	tunnel.mutex.Lock()
-	defer tunnel.mutex.Unlock()
-	return tunnel.isClosed
-}
-
 // IsDiscarded returns the tunnel's discarded flag.
 func (tunnel *Tunnel) IsDiscarded() bool {
 	tunnel.mutex.Lock()
@@ -251,10 +248,6 @@ func (tunnel *Tunnel) IsDiscarded() bool {
 func (tunnel *Tunnel) SendAPIRequest(
 	name string, requestPayload []byte) ([]byte, error) {
 
-	if tunnel.IsClosed() {
-		return nil, common.ContextError(errors.New("tunnel is closed"))
-	}
-
 	ok, responsePayload, err := tunnel.sshClient.Conn.SendRequest(
 		name, true, requestPayload)
 
@@ -274,10 +267,6 @@ func (tunnel *Tunnel) SendAPIRequest(
 func (tunnel *Tunnel) Dial(
 	remoteAddr string, alwaysTunnel bool, downstreamConn net.Conn) (conn net.Conn, err error) {
 
-	if tunnel.IsClosed() {
-		return nil, common.ContextError(errors.New("tunnel is closed"))
-	}
-
 	type tunnelDialResult struct {
 		sshPortForwardConn net.Conn
 		err                error
@@ -383,7 +372,9 @@ func (conn *TunneledConn) Close() error {
 }
 
 // selectProtocol is a helper that picks the tunnel protocol
-func selectProtocol(config *Config, serverEntry *ServerEntry) (selectedProtocol string, err error) {
+func selectProtocol(
+	config *Config, serverEntry *protocol.ServerEntry) (selectedProtocol string, err error) {
+
 	// TODO: properly handle protocols (e.g. FRONTED-MEEK-OSSH) vs. capabilities (e.g., {FRONTED-MEEK, OSSH})
 	// for now, the code is simply assuming that MEEK capabilities imply OSSH capability.
 	if config.TunnelProtocol != "" {
@@ -415,7 +406,7 @@ func selectProtocol(config *Config, serverEntry *ServerEntry) (selectedProtocol
 // selectFrontingParameters is a helper which selects/generates meek fronting
 // parameters where the server entry provides multiple options or patterns.
 func selectFrontingParameters(
-	serverEntry *ServerEntry) (frontingAddress, frontingHost string, err error) {
+	serverEntry *protocol.ServerEntry) (frontingAddress, frontingHost string, err error) {
 
 	if len(serverEntry.MeekFrontingAddressesRegex) > 0 {
 
@@ -458,7 +449,7 @@ func selectFrontingParameters(
 // selected meek tunnel protocol.
 func initMeekConfig(
 	config *Config,
-	serverEntry *ServerEntry,
+	serverEntry *protocol.ServerEntry,
 	selectedProtocol,
 	sessionId string) (*MeekConfig, error) {
 
@@ -471,7 +462,7 @@ func initMeekConfig(
 	transformedHostName := false
 
 	switch selectedProtocol {
-	case common.TUNNEL_PROTOCOL_FRONTED_MEEK:
+	case protocol.TUNNEL_PROTOCOL_FRONTED_MEEK:
 		frontingAddress, frontingHost, err := selectFrontingParameters(serverEntry)
 		if err != nil {
 			return nil, common.ContextError(err)
@@ -484,7 +475,7 @@ func initMeekConfig(
 		}
 		hostHeader = frontingHost
 
-	case common.TUNNEL_PROTOCOL_FRONTED_MEEK_HTTP:
+	case protocol.TUNNEL_PROTOCOL_FRONTED_MEEK_HTTP:
 		frontingAddress, frontingHost, err := selectFrontingParameters(serverEntry)
 		if err != nil {
 			return nil, common.ContextError(err)
@@ -492,7 +483,7 @@ func initMeekConfig(
 		dialAddress = fmt.Sprintf("%s:80", frontingAddress)
 		hostHeader = frontingHost
 
-	case common.TUNNEL_PROTOCOL_UNFRONTED_MEEK:
+	case protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK:
 		dialAddress = fmt.Sprintf("%s:%d", serverEntry.IpAddress, serverEntry.MeekServerPort)
 		hostname := serverEntry.IpAddress
 		hostname, transformedHostName = config.HostNameTransformer.TransformHostName(hostname)
@@ -502,7 +493,7 @@ func initMeekConfig(
 			hostHeader = fmt.Sprintf("%s:%d", hostname, serverEntry.MeekServerPort)
 		}
 
-	case common.TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS:
+	case protocol.TUNNEL_PROTOCOL_UNFRONTED_MEEK_HTTPS:
 		dialAddress = fmt.Sprintf("%s:%d", serverEntry.IpAddress, serverEntry.MeekServerPort)
 		useHTTPS = true
 		SNIServerName, transformedHostName =
@@ -536,6 +527,14 @@ func initMeekConfig(
 	}, nil
 }
 
+type dialResult struct {
+	dialConn      net.Conn
+	monitoredConn *common.ActivityMonitoredConn
+	sshClient     *ssh.Client
+	sshRequests   <-chan *ssh.Request
+	dialStats     *TunnelDialStats
+}
+
 // dialSsh is a helper that builds the transport layers and establishes the SSH connection.
 // When additional dial configuration is used, DialStats are recorded and returned.
 //
@@ -546,9 +545,9 @@ func initMeekConfig(
 func dialSsh(
 	config *Config,
 	pendingConns *common.Conns,
-	serverEntry *ServerEntry,
+	serverEntry *protocol.ServerEntry,
 	selectedProtocol,
-	sessionId string) (net.Conn, *common.ActivityMonitoredConn, *ssh.Client, *TunnelDialStats, error) {
+	sessionId string) (*dialResult, error) {
 
 	// The meek protocols tunnel obfuscated SSH. Obfuscated SSH is layered on top of SSH.
 	// So depending on which protocol is used, multiple layers are initialized.
@@ -559,18 +558,18 @@ func dialSsh(
 	var err error
 
 	switch selectedProtocol {
-	case common.TUNNEL_PROTOCOL_OBFUSCATED_SSH:
+	case protocol.TUNNEL_PROTOCOL_OBFUSCATED_SSH:
 		useObfuscatedSsh = true
 		directTCPDialAddress = fmt.Sprintf("%s:%d", serverEntry.IpAddress, serverEntry.SshObfuscatedPort)
 
-	case common.TUNNEL_PROTOCOL_SSH:
+	case protocol.TUNNEL_PROTOCOL_SSH:
 		directTCPDialAddress = fmt.Sprintf("%s:%d", serverEntry.IpAddress, serverEntry.SshPort)
 
 	default:
 		useObfuscatedSsh = true
 		meekConfig, err = initMeekConfig(config, serverEntry, selectedProtocol, sessionId)
 		if err != nil {
-			return nil, nil, nil, nil, common.ContextError(err)
+			return nil, common.ContextError(err)
 		}
 	}
 
@@ -610,12 +609,12 @@ func dialSsh(
 	if meekConfig != nil {
 		dialConn, err = DialMeek(meekConfig, dialConfig)
 		if err != nil {
-			return nil, nil, nil, nil, common.ContextError(err)
+			return nil, common.ContextError(err)
 		}
 	} else {
 		dialConn, err = DialTCP(directTCPDialAddress, dialConfig)
 		if err != nil {
-			return nil, nil, nil, nil, common.ContextError(err)
+			return nil, common.ContextError(err)
 		}
 	}
 
@@ -629,9 +628,9 @@ func dialSsh(
 	}()
 
 	// Activity monitoring is used to measure tunnel duration
-	monitoredConn, err := common.NewActivityMonitoredConn(dialConn, 0, false, nil)
+	monitoredConn, err := common.NewActivityMonitoredConn(dialConn, 0, false, nil, nil)
 	if err != nil {
-		return nil, nil, nil, nil, common.ContextError(err)
+		return nil, common.ContextError(err)
 	}
 
 	// Apply throttling (if configured)
@@ -640,17 +639,17 @@ func dialSsh(
 	// Add obfuscated SSH layer
 	var sshConn net.Conn = throttledConn
 	if useObfuscatedSsh {
-		sshConn, err = NewObfuscatedSshConn(
-			OBFUSCATION_CONN_MODE_CLIENT, throttledConn, serverEntry.SshObfuscatedKey)
+		sshConn, err = common.NewObfuscatedSshConn(
+			common.OBFUSCATION_CONN_MODE_CLIENT, throttledConn, serverEntry.SshObfuscatedKey)
 		if err != nil {
-			return nil, nil, nil, nil, common.ContextError(err)
+			return nil, common.ContextError(err)
 		}
 	}
 
 	// Now establish the SSH session over the conn transport
 	expectedPublicKey, err := base64.StdEncoding.DecodeString(serverEntry.SshHostKey)
 	if err != nil {
-		return nil, nil, nil, nil, common.ContextError(err)
+		return nil, common.ContextError(err)
 	}
 	sshCertChecker := &ssh.CertChecker{
 		HostKeyFallback: func(addr string, remote net.Addr, publicKey ssh.PublicKey) error {
@@ -660,18 +659,21 @@ func dialSsh(
 			return nil
 		},
 	}
-	sshPasswordPayload, err := json.Marshal(
-		struct {
-			SessionId   string `json:"SessionId"`
-			SshPassword string `json:"SshPassword"`
-		}{sessionId, serverEntry.SshPassword})
+
+	sshPasswordPayload := &protocol.SSHPasswordPayload{
+		SessionId:          sessionId,
+		SshPassword:        serverEntry.SshPassword,
+		ClientCapabilities: []string{protocol.CLIENT_CAPABILITY_SERVER_REQUESTS},
+	}
+
+	payload, err := json.Marshal(sshPasswordPayload)
 	if err != nil {
-		return nil, nil, nil, nil, common.ContextError(err)
+		return nil, common.ContextError(err)
 	}
 	sshClientConfig := &ssh.ClientConfig{
 		User: serverEntry.SshUsername,
 		Auth: []ssh.AuthMethod{
-			ssh.Password(string(sshPasswordPayload)),
+			ssh.Password(string(payload)),
 		},
 		HostKeyCallback: sshCertChecker.CheckHostKey,
 	}
@@ -688,13 +690,14 @@ func dialSsh(
 	// TODO: adjust the timeout to account for time-elapsed-from-start
 
 	type sshNewClientResult struct {
-		sshClient *ssh.Client
-		err       error
+		sshClient   *ssh.Client
+		sshRequests <-chan *ssh.Request
+		err         error
 	}
 	resultChannel := make(chan *sshNewClientResult, 2)
 	if *config.TunnelConnectTimeoutSeconds > 0 {
 		time.AfterFunc(time.Duration(*config.TunnelConnectTimeoutSeconds)*time.Second, func() {
-			resultChannel <- &sshNewClientResult{nil, errors.New("ssh dial timeout")}
+			resultChannel <- &sshNewClientResult{nil, nil, errors.New("ssh dial timeout")}
 		})
 	}
 
@@ -702,17 +705,18 @@ func dialSsh(
 		// The following is adapted from ssh.Dial(), here using a custom conn
 		// The sshAddress is passed through to host key verification callbacks; we don't use it.
 		sshAddress := ""
-		sshClientConn, sshChans, sshReqs, err := ssh.NewClientConn(sshConn, sshAddress, sshClientConfig)
+		sshClientConn, sshChannels, sshRequests, err := ssh.NewClientConn(
+			sshConn, sshAddress, sshClientConfig)
 		var sshClient *ssh.Client
 		if err == nil {
-			sshClient = ssh.NewClient(sshClientConn, sshChans, sshReqs)
+			sshClient = ssh.NewClient(sshClientConn, sshChannels, nil)
 		}
-		resultChannel <- &sshNewClientResult{sshClient, err}
+		resultChannel <- &sshNewClientResult{sshClient, sshRequests, err}
 	}()
 
 	result := <-resultChannel
 	if result.err != nil {
-		return nil, nil, nil, nil, common.ContextError(result.err)
+		return nil, common.ContextError(result.err)
 	}
 
 	var dialStats *TunnelDialStats
@@ -751,7 +755,13 @@ func dialSsh(
 	// but should not be used to perform I/O as that would interfere with SSH
 	// (and also bypasses throttling).
 
-	return dialConn, monitoredConn, result.sshClient, dialStats, nil
+	return &dialResult{
+			dialConn:      dialConn,
+			monitoredConn: monitoredConn,
+			sshClient:     result.sshClient,
+			sshRequests:   result.sshRequests,
+			dialStats:     dialStats},
+		nil
 }
 
 func makeRandomPeriod(min, max time.Duration) time.Duration {
@@ -835,16 +845,16 @@ func (tunnel *Tunnel) operateTunnel(tunnelOwner TunnelOwner) {
 	defer statsTimer.Stop()
 
 	// Schedule an immediate status request to deliver any unreported
-	// tunnel stats.
+	// persistent stats.
 	// Note: this may not be effective when there's an outstanding
 	// asynchronous untunneled final status request is holding the
-	// tunnel stats records. It may also conflict with other
+	// persistent stats records. It may also conflict with other
 	// tunnel candidates which attempt to send an immediate request
 	// before being discarded. For now, we mitigate this with a short,
 	// random delay.
-	unreported := CountUnreportedTunnelStats()
+	unreported := CountUnreportedPersistentStats()
 	if unreported > 0 {
-		NoticeInfo("Unreported tunnel stats: %d", unreported)
+		NoticeInfo("Unreported persistent stats: %d", unreported)
 		statsTimer.Reset(makeRandomPeriod(
 			PSIPHON_API_STATUS_REQUEST_SHORT_PERIOD_MIN,
 			PSIPHON_API_STATUS_REQUEST_SHORT_PERIOD_MAX))
@@ -992,6 +1002,18 @@ func (tunnel *Tunnel) operateTunnel(tunnelOwner TunnelOwner) {
 
 		case err = <-sshKeepAliveError:
 
+		case serverRequest := <-tunnel.sshServerRequests:
+			if serverRequest != nil {
+				err := HandleServerRequest(tunnelOwner, tunnel, serverRequest.Type, serverRequest.Payload)
+				if err == nil {
+					serverRequest.Reply(true, nil)
+				} else {
+					NoticeAlert("HandleServerRequest for %s failed: %s", serverRequest.Type, err)
+					serverRequest.Reply(false, nil)
+
+				}
+			}
+
 		case <-tunnel.shutdownOperateBroadcast:
 			shutdown = true
 		}
@@ -1045,7 +1067,7 @@ func (tunnel *Tunnel) operateTunnel(tunnelOwner TunnelOwner) {
 
 		tunnelDuration := tunnel.conn.GetLastActivityMonotime().Sub(tunnel.establishedTime)
 
-		err := RecordTunnelStats(
+		err := RecordTunnelStat(
 			tunnel.serverContext.sessionId,
 			tunnel.serverContext.tunnelNumber,
 			tunnel.serverEntry.IpAddress,