Просмотр исходного кода

First phase of obfuscated server list implementation
- New package, "osl", with core routines to be
used by client, server, and automation.
- Test coverage for "osl" package.
- Refactor subnet lookup and authenticated data
package functionality out of "psiphon" and into
"common" where it's now also used by "osl".
- Explicit test coverage for subnet lookup and
authenticated data package.

Rod Hynes 9 лет назад
Родитель
Commit
a8031db643

+ 2 - 0
.travis.yml

@@ -10,10 +10,12 @@ install:
 script:
 - cd psiphon
 - go test -race -v ./common
+- go test -race -v ./common/osl
 - go test -race -v ./transferstats
 - go test -race -v ./server
 - go test -race -v
 - go test -v -covermode=count -coverprofile=common.coverprofile ./common
+- go test -v -covermode=count -coverprofile=osl.coverprofile ./common/osl
 - go test -v -covermode=count -coverprofile=transferstats.coverprofile ./transferstats
 - go test -v -covermode=count -coverprofile=server.coverprofile ./server
 - go test -v -covermode=count -coverprofile=psiphon.coverprofile

+ 151 - 0
psiphon/common/authPackage.go

@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2016, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package common
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+)
+
+// AuthenticatedDataPackage is a JSON record containing some Psiphon data
+// payload, such as list of Psiphon server entries. As it may be downloaded
+// from various sources, it is digitally signed so that the data may be
+// authenticated.
+type AuthenticatedDataPackage struct {
+	Data                   string `json:"data"`
+	SigningPublicKeyDigest []byte `json:"signingPublicKeyDigest"`
+	Signature              []byte `json:"signature"`
+}
+
+// GenerateAuthenticatedDataPackageKeys generates a key pair
+// to be used to sign and verify AuthenticatedDataPackages.
+func GenerateAuthenticatedDataPackageKeys() (string, string, error) {
+
+	rsaKey, err := rsa.GenerateKey(rand.Reader, 4096)
+	if err != nil {
+		return "", "", ContextError(err)
+	}
+
+	publicKeyBytes, err := x509.MarshalPKIXPublicKey(rsaKey.Public())
+	if err != nil {
+		return "", "", ContextError(err)
+	}
+
+	privateKeyBytes := x509.MarshalPKCS1PrivateKey(rsaKey)
+
+	return base64.StdEncoding.EncodeToString(publicKeyBytes),
+		base64.StdEncoding.EncodeToString(privateKeyBytes),
+		nil
+}
+
+func sha256sum(data string) []byte {
+	hash := sha256.New()
+	hash.Write([]byte(data))
+	return hash.Sum(nil)
+}
+
+// WriteAuthenticatedDataPackage creates an AuthenticatedDataPackage
+// containing the specified data and signed by the given key. The output
+// conforms with the legacy format here:
+// https://bitbucket.org/psiphon/psiphon-circumvention-system/src/c25d080f6827b141fe637050ce0d5bd0ae2e9db5/Automation/psi_ops_crypto_tools.py
+func WriteAuthenticatedDataPackage(
+	data string, signingPublicKey, signingPrivateKey string) ([]byte, error) {
+
+	derEncodedPrivateKey, err := base64.StdEncoding.DecodeString(signingPrivateKey)
+	if err != nil {
+		return nil, ContextError(err)
+	}
+	rsaPrivateKey, err := x509.ParsePKCS1PrivateKey(derEncodedPrivateKey)
+	if err != nil {
+		return nil, ContextError(err)
+	}
+
+	signature, err := rsa.SignPKCS1v15(
+		rand.Reader,
+		rsaPrivateKey,
+		crypto.SHA256,
+		sha256sum(data))
+	if err != nil {
+		return nil, ContextError(err)
+	}
+
+	packageJSON, err := json.Marshal(
+		&AuthenticatedDataPackage{
+			Data: data,
+			SigningPublicKeyDigest: sha256sum(signingPublicKey),
+			Signature:              signature,
+		})
+	if err != nil {
+		return nil, ContextError(err)
+	}
+
+	return packageJSON, nil
+}
+
+// ReadAuthenticatedDataPackage extracts and verifies authenticated
+// data from an AuthenticatedDataPackage. The package must have been
+// signed with the given key.
+func ReadAuthenticatedDataPackage(
+	packageJSON []byte, signingPublicKey string) (string, error) {
+
+	var authenticatedDataPackage *AuthenticatedDataPackage
+	err := json.Unmarshal(packageJSON, &authenticatedDataPackage)
+	if err != nil {
+		return "", ContextError(err)
+	}
+
+	derEncodedPublicKey, err := base64.StdEncoding.DecodeString(signingPublicKey)
+	if err != nil {
+		return "", ContextError(err)
+	}
+	publicKey, err := x509.ParsePKIXPublicKey(derEncodedPublicKey)
+	if err != nil {
+		return "", ContextError(err)
+	}
+	rsaPublicKey, ok := publicKey.(*rsa.PublicKey)
+	if !ok {
+		return "", ContextError(errors.New("unexpected signing public key type"))
+	}
+
+	if 0 != bytes.Compare(
+		authenticatedDataPackage.SigningPublicKeyDigest,
+		sha256sum(signingPublicKey)) {
+
+		return "", ContextError(errors.New("unexpected signing public key digest"))
+	}
+
+	err = rsa.VerifyPKCS1v15(
+		rsaPublicKey,
+		crypto.SHA256,
+		sha256sum(authenticatedDataPackage.Data),
+		authenticatedDataPackage.Signature)
+	if err != nil {
+		return "", ContextError(err)
+	}
+
+	return authenticatedDataPackage.Data, nil
+}

+ 98 - 0
psiphon/common/authPackage_test.go

@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2016, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package common
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+func TestAuthenticatedPackage(t *testing.T) {
+
+	var signingPublicKey, signingPrivateKey string
+
+	t.Run("generate package keys", func(t *testing.T) {
+		var err error
+		signingPublicKey, signingPrivateKey, err = GenerateAuthenticatedDataPackageKeys()
+		if err != nil {
+			t.Fatalf("GenerateAuthenticatedDataPackageKeys failed: %s", err)
+		}
+	})
+
+	expectedContent := "TestAuthenticatedPackage"
+	var packagePayload []byte
+
+	t.Run("write package", func(t *testing.T) {
+		var err error
+		packagePayload, err = WriteAuthenticatedDataPackage(
+			expectedContent,
+			signingPublicKey,
+			signingPrivateKey)
+		if err != nil {
+			t.Fatalf("WriteAuthenticatedDataPackage failed: %s", err)
+		}
+	})
+
+	t.Run("read package: success", func(t *testing.T) {
+		content, err := ReadAuthenticatedDataPackage(
+			packagePayload, signingPublicKey)
+		if err != nil {
+			t.Fatalf("ReadAuthenticatedDataPackage failed: %s", err)
+		}
+		if content != expectedContent {
+			t.Fatalf(
+				"unexpected package content: expected %s got %s",
+				expectedContent, content)
+		}
+	})
+
+	t.Run("read package: wrong signing key", func(t *testing.T) {
+		wrongSigningPublicKey, _, err := GenerateAuthenticatedDataPackageKeys()
+		if err != nil {
+			t.Fatalf("GenerateAuthenticatedDataPackageKeys failed: %s", err)
+		}
+		_, err = ReadAuthenticatedDataPackage(
+			packagePayload, wrongSigningPublicKey)
+		if err == nil {
+			t.Fatalf("ReadAuthenticatedDataPackage unexpectedly succeeded")
+		}
+	})
+
+	t.Run("read package: tampered data", func(t *testing.T) {
+
+		var authDataPackage AuthenticatedDataPackage
+		err := json.Unmarshal(packagePayload, &authDataPackage)
+		if err != nil {
+			t.Fatalf("Unmarshal failed: %s", err)
+		}
+		authDataPackage.Data = "TamperedData"
+
+		tamperedPackagePayload, err := json.Marshal(&authDataPackage)
+		if err != nil {
+			t.Fatalf("Marshal failed: %s", err)
+		}
+
+		_, err = ReadAuthenticatedDataPackage(
+			tamperedPackagePayload, signingPublicKey)
+		if err == nil {
+			t.Fatalf("ReadAuthenticatedDataPackage unexpectedly succeeded")
+		}
+	})
+}

+ 1085 - 0
psiphon/common/osl/osl.go

@@ -0,0 +1,1085 @@
+/*
+ * Copyright (c) 2016, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+// Package osl implements the Obfuscated Server List (OSL) mechanism. This
+// mechanism is a method of distributing server lists only to clients that
+// demonstrate certain behavioral traits. Clients are seeded with Server
+// List Obfuscation Keys (SLOKs) as they meet the configured criteria. These
+// keys are stored and later combined to assemble keys to decrypt out-of-band
+// distributed OSL files that contain server lists.
+//
+// This package contains the core routines used in psiphond (to track client
+// traits and issue SLOKs), clients (to manage SLOKs and decrypt OSLs), and
+// automation (to create OSLs for distribution).
+package osl
+
+import (
+	"crypto/hmac"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"path"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/Psiphon-Inc/crypto/nacl/secretbox"
+	"github.com/Psiphon-Inc/sss"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+)
+
+const (
+	KEY_LENGTH_BYTES    = 32
+	DIRECTORY_FILENAME  = "osl-dir"
+	OSL_FILENAME_FORMAT = "osl-%s"
+)
+
+// Config is an OSL configuration, which consists of a list of schemes.
+type Config struct {
+	Schemes []*Scheme
+}
+
+// Scheme defines an OSL seeding and distribution strategy. SLOKs to
+// decrypt OSLs are issued based on client network activity -- defined
+// in the SeedSpecs -- and time. OSLs are created for periods of time
+// and can be decrypted by clients that are seeded with a sufficient
+// selection of SLOKs for that time period. Distribution of server
+// entries to OSLs is delegated to automation.
+type Scheme struct {
+
+	// Epoch is the start time of the scheme, the start time of the
+	// first OSL and when SLOKs will first be issued. It must be
+	// specified in UTC and must be a multiple of SeedPeriodNanoseconds.
+	Epoch string
+
+	// Regions is a list of client country codes this scheme applies to.
+	Regions []string
+
+	// PropagationChannelIDs is a list of client propagation channel IDs
+	// this scheme applies to. Propagation channel IDs are an input
+	// to SLOK key derivation.
+	PropagationChannelIDs []string
+
+	// MasterKey is the base random key used for SLOK key derivation. It
+	// must be unique for each scheme. It must be 32 random bytes, base64
+	// encoded.
+	MasterKey []byte
+
+	// SeedSpecs is the set of different client network activity patterns
+	// that will result in issuing SLOKs. For a given time period, a distinct
+	// SLOK is issued for each SeedLevel in each SeedSpec.
+	// Duplicate subnets may appear in multiple SeedSpecs.
+	SeedSpecs []*SeedSpec
+
+	// SeedSpecThreshold is the threshold scheme for combining SLOKs to
+	// decrypt an OSL. For any fixed time period, at least K (threshold) of
+	// N (total) SLOKs from the N SeedSpecs must be seeded for a client to be
+	// able to reassemble the OSL key.
+	// Limitation: thresholds must be at least 2.
+	SeedSpecThreshold int
+
+	// SeedPeriodNanoseconds is the time period granularity of SLOKs.
+	// New SLOKs are issued every SeedPeriodNanoseconds. Client progress
+	// towards activity levels is reset at the end of each period.
+	SeedPeriodNanoseconds int64
+
+	// KeySplits is the time period threshold scheme layered on top of the
+	// SeedSpecThreshold scheme for combining SLOKs to decrypt an OSL.
+	// There must be at least one level. For one level, any K (threshold) of
+	// N (total) SeedSpec SLOK groups must be sufficiently seeded for a client
+	// to be able to reassemble the OSL key. When an additional level is
+	// specified, then K' of N' groups of N of K SeedSpec SLOK groups must be
+	// sufficiently seeded. And so on. The first level in the list is the
+	// lowest level. The time period for OSLs is determined by the totals in
+	// the KeySplits.
+	// Limitation: thresholds must be at least 2.
+	//
+	// Example:
+	//
+	//   SeedSpecs = <3 specs>
+	//   SeedSpecThreshold = 2
+	//   SeedPeriodNanoseconds = 100,000,000 = 100 milliseconds
+	//   SeedPeriodKeySplits = [{10, 7}, {60, 5}]
+	//
+	//   In this scheme, up to 3 distinct SLOKs, one per spec, are issued
+	//   every 100 milliseconds.
+	//
+	//   Distinct OSLs are paved for every minute (60 seconds). Each OSL
+	//   key is split such that, for those 60 seconds, a client must seed
+	//   2/3 spec SLOKs for 7 of 10 consecutive 100 ms. time periods within
+	//   a second, for any 5 of 60 seconds within the minute.
+	//
+	SeedPeriodKeySplits []KeySplit
+
+	// The following fields are ephemeral state.
+
+	epoch                 time.Time
+	subnetLookups         map[*SeedSpec]common.SubnetLookup
+	subnetLookup          common.SubnetLookup
+	derivedSLOKCacheMutex sync.RWMutex
+	derivedSLOKCache      map[slokReference]*SLOK
+}
+
+// SeedSpec defines a client traffic pattern that results in a seeded SLOK.
+// For each time period, a unique SLOK is issued to a client that meets the
+// traffic levels specified in Targets. All upstream port forward traffic to
+// UpstreamSubnets is counted towards the targets.
+//
+// ID is a SLOK key derivation component and must be 32 random bytes, base64
+// encoded. UpstreamSubnets is a list of CIDRs. Description is not used; it's
+// for JSON config file comments.
+type SeedSpec struct {
+	Description     string
+	ID              []byte
+	UpstreamSubnets []string
+	Targets         TrafficValues
+}
+
+// TrafficValues defines a client traffic level that seeds a SLOK.
+// BytesRead and BytesWritten are the minimum bytes transferred counts to
+// seed a SLOK. Both UDP and TCP data will be counted towards these totals.
+// PortForwardDurationMilliseconds is the duration that a TCP or UDP port
+// forward is active (not connected, in the UDP case). All threshold
+// settings must be met to seed a SLOK; any threshold may be set to 0 to
+// be trivially satisfied.
+type TrafficValues struct {
+	BytesRead                       int64
+	BytesWritten                    int64
+	PortForwardDurationMilliseconds int64
+}
+
+// KeySplit defines a secret key splitting scheme where the secret is split
+// into N (total) shares and any K (threshold) of N shares must be known
+// to reconstruct the split secret.
+type KeySplit struct {
+	Total     int
+	Threshold int
+}
+
+// ClientSeedState tracks the progress of a client towards seeding SLOKs.
+type ClientSeedState struct {
+	scheme               *Scheme
+	propagationChannelID string
+	progressSLOKTime     int64
+	progress             map[*SeedSpec]*TrafficValues
+	mutex                sync.Mutex
+	issuedSLOKs          map[string]*SLOK
+}
+
+// ClientSeedPortForward maps a client port forward, which is relaying
+// traffic to a specific upstream address, to all seed state progress
+// counters for SeedSpecs with subnets containing the upstream address.
+// As traffic is relayed through the port forwards, the bytes transferred
+// and duration count towards the progress of these SeedSpecs and
+// associated SLOKs.
+type ClientSeedPortForward struct {
+	state    *ClientSeedState
+	progress []*TrafficValues
+}
+
+// slokReference uniquely identifies a SLOK by specifying all the fields
+// used to derive the SLOK secret key and ID.
+// Note: SeedSpecID is not a []byte as slokReference is used as a map key.
+type slokReference struct {
+	PropagationChannelID string
+	SeedSpecID           string
+	Time                 time.Time
+}
+
+// SLOK is a seeded SLOK issued to a client. The client will store the
+// SLOK in its local database; look it up by ID when checking which OSLs it
+// can reassemble keys for; and use the key material to reassemble OSL
+// file keys.
+type SLOK struct {
+	ID  []byte
+	Key []byte
+}
+
+// SeedPayload is the list of seeded SLOKs sent to a client.
+type SeedPayload struct {
+	SLOKs []*SLOK
+}
+
+// LoadConfig loads, validates, and initializes a JSON encoded OSL
+// configuration.
+func LoadConfig(configJSON []byte) (*Config, error) {
+
+	var config Config
+	err := json.Unmarshal(configJSON, &config)
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+
+	var previousEpoch time.Time
+
+	for _, scheme := range config.Schemes {
+
+		epoch, err := time.Parse(time.RFC3339, scheme.Epoch)
+		if err != nil {
+			return nil, common.ContextError(fmt.Errorf("invalid epoch format: %s", err))
+		}
+
+		if epoch.UTC() != epoch {
+			return nil, common.ContextError(errors.New("invalid epoch timezone"))
+		}
+
+		if epoch.Round(time.Duration(scheme.SeedPeriodNanoseconds)) != epoch {
+			return nil, common.ContextError(errors.New("invalid epoch period"))
+		}
+
+		if epoch.Before(previousEpoch) {
+			return nil, common.ContextError(errors.New("invalid epoch order"))
+		}
+
+		previousEpoch = epoch
+
+		scheme.epoch = epoch
+		scheme.subnetLookups = make(map[*SeedSpec]common.SubnetLookup)
+		scheme.derivedSLOKCache = make(map[slokReference]*SLOK)
+
+		if len(scheme.MasterKey) != KEY_LENGTH_BYTES {
+			return nil, common.ContextError(errors.New("invalid master key"))
+		}
+
+		for _, seedSpec := range scheme.SeedSpecs {
+			if len(seedSpec.ID) != KEY_LENGTH_BYTES {
+				return nil, common.ContextError(errors.New("invalid seed spec ID"))
+			}
+
+			// TODO: check that subnets do not overlap, as required by SubnetLookup
+			subnetLookup, err := common.NewSubnetLookup(seedSpec.UpstreamSubnets)
+			if err != nil {
+				return nil, common.ContextError(fmt.Errorf("invalid upstream subnets: %s", err))
+			}
+
+			scheme.subnetLookups[seedSpec] = subnetLookup
+		}
+
+		if !isValidShamirSplit(len(scheme.SeedSpecs), scheme.SeedSpecThreshold) {
+			return nil, common.ContextError(errors.New("invalid seed spec key split"))
+		}
+
+		if len(scheme.SeedPeriodKeySplits) < 1 {
+			return nil, common.ContextError(errors.New("invalid seed period key split count"))
+		}
+
+		for _, keySplit := range scheme.SeedPeriodKeySplits {
+			if !isValidShamirSplit(keySplit.Total, keySplit.Threshold) {
+				return nil, common.ContextError(errors.New("invalid seed period key split"))
+			}
+		}
+	}
+
+	return &config, nil
+}
+
+// NewClientSeedState creates a new client seed state to track
+// client progress towards seeding SLOKs. psiphond maintains one
+// ClientSeedState for each connected client.
+func NewClientSeedState(
+	config *Config, clientRegion, propagationChannelID string) *ClientSeedState {
+
+	for _, scheme := range config.Schemes {
+		// Only the first matching scheme is selected.
+		// Note: this implementation assumes a few simple schemes. For more
+		// schemes with many propagation channel IDs or region filters, use
+		// maps for more efficient lookup.
+		if scheme.epoch.Before(time.Now().UTC()) &&
+			common.Contains(scheme.PropagationChannelIDs, propagationChannelID) &&
+			common.Contains(scheme.Regions, clientRegion) {
+
+			// Empty progress is initialized up front for all seed specs. Once
+			// created, the progress map structure is read-only (the map, not the
+			// TrafficValue fields); this permits lock-free operation.
+			progress := make(map[*SeedSpec]*TrafficValues)
+			for _, seedSpec := range scheme.SeedSpecs {
+				progress[seedSpec] = &TrafficValues{}
+			}
+
+			return &ClientSeedState{
+				scheme:               scheme,
+				propagationChannelID: propagationChannelID,
+				progressSLOKTime:     getSLOKTime(scheme.SeedPeriodNanoseconds),
+				progress:             progress,
+				issuedSLOKs:          make(map[string]*SLOK),
+			}
+		}
+	}
+
+	return &ClientSeedState{}
+}
+
+// NewClientSeedPortForward creates a new client port forward
+// traffic progress tracker. Port forward progress reported to the
+// ClientSeedPortForward is added to seed state progress for all
+// seed specs containing upstreamIPAddress in their subnets.
+// The return value will be nil when activity for upstreamIPAddress
+// does not count towards any progress.
+// NewClientSeedPortForward may be invoked concurrently by many
+// psiphond port forward establishment goroutines.
+func (state *ClientSeedState) NewClientSeedPortForward(
+	upstreamIPAddress net.IP) *ClientSeedPortForward {
+
+	// Concurrency: access to ClientSeedState is unsynchronized
+	// but references only read-only fields.
+
+	if state.scheme == nil {
+		return nil
+	}
+
+	var progress []*TrafficValues
+
+	// Determine which seed spec subnets contain upstreamIPAddress
+	// and point to the progress for each. When progress is reported,
+	// it is added directly to all of these TrafficValues instances.
+	// Note: this implementation assumes a small number of seed specs.
+	// For larger numbers, instead of N SubnetLookups, create a single
+	// SubnetLookup which returns, for a given IP address, all matching
+	// subnets and associated seed specs.
+	for seedSpec, subnetLookup := range state.scheme.subnetLookups {
+		if subnetLookup.ContainsIPAddress(upstreamIPAddress) {
+			progress = append(progress, state.progress[seedSpec])
+		}
+	}
+
+	if progress == nil {
+		return nil
+	}
+
+	return &ClientSeedPortForward{
+		state:    state,
+		progress: progress,
+	}
+}
+
+// UpdateProgress adds port forward bytes transferred and duration to
+// all seed spec progresses associated with the port forward.
+// If UpdateProgress is invoked after the SLOK time period has rolled
+// over, any pending seeded SLOKs are issued and all progress is reset.
+// UpdateProgress may be invoked concurrently by many psiphond port
+// relay goroutines. The implementation of UpdateProgress prioritizes
+// not blocking port forward relaying; a consequence of this lock-free
+// design is that progress reported at the exact time of SLOK time period
+// rollover may be dropped.
+func (portForward *ClientSeedPortForward) UpdateProgress(progressDelta *TrafficValues) {
+
+	// Concurrency: access to ClientSeedState is unsynchronized to read-only
+	// fields or atomic, except in the case of a time period rollover, in which
+	// case a mutex is acquired.
+
+	slokTime := getSLOKTime(portForward.state.scheme.SeedPeriodNanoseconds)
+
+	// If the SLOK time period has changed since progress was last recorded,
+	// call issueSLOKs which will issue any SLOKs for that past time period
+	// and then clear all progress. Progress will then be recorded for the
+	// current time period.
+	// As it acquires the state mutex, issueSLOKs may stall other port
+	// forwards for this client. The delay is minimized by SLOK caching,
+	// which avoids redundant crypto operations.
+	if slokTime != atomic.LoadInt64(&portForward.state.progressSLOKTime) {
+		portForward.state.mutex.Lock()
+		portForward.state.issueSLOKs()
+		portForward.state.mutex.Unlock()
+	}
+
+	// Add directly to the permanent TrafficValues progress accumulators
+	// for the state's seed specs. Concurrently, other port forwards may
+	// be adding to the same accumulators. Also concurrently, another
+	// goroutine may be invoking issueSLOKs, which zeros all the accumulators.
+	// As a consequence, progress may be dropped at the exact time of
+	// time period rollover.
+	for _, progress := range portForward.progress {
+		atomic.AddInt64(&progress.BytesRead, progressDelta.BytesRead)
+		atomic.AddInt64(&progress.BytesWritten, progressDelta.BytesWritten)
+		atomic.AddInt64(&progress.PortForwardDurationMilliseconds, progressDelta.PortForwardDurationMilliseconds)
+	}
+}
+
+// issueSLOKs checks client progress against each candidate seed spec
+// and seeds SLOKs when the client traffic levels are achieved. After
+// checking progress, and if the SLOK time period has changed since
+// progress was last recorded, progress is reset. Partial, insufficient
+// progress is intentionally dropped when the time period rolls over.
+// Derived SLOKs are cached to avoid redundant CPU intensive operations.
+// All issued SLOKs are retained in the client state for the duration
+// of the client's session. As there is no mechanism for the client to
+// explicitly acknowledge received SLOKs, it is intended that SLOKs
+// will be resent to the client.
+func (state *ClientSeedState) issueSLOKs() {
+
+	// Concurrency: the caller must lock state.mutex.
+
+	if state.scheme == nil {
+		return
+	}
+
+	progressSLOKTime := time.Unix(0, state.progressSLOKTime)
+
+	for seedSpec, progress := range state.progress {
+
+		if atomic.LoadInt64(&progress.BytesRead) >= seedSpec.Targets.BytesRead &&
+			atomic.LoadInt64(&progress.BytesWritten) >= seedSpec.Targets.BytesWritten &&
+			atomic.LoadInt64(&progress.PortForwardDurationMilliseconds) >=
+				seedSpec.Targets.PortForwardDurationMilliseconds {
+
+			ref := &slokReference{
+				PropagationChannelID: state.propagationChannelID,
+				SeedSpecID:           string(seedSpec.ID),
+				Time:                 progressSLOKTime,
+			}
+
+			state.scheme.derivedSLOKCacheMutex.RLock()
+			slok, ok := state.scheme.derivedSLOKCache[*ref]
+			state.scheme.derivedSLOKCacheMutex.RUnlock()
+			if !ok {
+				slok = deriveSLOK(state.scheme, ref)
+				state.scheme.derivedSLOKCacheMutex.Lock()
+				state.scheme.derivedSLOKCache[*ref] = slok
+				state.scheme.derivedSLOKCacheMutex.Unlock()
+			}
+
+			state.issuedSLOKs[string(slok.ID)] = slok
+		}
+	}
+
+	slokTime := getSLOKTime(state.scheme.SeedPeriodNanoseconds)
+
+	if slokTime != state.progressSLOKTime {
+		state.progressSLOKTime = slokTime
+		// The progress map structure is not reset or modified; instead
+		// the mapped accumulator values are zeroed. Concurrently, port
+		// forward relay goroutines continue to add to these accumulators.
+		for _, progress := range state.progress {
+			atomic.StoreInt64(&progress.BytesRead, 0)
+			atomic.StoreInt64(&progress.BytesWritten, 0)
+			atomic.StoreInt64(&progress.PortForwardDurationMilliseconds, 0)
+		}
+	}
+}
+
+func getSLOKTime(seedPeriodNanoseconds int64) int64 {
+	return time.Now().UTC().Truncate(time.Duration(seedPeriodNanoseconds)).UnixNano()
+}
+
+// deriveSLOK produces SLOK secret keys and IDs using HKDF-Expand
+// defined in https://tools.ietf.org/html/rfc5869.
+func deriveSLOK(
+	scheme *Scheme, ref *slokReference) *SLOK {
+
+	timeBytes := make([]byte, 8)
+	binary.LittleEndian.PutUint64(timeBytes, uint64(ref.Time.UnixNano()))
+
+	key := deriveKeyHKDF(
+		scheme.MasterKey,
+		[]byte(ref.PropagationChannelID),
+		[]byte(ref.SeedSpecID),
+		timeBytes)
+
+	// TODO: is ID derivation cryptographically sound?
+	id := deriveKeyHKDF(
+		scheme.MasterKey,
+		key)
+
+	return &SLOK{
+		ID:  id,
+		Key: key,
+	}
+}
+
+// GetSeedPayload issues any pending SLOKs and returns the accumulated
+// SLOKs for a given client. psiphond will periodically call this and
+// return the SLOKs in API request responses.
+// Note: caller must not modify the SLOKs in SeedPayload.SLOKs
+// as these are shared data.
+func (state *ClientSeedState) GetSeedPayload() *SeedPayload {
+
+	state.mutex.Lock()
+	defer state.mutex.Unlock()
+
+	state.issueSLOKs()
+
+	if state.scheme == nil {
+		return &SeedPayload{}
+	}
+
+	sloks := make([]*SLOK, len(state.issuedSLOKs))
+	index := 0
+	for _, slok := range state.issuedSLOKs {
+		sloks[index] = slok
+		index++
+	}
+
+	return &SeedPayload{
+		SLOKs: sloks,
+	}
+}
+
+// PaveFile describes an OSL data file to be paved to an out-of-band
+// distribution drop site. There are two types of files: a directory,
+// which describes how to assemble keys for OSLs, and the encrypted
+// OSL files.
+type PaveFile struct {
+	Name     string
+	Contents []byte
+}
+
+// Directory describes a set of OSL files.
+type Directory struct {
+	FileSpecs []*OSLFileSpec
+
+	// The following fields are ephemeral state.
+
+	oslIDLookup map[string]*OSLFileSpec
+}
+
+// An OSLFileSpec includes an ID which is used to reference the
+// OSL file and describes the key splits used to divide the OSL
+// file key along with the SLOKs required to reassemble those keys.
+type OSLFileSpec struct {
+	ID        []byte
+	KeyShares *KeyShares
+}
+
+// KeyShares is a tree data structure which describes the
+// key splits used to divide a secret key. BoxedShares are encrypted
+// shares of the key, and #Threshold amount of decrypted BoxedShares
+// are required to reconstruct the secret key. The keys for BoxedShares
+// are either SLOKs (referenced by SLOK ID) or random keys that are
+// themselves split as described in child KeyShares.
+type KeyShares struct {
+	Threshold   int
+	BoxedShares [][]byte
+	SLOKIDs     [][]byte
+	KeyShares   []*KeyShares
+}
+
+// Pave creates the full set of OSL files, for all schemes in the
+// configuration, to be dropped in an out-of-band distribution site.
+// Only OSLs for the propagation channel ID associated with the
+// distribution site are paved. This function is used by automation.
+//
+// The Name component of each file relates to the values returned by
+// the client functions GetDirectoryURL and GetOSLFileURL.
+//
+// Pave returns a pave file for the entire directory of all OSLs from
+// epoch. It only returns pave files for OSLs referenced in
+// paveServerEntries. paveServerEntries is a list of maps, one for each
+// scheme, from the first SLOK time period identifying an OSL to a
+// payload to encrypt and pave.
+//
+// Automation is responsible for consistently distributing server entries
+// to OSLs in the case where OSLs are repaved in subsequent calls.
+func (config *Config) Pave(
+	endTime time.Time,
+	propagationChannelID string,
+	signingPublicKey string,
+	signingPrivateKey string,
+	paveServerEntries []map[time.Time][]byte) ([]*PaveFile, error) {
+
+	var paveFiles []*PaveFile
+
+	Directory := &Directory{}
+
+	if len(paveServerEntries) != len(config.Schemes) {
+		return nil, common.ContextError(errors.New("invalid paveServerEntries"))
+	}
+
+	for schemeIndex, scheme := range config.Schemes {
+
+		slokTimePeriodsPerOSL := 1
+		for _, keySplit := range scheme.SeedPeriodKeySplits {
+			slokTimePeriodsPerOSL *= keySplit.Total
+		}
+
+		if common.Contains(scheme.PropagationChannelIDs, propagationChannelID) {
+			oslTime := scheme.epoch
+			for oslTime.Before(endTime) {
+
+				firstSLOKTime := oslTime
+				fileKey, fileSpec, err := makeOSLFileSpec(
+					scheme, propagationChannelID, firstSLOKTime)
+				if err != nil {
+					return nil, common.ContextError(err)
+				}
+
+				Directory.FileSpecs = append(Directory.FileSpecs, fileSpec)
+
+				serverEntries, ok := paveServerEntries[schemeIndex][oslTime]
+				if ok {
+					boxedServerEntries, err := box(fileKey, serverEntries)
+					if err != nil {
+						return nil, common.ContextError(err)
+					}
+
+					fileName := fmt.Sprintf(
+						OSL_FILENAME_FORMAT, hex.EncodeToString(fileSpec.ID))
+
+					paveFiles = append(paveFiles, &PaveFile{
+						Name:     fileName,
+						Contents: boxedServerEntries,
+					})
+				}
+
+				oslTime = oslTime.Add(
+					time.Duration(
+						int64(slokTimePeriodsPerOSL) * scheme.SeedPeriodNanoseconds))
+			}
+		}
+	}
+
+	jsonDirectory, err := json.Marshal(Directory)
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+
+	signedDirectory, err := common.WriteAuthenticatedDataPackage(
+		base64.StdEncoding.EncodeToString(jsonDirectory),
+		signingPublicKey,
+		signingPrivateKey)
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+
+	paveFiles = append(paveFiles, &PaveFile{
+		Name:     DIRECTORY_FILENAME,
+		Contents: signedDirectory,
+	})
+
+	return paveFiles, nil
+}
+
// makeOSLFileSpec creates a random OSL file key, splits it according
// to the scheme's key splits, and sets the OSL ID as its first SLOK
// ID. The returned key is used to encrypt the OSL payload and then
// discarded; the key may be reassembled using the data in the KeyShares
// tree, given sufficient SLOKs.
func makeOSLFileSpec(
	scheme *Scheme,
	propagationChannelID string,
	firstSLOKTime time.Time) ([]byte, *OSLFileSpec, error) {

	// The OSL ID is the ID of the SLOK for the scheme's first seed spec
	// at the OSL's first time period.
	ref := &slokReference{
		PropagationChannelID: propagationChannelID,
		SeedSpecID:           string(scheme.SeedSpecs[0].ID),
		Time:                 firstSLOKTime,
	}
	firstSLOK := deriveSLOK(scheme, ref)
	oslID := firstSLOK.ID

	fileKey, err := common.MakeSecureRandomBytes(KEY_LENGTH_BYTES)
	if err != nil {
		return nil, nil, common.ContextError(err)
	}

	// firstSLOKTime is passed by pointer: divideKey advances it through
	// successive SLOK time periods as it builds the KeyShares tree.
	keyShares, err := divideKey(
		scheme,
		fileKey,
		scheme.SeedPeriodKeySplits,
		propagationChannelID,
		&firstSLOKTime)
	if err != nil {
		return nil, nil, common.ContextError(err)
	}

	fileSpec := &OSLFileSpec{
		ID:        oslID,
		KeyShares: keyShares,
	}

	return fileKey, fileSpec, nil
}
+
// divideKey recursively constructs a KeyShares tree.
//
// The keySplits are consumed from the last entry down to the first:
// each level Shamir-splits its key into keySplit.Total shares, any
// keySplit.Threshold of which suffice to reassemble it. Every share is
// encrypted with its own random share key which is, in turn, either
// split further (interior nodes, keySplitIndex > 0) or protected by
// seed spec SLOKs (leaf nodes, via divideKeyWithSeedSpecSLOKs).
//
// nextSLOKTime is advanced by one seed period at each leaf, so that
// successive leaves are keyed to successive SLOK time periods.
func divideKey(
	scheme *Scheme,
	key []byte,
	keySplits []KeySplit,
	propagationChannelID string,
	nextSLOKTime *time.Time) (*KeyShares, error) {

	// This level uses the last key split; the preceding splits are
	// handled by the recursive calls below.
	keySplitIndex := len(keySplits) - 1
	keySplit := keySplits[keySplitIndex]

	shares, err := shamirSplit(key, keySplit.Total, keySplit.Threshold)
	if err != nil {
		return nil, common.ContextError(err)
	}

	var boxedShares [][]byte
	var keyShares []*KeyShares

	for _, share := range shares {
		// Each share is boxed with a fresh random key; that key is
		// recoverable only through the child KeyShares node built next.
		shareKey, err := common.MakeSecureRandomBytes(KEY_LENGTH_BYTES)
		if err != nil {
			return nil, common.ContextError(err)
		}
		if keySplitIndex > 0 {
			keyShare, err := divideKey(
				scheme,
				shareKey,
				keySplits[0:keySplitIndex],
				propagationChannelID,
				nextSLOKTime)
			if err != nil {
				return nil, common.ContextError(err)
			}
			keyShares = append(keyShares, keyShare)
		} else {
			keyShare, err := divideKeyWithSeedSpecSLOKs(
				scheme,
				shareKey,
				propagationChannelID,
				nextSLOKTime)
			if err != nil {
				return nil, common.ContextError(err)
			}
			keyShares = append(keyShares, keyShare)

			// Leaf complete: move to the next SLOK time period.
			*nextSLOKTime = nextSLOKTime.Add(time.Duration(scheme.SeedPeriodNanoseconds))
		}
		boxedShare, err := box(shareKey, share)
		if err != nil {
			return nil, common.ContextError(err)
		}
		boxedShares = append(boxedShares, boxedShare)
	}

	return &KeyShares{
		Threshold:   keySplit.Threshold,
		BoxedShares: boxedShares,
		SLOKIDs:     nil,
		KeyShares:   keyShares,
	}, nil
}
+
// divideKeyWithSeedSpecSLOKs constructs a leaf KeyShares node. The key
// is Shamir-split into one share per seed spec, any SeedSpecThreshold
// of which suffice to reassemble it; each share is boxed with the SLOK
// key for that seed spec at the nextSLOKTime time period, and the SLOK
// IDs are recorded so clients can look up which SLOKs they hold.
func divideKeyWithSeedSpecSLOKs(
	scheme *Scheme,
	key []byte,
	propagationChannelID string,
	nextSLOKTime *time.Time) (*KeyShares, error) {

	var boxedShares [][]byte
	var slokIDs [][]byte

	shares, err := shamirSplit(
		key, len(scheme.SeedSpecs), scheme.SeedSpecThreshold)
	if err != nil {
		return nil, common.ContextError(err)
	}

	for index, seedSpec := range scheme.SeedSpecs {

		// Derive the SLOK for this (propagation channel, seed spec, time)
		// tuple; its key boxes the share, its ID is published.
		ref := &slokReference{
			PropagationChannelID: propagationChannelID,
			SeedSpecID:           string(seedSpec.ID),
			Time:                 *nextSLOKTime,
		}
		slok := deriveSLOK(scheme, ref)

		boxedShare, err := box(slok.Key, shares[index])
		if err != nil {
			return nil, common.ContextError(err)
		}
		boxedShares = append(boxedShares, boxedShare)

		slokIDs = append(slokIDs, slok.ID)
	}

	return &KeyShares{
		Threshold:   scheme.SeedSpecThreshold,
		BoxedShares: boxedShares,
		SLOKIDs:     slokIDs,
		KeyShares:   nil,
	}, nil
}
+
+// GetDirectoryURL returns the URL for an OSL directory. Clients
+// call this when fetching the directory from out-of-band
+// distribution sites.
+// Clients are responsible for tracking whether the remote file has
+// changed or not before downloading.
+func GetDirectoryURL(baseURL string) string {
+	u, err := url.Parse(baseURL)
+	if err != nil {
+		return ""
+	}
+	u.Path = path.Join(u.Path, DIRECTORY_FILENAME)
+	return u.String()
+}
+
+// GetOSLFileURL returns the URL for an OSL file. Once the client
+// has determined, from GetSeededOSLIDs, which OSLs it has sufficiently
+// seeded, it calls this to fetch the OSLs for download and decryption.
+// Clients are responsible for tracking whether the remote file has
+// changed or not before downloading.
+func GetOSLFileURL(baseURL string, oslID []byte) string {
+	u, err := url.Parse(baseURL)
+	if err != nil {
+		return ""
+	}
+	u.Path = path.Join(
+		u.Path, fmt.Sprintf(OSL_FILENAME_FORMAT, hex.EncodeToString(oslID)))
+	return u.String()
+}
+
+// LoadDirectory authenticates the signed directory package -- which is the
+// contents of the paved directory file. It then returns the directory data.
+// Clients call this to process downloaded directory files.
+func LoadDirectory(directoryPackage []byte, signingPublicKey string) (*Directory, error) {
+
+	encodedDirectory, err := common.ReadAuthenticatedDataPackage(directoryPackage, signingPublicKey)
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+
+	directoryJSON, err := base64.StdEncoding.DecodeString(encodedDirectory)
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+
+	var directory Directory
+	err = json.Unmarshal(directoryJSON, &directory)
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+
+	directory.oslIDLookup = make(map[string]*OSLFileSpec)
+	for _, fileSpec := range directory.FileSpecs {
+		directory.oslIDLookup[string(fileSpec.ID)] = fileSpec
+	}
+
+	return &directory, nil
+}
+
// SLOKLookup is a callback to lookup SLOK keys by ID. It returns the
// key for the given SLOK ID, or nil when the client does not hold
// that SLOK.
type SLOKLookup func([]byte) []byte
+
+// GetSeededOSLIDs examines each OSL in the directory and returns a list for
+// which the client has sufficient SLOKs to reassemble the OSL key and
+// decrypt. This function simply does SLOK ID lookups and threshold counting
+// and does not derive keys for every OSL.
+// The client is responsible for using the resulting list of OSL IDs to fetch
+// the OSL files and process.
+//
+// The client's propagation channel ID is used implicitly: it determines the
+// base URL used to download the directory and OSL files. If the client has
+// seeded SLOKs from a propagation channel ID different than the one associated
+// with its present base URL, they will not appear in the directory and not
+// be used.
+//
+// SLOKLookup is called to determine which SLOKs are seeded with the client.
+// errorLogger is a callback to log errors; GetSeededOSLIDs will continue to
+// process each candidate OSL even in the case of an error processing a
+// particular one.
+func (directory *Directory) GetSeededOSLIDs(lookup SLOKLookup, errorLogger func(error)) [][]byte {
+
+	var OSLIDs [][]byte
+	for _, fileSpec := range directory.FileSpecs {
+		ok, _, err := fileSpec.KeyShares.reassembleKey(lookup, false)
+		if err != nil {
+			errorLogger(err)
+			continue
+		}
+		if ok {
+			OSLIDs = append(OSLIDs, fileSpec.ID)
+		}
+	}
+
+	return OSLIDs
+}
+
// reassembleKey recursively traverses a KeyShares tree, determining
// whether there exists sufficient SLOKs to reassemble the root key and
// performing the key assembly as required.
//
// When unboxKey is false, only threshold counting is performed; no
// shares are decrypted and the returned key is nil.
func (keyShares *KeyShares) reassembleKey(lookup SLOKLookup, unboxKey bool) (bool, []byte, error) {

	// A node is either a leaf (SLOKIDs set) or interior (KeyShares set),
	// never both, and the populated list must pair one-to-one with
	// BoxedShares.
	if (len(keyShares.SLOKIDs) > 0 && len(keyShares.KeyShares) > 0) ||
		(len(keyShares.SLOKIDs) > 0 && len(keyShares.SLOKIDs) != len(keyShares.BoxedShares)) ||
		(len(keyShares.KeyShares) > 0 && len(keyShares.KeyShares) != len(keyShares.BoxedShares)) {
		return false, nil, common.ContextError(errors.New("unexpected KeyShares format"))
	}

	shareCount := 0
	var shares [][]byte
	if unboxKey {
		// Note: shamirCombine infers share indices from slice offset, so
		// all len(BoxedShares) slots are allocated and missing shares are
		// left nil.
		shares = make([][]byte, len(keyShares.BoxedShares))
	}
	if len(keyShares.SLOKIDs) > 0 {
		// Leaf node: count (and optionally unbox) shares protected by
		// seeded SLOKs, stopping once the threshold is met.
		for i := 0; i < len(keyShares.SLOKIDs) && shareCount < keyShares.Threshold; i++ {
			slokKey := lookup(keyShares.SLOKIDs[i])
			if slokKey == nil {
				continue
			}
			shareCount += 1
			if unboxKey {
				share, err := unbox(slokKey, keyShares.BoxedShares[i])
				if err != nil {
					return false, nil, common.ContextError(err)
				}
				shares[i] = share
			}
		}
	} else {
		// Interior node: recurse; each child that can be reassembled
		// yields the key for the corresponding boxed share.
		for i := 0; i < len(keyShares.KeyShares) && shareCount < keyShares.Threshold; i++ {
			ok, key, err := keyShares.KeyShares[i].reassembleKey(lookup, unboxKey)
			if err != nil {
				return false, nil, common.ContextError(err)
			}
			if !ok {
				continue
			}
			shareCount += 1
			if unboxKey {
				share, err := unbox(key, keyShares.BoxedShares[i])
				if err != nil {
					return false, nil, common.ContextError(err)
				}
				shares[i] = share
			}
		}
	}

	if shareCount < keyShares.Threshold {
		return false, nil, nil
	}

	if !unboxKey {
		return true, nil, nil
	}

	joinedKey := shamirCombine(shares)

	return true, joinedKey, nil
}
+
+// DecryptOSL reassembles the key for the OSL specified by oslID and uses
+// that key to decrypt oslFileContents. Clients will call DecryptOSL for
+// OSLs indicated by GetSeededOSLIDs along with their downloaded content.
+// SLOKLookup is called to determine which SLOKs are seeded with the client.
+func (directory *Directory) DecryptOSL(
+	lookup SLOKLookup, oslID []byte, oslFileContents []byte) ([]byte, error) {
+
+	fileSpec, ok := directory.oslIDLookup[string(oslID)]
+	if !ok {
+		return nil, common.ContextError(errors.New("unknown OSL ID"))
+	}
+	ok, fileKey, err := fileSpec.KeyShares.reassembleKey(lookup, true)
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+	if !ok {
+		return nil, common.ContextError(errors.New("unseeded OSL"))
+	}
+	decryptedOSLFileContents, err := unbox(fileKey, oslFileContents)
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+	return decryptedOSLFileContents, nil
+}
+
// deriveKeyHKDF implements HKDF-Expand as defined in https://tools.ietf.org/html/rfc5869
// where masterKey = PRK, context = info, and L = 32; SHA-256 is used so HashLen = 32.
// Since L = HashLen, a single block suffices: output = HMAC(PRK, info || 0x01).
func deriveKeyHKDF(masterKey []byte, context ...[]byte) []byte {
	mac := hmac.New(sha256.New, masterKey)
	for _, item := range context {
		// item is already a []byte; no conversion required.
		mac.Write(item)
	}
	// The HKDF-Expand block counter; only block 0x01 is produced.
	mac.Write([]byte{0x01})
	return mac.Sum(nil)
}
+
// isValidShamirSplit checks the parameter constraints imposed by
// sss.Split: between 2 and 254 total shares, and a threshold of at
// least 2 that does not exceed the total.
func isValidShamirSplit(total, threshold int) bool {
	return total >= 2 && total <= 254 && threshold >= 2 && threshold <= total
}
+
+// shamirSplit is a helper wrapper for sss.Split
+func shamirSplit(secret []byte, total, threshold int) ([][]byte, error) {
+	if !isValidShamirSplit(total, threshold) {
+		return nil, common.ContextError(errors.New("invalid parameters"))
+	}
+
+	shareMap, err := sss.Split(byte(total), byte(threshold), secret)
+	if err != nil {
+		return nil, common.ContextError(err)
+	}
+
+	shares := make([][]byte, total)
+	for i := 0; i < total; i++ {
+		// Note: sss.Combine index starts at 1
+		shares[i] = shareMap[byte(i)+1]
+	}
+
+	return shares, nil
+}
+
+// shamirCombine is a helper wrapper for sss.Combine
+func shamirCombine(shares [][]byte) []byte {
+
+	// Convert a sparse list into a map
+	shareMap := make(map[byte][]byte)
+	for index, share := range shares {
+		if share != nil {
+			// Note: sss.Combine index starts at 1
+			shareMap[byte(index)+1] = share
+		}
+	}
+
+	return sss.Combine(shareMap)
+}
+
+// box is a helper wrapper for secretbox.Seal.
+// A constant  nonce is used, which is secure so long as
+// each key is used to encrypt only one message.
+func box(key, plaintext []byte) ([]byte, error) {
+	if len(key) != 32 {
+		return nil, common.ContextError(errors.New("invalid key length"))
+	}
+	var nonce [24]byte
+	var secretboxKey [32]byte
+	copy(secretboxKey[:], key)
+	box := secretbox.Seal(nil, plaintext, &nonce, &secretboxKey)
+	return box, nil
+}
+
+// unbox is a helper wrapper for secretbox.Open
+func unbox(key, box []byte) ([]byte, error) {
+	if len(key) != 32 {
+		return nil, common.ContextError(errors.New("invalid key length"))
+	}
+	var nonce [24]byte
+	var secretboxKey [32]byte
+	copy(secretboxKey[:], key)
+	plaintext, ok := secretbox.Open(nil, box, &nonce, &secretboxKey)
+	if !ok {
+		return nil, common.ContextError(errors.New("unbox failed"))
+	}
+	return plaintext, nil
+}

+ 567 - 0
psiphon/common/osl/osl_test.go

@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2016, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package osl
+
+import (
+	"bytes"
+	"fmt"
+	"net"
+	"testing"
+	"time"
+
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+)
+
+func TestOSL(t *testing.T) {
+
+	configJSONTemplate := `
+{
+  "Schemes" : [
+    {
+      "Epoch" : "%s",
+
+      "Regions" : ["US", "CA"],
+
+      "PropagationChannelIDs" : ["2995DB0C968C59C4F23E87988D9C0D41", "E742C25A6D8BA8C17F37E725FA628569"],
+
+      "MasterKey" : "wFuSbqU/pJ/35vRmoM8T9ys1PgDa8uzJps1Y+FNKa5U=",
+
+      "SeedSpecs" : [
+        {
+          "Description": "spec1",
+          "ID" : "IXHWfVgWFkEKvgqsjmnJuN3FpaGuCzQMETya+DSQvsk=",
+          "UpstreamSubnets" : ["192.168.0.0/16", "172.16.0.0/12"],
+          "Targets" :
+          {
+              "BytesRead" : 1,
+              "BytesWritten" : 1,
+              "PortForwardDurationMilliseconds" : 1
+          }
+        },
+        {
+          "Description": "spec2",
+          "ID" : "qvpIcORLE2Pi5TZmqRtVkEp+OKov0MhfsYPLNV7FYtI=",
+          "UpstreamSubnets" : ["192.168.0.0/16", "10.0.0.0/8"],
+          "Targets" :
+          {
+              "BytesRead" : 10,
+              "BytesWritten" : 10,
+              "PortForwardDurationMilliseconds" : 10
+          }
+        },
+        {
+          "Description": "spec3",
+          "ID" : "ts5LInjFHbVKX+/C5/bSJqUh+cLT5kJy92TZGLvAtPU=",
+          "UpstreamSubnets" : ["100.64.0.0/10"],
+          "Targets" :
+          {
+              "BytesRead" : 100,
+              "BytesWritten" : 100,
+              "PortForwardDurationMilliseconds" : 100
+          }
+        }
+      ],
+
+      "SeedSpecThreshold" : 2,
+
+      "SeedPeriodNanoseconds" : 1000000,
+
+      "SeedPeriodKeySplits": [
+        {
+          "Total": 10,
+          "Threshold": 5
+        },
+        {
+          "Total": 10,
+          "Threshold": 5
+        }
+      ]
+    },
+    {
+      "Epoch" : "%s",
+
+      "Regions" : ["US", "CA"],
+
+      "PropagationChannelIDs" : ["36F1CF2DF1250BF0C7BA0629CE3DC657"],
+
+      "MasterKey" : "fcyQy8JSxLXHt/Iom9Qj9wMnSjrsccTiiSPEsJicet4=",
+
+      "SeedSpecs" : [
+        {
+          "Description": "spec1",
+          "ID" : "NXY0/4lqMxx5XIszIhMbwHobH/qb2Gl0Bw/OGndc1vM=",
+          "UpstreamSubnets" : ["192.168.0.0/16", "172.16.0.0/12"],
+          "Targets" :
+          {
+              "BytesRead" : 1,
+              "BytesWritten" : 1,
+              "PortForwardDurationMilliseconds" : 1
+          }
+        },
+        {
+          "Description": "spec2",
+          "ID" : "o78G6muv3idtbQKXoU05tF6gTlQj1LHmNe0eUWkZGxs=",
+          "UpstreamSubnets" : ["192.168.0.0/16", "10.0.0.0/8"],
+          "Targets" :
+          {
+              "BytesRead" : 10,
+              "BytesWritten" : 10,
+              "PortForwardDurationMilliseconds" : 10
+          }
+        },
+        {
+          "Description": "spec3",
+          "ID" : "1DlAvJYpoSEfcqMXYBV7bDEtYu3LCQO39ISD5tmi8Uo=",
+          "UpstreamSubnets" : ["100.64.0.0/10"],
+          "Targets" :
+          {
+              "BytesRead" : 0,
+              "BytesWritten" : 0,
+              "PortForwardDurationMilliseconds" : 0
+          }
+        }
+      ],
+
+      "SeedSpecThreshold" : 2,
+
+      "SeedPeriodNanoseconds" : 1000000,
+
+      "SeedPeriodKeySplits": [
+        {
+          "Total": 100,
+          "Threshold": 25
+        }
+      ]
+    }
+  ]
+}
+`
+	now := time.Now().UTC()
+	epoch := now.Truncate(1 * time.Millisecond)
+	epochStr := epoch.Format(time.RFC3339Nano)
+	configJSON := fmt.Sprintf(configJSONTemplate, epochStr, epochStr)
+
+	// The first scheme requires sufficient activity within 5/10 1 millisecond
+	// periods and 5/10 10 millisecond longer periods. The second scheme requires
+	// sufficient activity within 25/100 1 millisecond periods.
+
+	config, err := LoadConfig([]byte(configJSON))
+	if err != nil {
+		t.Fatalf("LoadConfig failed: %s", err)
+	}
+
+	t.Run("ineligible client, sufficient transfer", func(t *testing.T) {
+
+		clientSeedState := NewClientSeedState(config, "US", "C5E8D2EDFD093B50D8D65CF59D0263CA")
+
+		seedPortForward := clientSeedState.NewClientSeedPortForward(net.ParseIP("192.168.0.1"))
+
+		if seedPortForward != nil {
+			t.Fatalf("expected nil client seed port forward")
+		}
+	})
+
+	// This clientSeedState is used across multiple tests.
+	clientSeedState := NewClientSeedState(config, "US", "2995DB0C968C59C4F23E87988D9C0D41")
+
+	t.Run("eligible client, no transfer", func(t *testing.T) {
+
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 0 {
+			t.Fatalf("expected 0 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+	})
+
+	t.Run("eligible client, insufficient transfer", func(t *testing.T) {
+
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1")).UpdateProgress(
+			&TrafficValues{
+				BytesRead:                       5,
+				BytesWritten:                    5,
+				PortForwardDurationMilliseconds: 5,
+			})
+
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 0 {
+			t.Fatalf("expected 0 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+	})
+
+	rolloverToNextSLOKTime := func() {
+		// Rollover to the next SLOK time, so accrued data transfer will be reset.
+		now := time.Now().UTC()
+		time.Sleep(now.Add(1 * time.Millisecond).Truncate(1 * time.Millisecond).Sub(now))
+	}
+
+	t.Run("eligible client, insufficient transfer after rollover", func(t *testing.T) {
+
+		rolloverToNextSLOKTime()
+
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1")).UpdateProgress(
+			&TrafficValues{
+				BytesRead:                       5,
+				BytesWritten:                    5,
+				PortForwardDurationMilliseconds: 5,
+			})
+
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 0 {
+			t.Fatalf("expected 0 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+	})
+
+	t.Run("eligible client, sufficient transfer, one port forward", func(t *testing.T) {
+
+		rolloverToNextSLOKTime()
+
+		clientSeedPortForward := clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1"))
+
+		clientSeedPortForward.UpdateProgress(
+			&TrafficValues{
+				BytesRead:                       5,
+				BytesWritten:                    5,
+				PortForwardDurationMilliseconds: 5,
+			})
+
+		clientSeedPortForward.UpdateProgress(
+			&TrafficValues{
+				BytesRead:                       5,
+				BytesWritten:                    5,
+				PortForwardDurationMilliseconds: 5,
+			})
+
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 1 {
+			t.Fatalf("expected 1 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+	})
+
+	t.Run("eligible client, sufficient transfer, multiple port forwards", func(t *testing.T) {
+
+		rolloverToNextSLOKTime()
+
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1")).UpdateProgress(
+			&TrafficValues{
+				BytesRead:                       5,
+				BytesWritten:                    5,
+				PortForwardDurationMilliseconds: 5,
+			})
+
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1")).UpdateProgress(
+			&TrafficValues{
+				BytesRead:                       5,
+				BytesWritten:                    5,
+				PortForwardDurationMilliseconds: 5,
+			})
+
+		// Expect 2 SLOKS: 1 new, and 1 remaining in payload.
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 2 {
+			t.Fatalf("expected 2 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+	})
+
+	t.Run("eligible client, sufficient transfer multiple SLOKs", func(t *testing.T) {
+
+		rolloverToNextSLOKTime()
+
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("192.168.0.1")).UpdateProgress(
+			&TrafficValues{
+				BytesRead:                       5,
+				BytesWritten:                    5,
+				PortForwardDurationMilliseconds: 5,
+			})
+
+		clientSeedState.NewClientSeedPortForward(net.ParseIP("10.0.0.1")).UpdateProgress(
+			&TrafficValues{
+				BytesRead:                       5,
+				BytesWritten:                    5,
+				PortForwardDurationMilliseconds: 5,
+			})
+
+		// Expect 4 SLOKS: 2 new, and 2 remaining in payload.
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 4 {
+			t.Fatalf("expected 4 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+	})
+
+	t.Run("no transfer required", func(t *testing.T) {
+
+		rolloverToNextSLOKTime()
+
+		clientSeedState := NewClientSeedState(config, "US", "36F1CF2DF1250BF0C7BA0629CE3DC657")
+
+		if len(clientSeedState.GetSeedPayload().SLOKs) != 1 {
+			t.Fatalf("expected 1 SLOKs, got %d", len(clientSeedState.GetSeedPayload().SLOKs))
+		}
+	})
+
+	signingPublicKey, signingPrivateKey, err := common.GenerateAuthenticatedDataPackageKeys()
+	if err != nil {
+		t.Fatalf("GenerateAuthenticatedDataPackageKeys failed: %s", err)
+	}
+
+	pavedDirectories := make(map[string][]byte)
+	pavedOSLFileContents := make(map[string]map[string][]byte)
+
+	t.Run("pave OSLs", func(t *testing.T) {
+
+		// Pave sufficient OSLs to cover simulated elapsed time of all test cases.
+		endTime := epoch.Add(1000 * time.Millisecond)
+
+		// In actual deployment, paved files for each propagation channel ID
+		// are dropped in distinct distribution sites.
+		for _, propagationChannelID := range []string{
+			"2995DB0C968C59C4F23E87988D9C0D41",
+			"E742C25A6D8BA8C17F37E725FA628569",
+			"36F1CF2DF1250BF0C7BA0629CE3DC657"} {
+
+			// Dummy server entry payloads will be the OSL ID, which the following
+			// tests use to verify that the correct OSL file decrypts successfully.
+			paveServerEntries := make([]map[time.Time][]byte, len(config.Schemes))
+			for schemeIndex, scheme := range config.Schemes {
+
+				paveServerEntries[schemeIndex] = make(map[time.Time][]byte)
+
+				slokTimePeriodsPerOSL := 1
+				for _, keySplit := range scheme.SeedPeriodKeySplits {
+					slokTimePeriodsPerOSL *= keySplit.Total
+				}
+
+				oslTime := scheme.epoch
+				for oslTime.Before(endTime) {
+					firstSLOKRef := &slokReference{
+						PropagationChannelID: propagationChannelID,
+						SeedSpecID:           string(scheme.SeedSpecs[0].ID),
+						Time:                 oslTime,
+					}
+					firstSLOK := deriveSLOK(scheme, firstSLOKRef)
+					oslID := firstSLOK.ID
+					paveServerEntries[schemeIndex][oslTime] = oslID
+
+					oslTime = oslTime.Add(
+						time.Duration(
+							int64(slokTimePeriodsPerOSL) * scheme.SeedPeriodNanoseconds))
+				}
+			}
+
+			paveFiles, err := config.Pave(
+				endTime,
+				propagationChannelID,
+				signingPublicKey,
+				signingPrivateKey,
+				paveServerEntries)
+			if err != nil {
+				t.Fatalf("PaveDirectory failed: %s", err)
+			}
+
+			// Check that the paved file name matches the name the client will look for.
+			if len(paveFiles) < 1 || paveFiles[len(paveFiles)-1].Name != GetDirectoryURL("") {
+				t.Fatalf("invalid directory pave file")
+			}
+
+			pavedDirectories[propagationChannelID] = paveFiles[len(paveFiles)-1].Contents
+
+			pavedOSLFileContents[propagationChannelID] = make(map[string][]byte)
+			for _, paveFile := range paveFiles[0:len(paveFiles)] {
+				pavedOSLFileContents[propagationChannelID][paveFile.Name] = paveFile.Contents
+			}
+		}
+	})
+
+	if len(pavedDirectories) != 3 {
+		// Previous subtest failed. Following tests cannot be completed, so abort.
+		t.Fatalf("pave failed")
+	}
+
+	// To ensure SLOKs are issued at precise time periods, the following tests
+	// bypass ClientSeedState and derive SLOKs directly.
+
+	expandRanges := func(ranges ...[2]int) []int {
+		a := make([]int, 0)
+		for _, r := range ranges {
+			for n := r[0]; n <= r[1]; n++ {
+				a = append(a, n)
+			}
+		}
+		return a
+	}
+
+	singleSplitPropagationChannelID := "36F1CF2DF1250BF0C7BA0629CE3DC657"
+	singleSplitScheme := config.Schemes[1]
+
+	doubleSplitPropagationChannelID := "2995DB0C968C59C4F23E87988D9C0D41"
+	doubleSplitScheme := config.Schemes[0]
+
+	keySplitTestCases := []struct {
+		description              string
+		propagationChannelID     string
+		scheme                   *Scheme
+		issueSLOKTimePeriods     []int
+		issueSLOKSeedSpecIndexes []int
+		expectedOSLCount         int
+	}{
+		{
+			"single split scheme: insufficient SLOK periods",
+			singleSplitPropagationChannelID,
+			singleSplitScheme,
+			expandRanges([2]int{0, 23}),
+			[]int{0, 1},
+			0,
+		},
+		{
+			"single split scheme: insufficient SLOK seed specs",
+			singleSplitPropagationChannelID,
+			singleSplitScheme,
+			expandRanges([2]int{0, 23}),
+			[]int{0},
+			0,
+		},
+		{
+			"single split scheme: sufficient SLOKs",
+			singleSplitPropagationChannelID,
+			singleSplitScheme,
+			expandRanges([2]int{0, 24}),
+			[]int{0, 1},
+			1,
+		},
+		{
+			"single split scheme: sufficient SLOKs (alternative seed specs)",
+			singleSplitPropagationChannelID,
+			singleSplitScheme,
+			expandRanges([2]int{0, 24}),
+			[]int{1, 2},
+			1,
+		},
+		{
+			"single split scheme: more than sufficient SLOKs",
+			singleSplitPropagationChannelID,
+			singleSplitScheme,
+			expandRanges([2]int{0, 49}),
+			[]int{0, 1},
+			1,
+		},
+		{
+			"double split scheme: insufficient SLOK periods",
+			doubleSplitPropagationChannelID,
+			doubleSplitScheme,
+			expandRanges([2]int{0, 4}, [2]int{10, 14}, [2]int{20, 24}, [2]int{30, 34}, [2]int{40, 43}),
+			[]int{0, 1},
+			0,
+		},
+		{
+			"double split scheme: insufficient SLOK period spread",
+			doubleSplitPropagationChannelID,
+			doubleSplitScheme,
+			expandRanges([2]int{0, 25}),
+			[]int{0, 1},
+			0,
+		},
+		{
+			"double split scheme: insufficient SLOK seed specs",
+			doubleSplitPropagationChannelID,
+			doubleSplitScheme,
+			expandRanges([2]int{0, 4}, [2]int{10, 14}, [2]int{20, 24}, [2]int{30, 34}, [2]int{40, 44}),
+			[]int{0},
+			0,
+		},
+		{
+			"double split scheme: sufficient SLOKs",
+			doubleSplitPropagationChannelID,
+			doubleSplitScheme,
+			expandRanges([2]int{0, 4}, [2]int{10, 14}, [2]int{20, 24}, [2]int{30, 34}, [2]int{40, 44}),
+			[]int{0, 1},
+			1,
+		},
+		{
+			"double split scheme: sufficient SLOKs (alternative seed specs)",
+			doubleSplitPropagationChannelID,
+			doubleSplitScheme,
+			expandRanges([2]int{0, 4}, [2]int{10, 14}, [2]int{20, 24}, [2]int{30, 34}, [2]int{40, 44}),
+			[]int{1, 2},
+			1,
+		},
+	}
+
+	for _, testCase := range keySplitTestCases {
+		t.Run(testCase.description, func(t *testing.T) {
+
+			slokMap := make(map[string][]byte)
+
+			for _, timePeriod := range testCase.issueSLOKTimePeriods {
+				for _, seedSpecIndex := range testCase.issueSLOKSeedSpecIndexes {
+
+					slok := deriveSLOK(
+						testCase.scheme,
+						&slokReference{
+							PropagationChannelID: testCase.propagationChannelID,
+							SeedSpecID:           string(testCase.scheme.SeedSpecs[seedSpecIndex].ID),
+							Time:                 epoch.Add(time.Duration(timePeriod) * time.Millisecond),
+						})
+
+					slokMap[string(slok.ID)] = slok.Key
+
+				}
+			}
+
+			t.Logf("SLOK count: %d", len(slokMap))
+
+			slokLookup := func(slokID []byte) []byte {
+				return slokMap[string(slokID)]
+			}
+
+			checkDirectoryStartTime := time.Now()
+
+			directory, err := LoadDirectory(
+				pavedDirectories[testCase.propagationChannelID], signingPublicKey)
+			if err != nil {
+				t.Fatalf("LoadDirectory failed: %s", err)
+			}
+
+			t.Logf("directory OSL count: %d", len(directory.FileSpecs))
+
+			oslIDs := directory.GetSeededOSLIDs(
+				slokLookup,
+				func(err error) {
+					// Actual client will treat errors as warnings.
+					t.Fatalf("GetSeededOSLIDs failed: %s", err)
+				})
+
+			t.Logf("check directory elapsed time: %s", time.Since(checkDirectoryStartTime))
+
+			if len(oslIDs) != testCase.expectedOSLCount {
+				t.Fatalf("expected %d OSLs got %d", testCase.expectedOSLCount, len(oslIDs))
+			}
+
+			for _, oslID := range oslIDs {
+				oslFileContents, ok :=
+					pavedOSLFileContents[testCase.propagationChannelID][GetOSLFileURL("", oslID)]
+				if !ok {
+					t.Fatalf("unknown OSL file name")
+				}
+
+				plaintextOSL, err := directory.DecryptOSL(slokLookup, oslID, oslFileContents)
+				if err != nil {
+					t.Fatalf("DecryptOSL failed: %s", err)
+				}
+
+				// The decrypted OSL should contain its own ID.
+				if bytes.Compare(plaintextOSL, oslID) != 0 {
+					t.Fatalf("unexpected OSL file contents")
+				}
+			}
+		})
+	}
+}

+ 152 - 0
psiphon/common/subnet.go

@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2016, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package common
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"net"
+	"sort"
+	"strings"
+)
+
+// SubnetLookup provides an efficient lookup for individual
+// IP addresses within a list of subnets.
+type SubnetLookup []net.IPNet
+
+// NewSubnetLookup creates a SubnetLookup from a list of
+// subnet CIDRs.
+func NewSubnetLookup(CIDRs []string) (SubnetLookup, error) {
+
+	subnets := make([]net.IPNet, len(CIDRs))
+
+	for i, CIDR := range CIDRs {
+		_, network, err := net.ParseCIDR(CIDR)
+		if err != nil {
+			return nil, ContextError(err)
+		}
+		subnets[i] = *network
+	}
+
+	lookup := SubnetLookup(subnets)
+	sort.Sort(lookup)
+
+	return lookup, nil
+}
+
+// NewSubnetLookupFromRoutes creates a SubnetLookup from text routes data.
+// The input format is expected to be text lines where each line
+// is, e.g., "1.2.3.0\t255.255.255.0\n"
+func NewSubnetLookupFromRoutes(routesData []byte) (SubnetLookup, error) {
+
+	// Parse text routes data
+	var subnets []net.IPNet
+	scanner := bufio.NewScanner(bytes.NewReader(routesData))
+	scanner.Split(bufio.ScanLines)
+	for scanner.Scan() {
+		s := strings.Split(scanner.Text(), "\t")
+		if len(s) != 2 {
+			continue
+		}
+
+		ip := parseIPv4(s[0])
+		mask := parseIPv4Mask(s[1])
+		if ip == nil || mask == nil {
+			continue
+		}
+
+		subnets = append(subnets, net.IPNet{IP: ip.Mask(mask), Mask: mask})
+	}
+	if len(subnets) == 0 {
+		return nil, ContextError(errors.New("Routes data contains no networks"))
+	}
+
+	lookup := SubnetLookup(subnets)
+	sort.Sort(lookup)
+
+	return lookup, nil
+}
+
+func parseIPv4(s string) net.IP {
+	ip := net.ParseIP(s)
+	if ip == nil {
+		return nil
+	}
+	return ip.To4()
+}
+
+func parseIPv4Mask(s string) net.IPMask {
+	ip := parseIPv4(s)
+	if ip == nil {
+		return nil
+	}
+	mask := net.IPMask(ip)
+	if bits, size := mask.Size(); bits == 0 || size == 0 {
+		return nil
+	}
+	return mask
+}
+
+// Len implements Sort.Interface
+func (lookup SubnetLookup) Len() int {
+	return len(lookup)
+}
+
+// Swap implements Sort.Interface
+func (lookup SubnetLookup) Swap(i, j int) {
+	lookup[i], lookup[j] = lookup[j], lookup[i]
+}
+
+// Less implements Sort.Interface
+func (lookup SubnetLookup) Less(i, j int) bool {
+	return binary.BigEndian.Uint32(lookup[i].IP) < binary.BigEndian.Uint32(lookup[j].IP)
+}
+
+// ContainsIPAddress performs a binary search on the sorted subnet
+// list to find a network containing the candidate IP address.
+func (lookup SubnetLookup) ContainsIPAddress(addr net.IP) bool {
+
+	// Search criteria
+	//
+	// The following conditions are satisfied when address_IP is in the network:
+	// 1. address_IP ^ network_mask == network_IP ^ network_mask
+	// 2. address_IP >= network_IP.
+	// We are also assuming that network ranges do not overlap.
+	//
+	// For an ascending array of networks, the sort.Search returns the smallest
+	// index idx for which condition network_IP > address_IP is satisfied, so we
+// are checking whether or not address_IP belongs to the network[idx-1].
+
+	// Edge conditions check
+	//
+// idx == 0 means that address_IP is less than the first (smallest) network_IP
+	// thus never satisfies search condition 2.
+	// idx == array_length means that address_IP is larger than the last (largest)
+	// network_IP so we need to check the last element for condition 1.
+
+	addrValue := binary.BigEndian.Uint32(addr.To4())
+	index := sort.Search(len(lookup), func(i int) bool {
+		networkValue := binary.BigEndian.Uint32(lookup[i].IP)
+		return networkValue > addrValue
+	})
+	return index > 0 && lookup[index-1].IP.Equal(addr.Mask(lookup[index-1].Mask))
+}

+ 126 - 0
psiphon/common/subnet_test.go

@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2016, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package common
+
+import (
+	"encoding/binary"
+	"io/ioutil"
+	"math/rand"
+	"net"
+	"testing"
+)
+
+func TestSubnetLookup(t *testing.T) {
+	CIDRs := []string{
+		"192.168.0.0/16",
+		"10.0.0.0/8",
+		"172.16.0.0/12",
+		"100.64.0.0/10"}
+
+	routes := []byte("192.168.0.0\t255.255.0.0\n10.0.0.0\t255.0.0.0\n" +
+		"172.16.0.0\t255.240.0.0\n100.64.0.0\t255.192.0.0\n")
+
+	var subnetLookup SubnetLookup
+
+	t.Run("new subnet lookup", func(t *testing.T) {
+
+		var err error
+		subnetLookup, err = NewSubnetLookup(CIDRs)
+		if err != nil {
+			t.Fatalf("NewSubnetLookup failed: %s", err)
+		}
+	})
+
+	var subnetLookupRoutes SubnetLookup
+
+	t.Run("new subnet lookup (routes case)", func(t *testing.T) {
+
+		var err error
+		subnetLookupRoutes, err = NewSubnetLookupFromRoutes(routes)
+		if err != nil {
+			t.Fatalf("NewSubnetLookupFromRoutes failed: %s", err)
+		}
+	})
+
+	if subnetLookup == nil || subnetLookupRoutes == nil {
+		t.Fatalf("new subnet list failed")
+	}
+
+	testCases := []struct {
+		description    string
+		ipAddress      net.IP
+		expectedResult bool
+	}{
+		{"IP address in subnet", net.ParseIP("172.17.3.2"), true},
+		{"IP address not in subnet", net.ParseIP("169.254.1.1"), false},
+		{"IP address not in subnet (prefix case)", net.ParseIP("172.15.3.2"), false},
+	}
+
+	for _, testCase := range testCases {
+		t.Run(testCase.description, func(t *testing.T) {
+
+			result := subnetLookup.ContainsIPAddress(testCase.ipAddress)
+			if result != testCase.expectedResult {
+				t.Fatalf(
+					"ContainsIPAddress returned %+v expected %+v",
+					result, testCase.expectedResult)
+			}
+
+			result = subnetLookupRoutes.ContainsIPAddress(testCase.ipAddress)
+			if result != testCase.expectedResult {
+				t.Fatalf(
+					"ContainsIPAddress (routes case) returned %+v expected %+v",
+					result, testCase.expectedResult)
+			}
+		})
+	}
+}
+
+func BenchmarkSubnetLookup(b *testing.B) {
+
+	var subnetLookup SubnetLookup
+
+	b.Run("load routes file", func(b *testing.B) {
+
+		routesData, err := ioutil.ReadFile("test_routes.dat")
+		if err != nil {
+			b.Skipf("can't load test routes file: %s", err)
+		}
+
+		for n := 0; n < b.N; n++ {
+			subnetLookup, err = NewSubnetLookupFromRoutes(routesData)
+			if err != nil {
+				b.Fatalf("NewSubnetLookup failed: %s", err)
+			}
+		}
+	})
+
+	if subnetLookup == nil {
+		b.Skipf("no test routes file")
+	}
+
+	b.Run("lookup random IP address", func(b *testing.B) {
+		for n := 0; n < b.N; n++ {
+			ip := make([]byte, 4)
+			binary.BigEndian.PutUint32(ip, rand.Uint32())
+			_ = subnetLookup.ContainsIPAddress(net.IP(ip))
+		}
+	})
+}

+ 0 - 80
psiphon/package.go

@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 2015, Psiphon Inc.
- * All rights reserved.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-package psiphon
-
-import (
-	"crypto"
-	"crypto/rsa"
-	"crypto/sha256"
-	"crypto/x509"
-	"encoding/base64"
-	"encoding/json"
-	"errors"
-
-	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
-)
-
-// AuthenticatedDataPackage is a JSON record containing some Psiphon data
-// payload, such as list of Psiphon server entries. As it may be downloaded
-// from various sources, it is digitally signed so that the data may be
-// authenticated.
-type AuthenticatedDataPackage struct {
-	Data                   string `json:"data"`
-	SigningPublicKeyDigest string `json:"signingPublicKeyDigest"`
-	Signature              string `json:"signature"`
-}
-
-func ReadAuthenticatedDataPackage(
-	rawPackage []byte, signingPublicKey string) (data string, err error) {
-
-	var authenticatedDataPackage *AuthenticatedDataPackage
-	err = json.Unmarshal(rawPackage, &authenticatedDataPackage)
-	if err != nil {
-		return "", common.ContextError(err)
-	}
-
-	derEncodedPublicKey, err := base64.StdEncoding.DecodeString(signingPublicKey)
-	if err != nil {
-		return "", common.ContextError(err)
-	}
-	publicKey, err := x509.ParsePKIXPublicKey(derEncodedPublicKey)
-	if err != nil {
-		return "", common.ContextError(err)
-	}
-	rsaPublicKey, ok := publicKey.(*rsa.PublicKey)
-	if !ok {
-		return "", common.ContextError(errors.New("unexpected signing public key type"))
-	}
-	signature, err := base64.StdEncoding.DecodeString(authenticatedDataPackage.Signature)
-	if err != nil {
-		return "", common.ContextError(err)
-	}
-	// TODO: can distinguish signed-with-different-key from other errors:
-	// match digest(publicKey) against authenticatedDataPackage.SigningPublicKeyDigest
-	hash := sha256.New()
-	hash.Write([]byte(authenticatedDataPackage.Data))
-	digest := hash.Sum(nil)
-	err = rsa.VerifyPKCS1v15(rsaPublicKey, crypto.SHA256, digest, signature)
-	if err != nil {
-		return "", common.ContextError(err)
-	}
-
-	return authenticatedDataPackage.Data, nil
-}

+ 1 - 1
psiphon/remoteServerList.go

@@ -101,7 +101,7 @@ func FetchRemoteServerList(
 		return common.ContextError(err)
 	}
 
-	remoteServerList, err := ReadAuthenticatedDataPackage(
+	remoteServerList, err := common.ReadAuthenticatedDataPackage(
 		dataPackage, config.RemoteServerListSignaturePublicKey)
 	if err != nil {
 		return common.ContextError(err)

+ 4 - 112
psiphon/splitTunnel.go

@@ -20,18 +20,14 @@
 package psiphon
 
 import (
-	"bufio"
 	"bytes"
 	"compress/zlib"
 	"encoding/base64"
-	"encoding/binary"
 	"errors"
 	"fmt"
 	"io/ioutil"
 	"net"
 	"net/http"
-	"sort"
-	"strings"
 	"sync"
 	"time"
 
@@ -79,7 +75,7 @@ type SplitTunnelClassifier struct {
 	fetchRoutesWaitGroup     *sync.WaitGroup
 	isRoutesSet              bool
 	cache                    map[string]*classification
-	routes                   networkList
+	routes                   common.SubnetLookup
 }
 
 type classification struct {
@@ -280,7 +276,7 @@ func (classifier *SplitTunnelClassifier) getRoutes(tunnel *Tunnel) (routesData [
 
 	var encodedRoutesData string
 	if !useCachedRoutes {
-		encodedRoutesData, err = ReadAuthenticatedDataPackage(
+		encodedRoutesData, err = common.ReadAuthenticatedDataPackage(
 			routesDataPackage, classifier.routesSignaturePublicKey)
 		if err != nil {
 			NoticeAlert("failed to read split tunnel routes package: %s", common.ContextError(err))
@@ -347,7 +343,7 @@ func (classifier *SplitTunnelClassifier) installRoutes(routesData []byte) (err e
 	classifier.mutex.Lock()
 	defer classifier.mutex.Unlock()
 
-	classifier.routes, err = NewNetworkList(routesData)
+	classifier.routes, err = common.NewSubnetLookupFromRoutes(routesData)
 	if err != nil {
 		return common.ContextError(err)
 	}
@@ -362,111 +358,7 @@ func (classifier *SplitTunnelClassifier) ipAddressInRoutes(ipAddr net.IP) bool {
 	classifier.mutex.RLock()
 	defer classifier.mutex.RUnlock()
 
-	return classifier.routes.ContainsIpAddress(ipAddr)
-}
-
-// networkList is a sorted list of network ranges. It's used to
-// lookup candidate IP addresses for split tunnel classification.
-// networkList implements Sort.Interface.
-type networkList []net.IPNet
-
-// NewNetworkList parses text routes data and produces a networkList
-// for fast ContainsIpAddress lookup.
-// The input format is expected to be text lines where each line
-// is, e.g., "1.2.3.0\t255.255.255.0\n"
-func NewNetworkList(routesData []byte) (networkList, error) {
-
-	// Parse text routes data
-	var list networkList
-	scanner := bufio.NewScanner(bytes.NewReader(routesData))
-	scanner.Split(bufio.ScanLines)
-	for scanner.Scan() {
-		s := strings.Split(scanner.Text(), "\t")
-		if len(s) != 2 {
-			continue
-		}
-
-		ip := parseIPv4(s[0])
-		mask := parseIPv4Mask(s[1])
-		if ip == nil || mask == nil {
-			continue
-		}
-
-		list = append(list, net.IPNet{IP: ip.Mask(mask), Mask: mask})
-	}
-	if len(list) == 0 {
-		return nil, common.ContextError(errors.New("Routes data contains no networks"))
-	}
-
-	// Sort data for fast lookup
-	sort.Sort(list)
-
-	return list, nil
-}
-
-func parseIPv4(s string) net.IP {
-	ip := net.ParseIP(s)
-	if ip == nil {
-		return nil
-	}
-	return ip.To4()
-}
-
-func parseIPv4Mask(s string) net.IPMask {
-	ip := parseIPv4(s)
-	if ip == nil {
-		return nil
-	}
-	mask := net.IPMask(ip)
-	if bits, size := mask.Size(); bits == 0 || size == 0 {
-		return nil
-	}
-	return mask
-}
-
-// Len implementes Sort.Interface
-func (list networkList) Len() int {
-	return len(list)
-}
-
-// Swap implementes Sort.Interface
-func (list networkList) Swap(i, j int) {
-	list[i], list[j] = list[j], list[i]
-}
-
-// Less implementes Sort.Interface
-func (list networkList) Less(i, j int) bool {
-	return binary.BigEndian.Uint32(list[i].IP) < binary.BigEndian.Uint32(list[j].IP)
-}
-
-// ContainsIpAddress performs a binary search on the networkList to
-// find a network containing the candidate IP address.
-func (list networkList) ContainsIpAddress(addr net.IP) bool {
-
-	// Search criteria
-	//
-	// The following conditions are satisfied when address_IP is in the network:
-	// 1. address_IP ^ network_mask == network_IP ^ network_mask
-	// 2. address_IP >= network_IP.
-	// We are also assuming that network ranges do not overlap.
-	//
-	// For an ascending array of networks, the sort.Search returns the smallest
-	// index idx for which condition network_IP > address_IP is satisfied, so we
-	// are checking whether or not adrress_IP belongs to the network[idx-1].
-
-	// Edge conditions check
-	//
-	// idx == 0 means that address_IP is  lesser than the first (smallest) network_IP
-	// thus never satisfies search condition 2.
-	// idx == array_length means that address_IP is larger than the last (largest)
-	// network_IP so we need to check the last element for condition 1.
-
-	addrValue := binary.BigEndian.Uint32(addr.To4())
-	index := sort.Search(len(list), func(i int) bool {
-		networkValue := binary.BigEndian.Uint32(list[i].IP)
-		return networkValue > addrValue
-	})
-	return index > 0 && list[index-1].IP.Equal(addr.Mask(list[index-1].Mask))
+	return classifier.routes.ContainsIPAddress(ipAddr)
 }
 
 // tunneledLookupIP resolves a split tunnel candidate hostname with a tunneled

+ 0 - 38
psiphon/splitTunnel_test.go

@@ -1,38 +0,0 @@
-package psiphon
-
-import (
-	"encoding/binary"
-	"io/ioutil"
-	"math/rand"
-	"net"
-	"testing"
-)
-
-var netList networkList
-var isLocalAddr bool
-
-func Benchmark_NewNetworkList(b *testing.B) {
-
-	routesData, err := ioutil.ReadFile("test_routes.dat")
-	if err != nil {
-		b.Skipf("can't load test routes file: %s", err)
-	}
-
-	for n := 0; n < b.N; n++ {
-		netList, _ = NewNetworkList(routesData)
-	}
-}
-
-func Benchmark_containsRandomAddr(b *testing.B) {
-
-	if netList == nil {
-		b.Skipf("no test routes file")
-	}
-
-	rand.Seed(0)
-	for n := 0; n < b.N; n++ {
-		ip := make([]byte, 4)
-		binary.BigEndian.PutUint32(ip, rand.Uint32())
-		isLocalAddr = netList.ContainsIpAddress(net.IP(ip))
-	}
-}