Browse Source

Merge remote-tracking branch 'upstream/master'

Eugene Fryntov 10 years ago
parent
commit
0387e7c45d

+ 5 - 0
.gitignore

@@ -3,6 +3,11 @@ psiphon_config
 psiphon.config
 psiphon.config
 controller_test.config
 controller_test.config
 psiphon.db*
 psiphon.db*
+psiphon.boltdb
+
+# Exclude compiled tunnel core binaries
+ConsoleClient/ConsoleClient
+ConsoleClient/bin
 
 
 # Compiled Object files, Static and Dynamic libs (Shared Objects)
 # Compiled Object files, Static and Dynamic libs (Shared Objects)
 *.o
 *.o

+ 1 - 1
.travis.yml

@@ -1,6 +1,6 @@
 language: go
 language: go
 go:
 go:
-- 1.4
+- 1.5
 addons:
 addons:
   apt_packages:
   apt_packages:
     - libx11-dev
     - libx11-dev

+ 1 - 0
AndroidLibrary/README.md

@@ -20,6 +20,7 @@ Follow Go Android documentation:
 * [gomobile documentation](https://godoc.org/golang.org/x/mobile/cmd/gomobile)
 * [gomobile documentation](https://godoc.org/golang.org/x/mobile/cmd/gomobile)
 * Requires Go 1.5 or later.
 * Requires Go 1.5 or later.
 * Build command: `gomobile bind -target=android github.com/Psiphon-Labs/psiphon-tunnel-core/AndroidLibrary/psi`
 * Build command: `gomobile bind -target=android github.com/Psiphon-Labs/psiphon-tunnel-core/AndroidLibrary/psi`
+  * Record build version info, as described [here](https://github.com/Psiphon-Labs/psiphon-tunnel-core/blob/master/README.md#setup), by passing a `-ldflags` argument to `gomobile bind`.
 * Output: `psi.aar`
 * Output: `psi.aar`
 
 
 Using
 Using

+ 1 - 0
ConsoleClient/.gitignore

@@ -0,0 +1 @@
+bin

+ 12 - 23
ConsoleClient/Dockerfile

@@ -2,36 +2,25 @@
 #
 #
 # See README.md for usage instructions.
 # See README.md for usage instructions.
 
 
-FROM ubuntu:12.04
+FROM ubuntu:15.04
 
 
-ENV GOVERSION=go1.4.1
+ENV GOVERSION=go1.5
 
 
 # Install system-level dependencies.
 # Install system-level dependencies.
 ENV DEBIAN_FRONTEND=noninteractive
 ENV DEBIAN_FRONTEND=noninteractive
-RUN apt-get update && \
-  apt-get -y install build-essential python-software-properties bzip2 unzip curl \
-    git subversion mercurial bzr \
-    upx gcc-mingw-w64-i686 gcc-mingw-w64-x86-64 gcc-multilib
+RUN apt-get update && apt-get -y install build-essential curl git mercurial upx gcc-mingw-w64-i686 gcc-mingw-w64-x86-64 gcc-multilib
 
 
 # Install Go.
 # Install Go.
-ENV GOROOT=/go \
-  GOPATH=/
-ENV PATH=$PATH:$GOROOT/bin
-RUN echo "INSTALLING GO" && \
-  curl -L https://github.com/golang/go/archive/$GOVERSION.zip -o /tmp/go.zip && \
-  unzip /tmp/go.zip && \
-  rm /tmp/go.zip && \
-  mv /go-$GOVERSION $GOROOT && \
-  echo $GOVERSION > $GOROOT/VERSION && \
-  cd $GOROOT/src && \
-  ./all.bash
+ENV GOROOT=/usr/local/go GOPATH=/go
+ENV PATH=$PATH:$GOROOT/bin:$GOPATH/bin
+
+RUN curl -L https://storage.googleapis.com/golang/$GOVERSION.linux-amd64.tar.gz -o /tmp/go.tar.gz && \
+  tar -C /usr/local -xzf /tmp/go.tar.gz && \
+  rm /tmp/go.tar.gz && \
+  echo $GOVERSION > $GOROOT/VERSION
 
 
 ENV CGO_ENABLED=1
 ENV CGO_ENABLED=1
-RUN go get github.com/mitchellh/gox && \
-  go get github.com/inconshreveable/gonative && \
-  mkdir -p /usr/local/gonative && \
-  cd /usr/local/gonative && \
-  gonative build
-ENV PATH=/usr/local/gonative/go/bin:$PATH
+
+RUN go get github.com/mitchellh/gox && go get github.com/pwaller/goupx
 
 
 WORKDIR $GOPATH/src
 WORKDIR $GOPATH/src

+ 37 - 16
ConsoleClient/README.md

@@ -1,24 +1,45 @@
-Psiphon Console Client README
-================================================================================
+## Psiphon Console Client README
 
 
-### Building with Docker
+### Building with Docker
 
 
 Note that you may need to use `sudo docker` below, depending on your OS.
 Note that you may need to use `sudo docker` below, depending on your OS.
 
 
-Create the build image:
+##### Create the build image:
+  1. Run the command: `docker build --no-cache=true -t psiclient .` (this may take some time to complete)
+  2. Once completed, verify that you see an image named `psiclient` when running: `docker images`
 
 
-```bash
-# While in the same directory as the Dockerfile...
-$ docker build --no-cache=true -t psigoconsole .
-# That will take a long time to complete.
-# After it's done, you'll have an image called "psigoconsole". Check with...
-$ docker images
-```
+##### Run the build:
+  *Ensure that the command below is run from within the `ConsoleClient` directory*
 
 
-To do the build:
+  ```bash
+  cd .. && \
+    docker run \
+    --rm \
+    -v $(pwd):/go/src/github.com/Psiphon-Labs/psiphon-tunnel-core \
+    psiclient \
+    /bin/bash -c 'cd /go/src/github.com/Psiphon-Labs/psiphon-tunnel-core/ConsoleClient && ./make.bash all' \
+  ; cd -
+  ```
+This command can also be modified by replacing `all` with `windows`, `linux`, or `osx` as the first parameter to `make.bash` (as in `...&& ./make.bash windows`) to build binaries only for the operating system of choice.
 
 
-```bash
-$ docker run --rm -v $GOPATH/src:/src psigoconsole /bin/bash -c 'cd /src/github.com/Psiphon-Labs/psiphon-tunnel-core/ConsoleClient && ./make.bash'
-```
+When that command completes, the compiled binaries will be located in the `bin` directory under the current directory. (Note that `./bin` and everything under it will likely be owned by root, so be sure to `chown` it to an appropriate user.) The structure will be:
+  ```
+  bin
+  ├── darwin
+  │   └── psiphon-tunnel-core-x86_64
+  ├── linux
+  │   └── psiphon-tunnel-core-i686
+  │   └── psiphon-tunnel-core-x86_64
+  └── windows
+      └── psiphon-tunnel-core-i686.exe
+      └── psiphon-tunnel-core-x86_64.exe
 
 
-When that command completes, the compiled library will be located at `windows_386/psiphon-tunnel-core.exe`.
+  ```
+
+### Building without Docker
+
+See the [main README build section](../README.md#build)
+
+### Creating a configuration file
+
+See the [main README configuration section](../README.md#configure)

+ 84 - 31
ConsoleClient/make.bash

@@ -1,49 +1,102 @@
 #!/usr/bin/env bash
 #!/usr/bin/env bash
 
 
 set -e
 set -e
-set -exv # verbose output for testing
 
 
 if [ ! -f make.bash ]; then
 if [ ! -f make.bash ]; then
-  echo 'make.bash must be run from $GOPATH/src/github.com/Psiphon-Labs/psiphon-tunnel-core/ConsoleClient'
+  echo "make.bash must be run from $GOPATH/src/github.com/Psiphon-Labs/psiphon-tunnel-core/ConsoleClient"
   exit 1
   exit 1
 fi
 fi
 
 
-CGO_ENABLED=1
-
-# Make sure we have our dependencies
-echo -e "go-getting dependencies...\n"
-go get -d -v ./...
-
 EXE_BASENAME="psiphon-tunnel-core"
 EXE_BASENAME="psiphon-tunnel-core"
 BUILDINFOFILE="${EXE_BASENAME}_buildinfo.txt"
 BUILDINFOFILE="${EXE_BASENAME}_buildinfo.txt"
 BUILDDATE=$(date --iso-8601=seconds)
 BUILDDATE=$(date --iso-8601=seconds)
 BUILDREPO=$(git config --get remote.origin.url)
 BUILDREPO=$(git config --get remote.origin.url)
-BUILDREV=$(git rev-parse HEAD)
+BUILDREV=$(git rev-parse --short HEAD)
+
 LDFLAGS="\
 LDFLAGS="\
 -X github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon.buildDate $BUILDDATE \
 -X github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon.buildDate $BUILDDATE \
 -X github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon.buildRepo $BUILDREPO \
 -X github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon.buildRepo $BUILDREPO \
 -X github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon.buildRev $BUILDREV \
 -X github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon.buildRev $BUILDREV \
 "
 "
 echo -e "${BUILDDATE}\n${BUILDREPO}\n${BUILDREV}\n" > $BUILDINFOFILE
 echo -e "${BUILDDATE}\n${BUILDREPO}\n${BUILDREV}\n" > $BUILDINFOFILE
-echo -e "LDFLAGS=$LDFLAGS\n"
-
-echo -e "\nBuilding windows-386..."
-CC=/usr/bin/i686-w64-mingw32-gcc \
-  gox -verbose -ldflags "$LDFLAGS" -osarch windows/386 -output windows_386_${EXE_BASENAME}
-# We are finding that UPXing the full Windows Psiphon client produces better results
-# if psiphon-tunnel-core.exe is not already UPX'd.
-#upx --best windows_386_${EXE_BASENAME}.exe
-
-echo -e "\nBuilding windows-amd64..."
-CC=/usr/bin/x86_64-w64-mingw32-gcc \
-  gox -verbose -ldflags "$LDFLAGS" -osarch windows/amd64 -output windows_amd64_${EXE_BASENAME}
-upx --best windows_amd64_${EXE_BASENAME}.exe
-
-echo -e "\nBuilding linux-amd64..."
-gox -verbose -ldflags "$LDFLAGS" -osarch linux/amd64 -output linux_amd64_${EXE_BASENAME}
-upx --best linux_amd64_${EXE_BASENAME}
-
-echo -e "\nBuilding linux-386..."
-CFLAGS=-m32 \
-  gox -verbose -ldflags "$LDFLAGS" -osarch linux/386 -output linux_386_${EXE_BASENAME}
-upx --best linux_386_${EXE_BASENAME}
+
+echo "Variables for ldflags:"
+echo " Build date: ${BUILDDATE}"
+echo " Build repo: ${BUILDREPO}"
+echo " Build revision: ${BUILDREV}"
+echo ""
+
+if [ ! -d bin ]; then
+  mkdir bin
+fi
+
+build_for_windows () {
+  echo "...Getting project dependencies (via go get) for Windows"
+  GOOS=windows go get -d -v ./...
+
+  echo "...Building windows-i686"
+  CC=/usr/bin/i686-w64-mingw32-gcc gox -verbose -ldflags "$LDFLAGS" -osarch windows/386 -output bin/windows/${EXE_BASENAME}-i686
+  # We are finding that UPXing the full Windows Psiphon client produces better results if psiphon-tunnel-core.exe is not already UPX'd.
+  echo "....No UPX for this build"
+
+  echo "...Building windows-x86_64"
+  CC=/usr/bin/x86_64-w64-mingw32-gcc gox -verbose -ldflags "$LDFLAGS" -osarch windows/amd64 -output bin/windows/${EXE_BASENAME}-x86_64
+  # We are finding that UPXing the full Windows Psiphon client produces better results if psiphon-tunnel-core.exe is not already UPX'd.
+  echo "....No UPX for this build"
+}
+
+build_for_linux () {
+  echo "Getting project dependencies (via go get) for Linux"
+  GOOS=linux go get -d -v ./...
+
+  echo "...Building linux-i686"
+  CFLAGS=-m32 gox -verbose -ldflags "$LDFLAGS" -osarch linux/386 -output bin/linux/${EXE_BASENAME}-i686
+  echo "....UPX packaging output"
+  goupx --best bin/linux/${EXE_BASENAME}-i686
+
+  echo "...Building linux-x86_64"
+  gox -verbose -ldflags "$LDFLAGS" -osarch linux/amd64 -output bin/linux/${EXE_BASENAME}-x86_64
+  echo "....UPX packaging output"
+  goupx --best bin/linux/${EXE_BASENAME}-x86_64
+}
+
+build_for_osx () {
+  echo "Getting project dependencies (via go get) for OSX"
+  GOOS=darwin go get -d -v ./...
+
+  echo "Building darwin-x86_64..."
+  CGO_ENABLED=0 gox -verbose -ldflags "$LDFLAGS" -osarch darwin/amd64 -output bin/darwin/${EXE_BASENAME}-x86_64
+  # Darwin binaries don't seem to be UPXable when built this way
+  echo "..No UPX for this build"
+}
+
+TARGET=$1
+case $TARGET in
+  windows)
+    echo "..Building for Windows"
+    build_for_windows
+    ;;
+  linux)
+    echo "..Building for Linux"
+    build_for_linux
+    ;;
+  osx)
+    echo "..Building for OSX"
+    build_for_osx
+    ;;
+  all)
+    echo "..Building all"
+    build_for_windows
+    build_for_linux
+    build_for_osx
+    ;;
+  *)
+    echo "..No selection made, building all"
+    build_for_windows
+    build_for_linux
+    build_for_osx
+    ;;
+
+esac
+
+echo "Done"

+ 7 - 0
ConsoleClient/psiphonClient.go

@@ -47,6 +47,9 @@ func main() {
 	var profileFilename string
 	var profileFilename string
 	flag.StringVar(&profileFilename, "profile", "", "CPU profile output file")
 	flag.StringVar(&profileFilename, "profile", "", "CPU profile output file")
 
 
+	var interfaceName string
+	flag.StringVar(&interfaceName, "listenInterface", "", "Interface Name")
+
 	flag.Parse()
 	flag.Parse()
 
 
 	// Initialize default Notice output (stderr)
 	// Initialize default Notice output (stderr)
@@ -153,6 +156,10 @@ func main() {
 		}
 		}
 	}
 	}
 
 
+	if interfaceName != "" {
+		config.ListenInterface = interfaceName
+	}
+
 	// Run Psiphon
 	// Run Psiphon
 
 
 	controller, err := psiphon.NewController(config)
 	controller, err := psiphon.NewController(config)

+ 25 - 7
README.md

@@ -18,7 +18,9 @@ This project is currently at the proof-of-concept stage. Current production Psip
 Setup
 Setup
 --------------------------------------------------------------------------------
 --------------------------------------------------------------------------------
 
 
-* Go 1.4 (or higher) is required.
+#### Build
+
+* Go 1.5 (or higher) is required.
 * This project builds and runs on recent versions of Windows, Linux, and Mac OS X.
 * This project builds and runs on recent versions of Windows, Linux, and Mac OS X.
 * Note that the `psiphon` package is imported using the absolute path `github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon`; without further local configuration, `go` will use this version of the code and not the local copy in the repository.
 * Note that the `psiphon` package is imported using the absolute path `github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon`; without further local configuration, `go` will use this version of the code and not the local copy in the repository.
 * In this repository, run `go build` in `ConsoleClient` to make the `ConsoleClient` binary, a console Psiphon client application.
 * In this repository, run `go build` in `ConsoleClient` to make the `ConsoleClient` binary, a console Psiphon client application.
@@ -29,13 +31,16 @@ Setup
     BUILDREPO=$(git config --get remote.origin.url)
     BUILDREPO=$(git config --get remote.origin.url)
     BUILDREV=$(git rev-parse HEAD)
     BUILDREV=$(git rev-parse HEAD)
     LDFLAGS="\
     LDFLAGS="\
-    -X github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon.buildDate $BUILDDATE \
-    -X github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon.buildRepo $BUILDREPO \
-    -X github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon.buildRev $BUILDREV \
+    -X github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon.buildDate=$BUILDDATE \
+    -X github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon.buildRepo=$BUILDREPO \
+    -X github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon.buildRev=$BUILDREV \
     "
     "
     ```
     ```
 
 
-* Run `./ConsoleClient --config psiphon.config` where the config file looks like this:
+#### Configure
+
+ * Configuration files are standard text files containing a valid JSON object. Example:
+
 
 
   <!--BEGIN-SAMPLE-CONFIG-->
   <!--BEGIN-SAMPLE-CONFIG-->
   ```
   ```
@@ -48,10 +53,23 @@ Setup
   ```
   ```
   <!--END-SAMPLE-CONFIG-->
   <!--END-SAMPLE-CONFIG-->
 
 
-* Config file parameters are [documented here](https://godoc.org/github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon#Config).
-* Replace each `<placeholder>` with a value from your Psiphon network. The Psiphon server-side stack is open source and can be found in our  [Psiphon 3 repository](https://bitbucket.org/psiphon/psiphon-circumvention-system). If you would like to use the Psiphon Inc. network, contact <developer-support@psiphon.ca>.
+*Note: The lines `<!--BEGIN-SAMPLE-CONFIG-->` and `<!--END-SAMPLE-CONFIG-->` (visible in the raw Markdown) are used by the [config test](psiphon/config_test.go). Do not remove them.*
+
+* All config file parameters are [documented here](https://godoc.org/github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon#Config).
+* Replace each `<placeholder>` with a value from your Psiphon server. The Psiphon server-side stack is open source and can be found in our [Psiphon 3 repository](https://bitbucket.org/psiphon/psiphon-circumvention-system).
+
+
+#### Run
+
+* Run `./ConsoleClient --config psiphon.config` where `psiphon.config` is created as described in the [Configure](#configure) section above
+
+
+Other Platforms
+--------------------------------------------------------------------------------
+
 * The project builds and runs on Android. See the [AndroidLibrary README](AndroidLibrary/README.md) for more information about building the Go component, and the [AndroidApp README](AndroidApp/README.md) for a sample Android app that uses it.
 * The project builds and runs on Android. See the [AndroidLibrary README](AndroidLibrary/README.md) for more information about building the Go component, and the [AndroidApp README](AndroidApp/README.md) for a sample Android app that uses it.
 
 
+
 Licensing
 Licensing
 --------------------------------------------------------------------------------
 --------------------------------------------------------------------------------
 
 

+ 103 - 18
SampleApps/Psibot/app/src/main/java/ca/psiphon/PsiphonTunnel.java

@@ -27,25 +27,36 @@ import android.net.NetworkInfo;
 import android.net.VpnService;
 import android.net.VpnService;
 import android.os.Build;
 import android.os.Build;
 import android.os.ParcelFileDescriptor;
 import android.os.ParcelFileDescriptor;
+import android.util.Base64;
 
 
 import org.apache.http.conn.util.InetAddressUtils;
 import org.apache.http.conn.util.InetAddressUtils;
 import org.json.JSONArray;
 import org.json.JSONArray;
 import org.json.JSONException;
 import org.json.JSONException;
 import org.json.JSONObject;
 import org.json.JSONObject;
 
 
+import java.io.File;
+import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.IOException;
+import java.io.PrintStream;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.lang.reflect.Method;
 import java.net.InetAddress;
 import java.net.InetAddress;
 import java.net.NetworkInterface;
 import java.net.NetworkInterface;
 import java.net.SocketException;
 import java.net.SocketException;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.cert.CertificateException;
+import java.security.cert.X509Certificate;
 import java.util.ArrayList;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Collections;
+import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.HashMap;
 import java.util.List;
 import java.util.List;
 import java.util.Locale;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 
 import go.psi.Psi;
 import go.psi.Psi;
 
 
@@ -72,6 +83,7 @@ public class PsiphonTunnel extends Psi.PsiphonProvider.Stub {
         public void onSplitTunnelRegion(String region);
         public void onSplitTunnelRegion(String region);
         public void onUntunneledAddress(String address);
         public void onUntunneledAddress(String address);
         public void onBytesTransferred(long sent, long received);
         public void onBytesTransferred(long sent, long received);
+        public void onStartedWaitingForNetworkConnectivity();
     }
     }
 
 
     private final HostService mHostService;
     private final HostService mHostService;
@@ -80,6 +92,7 @@ public class PsiphonTunnel extends Psi.PsiphonProvider.Stub {
     private int mLocalSocksProxyPort;
     private int mLocalSocksProxyPort;
     private boolean mRoutingThroughTunnel;
     private boolean mRoutingThroughTunnel;
     private Thread mTun2SocksThread;
     private Thread mTun2SocksThread;
+    private AtomicBoolean mIsWaitingForNetworkConnectivity;
 
 
     // Only one PsiphonVpn instance may exist at a time, as the underlying
     // Only one PsiphonVpn instance may exist at a time, as the underlying
     // go.psi.Psi and tun2socks implementations each contain global state.
     // go.psi.Psi and tun2socks implementations each contain global state.
@@ -99,6 +112,7 @@ public class PsiphonTunnel extends Psi.PsiphonProvider.Stub {
         mHostService = hostService;
         mHostService = hostService;
         mLocalSocksProxyPort = 0;
         mLocalSocksProxyPort = 0;
         mRoutingThroughTunnel = false;
         mRoutingThroughTunnel = false;
+        mIsWaitingForNetworkConnectivity = new AtomicBoolean(false);
     }
     }
 
 
     public Object clone() throws CloneNotSupportedException {
     public Object clone() throws CloneNotSupportedException {
@@ -243,8 +257,16 @@ public class PsiphonTunnel extends Psi.PsiphonProvider.Stub {
 
 
     @Override
     @Override
     public long HasNetworkConnectivity() {
     public long HasNetworkConnectivity() {
+        boolean hasConnectivity = hasNetworkConnectivity(mHostService.getContext());
+        boolean wasWaitingForNetworkConnectivity = mIsWaitingForNetworkConnectivity.getAndSet(!hasConnectivity);
+        if (!hasConnectivity && !wasWaitingForNetworkConnectivity) {
+            // HasNetworkConnectivity may be called many times, but only call
+            // onStartedWaitingForNetworkConnectivity once per loss of connectivity,
+            // so the HostService may log a single message.
+            mHostService.onStartedWaitingForNetworkConnectivity();
+        }
         // TODO: change to bool return value once gobind supports that type
         // TODO: change to bool return value once gobind supports that type
-        return hasNetworkConnectivity(mHostService.getContext()) ? 1 : 0;
+        return hasConnectivity ? 1 : 0;
     }
     }
 
 
     @Override
     @Override
@@ -307,16 +329,6 @@ public class PsiphonTunnel extends Psi.PsiphonProvider.Stub {
         // This parameter is for stats reporting
         // This parameter is for stats reporting
         json.put("TunnelWholeDevice", isVpnMode ? 1 : 0);
         json.put("TunnelWholeDevice", isVpnMode ? 1 : 0);
 
 
-        // Enable tunnel auto-reconnect after a threshold number of port
-        // forward failures. By default, this mechanism is disabled in
-        // tunnel-core due to the chance of false positives due to
-        // bad user input. Since VpnService mode resolves domain names
-        // differently (udpgw), invalid domain name user input won't result
-        // in SSH port forward failures.
-        if (isVpnMode) {
-            json.put("PortForwardFailureThreshold", 10);
-        }
-
         json.put("EmitBytesTransferred", true);
         json.put("EmitBytesTransferred", true);
 
 
         if (mLocalSocksProxyPort != 0) {
         if (mLocalSocksProxyPort != 0) {
@@ -326,15 +338,18 @@ public class PsiphonTunnel extends Psi.PsiphonProvider.Stub {
             // has no effect with restartPsiphon(), a full stop() is necessary.
             // has no effect with restartPsiphon(), a full stop() is necessary.
             json.put("LocalSocksProxyPort", mLocalSocksProxyPort);
             json.put("LocalSocksProxyPort", mLocalSocksProxyPort);
         }
         }
-        
+
         json.put("UseIndistinguishableTLS", true);
         json.put("UseIndistinguishableTLS", true);
 
 
-        // TODO: doesn't work due to OpenSSL version incompatibility; try using
-        // the KeyStore API to build a local copy of trusted CAs cert files.
-        //
-        //if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.ICE_CREAM_SANDWICH) {
-        //    json.put("SystemCACertificateDirectory", "/system/etc/security/cacerts");
-        //}
+        try {
+            // Also enable indistinguishable TLS for HTTPS requests that
+            // require system CAs.
+            json.put(
+                "TrustedCACertificatesFilename",
+                setupTrustedCertificates(mHostService.getContext()));
+        } catch (Exception e) {
+            mHostService.onDiagnosticMessage(e.getMessage());
+        }
 
 
         return json.toString();
         return json.toString();
     }
     }
@@ -399,6 +414,7 @@ public class PsiphonTunnel extends Psi.PsiphonProvider.Stub {
                 mHostService.onUntunneledAddress(notice.getJSONObject("data").getString("address"));
                 mHostService.onUntunneledAddress(notice.getJSONObject("data").getString("address"));
 
 
             } else if (noticeType.equals("BytesTransferred")) {
             } else if (noticeType.equals("BytesTransferred")) {
+                diagnostic = false;
                 JSONObject data = notice.getJSONObject("data");
                 JSONObject data = notice.getJSONObject("data");
                 mHostService.onBytesTransferred(data.getLong("sent"), data.getLong("received"));
                 mHostService.onBytesTransferred(data.getLong("sent"), data.getLong("received"));
             }
             }
@@ -413,6 +429,75 @@ public class PsiphonTunnel extends Psi.PsiphonProvider.Stub {
         }
         }
     }
     }
 
 
+    private String setupTrustedCertificates(Context context) throws Exception {
+
+        // Copy the Android system CA store to a local, private cert bundle file.
+        //
+        // This results in a file that can be passed to SSL_CTX_load_verify_locations
+        // for use with OpenSSL modes in tunnel-core.
+        // https://www.openssl.org/docs/manmaster/ssl/SSL_CTX_load_verify_locations.html
+        //
+        // TODO: to use the path mode of load_verify_locations would require emulating
+        // the filename scheme used by c_rehash:
+        // https://www.openssl.org/docs/manmaster/apps/c_rehash.html
+        // http://stackoverflow.com/questions/19237167/the-new-subject-hash-openssl-algorithm-differs
+
+        File directory = context.getDir("PsiphonCAStore", Context.MODE_PRIVATE);
+
+        final String errorMessage = "copy AndroidCAStore failed";
+        try {
+
+            File file = new File(directory, "certs.dat");
+
+            // Pave a fresh copy on every run, which ensures we're not using old certs.
+            // Note: assumes KeyStore doesn't return revoked certs.
+            //
+            // TODO: this takes under 1 second, but should we avoid repaving every time?
+            file.delete();
+
+            PrintStream output = null;
+            try {
+                output = new PrintStream(new FileOutputStream(file));
+
+                KeyStore keyStore = KeyStore.getInstance("AndroidCAStore");
+                keyStore.load(null, null);
+
+                Enumeration<String> aliases = keyStore.aliases();
+                while (aliases.hasMoreElements()) {
+                    String alias = aliases.nextElement();
+                    X509Certificate cert = (X509Certificate) keyStore.getCertificate(alias);
+
+                    output.println("-----BEGIN CERTIFICATE-----");
+                    String pemCert = new String(Base64.encode(cert.getEncoded(), Base64.NO_WRAP), "UTF-8");
+                    // OpenSSL appears to reject the default linebreaking done by Base64.encode,
+                    // so we manually linebreak every 64 characters
+                    for (int i = 0; i < pemCert.length() ; i+= 64) {
+                        output.println(pemCert.substring(i, Math.min(i + 64, pemCert.length())));
+                    }
+                    output.println("-----END CERTIFICATE-----");
+                }
+
+                mHostService.onDiagnosticMessage("prepared PsiphonCAStore");
+
+                return file.getAbsolutePath();
+
+            } finally {
+                if (output != null) {
+                    output.close();
+                }
+            }
+
+        } catch (KeyStoreException e) {
+            throw new Exception(errorMessage, e);
+        } catch (NoSuchAlgorithmException e) {
+            throw new Exception(errorMessage, e);
+        } catch (CertificateException e) {
+            throw new Exception(errorMessage, e);
+        } catch (IOException e) {
+            throw new Exception(errorMessage, e);
+        }
+    }
+
     //----------------------------------------------------------------------------------------------
     //----------------------------------------------------------------------------------------------
     // Tun2Socks
     // Tun2Socks
     //----------------------------------------------------------------------------------------------
     //----------------------------------------------------------------------------------------------

+ 5 - 0
SampleApps/Psibot/app/src/main/java/ca/psiphon/psibot/Service.java

@@ -228,6 +228,11 @@ public class Service extends VpnService
     public void onBytesTransferred(long sent, long received) {
     public void onBytesTransferred(long sent, long received) {
     }
     }
 
 
+    @Override
+    public void onStartedWaitingForNetworkConnectivity() {
+        Log.addEntry("waiting for network connectivity...");
+    }
+
     @Override
     @Override
     public void onClientRegion(String region) {
     public void onClientRegion(String region) {
         Log.addEntry("client region: " + region);
         Log.addEntry("client region: " + region);

+ 1 - 1
psiphon/LookupIP.go

@@ -1,4 +1,4 @@
-// +build android linux
+// +build android linux darwin
 
 
 /*
 /*
  * Copyright (c) 2015, Psiphon Inc.
  * Copyright (c) 2015, Psiphon Inc.

+ 1 - 1
psiphon/LookupIP_nobind.go

@@ -1,4 +1,4 @@
-// +build !android,!linux
+// +build !android,!linux,!darwin
 
 
 /*
 /*
  * Copyright (c) 2014, Psiphon Inc.
  * Copyright (c) 2014, Psiphon Inc.

+ 55 - 43
psiphon/config.go

@@ -29,37 +29,46 @@ import (
 // TODO: allow all params to be configured
 // TODO: allow all params to be configured
 
 
 const (
 const (
-	DATA_STORE_FILENAME                          = "psiphon.db"
-	CONNECTION_WORKER_POOL_SIZE                  = 10
-	TUNNEL_POOL_SIZE                             = 1
-	TUNNEL_CONNECT_TIMEOUT                       = 15 * time.Second
-	TUNNEL_OPERATE_SHUTDOWN_TIMEOUT              = 500 * time.Millisecond
-	TUNNEL_PORT_FORWARD_DIAL_TIMEOUT             = 10 * time.Second
-	TUNNEL_SSH_KEEP_ALIVE_PAYLOAD_MAX_BYTES      = 256
-	TUNNEL_SSH_KEEP_ALIVE_PERIOD_MIN             = 60 * time.Second
-	TUNNEL_SSH_KEEP_ALIVE_PERIOD_MAX             = 120 * time.Second
-	TUNNEL_SSH_KEEP_ALIVE_TIMEOUT                = 10 * time.Second
-	ESTABLISH_TUNNEL_TIMEOUT_SECONDS             = 300
-	ESTABLISH_TUNNEL_WORK_TIME_SECONDS           = 60 * time.Second
-	ESTABLISH_TUNNEL_PAUSE_PERIOD                = 5 * time.Second
-	PORT_FORWARD_FAILURE_THRESHOLD               = 0
-	HTTP_PROXY_ORIGIN_SERVER_TIMEOUT             = 15 * time.Second
-	HTTP_PROXY_MAX_IDLE_CONNECTIONS_PER_HOST     = 50
-	FETCH_REMOTE_SERVER_LIST_TIMEOUT             = 10 * time.Second
-	FETCH_REMOTE_SERVER_LIST_RETRY_PERIOD        = 5 * time.Second
-	FETCH_REMOTE_SERVER_LIST_STALE_PERIOD        = 6 * time.Hour
-	PSIPHON_API_CLIENT_SESSION_ID_LENGTH         = 16
-	PSIPHON_API_SERVER_TIMEOUT                   = 20 * time.Second
-	PSIPHON_API_STATUS_REQUEST_PERIOD_MIN        = 5 * time.Minute
-	PSIPHON_API_STATUS_REQUEST_PERIOD_MAX        = 10 * time.Minute
-	PSIPHON_API_STATUS_REQUEST_PADDING_MAX_BYTES = 256
-	PSIPHON_API_CONNECTED_REQUEST_PERIOD         = 24 * time.Hour
-	PSIPHON_API_CONNECTED_REQUEST_RETRY_PERIOD   = 5 * time.Second
-	FETCH_ROUTES_TIMEOUT                         = 1 * time.Minute
-	DOWNLOAD_UPGRADE_TIMEOUT                     = 15 * time.Minute
-	DOWNLOAD_UPGRADE_RETRY_PAUSE_PERIOD          = 5 * time.Second
-	IMPAIRED_PROTOCOL_CLASSIFICATION_DURATION    = 2 * time.Minute
-	IMPAIRED_PROTOCOL_CLASSIFICATION_THRESHOLD   = 3
+	LEGACY_DATA_STORE_FILENAME                     = "psiphon.db"
+	DATA_STORE_FILENAME                            = "psiphon.boltdb"
+	CONNECTION_WORKER_POOL_SIZE                    = 10
+	TUNNEL_POOL_SIZE                               = 1
+	TUNNEL_CONNECT_TIMEOUT                         = 20 * time.Second
+	TUNNEL_OPERATE_SHUTDOWN_TIMEOUT                = 1 * time.Second
+	TUNNEL_PORT_FORWARD_DIAL_TIMEOUT               = 10 * time.Second
+	TUNNEL_SSH_KEEP_ALIVE_PAYLOAD_MAX_BYTES        = 256
+	TUNNEL_SSH_KEEP_ALIVE_PERIOD_MIN               = 60 * time.Second
+	TUNNEL_SSH_KEEP_ALIVE_PERIOD_MAX               = 120 * time.Second
+	TUNNEL_SSH_KEEP_ALIVE_PERIODIC_TIMEOUT         = 30 * time.Second
+	TUNNEL_SSH_KEEP_ALIVE_PERIODIC_INACTIVE_PERIOD = 10 * time.Second
+	TUNNEL_SSH_KEEP_ALIVE_PROBE_TIMEOUT            = 5 * time.Second
+	TUNNEL_SSH_KEEP_ALIVE_PROBE_INACTIVE_PERIOD    = 10 * time.Second
+	ESTABLISH_TUNNEL_TIMEOUT_SECONDS               = 300
+	ESTABLISH_TUNNEL_WORK_TIME                     = 60 * time.Second
+	ESTABLISH_TUNNEL_PAUSE_PERIOD                  = 5 * time.Second
+	ESTABLISH_TUNNEL_SERVER_AFFINITY_GRACE_PERIOD  = 1 * time.Second
+	HTTP_PROXY_ORIGIN_SERVER_TIMEOUT               = 15 * time.Second
+	HTTP_PROXY_MAX_IDLE_CONNECTIONS_PER_HOST       = 50
+	FETCH_REMOTE_SERVER_LIST_TIMEOUT               = 30 * time.Second
+	FETCH_REMOTE_SERVER_LIST_RETRY_PERIOD          = 5 * time.Second
+	FETCH_REMOTE_SERVER_LIST_STALE_PERIOD          = 6 * time.Hour
+	PSIPHON_API_CLIENT_SESSION_ID_LENGTH           = 16
+	PSIPHON_API_SERVER_TIMEOUT                     = 20 * time.Second
+	PSIPHON_API_SHUTDOWN_SERVER_TIMEOUT            = 1 * time.Second
+	PSIPHON_API_STATUS_REQUEST_PERIOD_MIN          = 5 * time.Minute
+	PSIPHON_API_STATUS_REQUEST_PERIOD_MAX          = 10 * time.Minute
+	PSIPHON_API_STATUS_REQUEST_SHORT_PERIOD_MIN    = 5 * time.Second
+	PSIPHON_API_STATUS_REQUEST_SHORT_PERIOD_MAX    = 10 * time.Second
+	PSIPHON_API_STATUS_REQUEST_PADDING_MAX_BYTES   = 256
+	PSIPHON_API_CONNECTED_REQUEST_PERIOD           = 24 * time.Hour
+	PSIPHON_API_CONNECTED_REQUEST_RETRY_PERIOD     = 5 * time.Second
+	PSIPHON_API_TUNNEL_STATS_MAX_COUNT             = 1000
+	FETCH_ROUTES_TIMEOUT                           = 1 * time.Minute
+	DOWNLOAD_UPGRADE_TIMEOUT                       = 15 * time.Minute
+	DOWNLOAD_UPGRADE_RETRY_PAUSE_PERIOD            = 5 * time.Second
+	IMPAIRED_PROTOCOL_CLASSIFICATION_DURATION      = 2 * time.Minute
+	IMPAIRED_PROTOCOL_CLASSIFICATION_THRESHOLD     = 3
+	TOTAL_BYTES_TRANSFERRED_NOTICE_PERIOD          = 5 * time.Minute
 )
 )
 
 
 // To distinguish omitted timeout params from explicit 0 value timeout
 // To distinguish omitted timeout params from explicit 0 value timeout
@@ -115,6 +124,9 @@ type Config struct {
 	// automatic updates.
 	// automatic updates.
 	// This value is supplied by and depends on the Psiphon Network, and is
 	// This value is supplied by and depends on the Psiphon Network, and is
 	// typically embedded in the client binary.
 	// typically embedded in the client binary.
+	// Note that sending a ClientPlatform string which includes "windows"
+	// (case insensitive) and a ClientVersion of <= 44 will cause an
+	// error in processing the response to DoConnectedRequest calls.
 	ClientVersion string
 	ClientVersion string
 
 
 	// ClientPlatform is the client platform ("Windows", "Android", etc.) that
 	// ClientPlatform is the client platform ("Windows", "Android", etc.) that
@@ -141,6 +153,13 @@ type Config struct {
 	// the controller will keep trying indefinitely.
 	// the controller will keep trying indefinitely.
 	EstablishTunnelTimeoutSeconds *int
 	EstablishTunnelTimeoutSeconds *int
 
 
+	// ListenInterface specifies which interface to listen on.  If no interface
+	// is provided then listen on 127.0.0.1.
+	// If an invalid interface is provided then listen on localhost (127.0.0.1).
+	// If 'any' is provided then use 0.0.0.0.
+	// If there are multiple IP addresses on an interface use the first IPv4 address.
+	ListenInterface string
+
 	// LocalSocksProxyPort specifies a port number for the local SOCKS proxy
 	// LocalSocksProxyPort specifies a port number for the local SOCKS proxy
 	// running at 127.0.0.1. For the default value, 0, the system selects a free
 	// running at 127.0.0.1. For the default value, 0, the system selects a free
 	// port (a notice reporting the selected port is emitted).
 	// port (a notice reporting the selected port is emitted).
@@ -161,14 +180,6 @@ type Config struct {
 	// which is recommended.
 	// which is recommended.
 	TunnelPoolSize int
 	TunnelPoolSize int
 
 
-	// PortForwardFailureThreshold specifies a threshold number of port forward
-	// failures (failure to connect, or I/O failure) after which the tunnel is
-	// considered to be degraded and a re-establish is launched. This facility
-	// can suffer from false positives, especially when the host client is running
-	// in configuration where domain name resolution is done as part of the port
-	// forward (as opposed to tunneling UDP, for example). The default is 0, off.
-	PortForwardFailureThreshold int
-
 	// UpstreamProxyUrl is a URL specifying an upstream proxy to use for all
 	// UpstreamProxyUrl is a URL specifying an upstream proxy to use for all
 	// outbound connections. The URL should include proxy type and authentication
 	// outbound connections. The URL should include proxy type and authentication
 	// information, as required. See example URLs here:
 	// information, as required. See example URLs here:
@@ -249,6 +260,11 @@ type Config struct {
 	// When specified, this enables use of indistinguishable TLS for HTTPS requests
 	// When specified, this enables use of indistinguishable TLS for HTTPS requests
 	// that require typical (system CA) server authentication.
 	// that require typical (system CA) server authentication.
 	TrustedCACertificatesFilename string
 	TrustedCACertificatesFilename string
+
+	// DisablePeriodicSshKeepAlive indicates whether to send an SSH keepalive every
+	// 1-2 minutes, when the tunnel is idle. If the SSH keepalive times out, the tunnel
+	// is considered to have failed.
+	DisablePeriodicSshKeepAlive bool
 }
 }
 
 
 // LoadConfig parses and validates a JSON format Psiphon config JSON
 // LoadConfig parses and validates a JSON format Psiphon config JSON
@@ -301,10 +317,6 @@ func LoadConfig(configJson []byte) (*Config, error) {
 		config.TunnelPoolSize = TUNNEL_POOL_SIZE
 		config.TunnelPoolSize = TUNNEL_POOL_SIZE
 	}
 	}
 
 
-	if config.PortForwardFailureThreshold == 0 {
-		config.PortForwardFailureThreshold = PORT_FORWARD_FAILURE_THRESHOLD
-	}
-
 	if config.NetworkConnectivityChecker != nil {
 	if config.NetworkConnectivityChecker != nil {
 		return nil, ContextError(errors.New("NetworkConnectivityChecker interface must be set at runtime"))
 		return nil, ContextError(errors.New("NetworkConnectivityChecker interface must be set at runtime"))
 	}
 	}

+ 181 - 61
psiphon/controller.go

@@ -51,13 +51,20 @@ type Controller struct {
 	isEstablishing                 bool
 	isEstablishing                 bool
 	establishWaitGroup             *sync.WaitGroup
 	establishWaitGroup             *sync.WaitGroup
 	stopEstablishingBroadcast      chan struct{}
 	stopEstablishingBroadcast      chan struct{}
-	candidateServerEntries         chan *ServerEntry
+	candidateServerEntries         chan *candidateServerEntry
 	establishPendingConns          *Conns
 	establishPendingConns          *Conns
 	untunneledPendingConns         *Conns
 	untunneledPendingConns         *Conns
 	untunneledDialConfig           *DialConfig
 	untunneledDialConfig           *DialConfig
 	splitTunnelClassifier          *SplitTunnelClassifier
 	splitTunnelClassifier          *SplitTunnelClassifier
 	signalFetchRemoteServerList    chan struct{}
 	signalFetchRemoteServerList    chan struct{}
 	impairedProtocolClassification map[string]int
 	impairedProtocolClassification map[string]int
+	signalReportConnected          chan struct{}
+	serverAffinityDoneBroadcast    chan struct{}
+}
+
+type candidateServerEntry struct {
+	serverEntry               *ServerEntry
+	isServerAffinityCandidate bool
 }
 }
 
 
 // NewController initializes a new controller.
 // NewController initializes a new controller.
@@ -97,20 +104,22 @@ func NewController(config *Config) (controller *Controller, err error) {
 		runWaitGroup:           new(sync.WaitGroup),
 		runWaitGroup:           new(sync.WaitGroup),
 		// establishedTunnels and failedTunnels buffer sizes are large enough to
 		// establishedTunnels and failedTunnels buffer sizes are large enough to
 		// receive full pools of tunnels without blocking. Senders should not block.
 		// receive full pools of tunnels without blocking. Senders should not block.
-		establishedTunnels:       make(chan *Tunnel, config.TunnelPoolSize),
-		failedTunnels:            make(chan *Tunnel, config.TunnelPoolSize),
-		tunnels:                  make([]*Tunnel, 0),
-		establishedOnce:          false,
-		startedConnectedReporter: false,
-		startedUpgradeDownloader: false,
-		isEstablishing:           false,
-		establishPendingConns:    new(Conns),
-		untunneledPendingConns:   untunneledPendingConns,
-		untunneledDialConfig:     untunneledDialConfig,
-		// A buffer allows at least one signal to be sent even when the receiver is
-		// not listening. Senders should not block.
-		signalFetchRemoteServerList:    make(chan struct{}, 1),
+		establishedTunnels:             make(chan *Tunnel, config.TunnelPoolSize),
+		failedTunnels:                  make(chan *Tunnel, config.TunnelPoolSize),
+		tunnels:                        make([]*Tunnel, 0),
+		establishedOnce:                false,
+		startedConnectedReporter:       false,
+		startedUpgradeDownloader:       false,
+		isEstablishing:                 false,
+		establishPendingConns:          new(Conns),
+		untunneledPendingConns:         untunneledPendingConns,
+		untunneledDialConfig:           untunneledDialConfig,
 		impairedProtocolClassification: make(map[string]int),
 		impairedProtocolClassification: make(map[string]int),
+		// TODO: Add a buffer of 1 so we don't miss a signal while receiver is
+		// starting? Trade-off is potential back-to-back fetch remotes. As-is,
+		// establish will eventually signal another fetch remote.
+		signalFetchRemoteServerList: make(chan struct{}),
+		signalReportConnected:       make(chan struct{}),
 	}
 	}
 
 
 	controller.splitTunnelClassifier = NewSplitTunnelClassifier(config, controller)
 	controller.splitTunnelClassifier = NewSplitTunnelClassifier(config, controller)
@@ -133,7 +142,13 @@ func (controller *Controller) Run(shutdownBroadcast <-chan struct{}) {
 
 
 	// Start components
 	// Start components
 
 
-	socksProxy, err := NewSocksProxy(controller.config, controller)
+	listenIP, err := GetInterfaceIPAddress(controller.config.ListenInterface)
+	if err != nil {
+		NoticeError("error getting listener IP: %s", err)
+		return
+	}
+
+	socksProxy, err := NewSocksProxy(controller.config, controller, listenIP)
 	if err != nil {
 	if err != nil {
 		NoticeAlert("error initializing local SOCKS proxy: %s", err)
 		NoticeAlert("error initializing local SOCKS proxy: %s", err)
 		return
 		return
@@ -141,7 +156,7 @@ func (controller *Controller) Run(shutdownBroadcast <-chan struct{}) {
 	defer socksProxy.Close()
 	defer socksProxy.Close()
 
 
 	httpProxy, err := NewHttpProxy(
 	httpProxy, err := NewHttpProxy(
-		controller.config, controller.untunneledDialConfig, controller)
+		controller.config, controller.untunneledDialConfig, controller, listenIP)
 	if err != nil {
 	if err != nil {
 		NoticeAlert("error initializing local HTTP proxy: %s", err)
 		NoticeAlert("error initializing local HTTP proxy: %s", err)
 		return
 		return
@@ -175,9 +190,19 @@ func (controller *Controller) Run(shutdownBroadcast <-chan struct{}) {
 
 
 	close(controller.shutdownBroadcast)
 	close(controller.shutdownBroadcast)
 	controller.establishPendingConns.CloseAll()
 	controller.establishPendingConns.CloseAll()
-	controller.untunneledPendingConns.CloseAll()
 	controller.runWaitGroup.Wait()
 	controller.runWaitGroup.Wait()
 
 
+	// Stops untunneled connections, including fetch remote server list,
+	// split tunnel port forwards and also untunneled final stats requests.
+	// Note: there's a circular dependency with runWaitGroup.Wait() and
+	// untunneledPendingConns.CloseAll(): runWaitGroup depends on tunnels
+	// stopping which depends, in orderly shutdown, on final status requests
+	// completing. So this pending conns cancel comes too late to interrupt
+	// final status requests in the orderly shutdown case -- which is desired
+	// since we give those a short timeout and would prefer to not interrupt
+	// them.
+	controller.untunneledPendingConns.CloseAll()
+
 	controller.splitTunnelClassifier.Shutdown()
 	controller.splitTunnelClassifier.Shutdown()
 
 
 	NoticeInfo("exiting controller")
 	NoticeInfo("exiting controller")
@@ -274,7 +299,9 @@ func (controller *Controller) establishTunnelWatcher() {
 // comment in DoConnectedRequest for a description of the request mechanism.
 // comment in DoConnectedRequest for a description of the request mechanism.
 // To ensure we don't over- or under-count unique users, only one connected
 // To ensure we don't over- or under-count unique users, only one connected
 // request is made across all simultaneous multi-tunnels; and the connected
 // request is made across all simultaneous multi-tunnels; and the connected
-// request is repeated periodically.
+// request is repeated periodically for very long-lived tunnels.
+// The signalReportConnected mechanism is used to trigger another connected
+// request immediately after a reconnect.
 func (controller *Controller) connectedReporter() {
 func (controller *Controller) connectedReporter() {
 	defer controller.runWaitGroup.Done()
 	defer controller.runWaitGroup.Done()
 loop:
 loop:
@@ -285,7 +312,7 @@ loop:
 		reported := false
 		reported := false
 		tunnel := controller.getNextActiveTunnel()
 		tunnel := controller.getNextActiveTunnel()
 		if tunnel != nil {
 		if tunnel != nil {
-			err := tunnel.session.DoConnectedRequest()
+			err := tunnel.serverContext.DoConnectedRequest()
 			if err == nil {
 			if err == nil {
 				reported = true
 				reported = true
 			} else {
 			} else {
@@ -302,8 +329,10 @@ loop:
 		}
 		}
 		timeout := time.After(duration)
 		timeout := time.After(duration)
 		select {
 		select {
+		case <-controller.signalReportConnected:
 		case <-timeout:
 		case <-timeout:
 			// Make another connected request
 			// Make another connected request
+
 		case <-controller.shutdownBroadcast:
 		case <-controller.shutdownBroadcast:
 			break loop
 			break loop
 		}
 		}
@@ -312,7 +341,8 @@ loop:
 	NoticeInfo("exiting connected reporter")
 	NoticeInfo("exiting connected reporter")
 }
 }
 
 
-func (controller *Controller) startConnectedReporter() {
+func (controller *Controller) startOrSignalConnectedReporter() {
+	// session is nil when DisableApi is set
 	if controller.config.DisableApi {
 	if controller.config.DisableApi {
 		return
 		return
 	}
 	}
@@ -323,6 +353,11 @@ func (controller *Controller) startConnectedReporter() {
 		controller.startedConnectedReporter = true
 		controller.startedConnectedReporter = true
 		controller.runWaitGroup.Add(1)
 		controller.runWaitGroup.Add(1)
 		go controller.connectedReporter()
 		go controller.connectedReporter()
+	} else {
+		select {
+		case controller.signalReportConnected <- *new(struct{}):
+		default:
+		}
 	}
 	}
 }
 }
 
 
@@ -361,7 +396,10 @@ loop:
 	NoticeInfo("exiting upgrade downloader")
 	NoticeInfo("exiting upgrade downloader")
 }
 }
 
 
-func (controller *Controller) startClientUpgradeDownloader(clientUpgradeVersion string) {
+func (controller *Controller) startClientUpgradeDownloader(
+	serverContext *ServerContext) {
+
+	// serverContext is nil when DisableApi is set
 	if controller.config.DisableApi {
 	if controller.config.DisableApi {
 		return
 		return
 	}
 	}
@@ -372,7 +410,7 @@ func (controller *Controller) startClientUpgradeDownloader(clientUpgradeVersion
 		return
 		return
 	}
 	}
 
 
-	if clientUpgradeVersion == "" {
+	if serverContext.clientUpgradeVersion == "" {
 		// No upgrade is offered
 		// No upgrade is offered
 		return
 		return
 	}
 	}
@@ -382,7 +420,7 @@ func (controller *Controller) startClientUpgradeDownloader(clientUpgradeVersion
 	if !controller.startedUpgradeDownloader {
 	if !controller.startedUpgradeDownloader {
 		controller.startedUpgradeDownloader = true
 		controller.startedUpgradeDownloader = true
 		controller.runWaitGroup.Add(1)
 		controller.runWaitGroup.Add(1)
-		go controller.upgradeDownloader(clientUpgradeVersion)
+		go controller.upgradeDownloader(serverContext.clientUpgradeVersion)
 	}
 	}
 }
 }
 
 
@@ -419,7 +457,7 @@ loop:
 			// establishPendingConns; this causes the pendingConns.Add() within
 			// establishPendingConns; this causes the pendingConns.Add() within
 			// interruptibleTCPDial to succeed instead of aborting, and the result
 			// interruptibleTCPDial to succeed instead of aborting, and the result
 			// is that it's possible for establish goroutines to run all the way through
 			// is that it's possible for establish goroutines to run all the way through
-			// NewSession before being discarded... delaying shutdown.
+			// NewServerContext before being discarded... delaying shutdown.
 			select {
 			select {
 			case <-controller.shutdownBroadcast:
 			case <-controller.shutdownBroadcast:
 				break loop
 				break loop
@@ -437,16 +475,40 @@ loop:
 		// !TODO! design issue: might not be enough server entries with region/caps to ever fill tunnel slots
 		// !TODO! design issue: might not be enough server entries with region/caps to ever fill tunnel slots
 		// solution(?) target MIN(CountServerEntries(region, protocol), TunnelPoolSize)
 		// solution(?) target MIN(CountServerEntries(region, protocol), TunnelPoolSize)
 		case establishedTunnel := <-controller.establishedTunnels:
 		case establishedTunnel := <-controller.establishedTunnels:
-			if controller.registerTunnel(establishedTunnel) {
-				NoticeActiveTunnel(establishedTunnel.serverEntry.IpAddress)
+			tunnelCount, registered := controller.registerTunnel(establishedTunnel)
+			if registered {
+				NoticeActiveTunnel(establishedTunnel.serverEntry.IpAddress, establishedTunnel.protocol)
+
+				if tunnelCount == 1 {
+
+					// The split tunnel classifier is started once the first tunnel is
+					// established. This first tunnel is passed in to be used to make
+					// the routes data request.
+					// A long-running controller may run while the host device is present
+					// in different regions. In this case, we want the split tunnel logic
+					// to switch to routes for new regions and not classify traffic based
+					// on routes installed for older regions.
+					// We assume that when regions change, the host network will also
+					// change, and so all tunnels will fail and be re-established. Under
+					// that assumption, the classifier will be re-Start()-ed here when
+					// the region has changed.
+					controller.splitTunnelClassifier.Start(establishedTunnel)
+
+					// Signal a connected request on each 1st tunnel establishment. For
+					// multi-tunnels, the session is connected as long as at least one
+					// tunnel is established.
+					controller.startOrSignalConnectedReporter()
+
+					controller.startClientUpgradeDownloader(
+						establishedTunnel.serverContext)
+				}
+
 			} else {
 			} else {
 				controller.discardTunnel(establishedTunnel)
 				controller.discardTunnel(establishedTunnel)
 			}
 			}
 			if controller.isFullyEstablished() {
 			if controller.isFullyEstablished() {
 				controller.stopEstablishing()
 				controller.stopEstablishing()
 			}
 			}
-			controller.startConnectedReporter()
-			controller.startClientUpgradeDownloader(establishedTunnel.session.clientUpgradeVersion)
 
 
 		case <-controller.shutdownBroadcast:
 		case <-controller.shutdownBroadcast:
 			break loop
 			break loop
@@ -473,7 +535,7 @@ loop:
 
 
 // classifyImpairedProtocol tracks "impaired" protocol classifications for failed
 // classifyImpairedProtocol tracks "impaired" protocol classifications for failed
 // tunnels. A protocol is classified as impaired if a tunnel using that protocol
 // tunnels. A protocol is classified as impaired if a tunnel using that protocol
-// fails, repeatedly, shortly after the start of the session. During tunnel
+// fails, repeatedly, shortly after the start of the connection. During tunnel
 // establishment, impaired protocols are briefly skipped.
 // establishment, impaired protocols are briefly skipped.
 //
 //
 // One purpose of this measure is to defend against an attack where the adversary,
 // One purpose of this measure is to defend against an attack where the adversary,
@@ -484,7 +546,7 @@ loop:
 //
 //
 // Concurrency note: only the runTunnels() goroutine may call classifyImpairedProtocol
 // Concurrency note: only the runTunnels() goroutine may call classifyImpairedProtocol
 func (controller *Controller) classifyImpairedProtocol(failedTunnel *Tunnel) {
 func (controller *Controller) classifyImpairedProtocol(failedTunnel *Tunnel) {
-	if failedTunnel.sessionStartTime.Add(IMPAIRED_PROTOCOL_CLASSIFICATION_DURATION).After(time.Now()) {
+	if failedTunnel.startTime.Add(IMPAIRED_PROTOCOL_CLASSIFICATION_DURATION).After(time.Now()) {
 		controller.impairedProtocolClassification[failedTunnel.protocol] += 1
 		controller.impairedProtocolClassification[failedTunnel.protocol] += 1
 	} else {
 	} else {
 		controller.impairedProtocolClassification[failedTunnel.protocol] = 0
 		controller.impairedProtocolClassification[failedTunnel.protocol] = 0
@@ -538,46 +600,39 @@ func (controller *Controller) discardTunnel(tunnel *Tunnel) {
 	// discarded tunnel before fully active tunnels. Can a discarded tunnel
 	// discarded tunnel before fully active tunnels. Can a discarded tunnel
 	// be promoted (since it connects), but with lower rank than all active
 	// be promoted (since it connects), but with lower rank than all active
 	// tunnels?
 	// tunnels?
-	tunnel.Close()
+	tunnel.Close(true)
 }
 }
 
 
 // registerTunnel adds the connected tunnel to the pool of active tunnels
 // registerTunnel adds the connected tunnel to the pool of active tunnels
 // which are candidates for port forwarding. Returns true if the pool has an
 // which are candidates for port forwarding. Returns true if the pool has an
 // empty slot and false if the pool is full (caller should discard the tunnel).
 // empty slot and false if the pool is full (caller should discard the tunnel).
-func (controller *Controller) registerTunnel(tunnel *Tunnel) bool {
+func (controller *Controller) registerTunnel(tunnel *Tunnel) (int, bool) {
 	controller.tunnelMutex.Lock()
 	controller.tunnelMutex.Lock()
 	defer controller.tunnelMutex.Unlock()
 	defer controller.tunnelMutex.Unlock()
 	if len(controller.tunnels) >= controller.config.TunnelPoolSize {
 	if len(controller.tunnels) >= controller.config.TunnelPoolSize {
-		return false
+		return len(controller.tunnels), false
 	}
 	}
 	// Perform a final check just in case we've established
 	// Perform a final check just in case we've established
 	// a duplicate connection.
 	// a duplicate connection.
 	for _, activeTunnel := range controller.tunnels {
 	for _, activeTunnel := range controller.tunnels {
 		if activeTunnel.serverEntry.IpAddress == tunnel.serverEntry.IpAddress {
 		if activeTunnel.serverEntry.IpAddress == tunnel.serverEntry.IpAddress {
 			NoticeAlert("duplicate tunnel: %s", tunnel.serverEntry.IpAddress)
 			NoticeAlert("duplicate tunnel: %s", tunnel.serverEntry.IpAddress)
-			return false
+			return len(controller.tunnels), false
 		}
 		}
 	}
 	}
 	controller.establishedOnce = true
 	controller.establishedOnce = true
 	controller.tunnels = append(controller.tunnels, tunnel)
 	controller.tunnels = append(controller.tunnels, tunnel)
 	NoticeTunnels(len(controller.tunnels))
 	NoticeTunnels(len(controller.tunnels))
 
 
-	// The split tunnel classifier is started once the first tunnel is
-	// established. This first tunnel is passed in to be used to make
-	// the routes data request.
-	// A long-running controller may run while the host device is present
-	// in different regions. In this case, we want the split tunnel logic
-	// to switch to routes for new regions and not classify traffic based
-	// on routes installed for older regions.
-	// We assume that when regions change, the host network will also
-	// change, and so all tunnels will fail and be re-established. Under
-	// that assumption, the classifier will be re-Start()-ed here when
-	// the region has changed.
-	if len(controller.tunnels) == 1 {
-		controller.splitTunnelClassifier.Start(tunnel)
-	}
-
-	return true
+	// Promote this successful tunnel to first rank so it's one
+	// of the first candidates next time establish runs.
+	// Connecting to a TargetServerEntry does not change the
+	// ranking.
+	if controller.config.TargetServerEntry == "" {
+		PromoteServerEntry(tunnel.serverEntry.IpAddress)
+	}
+
+	return len(controller.tunnels), true
 }
 }
 
 
 // hasEstablishedOnce indicates if at least one active tunnel has
 // hasEstablishedOnce indicates if at least one active tunnel has
@@ -612,7 +667,7 @@ func (controller *Controller) terminateTunnel(tunnel *Tunnel) {
 			if controller.nextTunnel >= len(controller.tunnels) {
 			if controller.nextTunnel >= len(controller.tunnels) {
 				controller.nextTunnel = 0
 				controller.nextTunnel = 0
 			}
 			}
-			activeTunnel.Close()
+			activeTunnel.Close(false)
 			NoticeTunnels(len(controller.tunnels))
 			NoticeTunnels(len(controller.tunnels))
 			break
 			break
 		}
 		}
@@ -633,7 +688,7 @@ func (controller *Controller) terminateAllTunnels() {
 		tunnel := activeTunnel
 		tunnel := activeTunnel
 		go func() {
 		go func() {
 			defer closeWaitGroup.Done()
 			defer closeWaitGroup.Done()
-			tunnel.Close()
+			tunnel.Close(false)
 		}()
 		}()
 	}
 	}
 	closeWaitGroup.Wait()
 	closeWaitGroup.Wait()
@@ -720,9 +775,36 @@ func (controller *Controller) startEstablishing() {
 	controller.isEstablishing = true
 	controller.isEstablishing = true
 	controller.establishWaitGroup = new(sync.WaitGroup)
 	controller.establishWaitGroup = new(sync.WaitGroup)
 	controller.stopEstablishingBroadcast = make(chan struct{})
 	controller.stopEstablishingBroadcast = make(chan struct{})
-	controller.candidateServerEntries = make(chan *ServerEntry)
+	controller.candidateServerEntries = make(chan *candidateServerEntry)
 	controller.establishPendingConns.Reset()
 	controller.establishPendingConns.Reset()
 
 
+	// The server affinity mechanism attempts to favor the previously
+	// used server when reconnecting. This is beneficial for user
+	// applications which expect consistency in user IP address (for
+	// example, a web site which prompts for additional user
+	// authentication when the IP address changes).
+	//
+	// Only the very first server, as determined by
+	// datastore.PromoteServerEntry(), is the server affinity candidate.
+	// Concurrent connections attempts to many servers are launched
+	// without delay, in case the affinity server connection fails.
+	// While the affinity server connection is outstanding, when any
+	// other connection is established, there is a short grace period
+	// delay before delivering the established tunnel; this allows some
+	// time for the affinity server connection to succeed first.
+	// When the affinity server connection fails, any other established
+	// tunnel is registered without delay.
+	//
+	// Note: the establishTunnelWorker that receives the affinity
+	// candidate is solely resonsible for closing
+	// controller.serverAffinityDoneBroadcast.
+	//
+	// Note: if config.EgressRegion or config.TunnelProtocol has changed
+	// since the top server was promoted, the first server may not actually
+	// be the last connected server.
+	// TODO: should not favor the first server in this case
+	controller.serverAffinityDoneBroadcast = make(chan struct{})
+
 	for i := 0; i < controller.config.ConnectionWorkerPoolSize; i++ {
 	for i := 0; i < controller.config.ConnectionWorkerPoolSize; i++ {
 		controller.establishWaitGroup.Add(1)
 		controller.establishWaitGroup.Add(1)
 		go controller.establishTunnelWorker()
 		go controller.establishTunnelWorker()
@@ -753,6 +835,7 @@ func (controller *Controller) stopEstablishing() {
 	controller.establishWaitGroup = nil
 	controller.establishWaitGroup = nil
 	controller.stopEstablishingBroadcast = nil
 	controller.stopEstablishingBroadcast = nil
 	controller.candidateServerEntries = nil
 	controller.candidateServerEntries = nil
+	controller.serverAffinityDoneBroadcast = nil
 }
 }
 
 
 // establishCandidateGenerator populates the candidate queue with server entries
 // establishCandidateGenerator populates the candidate queue with server entries
@@ -770,6 +853,14 @@ func (controller *Controller) establishCandidateGenerator(impairedProtocols []st
 	}
 	}
 	defer iterator.Close()
 	defer iterator.Close()
 
 
+	isServerAffinityCandidate := true
+
+	// TODO: reconcile server affinity scheme with multi-tunnel mode
+	if controller.config.TunnelPoolSize > 1 {
+		isServerAffinityCandidate = false
+		close(controller.serverAffinityDoneBroadcast)
+	}
+
 loop:
 loop:
 	// Repeat until stopped
 	// Repeat until stopped
 	for i := 0; ; i++ {
 	for i := 0; ; i++ {
@@ -796,10 +887,10 @@ loop:
 			}
 			}
 
 
 			// Disable impaired protocols. This is only done for the
 			// Disable impaired protocols. This is only done for the
-			// first iteration of the ESTABLISH_TUNNEL_WORK_TIME_SECONDS
+			// first iteration of the ESTABLISH_TUNNEL_WORK_TIME
 			// loop since (a) one iteration should be sufficient to
 			// loop since (a) one iteration should be sufficient to
 			// evade the attack; (b) there's a good chance of false
 			// evade the attack; (b) there's a good chance of false
-			// positives (such as short session durations due to network
+			// positives (such as short tunnel durations due to network
 			// hopping on a mobile device).
 			// hopping on a mobile device).
 			// Impaired protocols logic is not applied when
 			// Impaired protocols logic is not applied when
 			// config.TunnelProtocol is specified.
 			// config.TunnelProtocol is specified.
@@ -815,18 +906,23 @@ loop:
 				}
 				}
 			}
 			}
 
 
+			// Note: there must be only one server affinity candidate, as it
+			// closes the serverAffinityDoneBroadcast channel.
+			candidate := &candidateServerEntry{serverEntry, isServerAffinityCandidate}
+			isServerAffinityCandidate = false
+
 			// TODO: here we could generate multiple candidates from the
 			// TODO: here we could generate multiple candidates from the
 			// server entry when there are many MeekFrontingAddresses.
 			// server entry when there are many MeekFrontingAddresses.
 
 
 			select {
 			select {
-			case controller.candidateServerEntries <- serverEntry:
+			case controller.candidateServerEntries <- candidate:
 			case <-controller.stopEstablishingBroadcast:
 			case <-controller.stopEstablishingBroadcast:
 				break loop
 				break loop
 			case <-controller.shutdownBroadcast:
 			case <-controller.shutdownBroadcast:
 				break loop
 				break loop
 			}
 			}
 
 
-			if time.Now().After(startTime.Add(ESTABLISH_TUNNEL_WORK_TIME_SECONDS)) {
+			if time.Now().After(startTime.Add(ESTABLISH_TUNNEL_WORK_TIME)) {
 				// Start over, after a brief pause, with a new shuffle of the server
 				// Start over, after a brief pause, with a new shuffle of the server
 				// entries, and potentially some newly fetched server entries.
 				// entries, and potentially some newly fetched server entries.
 				break
 				break
@@ -873,7 +969,7 @@ loop:
 func (controller *Controller) establishTunnelWorker() {
 func (controller *Controller) establishTunnelWorker() {
 	defer controller.establishWaitGroup.Done()
 	defer controller.establishWaitGroup.Done()
 loop:
 loop:
-	for serverEntry := range controller.candidateServerEntries {
+	for candidateServerEntry := range controller.candidateServerEntries {
 		// Note: don't receive from candidateServerEntries and stopEstablishingBroadcast
 		// Note: don't receive from candidateServerEntries and stopEstablishingBroadcast
 		// in the same select, since we want to prioritize receiving the stop signal
 		// in the same select, since we want to prioritize receiving the stop signal
 		if controller.isStopEstablishingBroadcast() {
 		if controller.isStopEstablishingBroadcast() {
@@ -881,26 +977,44 @@ loop:
 		}
 		}
 
 
 		// There may already be a tunnel to this candidate. If so, skip it.
 		// There may already be a tunnel to this candidate. If so, skip it.
-		if controller.isActiveTunnelServerEntry(serverEntry) {
+		if controller.isActiveTunnelServerEntry(candidateServerEntry.serverEntry) {
 			continue
 			continue
 		}
 		}
 
 
 		tunnel, err := EstablishTunnel(
 		tunnel, err := EstablishTunnel(
 			controller.config,
 			controller.config,
+			controller.untunneledDialConfig,
 			controller.sessionId,
 			controller.sessionId,
 			controller.establishPendingConns,
 			controller.establishPendingConns,
-			serverEntry,
+			candidateServerEntry.serverEntry,
 			controller) // TunnelOwner
 			controller) // TunnelOwner
 		if err != nil {
 		if err != nil {
+
+			// Unblock other candidates immediately when
+			// server affinity candidate fails.
+			if candidateServerEntry.isServerAffinityCandidate {
+				close(controller.serverAffinityDoneBroadcast)
+			}
+
 			// Before emitting error, check if establish interrupted, in which
 			// Before emitting error, check if establish interrupted, in which
 			// case the error is noise.
 			// case the error is noise.
 			if controller.isStopEstablishingBroadcast() {
 			if controller.isStopEstablishingBroadcast() {
 				break loop
 				break loop
 			}
 			}
-			NoticeInfo("failed to connect to %s: %s", serverEntry.IpAddress, err)
+			NoticeInfo("failed to connect to %s: %s", candidateServerEntry.serverEntry.IpAddress, err)
 			continue
 			continue
 		}
 		}
 
 
+		// Block for server affinity grace period before delivering.
+		if !candidateServerEntry.isServerAffinityCandidate {
+			timer := time.NewTimer(ESTABLISH_TUNNEL_SERVER_AFFINITY_GRACE_PERIOD)
+			select {
+			case <-timer.C:
+			case <-controller.serverAffinityDoneBroadcast:
+			case <-controller.stopEstablishingBroadcast:
+			}
+		}
+
 		// Deliver established tunnel.
 		// Deliver established tunnel.
 		// Don't block. Assumes the receiver has a buffer large enough for
 		// Don't block. Assumes the receiver has a buffer large enough for
 		// the number of desired tunnels. If there's no room, the tunnel must
 		// the number of desired tunnels. If there's no room, the tunnel must
@@ -910,6 +1024,12 @@ loop:
 		default:
 		default:
 			controller.discardTunnel(tunnel)
 			controller.discardTunnel(tunnel)
 		}
 		}
+
+		// Unblock other candidates only after delivering when
+		// server affinity candidate succeeds.
+		if candidateServerEntry.isServerAffinityCandidate {
+			close(controller.serverAffinityDoneBroadcast)
+		}
 	}
 	}
 	NoticeInfo("stopped establish worker")
 	NoticeInfo("stopped establish worker")
 }
 }

+ 621 - 328
psiphon/dataStore.go

@@ -1,5 +1,3 @@
-// +build windows
-
 /*
 /*
  * Copyright (c) 2015, Psiphon Inc.
  * Copyright (c) 2015, Psiphon Inc.
  * All rights reserved.
  * All rights reserved.
@@ -22,7 +20,7 @@
 package psiphon
 package psiphon
 
 
 import (
 import (
-	"database/sql"
+	"bytes"
 	"encoding/json"
 	"encoding/json"
 	"errors"
 	"errors"
 	"fmt"
 	"fmt"
@@ -32,14 +30,34 @@ import (
 	"sync"
 	"sync"
 	"time"
 	"time"
 
 
-	sqlite3 "github.com/Psiphon-Inc/go-sqlite3"
+	"github.com/Psiphon-Inc/bolt"
 )
 )
 
 
+// The BoltDB dataStore implementation is an alternative to the sqlite3-based
+// implementation in dataStore.go. Both implementations have the same interface.
+//
+// BoltDB is pure Go, and is intended to be used in cases where we have trouble
+// building sqlite3/CGO (e.g., currently go mobile due to
+// https://github.com/mattn/go-sqlite3/issues/201), and perhaps ultimately as
+// the primary dataStore implementation.
+//
 type dataStore struct {
 type dataStore struct {
 	init sync.Once
 	init sync.Once
-	db   *sql.DB
+	db   *bolt.DB
 }
 }
 
 
+const (
+	serverEntriesBucket         = "serverEntries"
+	rankedServerEntriesBucket   = "rankedServerEntries"
+	rankedServerEntriesKey      = "rankedServerEntries"
+	splitTunnelRouteETagsBucket = "splitTunnelRouteETags"
+	splitTunnelRouteDataBucket  = "splitTunnelRouteData"
+	urlETagsBucket              = "urlETags"
+	keyValueBucket              = "keyValues"
+	tunnelStatsBucket           = "tunnelStats"
+	rankedServerEntryCount      = 100
+)
+
 var singleton dataStore
 var singleton dataStore
 
 
 // InitDataStore initializes the singleton instance of dataStore. This
 // InitDataStore initializes the singleton instance of dataStore. This
@@ -52,58 +70,65 @@ var singleton dataStore
 // have been replaced by checkInitDataStore() to assert that Init was called.
 // have been replaced by checkInitDataStore() to assert that Init was called.
 func InitDataStore(config *Config) (err error) {
 func InitDataStore(config *Config) (err error) {
 	singleton.init.Do(func() {
 	singleton.init.Do(func() {
+		// Need to gather the list of migratable server entries before
+		// initializing the boltdb store (as prepareMigrationEntries
+		// checks for the existence of the bolt db file)
+		migratableServerEntries := prepareMigrationEntries(config)
+
 		filename := filepath.Join(config.DataStoreDirectory, DATA_STORE_FILENAME)
 		filename := filepath.Join(config.DataStoreDirectory, DATA_STORE_FILENAME)
-		var db *sql.DB
-		db, err = sql.Open(
-			"sqlite3",
-			fmt.Sprintf("file:%s?cache=private&mode=rwc", filename))
+		var db *bolt.DB
+		db, err = bolt.Open(filename, 0600, &bolt.Options{Timeout: 1 * time.Second})
 		if err != nil {
 		if err != nil {
 			// Note: intending to set the err return value for InitDataStore
 			// Note: intending to set the err return value for InitDataStore
 			err = fmt.Errorf("initDataStore failed to open database: %s", err)
 			err = fmt.Errorf("initDataStore failed to open database: %s", err)
 			return
 			return
 		}
 		}
-		initialization := "pragma journal_mode=WAL;\n"
-		if config.DataStoreTempDirectory != "" {
-			// On some platforms (e.g., Android), the standard temporary directories expected
-			// by sqlite (see unixGetTempname in aggregate sqlite3.c) may not be present.
-			// In that case, sqlite tries to use the current working directory; but this may
-			// be "/" (again, on Android) which is not writable.
-			// Instead of setting the process current working directory from this library,
-			// use the deprecated temp_store_directory pragma to force use of a specified
-			// temporary directory: https://www.sqlite.org/pragma.html#pragma_temp_store_directory.
-			// TODO: is there another way to restrict writing of temporary files? E.g. temp_store=3?
-			initialization += fmt.Sprintf(
-				"pragma temp_store_directory=\"%s\";\n", config.DataStoreTempDirectory)
-		}
-		initialization += `
-        create table if not exists serverEntry
-            (id text not null primary key,
-             rank integer not null unique,
-             region text not null,
-             data blob not null);
-        create index if not exists idx_serverEntry_region on serverEntry(region);
-        create table if not exists serverEntryProtocol
-            (serverEntryId text not null,
-             protocol text not null,
-             primary key (serverEntryId, protocol));
-        create table if not exists splitTunnelRoutes
-            (region text not null primary key,
-             etag text not null,
-             data blob not null);
-        create table if not exists urlETags
-            (url text not null primary key,
-             etag text not null);
-        create table if not exists keyValue
-            (key text not null primary key,
-             value text not null);
-        `
-		_, err = db.Exec(initialization)
+
+		err = db.Update(func(tx *bolt.Tx) error {
+			requiredBuckets := []string{
+				serverEntriesBucket,
+				rankedServerEntriesBucket,
+				splitTunnelRouteETagsBucket,
+				splitTunnelRouteDataBucket,
+				urlETagsBucket,
+				keyValueBucket,
+				tunnelStatsBucket,
+			}
+			for _, bucket := range requiredBuckets {
+				_, err := tx.CreateBucketIfNotExists([]byte(bucket))
+				if err != nil {
+					return err
+				}
+			}
+			return nil
+		})
 		if err != nil {
 		if err != nil {
-			err = fmt.Errorf("initDataStore failed to initialize: %s", err)
+			err = fmt.Errorf("initDataStore failed to create buckets: %s", err)
 			return
 			return
 		}
 		}
+
+		// Run consistency checks on datastore and emit errors for diagnostics purposes
+		// We assume this will complete quickly for typical size Psiphon datastores.
+		db.View(func(tx *bolt.Tx) error {
+			err := <-tx.Check()
+			if err != nil {
+				NoticeAlert("boltdb Check(): %s", err)
+			}
+			return nil
+		})
+
 		singleton.db = db
 		singleton.db = db
+
+		// The migrateServerEntries function requires the data store is
+		// initialized prior to execution so that migrated entries can be stored
+
+		if len(migratableServerEntries) > 0 {
+			migrateEntries(migratableServerEntries, filepath.Join(config.DataStoreDirectory, LEGACY_DATA_STORE_FILENAME))
+		}
+
+		resetAllTunnelStatsToUnreported()
 	})
 	})
+
 	return err
 	return err
 }
 }
 
 
@@ -113,71 +138,17 @@ func checkInitDataStore() {
 	}
 	}
 }
 }
 
 
-func canRetry(err error) bool {
-	sqlError, ok := err.(sqlite3.Error)
-	return ok && (sqlError.Code == sqlite3.ErrBusy ||
-		sqlError.Code == sqlite3.ErrLocked ||
-		sqlError.ExtendedCode == sqlite3.ErrLockedSharedCache ||
-		sqlError.ExtendedCode == sqlite3.ErrBusySnapshot)
-}
-
-// transactionWithRetry will retry a write transaction if sqlite3
-// reports a table is locked by another writer.
-func transactionWithRetry(updater func(*sql.Tx) error) error {
-	checkInitDataStore()
-	for i := 0; i < 10; i++ {
-		if i > 0 {
-			// Delay on retry
-			time.Sleep(100)
-		}
-		transaction, err := singleton.db.Begin()
-		if err != nil {
-			return ContextError(err)
-		}
-		err = updater(transaction)
-		if err != nil {
-			transaction.Rollback()
-			if canRetry(err) {
-				continue
-			}
-			return ContextError(err)
-		}
-		err = transaction.Commit()
-		if err != nil {
-			transaction.Rollback()
-			if canRetry(err) {
-				continue
-			}
-			return ContextError(err)
-		}
-		return nil
-	}
-	return ContextError(errors.New("retries exhausted"))
-}
-
-// serverEntryExists returns true if a serverEntry with the
-// given ipAddress id already exists.
-func serverEntryExists(transaction *sql.Tx, ipAddress string) (bool, error) {
-	query := "select count(*) from serverEntry where id  = ?;"
-	var count int
-	err := singleton.db.QueryRow(query, ipAddress).Scan(&count)
-	if err != nil {
-		return false, ContextError(err)
-	}
-	return count > 0, nil
-}
-
 // StoreServerEntry adds the server entry to the data store.
 // StoreServerEntry adds the server entry to the data store.
 // A newly stored (or re-stored) server entry is assigned the next-to-top
 // A newly stored (or re-stored) server entry is assigned the next-to-top
 // rank for iteration order (the previous top ranked entry is promoted). The
 // rank for iteration order (the previous top ranked entry is promoted). The
 // purpose of inserting at next-to-top is to keep the last selected server
 // purpose of inserting at next-to-top is to keep the last selected server
-// as the top ranked server. Note, server candidates are iterated in decending
-// rank order, so the largest rank is top rank.
+// as the top ranked server.
 // When replaceIfExists is true, an existing server entry record is
 // When replaceIfExists is true, an existing server entry record is
 // overwritten; otherwise, the existing record is unchanged.
 // overwritten; otherwise, the existing record is unchanged.
 // If the server entry data is malformed, an alert notice is issued and
 // If the server entry data is malformed, an alert notice is issued and
 // the entry is skipped; no error is returned.
 // the entry is skipped; no error is returned.
 func StoreServerEntry(serverEntry *ServerEntry, replaceIfExists bool) error {
 func StoreServerEntry(serverEntry *ServerEntry, replaceIfExists bool) error {
+	checkInitDataStore()
 
 
 	// Server entries should already be validated before this point,
 	// Server entries should already be validated before this point,
 	// so instead of skipping we fail with an error.
 	// so instead of skipping we fail with an error.
@@ -186,63 +157,62 @@ func StoreServerEntry(serverEntry *ServerEntry, replaceIfExists bool) error {
 		return ContextError(errors.New("invalid server entry"))
 		return ContextError(errors.New("invalid server entry"))
 	}
 	}
 
 
-	return transactionWithRetry(func(transaction *sql.Tx) error {
-		serverEntryExists, err := serverEntryExists(transaction, serverEntry.IpAddress)
-		if err != nil {
-			return ContextError(err)
+	// BoltDB implementation note:
+	// For simplicity, we don't maintain indexes on server entry
+	// region or supported protocols. Instead, we perform full-bucket
+	// scans with a filter. With a small enough database (thousands or
+	// even tens of thousand of server entries) and common enough
+	// values (e.g., many servers support all protocols), performance
+	// is expected to be acceptable.
+
+	serverEntryExists := false
+	err = singleton.db.Update(func(tx *bolt.Tx) error {
+
+		serverEntries := tx.Bucket([]byte(serverEntriesBucket))
+
+		// Check not only that the entry exists, but is valid. This
+		// will replace in the rare case where the data is corrupt.
+		existingServerEntryValid := false
+		existingData := serverEntries.Get([]byte(serverEntry.IpAddress))
+		if existingData != nil {
+			existingServerEntry := new(ServerEntry)
+			if json.Unmarshal(existingData, existingServerEntry) == nil {
+				existingServerEntryValid = true
+			}
 		}
 		}
-		if serverEntryExists && !replaceIfExists {
+
+		if existingServerEntryValid && !replaceIfExists {
 			// Disabling this notice, for now, as it generates too much noise
 			// Disabling this notice, for now, as it generates too much noise
 			// in diagnostics with clients that always submit embedded servers
 			// in diagnostics with clients that always submit embedded servers
 			// to the core on each run.
 			// to the core on each run.
 			// NoticeInfo("ignored update for server %s", serverEntry.IpAddress)
 			// NoticeInfo("ignored update for server %s", serverEntry.IpAddress)
 			return nil
 			return nil
 		}
 		}
-		_, err = transaction.Exec(`
-            update serverEntry set rank = rank + 1
-                where id = (select id from serverEntry order by rank desc limit 1);
-            `)
-		if err != nil {
-			// Note: ContextError() would break canRetry()
-			return err
-		}
+
 		data, err := json.Marshal(serverEntry)
 		data, err := json.Marshal(serverEntry)
 		if err != nil {
 		if err != nil {
 			return ContextError(err)
 			return ContextError(err)
 		}
 		}
-		_, err = transaction.Exec(`
-            insert or replace into serverEntry (id, rank, region, data)
-            values (?, (select coalesce(max(rank)-1, 0) from serverEntry), ?, ?);
-            `, serverEntry.IpAddress, serverEntry.Region, data)
+		err = serverEntries.Put([]byte(serverEntry.IpAddress), data)
 		if err != nil {
 		if err != nil {
-			return err
+			return ContextError(err)
 		}
 		}
-		_, err = transaction.Exec(`
-            delete from serverEntryProtocol where serverEntryId = ?;
-            `, serverEntry.IpAddress)
+
+		err = insertRankedServerEntry(tx, serverEntry.IpAddress, 1)
 		if err != nil {
 		if err != nil {
-			return err
-		}
-		for _, protocol := range SupportedTunnelProtocols {
-			// Note: for meek, the capabilities are FRONTED-MEEK and UNFRONTED-MEEK
-			// and the additonal OSSH service is assumed to be available internally.
-			requiredCapability := strings.TrimSuffix(protocol, "-OSSH")
-			if Contains(serverEntry.Capabilities, requiredCapability) {
-				_, err = transaction.Exec(`
-                    insert into serverEntryProtocol (serverEntryId, protocol)
-                    values (?, ?);
-                    `, serverEntry.IpAddress, protocol)
-				if err != nil {
-					return err
-				}
-			}
-		}
-		// TODO: post notice after commit
-		if !serverEntryExists {
-			NoticeInfo("updated server %s", serverEntry.IpAddress)
+			return ContextError(err)
 		}
 		}
+
 		return nil
 		return nil
 	})
 	})
+	if err != nil {
+		return ContextError(err)
+	}
+
+	if !serverEntryExists {
+		NoticeInfo("updated server %s", serverEntry.IpAddress)
+	}
+	return nil
 }
 }
 
 
 // StoreServerEntries shuffles and stores a list of server entries.
 // StoreServerEntries shuffles and stores a list of server entries.
@@ -250,6 +220,7 @@ func StoreServerEntry(serverEntry *ServerEntry, replaceIfExists bool) error {
 // load balancing.
 // load balancing.
 // There is an independent transaction for each entry insert/update.
 // There is an independent transaction for each entry insert/update.
 func StoreServerEntries(serverEntries []*ServerEntry, replaceIfExists bool) error {
 func StoreServerEntries(serverEntries []*ServerEntry, replaceIfExists bool) error {
+	checkInitDataStore()
 
 
 	for index := len(serverEntries) - 1; index > 0; index-- {
 	for index := len(serverEntries) - 1; index > 0; index-- {
 		swapIndex := rand.Intn(index + 1)
 		swapIndex := rand.Intn(index + 1)
@@ -275,18 +246,110 @@ func StoreServerEntries(serverEntries []*ServerEntry, replaceIfExists bool) erro
 // iterated in decending rank order, so this server entry will be
 // iterated in decending rank order, so this server entry will be
 // the first candidate in a subsequent tunnel establishment.
 // the first candidate in a subsequent tunnel establishment.
 func PromoteServerEntry(ipAddress string) error {
 func PromoteServerEntry(ipAddress string) error {
-	return transactionWithRetry(func(transaction *sql.Tx) error {
-		_, err := transaction.Exec(`
-            update serverEntry
-            set rank = (select MAX(rank)+1 from serverEntry)
-            where id = ?;
-            `, ipAddress)
-		if err != nil {
-			// Note: ContextError() would break canRetry()
-			return err
+	checkInitDataStore()
+
+	err := singleton.db.Update(func(tx *bolt.Tx) error {
+
+		// Ensure the corresponding entry exists before
+		// inserting into rank.
+		bucket := tx.Bucket([]byte(serverEntriesBucket))
+		data := bucket.Get([]byte(ipAddress))
+		if data == nil {
+			NoticeAlert(
+				"PromoteServerEntry: ignoring unknown server entry: %s",
+				ipAddress)
+			return nil
 		}
 		}
-		return nil
+
+		return insertRankedServerEntry(tx, ipAddress, 0)
 	})
 	})
+
+	if err != nil {
+		return ContextError(err)
+	}
+	return nil
+}
+
+func getRankedServerEntries(tx *bolt.Tx) ([]string, error) {
+	bucket := tx.Bucket([]byte(rankedServerEntriesBucket))
+	data := bucket.Get([]byte(rankedServerEntriesKey))
+
+	if data == nil {
+		return []string{}, nil
+	}
+
+	rankedServerEntries := make([]string, 0)
+	err := json.Unmarshal(data, &rankedServerEntries)
+	if err != nil {
+		return nil, ContextError(err)
+	}
+	return rankedServerEntries, nil
+}
+
+func setRankedServerEntries(tx *bolt.Tx, rankedServerEntries []string) error {
+	data, err := json.Marshal(rankedServerEntries)
+	if err != nil {
+		return ContextError(err)
+	}
+
+	bucket := tx.Bucket([]byte(rankedServerEntriesBucket))
+	err = bucket.Put([]byte(rankedServerEntriesKey), data)
+	if err != nil {
+		return ContextError(err)
+	}
+
+	return nil
+}
+
+func insertRankedServerEntry(tx *bolt.Tx, serverEntryId string, position int) error {
+	rankedServerEntries, err := getRankedServerEntries(tx)
+	if err != nil {
+		return ContextError(err)
+	}
+
+	// BoltDB implementation note:
+	// For simplicity, we store the ranked server ids in an array serialized to
+	// a single key value. To ensure this value doesn't grow without bound,
+	// it's capped at rankedServerEntryCount. For now, this cap should be large
+	// enough to meet the shuffleHeadLength = config.TunnelPoolSize criteria, for
+	// any reasonable configuration of config.TunnelPoolSize.
+
+	// Using: https://github.com/golang/go/wiki/SliceTricks
+
+	// When serverEntryId is already ranked, remove it first to avoid duplicates
+
+	for i, rankedServerEntryId := range rankedServerEntries {
+		if rankedServerEntryId == serverEntryId {
+			rankedServerEntries = append(
+				rankedServerEntries[:i], rankedServerEntries[i+1:]...)
+			break
+		}
+	}
+
+	// SliceTricks insert, with length cap enforced
+
+	if len(rankedServerEntries) < rankedServerEntryCount {
+		rankedServerEntries = append(rankedServerEntries, "")
+	}
+	if position >= len(rankedServerEntries) {
+		position = len(rankedServerEntries) - 1
+	}
+	copy(rankedServerEntries[position+1:], rankedServerEntries[position:])
+	rankedServerEntries[position] = serverEntryId
+
+	err = setRankedServerEntries(tx, rankedServerEntries)
+	if err != nil {
+		return ContextError(err)
+	}
+
+	return nil
+}
+
+func serverEntrySupportsProtocol(serverEntry *ServerEntry, protocol string) bool {
+	// Note: for meek, the capabilities are FRONTED-MEEK and UNFRONTED-MEEK
+	// and the additonal OSSH service is assumed to be available internally.
+	requiredCapability := strings.TrimSuffix(protocol, "-OSSH")
+	return Contains(serverEntry.Capabilities, requiredCapability)
 }
 }
 
 
 // ServerEntryIterator is used to iterate over
 // ServerEntryIterator is used to iterate over
@@ -295,14 +358,14 @@ type ServerEntryIterator struct {
 	region                      string
 	region                      string
 	protocol                    string
 	protocol                    string
 	shuffleHeadLength           int
 	shuffleHeadLength           int
-	transaction                 *sql.Tx
-	cursor                      *sql.Rows
+	serverEntryIds              []string
+	serverEntryIndex            int
 	isTargetServerEntryIterator bool
 	isTargetServerEntryIterator bool
 	hasNextTargetServerEntry    bool
 	hasNextTargetServerEntry    bool
 	targetServerEntry           *ServerEntry
 	targetServerEntry           *ServerEntry
 }
 }
 
 
-// NewServerEntryIterator creates a new NewServerEntryIterator
+// NewServerEntryIterator creates a new ServerEntryIterator
 func NewServerEntryIterator(config *Config) (iterator *ServerEntryIterator, err error) {
 func NewServerEntryIterator(config *Config) (iterator *ServerEntryIterator, err error) {
 
 
 	// When configured, this target server entry is the only candidate
 	// When configured, this target server entry is the only candidate
@@ -362,54 +425,69 @@ func (iterator *ServerEntryIterator) Reset() error {
 	count := CountServerEntries(iterator.region, iterator.protocol)
 	count := CountServerEntries(iterator.region, iterator.protocol)
 	NoticeCandidateServers(iterator.region, iterator.protocol, count)
 	NoticeCandidateServers(iterator.region, iterator.protocol, count)
 
 
-	transaction, err := singleton.db.Begin()
-	if err != nil {
-		return ContextError(err)
-	}
-	var cursor *sql.Rows
-
 	// This query implements the Psiphon server candidate selection
 	// This query implements the Psiphon server candidate selection
 	// algorithm: the first TunnelPoolSize server candidates are in rank
 	// algorithm: the first TunnelPoolSize server candidates are in rank
 	// (priority) order, to favor previously successful servers; then the
 	// (priority) order, to favor previously successful servers; then the
 	// remaining long tail is shuffled to raise up less recent candidates.
 	// remaining long tail is shuffled to raise up less recent candidates.
 
 
-	whereClause, whereParams := makeServerEntryWhereClause(
-		iterator.region, iterator.protocol, nil)
-	headLength := iterator.shuffleHeadLength
-	queryFormat := `
-		select data from serverEntry %s
-		order by case
-		when rank > coalesce((select rank from serverEntry %s order by rank desc limit ?, 1), -1) then rank
-		else abs(random())%%((select rank from serverEntry %s order by rank desc limit ?, 1))
-		end desc;`
-	query := fmt.Sprintf(queryFormat, whereClause, whereClause, whereClause)
-	params := make([]interface{}, 0)
-	params = append(params, whereParams...)
-	params = append(params, whereParams...)
-	params = append(params, headLength)
-	params = append(params, whereParams...)
-	params = append(params, headLength)
-
-	cursor, err = transaction.Query(query, params...)
+	// BoltDB implementation note:
+	// We don't keep a transaction open for the duration of the iterator
+	// because this would expose the following semantics to consumer code:
+	//
+	//     Read-only transactions and read-write transactions ... generally
+	//     shouldn't be opened simultaneously in the same goroutine. This can
+	//     cause a deadlock as the read-write transaction needs to periodically
+	//     re-map the data file but it cannot do so while a read-only
+	//     transaction is open.
+	//     (https://github.com/boltdb/bolt)
+	//
+	// So the underlying serverEntriesBucket could change after the serverEntryIds
+	// list is built.
+
+	var serverEntryIds []string
+
+	err := singleton.db.View(func(tx *bolt.Tx) error {
+		var err error
+		serverEntryIds, err = getRankedServerEntries(tx)
+		if err != nil {
+			return err
+		}
+
+		skipServerEntryIds := make(map[string]bool)
+		for _, serverEntryId := range serverEntryIds {
+			skipServerEntryIds[serverEntryId] = true
+		}
+
+		bucket := tx.Bucket([]byte(serverEntriesBucket))
+		cursor := bucket.Cursor()
+		for key, _ := cursor.Last(); key != nil; key, _ = cursor.Prev() {
+			serverEntryId := string(key)
+			if _, ok := skipServerEntryIds[serverEntryId]; ok {
+				continue
+			}
+			serverEntryIds = append(serverEntryIds, serverEntryId)
+		}
+		return nil
+	})
 	if err != nil {
 	if err != nil {
-		transaction.Rollback()
 		return ContextError(err)
 		return ContextError(err)
 	}
 	}
-	iterator.transaction = transaction
-	iterator.cursor = cursor
+
+	for i := len(serverEntryIds) - 1; i > iterator.shuffleHeadLength-1; i-- {
+		j := rand.Intn(i+1-iterator.shuffleHeadLength) + iterator.shuffleHeadLength
+		serverEntryIds[i], serverEntryIds[j] = serverEntryIds[j], serverEntryIds[i]
+	}
+
+	iterator.serverEntryIds = serverEntryIds
+	iterator.serverEntryIndex = 0
+
 	return nil
 	return nil
 }
 }
 
 
 // Close cleans up resources associated with a ServerEntryIterator.
 // Close cleans up resources associated with a ServerEntryIterator.
 func (iterator *ServerEntryIterator) Close() {
 func (iterator *ServerEntryIterator) Close() {
-	if iterator.cursor != nil {
-		iterator.cursor.Close()
-	}
-	iterator.cursor = nil
-	if iterator.transaction != nil {
-		iterator.transaction.Rollback()
-	}
-	iterator.transaction = nil
+	iterator.serverEntryIds = nil
+	iterator.serverEntryIndex = 0
 }
 }
 
 
 // Next returns the next server entry, by rank, for a ServerEntryIterator.
 // Next returns the next server entry, by rank, for a ServerEntryIterator.
@@ -429,24 +507,55 @@ func (iterator *ServerEntryIterator) Next() (serverEntry *ServerEntry, err error
 		return nil, nil
 		return nil, nil
 	}
 	}
 
 
-	if !iterator.cursor.Next() {
-		err = iterator.cursor.Err()
+	// There are no region/protocol indexes for the server entries bucket.
+	// Loop until we have the next server entry that matches the iterator
+	// filter requirements.
+	for {
+		if iterator.serverEntryIndex >= len(iterator.serverEntryIds) {
+			// There is no next item
+			return nil, nil
+		}
+
+		serverEntryId := iterator.serverEntryIds[iterator.serverEntryIndex]
+		iterator.serverEntryIndex += 1
+
+		var data []byte
+		err = singleton.db.View(func(tx *bolt.Tx) error {
+			bucket := tx.Bucket([]byte(serverEntriesBucket))
+			value := bucket.Get([]byte(serverEntryId))
+			if value != nil {
+				// Must make a copy as slice is only valid within transaction.
+				data = make([]byte, len(value))
+				copy(data, value)
+			}
+			return nil
+		})
 		if err != nil {
 		if err != nil {
 			return nil, ContextError(err)
 			return nil, ContextError(err)
 		}
 		}
-		// There is no next item
-		return nil, nil
-	}
 
 
-	var data []byte
-	err = iterator.cursor.Scan(&data)
-	if err != nil {
-		return nil, ContextError(err)
-	}
-	serverEntry = new(ServerEntry)
-	err = json.Unmarshal(data, serverEntry)
-	if err != nil {
-		return nil, ContextError(err)
+		if data == nil {
+			// In case of data corruption or a bug causing this condition,
+			// do not stop iterating.
+			NoticeAlert("ServerEntryIterator.Next: unexpected missing server entry: %s", serverEntryId)
+			continue
+		}
+
+		serverEntry = new(ServerEntry)
+		err = json.Unmarshal(data, serverEntry)
+		if err != nil {
+			// In case of data corruption or a bug causing this condition,
+			// do not stop iterating.
+			NoticeAlert("ServerEntryIterator.Next: %s", ContextError(err))
+			continue
+		}
+
+		// Check filter requirements
+		if (iterator.region == "" || serverEntry.Region == iterator.region) &&
+			(iterator.protocol == "" || serverEntrySupportsProtocol(serverEntry, iterator.protocol)) {
+
+			break
+		}
 	}
 	}
 
 
 	return MakeCompatibleServerEntry(serverEntry), nil
 	return MakeCompatibleServerEntry(serverEntry), nil
@@ -465,123 +574,95 @@ func MakeCompatibleServerEntry(serverEntry *ServerEntry) *ServerEntry {
 	return serverEntry
 	return serverEntry
 }
 }
 
 
-func makeServerEntryWhereClause(
-	region, protocol string, excludeIds []string) (whereClause string, whereParams []interface{}) {
-	whereClause = ""
-	whereParams = make([]interface{}, 0)
-	if region != "" {
-		whereClause += " where region = ?"
-		whereParams = append(whereParams, region)
-	}
-	if protocol != "" {
-		if len(whereClause) > 0 {
-			whereClause += " and"
-		} else {
-			whereClause += " where"
-		}
-		whereClause +=
-			" exists (select 1 from serverEntryProtocol where protocol = ? and serverEntryId = serverEntry.id)"
-		whereParams = append(whereParams, protocol)
-	}
-	if len(excludeIds) > 0 {
-		if len(whereClause) > 0 {
-			whereClause += " and"
-		} else {
-			whereClause += " where"
-		}
-		whereClause += " id in ("
-		for index, id := range excludeIds {
-			if index > 0 {
-				whereClause += ", "
+func scanServerEntries(scanner func(*ServerEntry)) error {
+	err := singleton.db.View(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(serverEntriesBucket))
+		cursor := bucket.Cursor()
+
+		for key, value := cursor.First(); key != nil; key, value = cursor.Next() {
+			serverEntry := new(ServerEntry)
+			err := json.Unmarshal(value, serverEntry)
+			if err != nil {
+				// In case of data corruption or a bug causing this condition,
+				// do not stop iterating.
+				NoticeAlert("scanServerEntries: %s", ContextError(err))
+				continue
 			}
 			}
-			whereClause += "?"
-			whereParams = append(whereParams, id)
+			scanner(serverEntry)
 		}
 		}
-		whereClause += ")"
+
+		return nil
+	})
+
+	if err != nil {
+		return ContextError(err)
 	}
 	}
-	return whereClause, whereParams
+
+	return nil
 }
 }
 
 
 // CountServerEntries returns a count of stored servers for the
 // CountServerEntries returns a count of stored servers for the
 // specified region and protocol.
 // specified region and protocol.
 func CountServerEntries(region, protocol string) int {
 func CountServerEntries(region, protocol string) int {
 	checkInitDataStore()
 	checkInitDataStore()
-	var count int
-	whereClause, whereParams := makeServerEntryWhereClause(region, protocol, nil)
-	query := "select count(*) from serverEntry" + whereClause
-	err := singleton.db.QueryRow(query, whereParams...).Scan(&count)
+
+	count := 0
+	err := scanServerEntries(func(serverEntry *ServerEntry) {
+		if (region == "" || serverEntry.Region == region) &&
+			(protocol == "" || serverEntrySupportsProtocol(serverEntry, protocol)) {
+			count += 1
+		}
+	})
 
 
 	if err != nil {
 	if err != nil {
 		NoticeAlert("CountServerEntries failed: %s", err)
 		NoticeAlert("CountServerEntries failed: %s", err)
 		return 0
 		return 0
 	}
 	}
 
 
-	if region == "" {
-		region = "(any)"
-	}
-	if protocol == "" {
-		protocol = "(any)"
-	}
-	NoticeInfo("servers for region %s and protocol %s: %d",
-		region, protocol, count)
-
 	return count
 	return count
 }
 }
 
 
 // ReportAvailableRegions prints a notice with the available egress regions.
 // ReportAvailableRegions prints a notice with the available egress regions.
+// Note that this report ignores config.TunnelProtocol.
 func ReportAvailableRegions() {
 func ReportAvailableRegions() {
 	checkInitDataStore()
 	checkInitDataStore()
 
 
-	// TODO: For consistency, regions-per-protocol should be used
+	regions := make(map[string]bool)
+	err := scanServerEntries(func(serverEntry *ServerEntry) {
+		regions[serverEntry.Region] = true
+	})
 
 
-	rows, err := singleton.db.Query("select distinct(region) from serverEntry;")
 	if err != nil {
 	if err != nil {
-		NoticeAlert("failed to query data store for available regions: %s", ContextError(err))
+		NoticeAlert("ReportAvailableRegions failed: %s", err)
 		return
 		return
 	}
 	}
-	defer rows.Close()
-
-	var regions []string
-
-	for rows.Next() {
-		var region string
-		err = rows.Scan(&region)
-		if err != nil {
-			NoticeAlert("failed to retrieve available regions from data store: %s", ContextError(err))
-			return
-		}
 
 
+	regionList := make([]string, 0, len(regions))
+	for region, _ := range regions {
 		// Some server entries do not have a region, but it makes no sense to return
 		// Some server entries do not have a region, but it makes no sense to return
 		// an empty string as an "available region".
 		// an empty string as an "available region".
 		if region != "" {
 		if region != "" {
-			regions = append(regions, region)
+			regionList = append(regionList, region)
 		}
 		}
 	}
 	}
 
 
-	NoticeAvailableEgressRegions(regions)
+	NoticeAvailableEgressRegions(regionList)
 }
 }
 
 
 // GetServerEntryIpAddresses returns an array containing
 // GetServerEntryIpAddresses returns an array containing
 // all stored server IP addresses.
 // all stored server IP addresses.
 func GetServerEntryIpAddresses() (ipAddresses []string, err error) {
 func GetServerEntryIpAddresses() (ipAddresses []string, err error) {
 	checkInitDataStore()
 	checkInitDataStore()
+
 	ipAddresses = make([]string, 0)
 	ipAddresses = make([]string, 0)
-	rows, err := singleton.db.Query("select id from serverEntry;")
+	err = scanServerEntries(func(serverEntry *ServerEntry) {
+		ipAddresses = append(ipAddresses, serverEntry.IpAddress)
+	})
+
 	if err != nil {
 	if err != nil {
 		return nil, ContextError(err)
 		return nil, ContextError(err)
 	}
 	}
-	defer rows.Close()
-	for rows.Next() {
-		var ipAddress string
-		err = rows.Scan(&ipAddress)
-		if err != nil {
-			return nil, ContextError(err)
-		}
-		ipAddresses = append(ipAddresses, ipAddress)
-	}
-	if err = rows.Err(); err != nil {
-		return nil, ContextError(err)
-	}
+
 	return ipAddresses, nil
 	return ipAddresses, nil
 }
 }
 
 
@@ -589,28 +670,34 @@ func GetServerEntryIpAddresses() (ipAddresses []string, err error) {
 // the given region. The associated etag is also stored and
 // the given region. The associated etag is also stored and
 // used to make efficient web requests for updates to the data.
 // used to make efficient web requests for updates to the data.
 func SetSplitTunnelRoutes(region, etag string, data []byte) error {
 func SetSplitTunnelRoutes(region, etag string, data []byte) error {
-	return transactionWithRetry(func(transaction *sql.Tx) error {
-		_, err := transaction.Exec(`
-            insert or replace into splitTunnelRoutes (region, etag, data)
-            values (?, ?, ?);
-            `, region, etag, data)
-		if err != nil {
-			// Note: ContextError() would break canRetry()
-			return err
-		}
-		return nil
+	checkInitDataStore()
+
+	err := singleton.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(splitTunnelRouteETagsBucket))
+		err := bucket.Put([]byte(region), []byte(etag))
+
+		bucket = tx.Bucket([]byte(splitTunnelRouteDataBucket))
+		err = bucket.Put([]byte(region), data)
+		return err
 	})
 	})
+
+	if err != nil {
+		return ContextError(err)
+	}
+	return nil
 }
 }
 
 
 // GetSplitTunnelRoutesETag retrieves the etag for cached routes
 // GetSplitTunnelRoutesETag retrieves the etag for cached routes
 // data for the specified region. If not found, it returns an empty string value.
 // data for the specified region. If not found, it returns an empty string value.
 func GetSplitTunnelRoutesETag(region string) (etag string, err error) {
 func GetSplitTunnelRoutesETag(region string) (etag string, err error) {
 	checkInitDataStore()
 	checkInitDataStore()
-	rows := singleton.db.QueryRow("select etag from splitTunnelRoutes where region = ?;", region)
-	err = rows.Scan(&etag)
-	if err == sql.ErrNoRows {
-		return "", nil
-	}
+
+	err = singleton.db.View(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(splitTunnelRouteETagsBucket))
+		etag = string(bucket.Get([]byte(region)))
+		return nil
+	})
+
 	if err != nil {
 	if err != nil {
 		return "", ContextError(err)
 		return "", ContextError(err)
 	}
 	}
@@ -621,11 +708,18 @@ func GetSplitTunnelRoutesETag(region string) (etag string, err error) {
 // for the specified region. If not found, it returns a nil value.
 // for the specified region. If not found, it returns a nil value.
 func GetSplitTunnelRoutesData(region string) (data []byte, err error) {
 func GetSplitTunnelRoutesData(region string) (data []byte, err error) {
 	checkInitDataStore()
 	checkInitDataStore()
-	rows := singleton.db.QueryRow("select data from splitTunnelRoutes where region = ?;", region)
-	err = rows.Scan(&data)
-	if err == sql.ErrNoRows {
-		return nil, nil
-	}
+
+	err = singleton.db.View(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(splitTunnelRouteDataBucket))
+		value := bucket.Get([]byte(region))
+		if value != nil {
+			// Must make a copy as slice is only valid within transaction.
+			data = make([]byte, len(value))
+			copy(data, value)
+		}
+		return nil
+	})
+
 	if err != nil {
 	if err != nil {
 		return nil, ContextError(err)
 		return nil, ContextError(err)
 	}
 	}
@@ -636,28 +730,31 @@ func GetSplitTunnelRoutesData(region string) (data []byte, err error) {
 // Note: input URL is treated as a string, and is not
 // Note: input URL is treated as a string, and is not
 // encoded or decoded or otherwise canonicalized.
 // encoded or decoded or otherwise canonicalized.
 func SetUrlETag(url, etag string) error {
 func SetUrlETag(url, etag string) error {
-	return transactionWithRetry(func(transaction *sql.Tx) error {
-		_, err := transaction.Exec(`
-            insert or replace into urlETags (url, etag)
-            values (?, ?);
-            `, url, etag)
-		if err != nil {
-			// Note: ContextError() would break canRetry()
-			return err
-		}
-		return nil
+	checkInitDataStore()
+
+	err := singleton.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(urlETagsBucket))
+		err := bucket.Put([]byte(url), []byte(etag))
+		return err
 	})
 	})
+
+	if err != nil {
+		return ContextError(err)
+	}
+	return nil
 }
 }
 
 
 // GetUrlETag retrieves a previously stored ETag for the
 // GetUrlETag retrieves a previously stored ETag for the
 // specified URL. If not found, it returns an empty string value.
 // specified URL. If not found, it returns an empty string value.
 func GetUrlETag(url string) (etag string, err error) {
 func GetUrlETag(url string) (etag string, err error) {
 	checkInitDataStore()
 	checkInitDataStore()
-	rows := singleton.db.QueryRow("select etag from urlETags where url = ?;", url)
-	err = rows.Scan(&etag)
-	if err == sql.ErrNoRows {
-		return "", nil
-	}
+
+	err = singleton.db.View(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(urlETagsBucket))
+		etag = string(bucket.Get([]byte(url)))
+		return nil
+	})
+
 	if err != nil {
 	if err != nil {
 		return "", ContextError(err)
 		return "", ContextError(err)
 	}
 	}
@@ -666,30 +763,226 @@ func GetUrlETag(url string) (etag string, err error) {
 
 
 // SetKeyValue stores a key/value pair.
 // SetKeyValue stores a key/value pair.
 func SetKeyValue(key, value string) error {
 func SetKeyValue(key, value string) error {
-	return transactionWithRetry(func(transaction *sql.Tx) error {
-		_, err := transaction.Exec(`
-            insert or replace into keyValue (key, value)
-            values (?, ?);
-            `, key, value)
-		if err != nil {
-			// Note: ContextError() would break canRetry()
-			return err
-		}
-		return nil
+	checkInitDataStore()
+
+	err := singleton.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(keyValueBucket))
+		err := bucket.Put([]byte(key), []byte(value))
+		return err
 	})
 	})
+
+	if err != nil {
+		return ContextError(err)
+	}
+	return nil
 }
 }
 
 
 // GetKeyValue retrieves the value for a given key. If not found,
 // GetKeyValue retrieves the value for a given key. If not found,
 // it returns an empty string value.
 // it returns an empty string value.
 func GetKeyValue(key string) (value string, err error) {
 func GetKeyValue(key string) (value string, err error) {
 	checkInitDataStore()
 	checkInitDataStore()
-	rows := singleton.db.QueryRow("select value from keyValue where key = ?;", key)
-	err = rows.Scan(&value)
-	if err == sql.ErrNoRows {
-		return "", nil
-	}
+
+	err = singleton.db.View(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(keyValueBucket))
+		value = string(bucket.Get([]byte(key)))
+		return nil
+	})
+
 	if err != nil {
 	if err != nil {
 		return "", ContextError(err)
 		return "", ContextError(err)
 	}
 	}
 	return value, nil
 	return value, nil
 }
 }
+
+// Tunnel stats records in the tunnelStatsStateUnreported
+// state are available to be taken out.
+// Records in the tunnelStatsStateReporting state have been
+// taken out and are pending either deletion (for a
+// successful request) or reversion to StateUnreported (for
+// a failed request).
+// All tunnel stats records are reverted to StateUnreported
+// when the datastore is initialized at start up.
+
+var tunnelStatsStateUnreported = []byte("0")
+var tunnelStatsStateReporting = []byte("1")
+
+// StoreTunnelStats adds a new tunnel stats record, which is
+// set to StateUnreported and is an immediate candidate for
+// reporting.
+// tunnelStats is a JSON byte array containing fields as
+// required by the Psiphon server API (see RecordTunnelStats).
+// It's assumed that the JSON value contains enough unique
+// information for the value to function as a key in the
+// key/value datastore. This assumption is currently satisfied
+// by the fields sessionId + tunnelNumber.
+func StoreTunnelStats(tunnelStats []byte) error {
+	checkInitDataStore()
+
+	err := singleton.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(tunnelStatsBucket))
+		err := bucket.Put(tunnelStats, tunnelStatsStateUnreported)
+		return err
+	})
+
+	if err != nil {
+		return ContextError(err)
+	}
+	return nil
+}
+
+// CountUnreportedTunnelStats returns the number of tunnel
+// stats records in StateUnreported.
+func CountUnreportedTunnelStats() int {
+	checkInitDataStore()
+
+	unreported := 0
+
+	err := singleton.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(tunnelStatsBucket))
+		cursor := bucket.Cursor()
+		for key, value := cursor.First(); key != nil; key, value = cursor.Next() {
+			if 0 == bytes.Compare(value, tunnelStatsStateUnreported) {
+				unreported++
+				break
+			}
+		}
+		return nil
+	})
+
+	if err != nil {
+		NoticeAlert("CountUnreportedTunnelStats failed: %s", err)
+		return 0
+	}
+
+	return unreported
+}
+
+// TakeOutUnreportedTunnelStats returns up to maxCount tunnel
+// stats records that are in StateUnreported. The records are set
+// to StateReporting. If the records are successfully reported,
+// clear them with ClearReportedTunnelStats. If the records are
+// not successfully reported, restore them with
+// PutBackUnreportedTunnelStats.
+func TakeOutUnreportedTunnelStats(maxCount int) ([][]byte, error) {
+	checkInitDataStore()
+
+	tunnelStats := make([][]byte, 0)
+
+	err := singleton.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(tunnelStatsBucket))
+		cursor := bucket.Cursor()
+		for key, value := cursor.First(); key != nil; key, value = cursor.Next() {
+
+			// Perform a test JSON unmarshaling. In case of data corruption or a bug,
+			// skip the record.
+			var jsonData interface{}
+			err := json.Unmarshal(key, &jsonData)
+			if err != nil {
+				NoticeAlert(
+					"Invalid key in TakeOutUnreportedTunnelStats: %s: %s",
+					string(key), err)
+				continue
+			}
+
+			if 0 == bytes.Compare(value, tunnelStatsStateUnreported) {
+				// Must make a copy as slice is only valid within transaction.
+				data := make([]byte, len(key))
+				copy(data, key)
+				tunnelStats = append(tunnelStats, data)
+				if len(tunnelStats) >= maxCount {
+					break
+				}
+			}
+		}
+		for _, key := range tunnelStats {
+			err := bucket.Put(key, tunnelStatsStateReporting)
+			if err != nil {
+				return err
+			}
+		}
+
+		return nil
+	})
+
+	if err != nil {
+		return nil, ContextError(err)
+	}
+	return tunnelStats, nil
+}
+
+// PutBackUnreportedTunnelStats restores a list of tunnel
+// stats records to StateUnreported.
+func PutBackUnreportedTunnelStats(tunnelStats [][]byte) error {
+	checkInitDataStore()
+
+	err := singleton.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(tunnelStatsBucket))
+		for _, key := range tunnelStats {
+			err := bucket.Put(key, tunnelStatsStateUnreported)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+
+	if err != nil {
+		return ContextError(err)
+	}
+	return nil
+}
+
+// ClearReportedTunnelStats deletes a list of tunnel
+// stats records that were successfully reported.
+func ClearReportedTunnelStats(tunnelStats [][]byte) error {
+	checkInitDataStore()
+
+	err := singleton.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(tunnelStatsBucket))
+		for _, key := range tunnelStats {
+			err := bucket.Delete(key)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+
+	if err != nil {
+		return ContextError(err)
+	}
+	return nil
+}
+
+// resetAllTunnelStatsToUnreported sets all tunnel
+// stats records to StateUnreported. This reset is called
+// when the datastore is initialized at start up, as we do
+// not know if tunnel records in StateReporting were reported
+// or not.
+func resetAllTunnelStatsToUnreported() error {
+	checkInitDataStore()
+
+	err := singleton.db.Update(func(tx *bolt.Tx) error {
+		bucket := tx.Bucket([]byte(tunnelStatsBucket))
+		resetKeys := make([][]byte, 0)
+		cursor := bucket.Cursor()
+		for key, _ := cursor.First(); key != nil; key, _ = cursor.Next() {
+			resetKeys = append(resetKeys, key)
+		}
+		// TODO: data mutation is done outside cursor. Is this
+		// strictly necessary in this case?
+		// https://godoc.org/github.com/boltdb/bolt#Cursor
+		for _, key := range resetKeys {
+			err := bucket.Put(key, tunnelStatsStateUnreported)
+			if err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+
+	if err != nil {
+		return ContextError(err)
+	}
+	return nil
+}

+ 0 - 719
psiphon/dataStore_alt.go

@@ -1,719 +0,0 @@
-// +build !windows
-
-/*
- * Copyright (c) 2015, Psiphon Inc.
- * All rights reserved.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- *
- */
-
-package psiphon
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"math/rand"
-	"path/filepath"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/Psiphon-Inc/bolt"
-)
-
-// The BoltDB dataStore implementation is an alternative to the sqlite3-based
-// implementation in dataStore.go. Both implementations have the same interface.
-//
-// BoltDB is pure Go, and is intended to be used in cases where we have trouble
-// building sqlite3/CGO (e.g., currently go mobile due to
-// https://github.com/mattn/go-sqlite3/issues/201), and perhaps ultimately as
-// the primary dataStore implementation.
-//
-type dataStore struct {
-	init sync.Once
-	db   *bolt.DB
-}
-
-const (
-	serverEntriesBucket         = "serverEntries"
-	rankedServerEntriesBucket   = "rankedServerEntries"
-	rankedServerEntriesKey      = "rankedServerEntries"
-	splitTunnelRouteETagsBucket = "splitTunnelRouteETags"
-	splitTunnelRouteDataBucket  = "splitTunnelRouteData"
-	urlETagsBucket              = "urlETags"
-	keyValueBucket              = "keyValues"
-	rankedServerEntryCount      = 100
-)
-
-var singleton dataStore
-
-// InitDataStore initializes the singleton instance of dataStore. This
-// function uses a sync.Once and is safe for use by concurrent goroutines.
-// The underlying sql.DB connection pool is also safe.
-//
-// Note: the sync.Once was more useful when initDataStore was private and
-// called on-demand by the public functions below. Now we require an explicit
-// InitDataStore() call with the filename passed in. The on-demand calls
-// have been replaced by checkInitDataStore() to assert that Init was called.
-func InitDataStore(config *Config) (err error) {
-	singleton.init.Do(func() {
-		filename := filepath.Join(config.DataStoreDirectory, DATA_STORE_FILENAME)
-		var db *bolt.DB
-		db, err = bolt.Open(filename, 0600, &bolt.Options{Timeout: 1 * time.Second})
-		if err != nil {
-			// Note: intending to set the err return value for InitDataStore
-			err = fmt.Errorf("initDataStore failed to open database: %s", err)
-			return
-		}
-
-		err = db.Update(func(tx *bolt.Tx) error {
-			requiredBuckets := []string{
-				serverEntriesBucket,
-				rankedServerEntriesBucket,
-				splitTunnelRouteETagsBucket,
-				splitTunnelRouteDataBucket,
-				urlETagsBucket,
-				keyValueBucket,
-			}
-			for _, bucket := range requiredBuckets {
-				_, err := tx.CreateBucketIfNotExists([]byte(bucket))
-				if err != nil {
-					return err
-				}
-			}
-			return nil
-		})
-		if err != nil {
-			err = fmt.Errorf("initDataStore failed to create buckets: %s", err)
-			return
-		}
-
-		singleton.db = db
-	})
-	return err
-}
-
-func checkInitDataStore() {
-	if singleton.db == nil {
-		panic("checkInitDataStore: datastore not initialized")
-	}
-}
-
-// StoreServerEntry adds the server entry to the data store.
-// A newly stored (or re-stored) server entry is assigned the next-to-top
-// rank for iteration order (the previous top ranked entry is promoted). The
-// purpose of inserting at next-to-top is to keep the last selected server
-// as the top ranked server.
-// When replaceIfExists is true, an existing server entry record is
-// overwritten; otherwise, the existing record is unchanged.
-// If the server entry data is malformed, an alert notice is issued and
-// the entry is skipped; no error is returned.
-func StoreServerEntry(serverEntry *ServerEntry, replaceIfExists bool) error {
-	checkInitDataStore()
-
-	// Server entries should already be validated before this point,
-	// so instead of skipping we fail with an error.
-	err := ValidateServerEntry(serverEntry)
-	if err != nil {
-		return ContextError(errors.New("invalid server entry"))
-	}
-
-	// BoltDB implementation note:
-	// For simplicity, we don't maintain indexes on server entry
-	// region or supported protocols. Instead, we perform full-bucket
-	// scans with a filter. With a small enough database (thousands or
-	// even tens of thousand of server entries) and common enough
-	// values (e.g., many servers support all protocols), performance
-	// is expected to be acceptable.
-
-	serverEntryExists := false
-	err = singleton.db.Update(func(tx *bolt.Tx) error {
-
-		serverEntries := tx.Bucket([]byte(serverEntriesBucket))
-		serverEntryExists = (serverEntries.Get([]byte(serverEntry.IpAddress)) != nil)
-
-		if serverEntryExists && !replaceIfExists {
-			// Disabling this notice, for now, as it generates too much noise
-			// in diagnostics with clients that always submit embedded servers
-			// to the core on each run.
-			// NoticeInfo("ignored update for server %s", serverEntry.IpAddress)
-			return nil
-		}
-
-		data, err := json.Marshal(serverEntry)
-		if err != nil {
-			return ContextError(err)
-		}
-		err = serverEntries.Put([]byte(serverEntry.IpAddress), data)
-		if err != nil {
-			return ContextError(err)
-		}
-
-		err = insertRankedServerEntry(tx, serverEntry.IpAddress, 1)
-		if err != nil {
-			return ContextError(err)
-		}
-
-		return nil
-	})
-	if err != nil {
-		return ContextError(err)
-	}
-
-	if !serverEntryExists {
-		NoticeInfo("updated server %s", serverEntry.IpAddress)
-	}
-	return nil
-}
-
-// StoreServerEntries shuffles and stores a list of server entries.
-// Shuffling is performed on imported server entrues as part of client-side
-// load balancing.
-// There is an independent transaction for each entry insert/update.
-func StoreServerEntries(serverEntries []*ServerEntry, replaceIfExists bool) error {
-	checkInitDataStore()
-
-	for index := len(serverEntries) - 1; index > 0; index-- {
-		swapIndex := rand.Intn(index + 1)
-		serverEntries[index], serverEntries[swapIndex] = serverEntries[swapIndex], serverEntries[index]
-	}
-
-	for _, serverEntry := range serverEntries {
-		err := StoreServerEntry(serverEntry, replaceIfExists)
-		if err != nil {
-			return ContextError(err)
-		}
-	}
-
-	// Since there has possibly been a significant change in the server entries,
-	// take this opportunity to update the available egress regions.
-	ReportAvailableRegions()
-
-	return nil
-}
-
-// PromoteServerEntry assigns the top rank (one more than current
-// max rank) to the specified server entry. Server candidates are
-// iterated in decending rank order, so this server entry will be
-// the first candidate in a subsequent tunnel establishment.
-func PromoteServerEntry(ipAddress string) error {
-	checkInitDataStore()
-
-	err := singleton.db.Update(func(tx *bolt.Tx) error {
-		return insertRankedServerEntry(tx, ipAddress, 0)
-	})
-
-	if err != nil {
-		return ContextError(err)
-	}
-	return nil
-}
-
-func getRankedServerEntries(tx *bolt.Tx) ([]string, error) {
-	bucket := tx.Bucket([]byte(rankedServerEntriesBucket))
-	data := bucket.Get([]byte(rankedServerEntriesKey))
-
-	if data == nil {
-		return []string{}, nil
-	}
-
-	rankedServerEntries := make([]string, 0)
-	err := json.Unmarshal(data, &rankedServerEntries)
-	if err != nil {
-		return nil, ContextError(err)
-	}
-	return rankedServerEntries, nil
-}
-
-func setRankedServerEntries(tx *bolt.Tx, rankedServerEntries []string) error {
-	data, err := json.Marshal(rankedServerEntries)
-	if err != nil {
-		return ContextError(err)
-	}
-
-	bucket := tx.Bucket([]byte(rankedServerEntriesBucket))
-	err = bucket.Put([]byte(rankedServerEntriesKey), data)
-	if err != nil {
-		return ContextError(err)
-	}
-
-	return nil
-}
-
-func insertRankedServerEntry(tx *bolt.Tx, serverEntryId string, position int) error {
-	rankedServerEntries, err := getRankedServerEntries(tx)
-	if err != nil {
-		return ContextError(err)
-	}
-
-	// BoltDB implementation note:
-	// For simplicity, we store the ranked server ids in an array serialized to
-	// a single key value. To ensure this value doesn't grow without bound,
-	// it's capped at rankedServerEntryCount. For now, this cap should be large
-	// enough to meet the shuffleHeadLength = config.TunnelPoolSize criteria, for
-	// any reasonable configuration of config.TunnelPoolSize.
-
-	if position >= len(rankedServerEntries) {
-		rankedServerEntries = append(rankedServerEntries, serverEntryId)
-	} else {
-		end := len(rankedServerEntries)
-		if end+1 > rankedServerEntryCount {
-			end = rankedServerEntryCount
-		}
-		// insert: https://github.com/golang/go/wiki/SliceTricks
-		rankedServerEntries = append(
-			rankedServerEntries[:position],
-			append([]string{serverEntryId},
-				rankedServerEntries[position:end]...)...)
-	}
-
-	err = setRankedServerEntries(tx, rankedServerEntries)
-	if err != nil {
-		return ContextError(err)
-	}
-
-	return nil
-}
-
-func serverEntrySupportsProtocol(serverEntry *ServerEntry, protocol string) bool {
-	// Note: for meek, the capabilities are FRONTED-MEEK and UNFRONTED-MEEK
-	// and the additonal OSSH service is assumed to be available internally.
-	requiredCapability := strings.TrimSuffix(protocol, "-OSSH")
-	return Contains(serverEntry.Capabilities, requiredCapability)
-}
-
-// ServerEntryIterator is used to iterate over
-// stored server entries in rank order.
-type ServerEntryIterator struct {
-	region                      string
-	protocol                    string
-	shuffleHeadLength           int
-	serverEntryIds              []string
-	serverEntryIndex            int
-	isTargetServerEntryIterator bool
-	hasNextTargetServerEntry    bool
-	targetServerEntry           *ServerEntry
-}
-
-// NewServerEntryIterator creates a new ServerEntryIterator
-func NewServerEntryIterator(config *Config) (iterator *ServerEntryIterator, err error) {
-
-	// When configured, this target server entry is the only candidate
-	if config.TargetServerEntry != "" {
-		return newTargetServerEntryIterator(config)
-	}
-
-	checkInitDataStore()
-	iterator = &ServerEntryIterator{
-		region:                      config.EgressRegion,
-		protocol:                    config.TunnelProtocol,
-		shuffleHeadLength:           config.TunnelPoolSize,
-		isTargetServerEntryIterator: false,
-	}
-	err = iterator.Reset()
-	if err != nil {
-		return nil, err
-	}
-	return iterator, nil
-}
-
-// newTargetServerEntryIterator is a helper for initializing the TargetServerEntry case
-func newTargetServerEntryIterator(config *Config) (iterator *ServerEntryIterator, err error) {
-	serverEntry, err := DecodeServerEntry(config.TargetServerEntry)
-	if err != nil {
-		return nil, err
-	}
-	if config.EgressRegion != "" && serverEntry.Region != config.EgressRegion {
-		return nil, errors.New("TargetServerEntry does not support EgressRegion")
-	}
-	if config.TunnelProtocol != "" {
-		// Note: same capability/protocol mapping as in StoreServerEntry
-		requiredCapability := strings.TrimSuffix(config.TunnelProtocol, "-OSSH")
-		if !Contains(serverEntry.Capabilities, requiredCapability) {
-			return nil, errors.New("TargetServerEntry does not support TunnelProtocol")
-		}
-	}
-	iterator = &ServerEntryIterator{
-		isTargetServerEntryIterator: true,
-		hasNextTargetServerEntry:    true,
-		targetServerEntry:           serverEntry,
-	}
-	NoticeInfo("using TargetServerEntry: %s", serverEntry.IpAddress)
-	return iterator, nil
-}
-
-// Reset a NewServerEntryIterator to the start of its cycle. The next
-// call to Next will return the first server entry.
-func (iterator *ServerEntryIterator) Reset() error {
-	iterator.Close()
-
-	if iterator.isTargetServerEntryIterator {
-		iterator.hasNextTargetServerEntry = true
-		return nil
-	}
-
-	count := CountServerEntries(iterator.region, iterator.protocol)
-	NoticeCandidateServers(iterator.region, iterator.protocol, count)
-
-	// This query implements the Psiphon server candidate selection
-	// algorithm: the first TunnelPoolSize server candidates are in rank
-	// (priority) order, to favor previously successful servers; then the
-	// remaining long tail is shuffled to raise up less recent candidates.
-
-	// BoltDB implementation note:
-	// We don't keep a transaction open for the duration of the iterator
-	// because this would expose the following semantics to consumer code:
-	//
-	//     Read-only transactions and read-write transactions ... generally
-	//     shouldn't be opened simultaneously in the same goroutine. This can
-	//     cause a deadlock as the read-write transaction needs to periodically
-	//     re-map the data file but it cannot do so while a read-only
-	//     transaction is open.
-	//     (https://github.com/boltdb/bolt)
-	//
-	// So the uderlying serverEntriesBucket could change after the serverEntryIds
-	// list is built.
-
-	var serverEntryIds []string
-
-	err := singleton.db.View(func(tx *bolt.Tx) error {
-		var err error
-		serverEntryIds, err = getRankedServerEntries(tx)
-		if err != nil {
-			return err
-		}
-
-		skipServerEntryIds := make(map[string]bool)
-		for _, serverEntryId := range serverEntryIds {
-			skipServerEntryIds[serverEntryId] = true
-		}
-
-		bucket := tx.Bucket([]byte(serverEntriesBucket))
-		cursor := bucket.Cursor()
-		for key, _ := cursor.Last(); key != nil; key, _ = cursor.Prev() {
-			serverEntryId := string(key)
-			if _, ok := skipServerEntryIds[serverEntryId]; ok {
-				continue
-			}
-			serverEntryIds = append(serverEntryIds, serverEntryId)
-		}
-		return nil
-	})
-	if err != nil {
-		return ContextError(err)
-	}
-
-	for i := len(serverEntryIds) - 1; i > iterator.shuffleHeadLength-1; i-- {
-		j := rand.Intn(i)
-		serverEntryIds[i], serverEntryIds[j] = serverEntryIds[j], serverEntryIds[i]
-	}
-
-	iterator.serverEntryIds = serverEntryIds
-	iterator.serverEntryIndex = 0
-
-	return nil
-}
-
-// Close cleans up resources associated with a ServerEntryIterator.
-func (iterator *ServerEntryIterator) Close() {
-	iterator.serverEntryIds = nil
-	iterator.serverEntryIndex = 0
-}
-
-// Next returns the next server entry, by rank, for a ServerEntryIterator.
-// Returns nil with no error when there is no next item.
-func (iterator *ServerEntryIterator) Next() (serverEntry *ServerEntry, err error) {
-	defer func() {
-		if err != nil {
-			iterator.Close()
-		}
-	}()
-
-	if iterator.isTargetServerEntryIterator {
-		if iterator.hasNextTargetServerEntry {
-			iterator.hasNextTargetServerEntry = false
-			return MakeCompatibleServerEntry(iterator.targetServerEntry), nil
-		}
-		return nil, nil
-	}
-
-	// There are no region/protocol indexes for the server entries bucket.
-	// Loop until we have the next server entry that matches the iterator
-	// filter requirements.
-	for {
-		if iterator.serverEntryIndex >= len(iterator.serverEntryIds) {
-			// There is no next item
-			return nil, nil
-		}
-
-		serverEntryId := iterator.serverEntryIds[iterator.serverEntryIndex]
-		iterator.serverEntryIndex += 1
-
-		var data []byte
-		err = singleton.db.View(func(tx *bolt.Tx) error {
-			bucket := tx.Bucket([]byte(serverEntriesBucket))
-			data = bucket.Get([]byte(serverEntryId))
-			return nil
-		})
-		if err != nil {
-			return nil, ContextError(err)
-		}
-
-		if data == nil {
-			return nil, ContextError(
-				fmt.Errorf("Unexpected missing server entry: %s", serverEntryId))
-		}
-
-		serverEntry = new(ServerEntry)
-		err = json.Unmarshal(data, serverEntry)
-		if err != nil {
-			return nil, ContextError(err)
-		}
-
-		if (iterator.region == "" || serverEntry.Region == iterator.region) &&
-			(iterator.protocol == "" || serverEntrySupportsProtocol(serverEntry, iterator.protocol)) {
-
-			break
-		}
-	}
-
-	return MakeCompatibleServerEntry(serverEntry), nil
-}
-
-// MakeCompatibleServerEntry provides backwards compatibility with old server entries
-// which have a single meekFrontingDomain and not a meekFrontingAddresses array.
-// By copying this one meekFrontingDomain into meekFrontingAddresses, this client effectively
-// uses that single value as legacy clients do.
-func MakeCompatibleServerEntry(serverEntry *ServerEntry) *ServerEntry {
-	if len(serverEntry.MeekFrontingAddresses) == 0 && serverEntry.MeekFrontingDomain != "" {
-		serverEntry.MeekFrontingAddresses =
-			append(serverEntry.MeekFrontingAddresses, serverEntry.MeekFrontingDomain)
-	}
-
-	return serverEntry
-}
-
-func scanServerEntries(scanner func(*ServerEntry)) error {
-	err := singleton.db.View(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(serverEntriesBucket))
-		cursor := bucket.Cursor()
-
-		for key, value := cursor.First(); key != nil; key, value = cursor.Next() {
-			serverEntry := new(ServerEntry)
-			err := json.Unmarshal(value, serverEntry)
-			if err != nil {
-				return err
-			}
-			scanner(serverEntry)
-		}
-
-		return nil
-	})
-
-	if err != nil {
-		return ContextError(err)
-	}
-
-	return nil
-}
-
-// CountServerEntries returns a count of stored servers for the
-// specified region and protocol.
-func CountServerEntries(region, protocol string) int {
-	checkInitDataStore()
-
-	count := 0
-	err := scanServerEntries(func(serverEntry *ServerEntry) {
-		if (region == "" || serverEntry.Region == region) &&
-			(protocol == "" || serverEntrySupportsProtocol(serverEntry, protocol)) {
-			count += 1
-		}
-	})
-
-	if err != nil {
-		NoticeAlert("CountServerEntries failed: %s", err)
-		return 0
-	}
-
-	return count
-}
-
-// ReportAvailableRegions prints a notice with the available egress regions.
-// Note that this report ignores config.TunnelProtocol.
-func ReportAvailableRegions() {
-	checkInitDataStore()
-
-	regions := make(map[string]bool)
-	err := scanServerEntries(func(serverEntry *ServerEntry) {
-		regions[serverEntry.Region] = true
-	})
-
-	if err != nil {
-		NoticeAlert("ReportAvailableRegions failed: %s", err)
-		return
-	}
-
-	regionList := make([]string, 0, len(regions))
-	for region, _ := range regions {
-		// Some server entries do not have a region, but it makes no sense to return
-		// an empty string as an "available region".
-		if region != "" {
-			regionList = append(regionList, region)
-		}
-	}
-
-	NoticeAvailableEgressRegions(regionList)
-}
-
-// GetServerEntryIpAddresses returns an array containing
-// all stored server IP addresses.
-func GetServerEntryIpAddresses() (ipAddresses []string, err error) {
-	checkInitDataStore()
-
-	ipAddresses = make([]string, 0)
-	err = scanServerEntries(func(serverEntry *ServerEntry) {
-		ipAddresses = append(ipAddresses, serverEntry.IpAddress)
-	})
-
-	if err != nil {
-		return nil, ContextError(err)
-	}
-
-	return ipAddresses, nil
-}
-
-// SetSplitTunnelRoutes updates the cached routes data for
-// the given region. The associated etag is also stored and
-// used to make efficient web requests for updates to the data.
-func SetSplitTunnelRoutes(region, etag string, data []byte) error {
-	checkInitDataStore()
-
-	err := singleton.db.Update(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(splitTunnelRouteETagsBucket))
-		err := bucket.Put([]byte(region), []byte(etag))
-
-		bucket = tx.Bucket([]byte(splitTunnelRouteDataBucket))
-		err = bucket.Put([]byte(region), data)
-		return err
-	})
-
-	if err != nil {
-		return ContextError(err)
-	}
-	return nil
-}
-
-// GetSplitTunnelRoutesETag retrieves the etag for cached routes
-// data for the specified region. If not found, it returns an empty string value.
-func GetSplitTunnelRoutesETag(region string) (etag string, err error) {
-	checkInitDataStore()
-
-	err = singleton.db.View(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(splitTunnelRouteETagsBucket))
-		etag = string(bucket.Get([]byte(region)))
-		return nil
-	})
-
-	if err != nil {
-		return "", ContextError(err)
-	}
-	return etag, nil
-}
-
-// GetSplitTunnelRoutesData retrieves the cached routes data
-// for the specified region. If not found, it returns a nil value.
-func GetSplitTunnelRoutesData(region string) (data []byte, err error) {
-	checkInitDataStore()
-
-	err = singleton.db.View(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(splitTunnelRouteDataBucket))
-		data = bucket.Get([]byte(region))
-		return nil
-	})
-
-	if err != nil {
-		return nil, ContextError(err)
-	}
-	return data, nil
-}
-
-// SetUrlETag stores an ETag for the specfied URL.
-// Note: input URL is treated as a string, and is not
-// encoded or decoded or otherwise canonicalized.
-func SetUrlETag(url, etag string) error {
-	checkInitDataStore()
-
-	err := singleton.db.Update(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(urlETagsBucket))
-		err := bucket.Put([]byte(url), []byte(etag))
-		return err
-	})
-
-	if err != nil {
-		return ContextError(err)
-	}
-	return nil
-}
-
-// GetUrlETag retrieves a previously stored an ETag for the
-// specfied URL. If not found, it returns an empty string value.
-func GetUrlETag(url string) (etag string, err error) {
-	checkInitDataStore()
-
-	err = singleton.db.View(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(urlETagsBucket))
-		etag = string(bucket.Get([]byte(url)))
-		return nil
-	})
-
-	if err != nil {
-		return "", ContextError(err)
-	}
-	return etag, nil
-}
-
-// SetKeyValue stores a key/value pair.
-func SetKeyValue(key, value string) error {
-	checkInitDataStore()
-
-	err := singleton.db.Update(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(keyValueBucket))
-		err := bucket.Put([]byte(key), []byte(value))
-		return err
-	})
-
-	if err != nil {
-		return ContextError(err)
-	}
-	return nil
-}
-
-// GetKeyValue retrieves the value for a given key. If not found,
-// it returns an empty string value.
-func GetKeyValue(key string) (value string, err error) {
-	checkInitDataStore()
-
-	err = singleton.db.View(func(tx *bolt.Tx) error {
-		bucket := tx.Bucket([]byte(keyValueBucket))
-		value = string(bucket.Get([]byte(key)))
-		return nil
-	})
-
-	if err != nil {
-		return "", ContextError(err)
-	}
-	return value, nil
-}

+ 3 - 2
psiphon/httpProxy.go

@@ -71,10 +71,11 @@ var _HTTP_PROXY_TYPE = "HTTP"
 func NewHttpProxy(
 func NewHttpProxy(
 	config *Config,
 	config *Config,
 	untunneledDialConfig *DialConfig,
 	untunneledDialConfig *DialConfig,
-	tunneler Tunneler) (proxy *HttpProxy, err error) {
+	tunneler Tunneler,
+	listenIP string) (proxy *HttpProxy, err error) {
 
 
 	listener, err := net.Listen(
 	listener, err := net.Listen(
-		"tcp", fmt.Sprintf("127.0.0.1:%d", config.LocalHttpProxyPort))
+		"tcp", fmt.Sprintf("%s:%d", listenIP, config.LocalHttpProxyPort))
 	if err != nil {
 	if err != nil {
 		if IsAddressInUseError(err) {
 		if IsAddressInUseError(err) {
 			NoticeHttpProxyPortInUse(config.LocalHttpProxyPort)
 			NoticeHttpProxyPortInUse(config.LocalHttpProxyPort)

+ 1 - 2
psiphon/meekConn.go

@@ -157,8 +157,7 @@ func DialMeek(
 		// classify Psiphon traffic on some CDNs but not others) may throttle non-MiM CDNs so that our server
 		// classify Psiphon traffic on some CDNs but not others) may throttle non-MiM CDNs so that our server
 		// selection always chooses tunnels to the MiM CDN (without any server cert verification, we won't
 		// selection always chooses tunnels to the MiM CDN (without any server cert verification, we won't
 		// exclusively connect to non-MiM CDNs); then the adversary kills the underlying TCP connection after
 		// exclusively connect to non-MiM CDNs); then the adversary kills the underlying TCP connection after
-		// some short period. This is similar to the "unidentified protocol" attack outlined in selectProtocol().
-		// A similar weighted selection defense may be appropriate.
+		// some short period. This is mitigated by the "impaired" protocol classification mechanism.
 
 
 		dialer = NewCustomTLSDialer(
 		dialer = NewCustomTLSDialer(
 			&CustomTLSConfig{
 			&CustomTLSConfig{

+ 31 - 0
psiphon/migrateDataStore.go

@@ -0,0 +1,31 @@
+// +build !windows
+
+/*
+ * Copyright (c) 2015, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package psiphon
+
+// Stub function to return an empty list for non-Windows builds
+func prepareMigrationEntries(config *Config) []*ServerEntry {
+	return nil
+}
+
+// Stub function to return immediately for non-Windows builds
+func migrateEntries(serverEntries []*ServerEntry, legacyDataStoreFilename string) {
+}

+ 243 - 0
psiphon/migrateDataStore_windows.go

@@ -0,0 +1,243 @@
+/*
+ * Copyright (c) 2015, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package psiphon
+
+import (
+	"database/sql"
+	"encoding/json"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	_ "github.com/Psiphon-Inc/go-sqlite3"
+)
+
+var legacyDb *sql.DB
+
+func prepareMigrationEntries(config *Config) []*ServerEntry {
+	var migratableServerEntries []*ServerEntry
+
+	// If DATA_STORE_FILENAME does not exist on disk
+	if _, err := os.Stat(filepath.Join(config.DataStoreDirectory, DATA_STORE_FILENAME)); os.IsNotExist(err) {
+		// If LEGACY_DATA_STORE_FILENAME exists on disk
+		if _, err := os.Stat(filepath.Join(config.DataStoreDirectory, LEGACY_DATA_STORE_FILENAME)); err == nil {
+
+			legacyDb, err = sql.Open("sqlite3", fmt.Sprintf("file:%s?cache=private&mode=rwc", filepath.Join(config.DataStoreDirectory, LEGACY_DATA_STORE_FILENAME)))
+			defer legacyDb.Close()
+
+			if err != nil {
+				NoticeAlert("prepareMigrationEntries: sql.Open failed: %s", err)
+				return nil
+			}
+
+			initialization := "pragma journal_mode=WAL;\n"
+			_, err = legacyDb.Exec(initialization)
+			if err != nil {
+				NoticeAlert("prepareMigrationEntries: sql.DB.Exec failed: %s", err)
+				return nil
+			}
+
+			iterator, err := newlegacyServerEntryIterator(config)
+			if err != nil {
+				NoticeAlert("prepareMigrationEntries: newlegacyServerEntryIterator failed: %s", err)
+				return nil
+			}
+			defer iterator.Close()
+
+			for {
+				serverEntry, err := iterator.Next()
+				if err != nil {
+					NoticeAlert("prepareMigrationEntries: legacyServerEntryIterator.Next failed: %s", err)
+					break
+				}
+				if serverEntry == nil {
+					break
+				}
+
+				migratableServerEntries = append(migratableServerEntries, serverEntry)
+			}
+			NoticeInfo("%d server entries prepared for data store migration", len(migratableServerEntries))
+		}
+	}
+
+	return migratableServerEntries
+}
+
+// migrateEntries calls the BoltDB data store method to shuffle
+// and store an array of server entries (StoreServerEntries)
+// Failing to migrate entries, or delete the legacy file is never fatal
+func migrateEntries(serverEntries []*ServerEntry, legacyDataStoreFilename string) {
+	checkInitDataStore()
+
+	err := StoreServerEntries(serverEntries, false)
+	if err != nil {
+		NoticeAlert("migrateEntries: StoreServerEntries failed: %s", err)
+	} else {
+		// Retain server affinity from old datastore by taking the first
+		// array element (previous top ranked server) and promoting it
+		// to the top rank before the server selection process begins
+		err = PromoteServerEntry(serverEntries[0].IpAddress)
+		if err != nil {
+			NoticeAlert("migrateEntries: PromoteServerEntry failed: %s", err)
+		}
+
+		NoticeAlert("%d server entries successfully migrated to new data store", len(serverEntries))
+	}
+
+	err = os.Remove(legacyDataStoreFilename)
+	if err != nil {
+		NoticeAlert("migrateEntries: failed to delete legacy data store file '%s': %s", legacyDataStoreFilename, err)
+	}
+
+	return
+}
+
+// This code is copied from the dataStore.go code used to operate the legacy
+// SQLite datastore. The word "legacy" was added to all of the method names to avoid
+// namespace conflicts with the methods used to operate the BoltDB datastore
+
+// legacyServerEntryIterator is used to iterate over
+// stored server entries in rank order.
+type legacyServerEntryIterator struct {
+	shuffleHeadLength int
+	transaction       *sql.Tx
+	cursor            *sql.Rows
+}
+
+// newLegacyServerEntryIterator creates a new legacyServerEntryIterator
+func newlegacyServerEntryIterator(config *Config) (iterator *legacyServerEntryIterator, err error) {
+
+	iterator = &legacyServerEntryIterator{
+		shuffleHeadLength: config.TunnelPoolSize,
+	}
+	err = iterator.Reset()
+	if err != nil {
+		return nil, err
+	}
+	return iterator, nil
+}
+
+// Close cleans up resources associated with a legacyServerEntryIterator.
+func (iterator *legacyServerEntryIterator) Close() {
+	if iterator.cursor != nil {
+		iterator.cursor.Close()
+	}
+	iterator.cursor = nil
+	if iterator.transaction != nil {
+		iterator.transaction.Rollback()
+	}
+	iterator.transaction = nil
+}
+
+// Next returns the next server entry, by rank, for a legacyServerEntryIterator.
+// Returns nil with no error when there is no next item.
+func (iterator *legacyServerEntryIterator) Next() (serverEntry *ServerEntry, err error) {
+	defer func() {
+		if err != nil {
+			iterator.Close()
+		}
+	}()
+
+	if !iterator.cursor.Next() {
+		err = iterator.cursor.Err()
+		if err != nil {
+			return nil, ContextError(err)
+		}
+		// There is no next item
+		return nil, nil
+	}
+
+	var data []byte
+	err = iterator.cursor.Scan(&data)
+	if err != nil {
+		return nil, ContextError(err)
+	}
+	serverEntry = new(ServerEntry)
+	err = json.Unmarshal(data, serverEntry)
+	if err != nil {
+		return nil, ContextError(err)
+	}
+
+	return MakeCompatibleServerEntry(serverEntry), nil
+}
+
+// Reset a NewlegacyServerEntryIterator to the start of its cycle. The next
+// call to Next will return the first server entry.
+func (iterator *legacyServerEntryIterator) Reset() error {
+	iterator.Close()
+
+	transaction, err := legacyDb.Begin()
+	if err != nil {
+		return ContextError(err)
+	}
+	var cursor *sql.Rows
+
+	// This query implements the Psiphon server candidate selection
+	// algorithm: the first TunnelPoolSize server candidates are in rank
+	// (priority) order, to favor previously successful servers; then the
+	// remaining long tail is shuffled to raise up less recent candidates.
+
+	whereClause, whereParams := makeServerEntryWhereClause(nil)
+	headLength := iterator.shuffleHeadLength
+	queryFormat := `
+		select data from serverEntry %s
+		order by case
+		when rank > coalesce((select rank from serverEntry %s order by rank desc limit ?, 1), -1) then rank
+		else abs(random())%%((select rank from serverEntry %s order by rank desc limit ?, 1))
+		end desc;`
+	query := fmt.Sprintf(queryFormat, whereClause, whereClause, whereClause)
+	params := make([]interface{}, 0)
+	params = append(params, whereParams...)
+	params = append(params, whereParams...)
+	params = append(params, headLength)
+	params = append(params, whereParams...)
+	params = append(params, headLength)
+
+	cursor, err = transaction.Query(query, params...)
+	if err != nil {
+		transaction.Rollback()
+		return ContextError(err)
+	}
+	iterator.transaction = transaction
+	iterator.cursor = cursor
+	return nil
+}
+
+func makeServerEntryWhereClause(excludeIds []string) (whereClause string, whereParams []interface{}) {
+	whereClause = ""
+	whereParams = make([]interface{}, 0)
+	if len(excludeIds) > 0 {
+		if len(whereClause) > 0 {
+			whereClause += " and"
+		} else {
+			whereClause += " where"
+		}
+		whereClause += " id in ("
+		for index, id := range excludeIds {
+			if index > 0 {
+				whereClause += ", "
+			}
+			whereClause += "?"
+			whereParams = append(whereParams, id)
+		}
+		whereClause += ")"
+	}
+	return whereClause, whereParams
+}

+ 61 - 0
psiphon/net.go

@@ -20,9 +20,12 @@
 package psiphon
 package psiphon
 
 
 import (
 import (
+	"crypto/x509"
 	"fmt"
 	"fmt"
 	"io"
 	"io"
 	"net"
 	"net"
+	"net/http"
+	"net/url"
 	"reflect"
 	"reflect"
 	"sync"
 	"sync"
 	"time"
 	"time"
@@ -241,3 +244,61 @@ func ResolveIP(host string, conn net.Conn) (addrs []net.IP, ttls []time.Duration
 	}
 	}
 	return addrs, ttls, nil
 	return addrs, ttls, nil
 }
 }
+
+// MakeUntunneledHttpsClient returns a net/http.Client which is
+// configured to use custom dialing features -- including BindToDevice,
+// UseIndistinguishableTLS, etc. -- for a specific HTTPS request URL.
+// If verifyLegacyCertificate is not nil, it's used for certificate
+// verification.
+// Because UseIndistinguishableTLS requires a hack to work with
+// net/http, MakeUntunneledHttpClient may return a modified request URL
+// to be used. Callers should always use this return value to make
+// requests, not the input value.
+func MakeUntunneledHttpsClient(
+	dialConfig *DialConfig,
+	verifyLegacyCertificate *x509.Certificate,
+	requestUrl string,
+	requestTimeout time.Duration) (*http.Client, string, error) {
+
+	dialer := NewCustomTLSDialer(
+		// Note: when verifyLegacyCertificate is not nil, some
+		// of the other CustomTLSConfig is overridden.
+		&CustomTLSConfig{
+			Dial: NewTCPDialer(dialConfig),
+			VerifyLegacyCertificate:       verifyLegacyCertificate,
+			SendServerName:                true,
+			SkipVerify:                    false,
+			UseIndistinguishableTLS:       dialConfig.UseIndistinguishableTLS,
+			TrustedCACertificatesFilename: dialConfig.TrustedCACertificatesFilename,
+		})
+
+	urlComponents, err := url.Parse(requestUrl)
+	if err != nil {
+		return nil, "", ContextError(err)
+	}
+
+	// Change the scheme to "http"; otherwise http.Transport will try to do
+	// another TLS handshake inside the explicit TLS session. Also need to
+	// force an explicit port, as the default for "http", 80, won't talk TLS.
+	urlComponents.Scheme = "http"
+	host, port, err := net.SplitHostPort(urlComponents.Host)
+	if err != nil {
+		// Assume there's no port
+		host = urlComponents.Host
+		port = ""
+	}
+	if port == "" {
+		port = "443"
+	}
+	urlComponents.Host = net.JoinHostPort(host, port)
+
+	transport := &http.Transport{
+		Dial: dialer,
+	}
+	httpClient := &http.Client{
+		Timeout:   requestTimeout,
+		Transport: transport,
+	}
+
+	return httpClient, urlComponents.String(), nil
+}

+ 79 - 0
psiphon/networkInterface.go

@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2015, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package psiphon
+
+import (
+	"net"
+)
+
+// Take in an interface name ("lo", "eth0", "any") passed from either
+// a config setting or by -interface command line flag and return the IP
+// address associated with it.
+// If no interface is provided use the default loopback interface (127.0.0.1).
+// If "any" is passed then listen on 0.0.0.0
+func GetInterfaceIPAddress(listenInterface string) (string, error) {
+	var ip net.IP
+
+	if listenInterface == "" {
+		ip = net.ParseIP("127.0.0.1")
+	} else if listenInterface == "any" {
+		ip = net.ParseIP("0.0.0.0")
+	} else {
+		//Get a list of interfaces
+		availableInterfaces, err := net.Interfaces()
+		if err != nil {
+			return "", ContextError(err)
+		}
+
+		var selectedInterface net.Interface
+		found := false
+		for _, networkInterface := range availableInterfaces {
+			if listenInterface == networkInterface.Name {
+				NoticeInfo("Using interface: %s", networkInterface.Name)
+				selectedInterface = networkInterface
+				found = true
+				break
+			}
+		}
+		if !found {
+			NoticeAlert("Interface not found: %s", listenInterface)
+			ip = net.ParseIP("127.0.0.1")
+		} else {
+			netAddrs, err := selectedInterface.Addrs()
+			if err != nil {
+				return "", ContextError(err)
+			}
+
+			for _, ipAddr := range netAddrs {
+				ip, _, err = net.ParseCIDR(ipAddr.String())
+				if err != nil {
+					return "", ContextError(err)
+				}
+				if ip.To4() != nil {
+					break
+				}
+			}
+		}
+	}
+
+	NoticeInfo("Listening on IP address: %s", ip.String())
+
+	return ip.String(), nil
+}

+ 15 - 7
psiphon/notice.go

@@ -91,12 +91,12 @@ func NoticeInfo(format string, args ...interface{}) {
 	outputNotice("Info", false, "message", fmt.Sprintf(format, args...))
 	outputNotice("Info", false, "message", fmt.Sprintf(format, args...))
 }
 }
 
 
-// NoticeInfo is an alert message; typically a recoverable error condition
+// NoticeAlert is an alert message; typically a recoverable error condition
 func NoticeAlert(format string, args ...interface{}) {
 func NoticeAlert(format string, args ...interface{}) {
 	outputNotice("Alert", false, "message", fmt.Sprintf(format, args...))
 	outputNotice("Alert", false, "message", fmt.Sprintf(format, args...))
 }
 }
 
 
-// NoticeInfo is an error message; typically an unrecoverable error condition
+// NoticeError is an error message; typically an unrecoverable error condition
 func NoticeError(format string, args ...interface{}) {
 func NoticeError(format string, args ...interface{}) {
 	outputNotice("Error", true, "message", fmt.Sprintf(format, args...))
 	outputNotice("Error", true, "message", fmt.Sprintf(format, args...))
 }
 }
@@ -124,8 +124,8 @@ func NoticeConnectingServer(ipAddress, region, protocol, frontingAddress string)
 }
 }
 
 
 // NoticeActiveTunnel is a successful connection that is used as an active tunnel for port forwarding
 // NoticeActiveTunnel is a successful connection that is used as an active tunnel for port forwarding
-func NoticeActiveTunnel(ipAddress string) {
-	outputNotice("ActiveTunnel", false, "ipAddress", ipAddress)
+func NoticeActiveTunnel(ipAddress, protocol string) {
+	outputNotice("ActiveTunnel", false, "ipAddress", ipAddress, "protocol", protocol)
 }
 }
 
 
 // NoticeSocksProxyPortInUse is a failure to use the configured LocalSocksProxyPort
 // NoticeSocksProxyPortInUse is a failure to use the configured LocalSocksProxyPort
@@ -201,9 +201,17 @@ func NoticeClientUpgradeDownloaded(filename string) {
 }
 }
 
 
 // NoticeBytesTransferred reports how many tunneled bytes have been
 // NoticeBytesTransferred reports how many tunneled bytes have been
-// transferred since the last NoticeBytesTransferred.
-func NoticeBytesTransferred(sent, received int64) {
-	outputNotice("BytesTransferred", false, "sent", sent, "received", received)
+// transferred since the last NoticeBytesTransferred, for the tunnel
+// to the server at ipAddress.
+func NoticeBytesTransferred(ipAddress string, sent, received int64) {
+	outputNotice("BytesTransferred", false, "ipAddress", ipAddress, "sent", sent, "received", received)
+}
+
+// NoticeTotalBytesTransferred reports how many tunneled bytes have been
+// transferred in total up to this point, for the tunnel to the server
+// at ipAddress.
+func NoticeTotalBytesTransferred(ipAddress string, sent, received int64) {
+	outputNotice("TotalBytesTransferred", false, "ipAddress", ipAddress, "sent", sent, "received", received)
 }
 }
 
 
 // NoticeLocalProxyError reports a local proxy error message. Repetitive
 // NoticeLocalProxyError reports a local proxy error message. Repetitive

+ 3 - 38
psiphon/remoteServerList.go

@@ -23,9 +23,7 @@ import (
 	"errors"
 	"errors"
 	"fmt"
 	"fmt"
 	"io/ioutil"
 	"io/ioutil"
-	"net"
 	"net/http"
 	"net/http"
-	"net/url"
 )
 )
 
 
 // FetchRemoteServerList downloads a remote server list JSON record from
 // FetchRemoteServerList downloads a remote server list JSON record from
@@ -42,46 +40,13 @@ func FetchRemoteServerList(config *Config, dialConfig *DialConfig) (err error) {
 		return ContextError(errors.New("remote server list signature public key blank"))
 		return ContextError(errors.New("remote server list signature public key blank"))
 	}
 	}
 
 
-	dialer := NewTCPDialer(dialConfig)
-
-	// When the URL is HTTPS, use the custom TLS dialer with the
-	// UseIndistinguishableTLS option.
-	// TODO: refactor into helper function
-	requestUrl, err := url.Parse(config.RemoteServerListUrl)
+	httpClient, requestUrl, err := MakeUntunneledHttpsClient(
+		dialConfig, nil, config.RemoteServerListUrl, FETCH_REMOTE_SERVER_LIST_TIMEOUT)
 	if err != nil {
 	if err != nil {
 		return ContextError(err)
 		return ContextError(err)
 	}
 	}
-	if requestUrl.Scheme == "https" {
-		dialer = NewCustomTLSDialer(
-			&CustomTLSConfig{
-				Dial:                          dialer,
-				SendServerName:                true,
-				SkipVerify:                    false,
-				UseIndistinguishableTLS:       config.UseIndistinguishableTLS,
-				TrustedCACertificatesFilename: config.TrustedCACertificatesFilename,
-			})
-
-		// Change the scheme to "http"; otherwise http.Transport will try to do
-		// another TLS handshake inside the explicit TLS session. Also need to
-		// force the port to 443,as the default for "http", 80, won't talk TLS.
-		requestUrl.Scheme = "http"
-		host, _, err := net.SplitHostPort(requestUrl.Host)
-		if err != nil {
-			// Assume there's no port
-			host = requestUrl.Host
-		}
-		requestUrl.Host = net.JoinHostPort(host, "443")
-	}
-
-	transport := &http.Transport{
-		Dial: dialer,
-	}
-	httpClient := http.Client{
-		Timeout:   FETCH_REMOTE_SERVER_LIST_TIMEOUT,
-		Transport: transport,
-	}
 
 
-	request, err := http.NewRequest("GET", requestUrl.String(), nil)
+	request, err := http.NewRequest("GET", requestUrl, nil)
 	if err != nil {
 	if err != nil {
 		return ContextError(err)
 		return ContextError(err)
 	}
 	}

+ 397 - 136
psiphon/serverApi.go

@@ -31,30 +31,39 @@ import (
 	"net"
 	"net"
 	"net/http"
 	"net/http"
 	"strconv"
 	"strconv"
+	"sync/atomic"
 
 
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/transferstats"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/transferstats"
 )
 )
 
 
-// Session is a utility struct which holds all of the data associated
-// with a Psiphon session. In addition to the established tunnel, this
-// includes the session ID (used for Psiphon API requests) and a http
+// ServerContext is a utility struct which holds all of the data associated
+// with a Psiphon server connection. In addition to the established tunnel, this
+// includes data associated with Psiphon API requests and a persistent http
 // client configured to make tunneled Psiphon API requests.
 // client configured to make tunneled Psiphon API requests.
-type Session struct {
-	sessionId            string
-	baseRequestUrl       string
-	psiphonHttpsClient   *http.Client
-	statsRegexps         *transferstats.Regexps
-	statsServerId        string
-	clientRegion         string
-	clientUpgradeVersion string
+type ServerContext struct {
+	sessionId                string
+	tunnelNumber             int64
+	baseRequestUrl           string
+	psiphonHttpsClient       *http.Client
+	statsRegexps             *transferstats.Regexps
+	clientRegion             string
+	clientUpgradeVersion     string
+	serverHandshakeTimestamp string
 }
 }
 
 
-// MakeSessionId creates a new session ID. Making the session ID is not done
-// in NewSession because:
-// (1) the transport needs to send the ID in the SSH credentials before the tunnel
-//     is established and NewSession performs a handshake on an established tunnel.
-// (2) the same session ID is used across multi-tunnel controller runs, where each
-//     tunnel has its own Session instance.
+// nextTunnelNumber is a monotonically increasing number assigned to each
+// successive tunnel connection. The sessionId and tunnelNumber together
+// form a globally unique identifier for tunnels, which is used for
+// stats. Note that the number is increasing but not necessarily
+// consecutive for each active tunnel in session.
+var nextTunnelNumber int64
+
+// MakeSessionId creates a new session ID. The same session ID is used across
+// multi-tunnel controller runs, where each tunnel has its own ServerContext
+// instance.
+// In server-side stats, we now consider a "session" to be the lifetime of the
+// Controller (e.g., the user's commanded start and stop) and we measure this
+// duration as well as the duration of each tunnel within the session.
 func MakeSessionId() (sessionId string, err error) {
 func MakeSessionId() (sessionId string, err error) {
 	randomId, err := MakeSecureRandomBytes(PSIPHON_API_CLIENT_SESSION_ID_LENGTH)
 	randomId, err := MakeSecureRandomBytes(PSIPHON_API_CLIENT_SESSION_ID_LENGTH)
 	if err != nil {
 	if err != nil {
@@ -63,115 +72,35 @@ func MakeSessionId() (sessionId string, err error) {
 	return hex.EncodeToString(randomId), nil
 	return hex.EncodeToString(randomId), nil
 }
 }
 
 
-// NewSession makes the tunnelled handshake request to the
-// Psiphon server and returns a Session struct, initialized with the
-// session ID, for use with subsequent Psiphon server API requests (e.g.,
-// periodic connected and status requests).
-func NewSession(config *Config, tunnel *Tunnel, sessionId string) (session *Session, err error) {
+// NewServerContext makes the tunnelled handshake request to the Psiphon server
+// and returns a ServerContext struct for use with subsequent Psiphon server API
+// requests (e.g., periodic connected and status requests).
+func NewServerContext(tunnel *Tunnel, sessionId string) (*ServerContext, error) {
 
 
 	psiphonHttpsClient, err := makePsiphonHttpsClient(tunnel)
 	psiphonHttpsClient, err := makePsiphonHttpsClient(tunnel)
 	if err != nil {
 	if err != nil {
 		return nil, ContextError(err)
 		return nil, ContextError(err)
 	}
 	}
-	session = &Session{
+
+	serverContext := &ServerContext{
 		sessionId:          sessionId,
 		sessionId:          sessionId,
-		baseRequestUrl:     makeBaseRequestUrl(config, tunnel, sessionId),
+		tunnelNumber:       atomic.AddInt64(&nextTunnelNumber, 1),
+		baseRequestUrl:     makeBaseRequestUrl(tunnel, "", sessionId),
 		psiphonHttpsClient: psiphonHttpsClient,
 		psiphonHttpsClient: psiphonHttpsClient,
-		statsServerId:      tunnel.serverEntry.IpAddress,
 	}
 	}
 
 
-	err = session.doHandshakeRequest()
+	err = serverContext.doHandshakeRequest()
 	if err != nil {
 	if err != nil {
 		return nil, ContextError(err)
 		return nil, ContextError(err)
 	}
 	}
 
 
-	return session, nil
-}
-
-// DoConnectedRequest performs the connected API request. This request is
-// used for statistics. The server returns a last_connected token for
-// the client to store and send next time it connects. This token is
-// a timestamp (using the server clock, and should be rounded to the
-// nearest hour) which is used to determine when a connection represents
-// a unique user for a time period.
-func (session *Session) DoConnectedRequest() error {
-	const DATA_STORE_LAST_CONNECTED_KEY = "lastConnected"
-	lastConnected, err := GetKeyValue(DATA_STORE_LAST_CONNECTED_KEY)
-	if err != nil {
-		return ContextError(err)
-	}
-	if lastConnected == "" {
-		lastConnected = "None"
-	}
-	url := session.buildRequestUrl(
-		"connected",
-		&ExtraParam{"session_id", session.sessionId},
-		&ExtraParam{"last_connected", lastConnected})
-	responseBody, err := session.doGetRequest(url)
-	if err != nil {
-		return ContextError(err)
-	}
-
-	var response struct {
-		ConnectedTimestamp string `json:"connected_timestamp"`
-	}
-	err = json.Unmarshal(responseBody, &response)
-	if err != nil {
-		return ContextError(err)
-	}
-
-	err = SetKeyValue(DATA_STORE_LAST_CONNECTED_KEY, response.ConnectedTimestamp)
-	if err != nil {
-		return ContextError(err)
-	}
-	return nil
-}
-
-// ServerID provides a unique identifier for the server the session connects to.
-// This ID is consistent between multiple sessions/tunnels connected to that server.
-func (session *Session) StatsServerID() string {
-	return session.statsServerId
-}
-
-// StatsRegexps gets the Regexps used for the statistics for this tunnel.
-func (session *Session) StatsRegexps() *transferstats.Regexps {
-	return session.statsRegexps
-}
-
-// DoStatusRequest makes a /status request to the server, sending session stats.
-func (session *Session) DoStatusRequest(statsPayload json.Marshaler) error {
-	statsPayloadJSON, err := json.Marshal(statsPayload)
-	if err != nil {
-		return ContextError(err)
-	}
-
-	// Add a random amount of padding to help prevent stats updates from being
-	// a predictable size (which often happens when the connection is quiet).
-	padding := MakeSecureRandomPadding(0, PSIPHON_API_STATUS_REQUEST_PADDING_MAX_BYTES)
-
-	// "connected" is a legacy parameter. This client does not report when
-	// it has disconnected.
-
-	url := session.buildRequestUrl(
-		"status",
-		&ExtraParam{"session_id", session.sessionId},
-		&ExtraParam{"connected", "1"},
-		// TODO: base64 encoding of padding means the padding
-		// size is not exactly [0, PADDING_MAX_BYTES]
-		&ExtraParam{"padding", base64.StdEncoding.EncodeToString(padding)})
-
-	err = session.doPostRequest(url, "application/json", bytes.NewReader(statsPayloadJSON))
-	if err != nil {
-		return ContextError(err)
-	}
-
-	return nil
+	return serverContext, nil
 }
 }
 
 
 // doHandshakeRequest performs the handshake API request. The handshake
 // doHandshakeRequest performs the handshake API request. The handshake
 // returns upgrade info, newly discovered server entries -- which are
 // returns upgrade info, newly discovered server entries -- which are
 // stored -- and sponsor info (home pages, stat regexes).
 // stored -- and sponsor info (home pages, stat regexes).
-func (session *Session) doHandshakeRequest() error {
+func (serverContext *ServerContext) doHandshakeRequest() error {
 	extraParams := make([]*ExtraParam, 0)
 	extraParams := make([]*ExtraParam, 0)
 	serverEntryIpAddresses, err := GetServerEntryIpAddresses()
 	serverEntryIpAddresses, err := GetServerEntryIpAddresses()
 	if err != nil {
 	if err != nil {
@@ -182,8 +111,8 @@ func (session *Session) doHandshakeRequest() error {
 	for _, ipAddress := range serverEntryIpAddresses {
 	for _, ipAddress := range serverEntryIpAddresses {
 		extraParams = append(extraParams, &ExtraParam{"known_server", ipAddress})
 		extraParams = append(extraParams, &ExtraParam{"known_server", ipAddress})
 	}
 	}
-	url := session.buildRequestUrl("handshake", extraParams...)
-	responseBody, err := session.doGetRequest(url)
+	url := buildRequestUrl(serverContext.baseRequestUrl, "handshake", extraParams...)
+	responseBody, err := serverContext.doGetRequest(url)
 	if err != nil {
 	if err != nil {
 		return ContextError(err)
 		return ContextError(err)
 	}
 	}
@@ -210,14 +139,15 @@ func (session *Session) doHandshakeRequest() error {
 		HttpsRequestRegexes  []map[string]string `json:"https_request_regexes"`
 		HttpsRequestRegexes  []map[string]string `json:"https_request_regexes"`
 		EncodedServerList    []string            `json:"encoded_server_list"`
 		EncodedServerList    []string            `json:"encoded_server_list"`
 		ClientRegion         string              `json:"client_region"`
 		ClientRegion         string              `json:"client_region"`
+		ServerTimestamp      string              `json:"server_timestamp"`
 	}
 	}
 	err = json.Unmarshal(configLine, &handshakeConfig)
 	err = json.Unmarshal(configLine, &handshakeConfig)
 	if err != nil {
 	if err != nil {
 		return ContextError(err)
 		return ContextError(err)
 	}
 	}
 
 
-	session.clientRegion = handshakeConfig.ClientRegion
-	NoticeClientRegion(session.clientRegion)
+	serverContext.clientRegion = handshakeConfig.ClientRegion
+	NoticeClientRegion(serverContext.clientRegion)
 
 
 	var decodedServerEntries []*ServerEntry
 	var decodedServerEntries []*ServerEntry
 
 
@@ -250,13 +180,13 @@ func (session *Session) doHandshakeRequest() error {
 		NoticeHomepage(homepage)
 		NoticeHomepage(homepage)
 	}
 	}
 
 
-	session.clientUpgradeVersion = handshakeConfig.UpgradeClientVersion
+	serverContext.clientUpgradeVersion = handshakeConfig.UpgradeClientVersion
 	if handshakeConfig.UpgradeClientVersion != "" {
 	if handshakeConfig.UpgradeClientVersion != "" {
 		NoticeClientUpgradeAvailable(handshakeConfig.UpgradeClientVersion)
 		NoticeClientUpgradeAvailable(handshakeConfig.UpgradeClientVersion)
 	}
 	}
 
 
 	var regexpsNotices []string
 	var regexpsNotices []string
-	session.statsRegexps, regexpsNotices = transferstats.MakeRegexps(
+	serverContext.statsRegexps, regexpsNotices = transferstats.MakeRegexps(
 		handshakeConfig.PageViewRegexes,
 		handshakeConfig.PageViewRegexes,
 		handshakeConfig.HttpsRequestRegexes)
 		handshakeConfig.HttpsRequestRegexes)
 
 
@@ -264,15 +194,345 @@ func (session *Session) doHandshakeRequest() error {
 		NoticeAlert(notice)
 		NoticeAlert(notice)
 	}
 	}
 
 
+	serverContext.serverHandshakeTimestamp = handshakeConfig.ServerTimestamp
+
+	return nil
+}
+
+// DoConnectedRequest performs the connected API request. This request is
+// used for statistics. The server returns a last_connected token for
+// the client to store and send next time it connects. This token is
+// a timestamp (using the server clock, and should be rounded to the
+// nearest hour) which is used to determine when a connection represents
+// a unique user for a time period.
+func (serverContext *ServerContext) DoConnectedRequest() error {
+	const DATA_STORE_LAST_CONNECTED_KEY = "lastConnected"
+	lastConnected, err := GetKeyValue(DATA_STORE_LAST_CONNECTED_KEY)
+	if err != nil {
+		return ContextError(err)
+	}
+	if lastConnected == "" {
+		lastConnected = "None"
+	}
+	url := buildRequestUrl(
+		serverContext.baseRequestUrl,
+		"connected",
+		&ExtraParam{"session_id", serverContext.sessionId},
+		&ExtraParam{"last_connected", lastConnected})
+	responseBody, err := serverContext.doGetRequest(url)
+	if err != nil {
+		return ContextError(err)
+	}
+
+	var response struct {
+		ConnectedTimestamp string `json:"connected_timestamp"`
+	}
+	err = json.Unmarshal(responseBody, &response)
+	if err != nil {
+		return ContextError(err)
+	}
+
+	err = SetKeyValue(DATA_STORE_LAST_CONNECTED_KEY, response.ConnectedTimestamp)
+	if err != nil {
+		return ContextError(err)
+	}
 	return nil
 	return nil
 }
 }
 
 
+// StatsRegexps gets the Regexps used for the statistics for this tunnel.
+func (serverContext *ServerContext) StatsRegexps() *transferstats.Regexps {
+	return serverContext.statsRegexps
+}
+
+// DoStatusRequest makes a /status request to the server, sending session stats.
+func (serverContext *ServerContext) DoStatusRequest(tunnel *Tunnel) error {
+
+	url := makeStatusRequestUrl(serverContext.sessionId, serverContext.baseRequestUrl, true)
+
+	payload, payloadInfo, err := makeStatusRequestPayload(tunnel.serverEntry.IpAddress)
+	if err != nil {
+		return ContextError(err)
+	}
+
+	err = serverContext.doPostRequest(url, "application/json", bytes.NewReader(payload))
+	if err != nil {
+
+		// Resend the transfer stats and tunnel stats later
+		// Note: potential duplicate reports if the server received and processed
+		// the request but the client failed to receive the response.
+		putBackStatusRequestPayload(payloadInfo)
+
+		return ContextError(err)
+	}
+	confirmStatusRequestPayload(payloadInfo)
+
+	return nil
+}
+
+func makeStatusRequestUrl(sessionId, baseRequestUrl string, isTunneled bool) string {
+
+	// Add a random amount of padding to help prevent stats updates from being
+	// a predictable size (which often happens when the connection is quiet).
+	padding := MakeSecureRandomPadding(0, PSIPHON_API_STATUS_REQUEST_PADDING_MAX_BYTES)
+
+	// Legacy clients set "connected" to "0" when disconnecting, and this value
+	// is used to calculate session duration estimates. This is now superseded
+	// by explicit tunnel stats duration reporting.
+	// The legacy method of reconstructing session durations is not compatible
+	// with this client's connected request retries and asynchronous final
+	// status request attempts. So we simply set this "connected" flag to reflect
+	// whether the request is sent tunneled or not.
+
+	connected := "1"
+	if !isTunneled {
+		connected = "0"
+	}
+
+	return buildRequestUrl(
+		baseRequestUrl,
+		"status",
+		&ExtraParam{"session_id", sessionId},
+		&ExtraParam{"connected", connected},
+		// TODO: base64 encoding of padding means the padding
+		// size is not exactly [0, PADDING_MAX_BYTES]
+		&ExtraParam{"padding", base64.StdEncoding.EncodeToString(padding)})
+}
+
+// statusRequestPayloadInfo is a temporary structure for data used to
+// either "clear" or "put back" status request payload data depending
+// on whether or not the request succeeded.
+type statusRequestPayloadInfo struct {
+	serverId      string
+	transferStats *transferstats.AccumulatedStats
+	tunnelStats   [][]byte
+}
+
+func makeStatusRequestPayload(
+	serverId string) ([]byte, *statusRequestPayloadInfo, error) {
+
+	transferStats := transferstats.TakeOutStatsForServer(serverId)
+	tunnelStats, err := TakeOutUnreportedTunnelStats(
+		PSIPHON_API_TUNNEL_STATS_MAX_COUNT)
+	if err != nil {
+		NoticeAlert(
+			"TakeOutUnreportedTunnelStats failed: %s", ContextError(err))
+		tunnelStats = nil
+		// Proceed with transferStats only
+	}
+	payloadInfo := &statusRequestPayloadInfo{
+		serverId, transferStats, tunnelStats}
+
+	payload := make(map[string]interface{})
+
+	hostBytes, bytesTransferred := transferStats.GetStatsForStatusRequest()
+	payload["host_bytes"] = hostBytes
+	payload["bytes_transferred"] = bytesTransferred
+
+	// We're not recording these fields, but the server requires them.
+	payload["page_views"] = make([]string, 0)
+	payload["https_requests"] = make([]string, 0)
+
+	// Tunnel stats records are already in JSON format
+	jsonTunnelStats := make([]json.RawMessage, len(tunnelStats))
+	for i, tunnelStatsRecord := range tunnelStats {
+		jsonTunnelStats[i] = json.RawMessage(tunnelStatsRecord)
+	}
+	payload["tunnel_stats"] = jsonTunnelStats
+
+	jsonPayload, err := json.Marshal(payload)
+	if err != nil {
+
+		// Send the transfer stats and tunnel stats later
+		putBackStatusRequestPayload(payloadInfo)
+
+		return nil, nil, ContextError(err)
+	}
+
+	return jsonPayload, payloadInfo, nil
+}
+
+func putBackStatusRequestPayload(payloadInfo *statusRequestPayloadInfo) {
+	transferstats.PutBackStatsForServer(
+		payloadInfo.serverId, payloadInfo.transferStats)
+	err := PutBackUnreportedTunnelStats(payloadInfo.tunnelStats)
+	if err != nil {
+		// These tunnel stats records won't be resent under after a
+		// datastore re-initialization.
+		NoticeAlert(
+			"PutBackUnreportedTunnelStats failed: %s", ContextError(err))
+	}
+}
+
+func confirmStatusRequestPayload(payloadInfo *statusRequestPayloadInfo) {
+	err := ClearReportedTunnelStats(payloadInfo.tunnelStats)
+	if err != nil {
+		// These tunnel stats records may be resent.
+		NoticeAlert(
+			"ClearReportedTunnelStats failed: %s", ContextError(err))
+	}
+}
+
+// TryUntunneledStatusRequest makes direct connections to the specified
+// server (if supported) in an attempt to send useful bytes transferred
+// and tunnel duration stats after a tunnel has alreay failed.
+// The tunnel is assumed to be closed, but its config, protocol, and
+// context values must still be valid.
+// TryUntunneledStatusRequest emits notices detailing failed attempts.
+func TryUntunneledStatusRequest(tunnel *Tunnel, isShutdown bool) error {
+
+	for _, port := range tunnel.serverEntry.GetDirectWebRequestPorts() {
+		err := doUntunneledStatusRequest(tunnel, port, isShutdown)
+		if err == nil {
+			return nil
+		}
+		NoticeAlert("doUntunneledStatusRequest failed for %s:%s: %s",
+			tunnel.serverEntry.IpAddress, port, err)
+	}
+
+	return errors.New("all attempts failed")
+}
+
+// doUntunneledStatusRequest attempts an untunneled stratus request.
+func doUntunneledStatusRequest(
+	tunnel *Tunnel, port string, isShutdown bool) error {
+
+	url := makeStatusRequestUrl(
+		tunnel.serverContext.sessionId,
+		makeBaseRequestUrl(tunnel, port, tunnel.serverContext.sessionId),
+		false)
+
+	certificate, err := DecodeCertificate(tunnel.serverEntry.WebServerCertificate)
+	if err != nil {
+		return ContextError(err)
+	}
+
+	timeout := PSIPHON_API_SERVER_TIMEOUT
+	if isShutdown {
+		timeout = PSIPHON_API_SHUTDOWN_SERVER_TIMEOUT
+	}
+
+	httpClient, requestUrl, err := MakeUntunneledHttpsClient(
+		tunnel.untunneledDialConfig,
+		certificate,
+		url,
+		timeout)
+	if err != nil {
+		return ContextError(err)
+	}
+
+	payload, payloadInfo, err := makeStatusRequestPayload(tunnel.serverEntry.IpAddress)
+	if err != nil {
+		return ContextError(err)
+	}
+
+	bodyType := "application/json"
+	body := bytes.NewReader(payload)
+
+	response, err := httpClient.Post(requestUrl, bodyType, body)
+	if err == nil && response.StatusCode != http.StatusOK {
+		response.Body.Close()
+		err = fmt.Errorf("HTTP POST request failed with response code: %d", response.StatusCode)
+	}
+	if err != nil {
+
+		// Resend the transfer stats and tunnel stats later
+		// Note: potential duplicate reports if the server received and processed
+		// the request but the client failed to receive the response.
+		putBackStatusRequestPayload(payloadInfo)
+
+		// Trim this error since it may include long URLs
+		return ContextError(TrimError(err))
+	}
+	confirmStatusRequestPayload(payloadInfo)
+	response.Body.Close()
+
+	return nil
+}
+
+// RecordTunnelStats records a tunnel duration and bytes
+// sent and received for subsequent reporting and quality
+// analysis.
+//
+// Tunnel durations are precisely measured client-side
+// and reported in status requests. As the duration is
+// not determined until the tunnel is closed, tunnel
+// stats records are stored in the persistent datastore
+// and reported via subsequent status requests sent to any
+// Psiphon server.
+//
+// Since the status request that reports a tunnel stats
+// record is not necessarily handled by the same server, the
+// tunnel stats records include the original server ID.
+//
+// Other fields that may change between tunnel stats recording
+// and reporting include client geo data, propagation channel,
+// sponsor ID, client version. These are not stored in the
+// datastore (client region, in particular, since that would
+// create an on-disk record of user location).
+// TODO: the server could encrypt, with a nonce and key unknown to
+// the client, a blob containing this data; return it in the
+// handshake response; and the client could store and later report
+// this blob with its tunnel stats records.
+//
+// Multiple "status" requests may be in flight at once (due
+// to multi-tunnel, asynchronous final status retry, and
+// aggressive status requests for pre-registered tunnels),
+// To avoid duplicate reporting, tunnel stats records are
+// "taken-out" by a status request and then "put back" in
+// case the request fails.
+//
+// Note: since tunnel stats records have a globally unique
+// identifier (sessionId + tunnelNumber), we could tolerate
+// duplicate reporting and filter our duplicates on the
+// server-side. Permitting duplicate reporting could increase
+// the velocity of reporting (for example, both the asynchronous
+// untunneled final status requests and the post-connected
+// immediate startus requests could try to report the same tunnel
+// stats).
+// Duplicate reporting may also occur when a server receives and
+// processes a status request but the client fails to receive
+// the response.
+func RecordTunnelStats(
+	sessionId string,
+	tunnelNumber int64,
+	tunnelServerIpAddress string,
+	serverHandshakeTimestamp, duration string,
+	totalBytesSent, totalBytesReceived int64) error {
+
+	tunnelStats := struct {
+		SessionId                string `json:"session_id"`
+		TunnelNumber             int64  `json:"tunnel_number"`
+		TunnelServerIpAddress    string `json:"tunnel_server_ip_address"`
+		ServerHandshakeTimestamp string `json:"server_handshake_timestamp"`
+		Duration                 string `json:"duration"`
+		TotalBytesSent           int64  `json:"total_bytes_sent"`
+		TotalBytesReceived       int64  `json:"total_bytes_received"`
+	}{
+		sessionId,
+		tunnelNumber,
+		tunnelServerIpAddress,
+		serverHandshakeTimestamp,
+		duration,
+		totalBytesSent,
+		totalBytesReceived,
+	}
+
+	tunnelStatsJson, err := json.Marshal(tunnelStats)
+	if err != nil {
+		return ContextError(err)
+	}
+
+	return StoreTunnelStats(tunnelStatsJson)
+}
+
 // doGetRequest makes a tunneled HTTPS request and returns the response body.
 // doGetRequest makes a tunneled HTTPS request and returns the response body.
-func (session *Session) doGetRequest(requestUrl string) (responseBody []byte, err error) {
-	response, err := session.psiphonHttpsClient.Get(requestUrl)
+func (serverContext *ServerContext) doGetRequest(
+	requestUrl string) (responseBody []byte, err error) {
+
+	response, err := serverContext.psiphonHttpsClient.Get(requestUrl)
 	if err == nil && response.StatusCode != http.StatusOK {
 	if err == nil && response.StatusCode != http.StatusOK {
 		response.Body.Close()
 		response.Body.Close()
-		err = fmt.Errorf("unexpected response status code: %d", response.StatusCode)
+		err = fmt.Errorf("HTTP GET request failed with response code: %d", response.StatusCode)
 	}
 	}
 	if err != nil {
 	if err != nil {
 		// Trim this error since it may include long URLs
 		// Trim this error since it may include long URLs
@@ -283,41 +543,42 @@ func (session *Session) doGetRequest(requestUrl string) (responseBody []byte, er
 	if err != nil {
 	if err != nil {
 		return nil, ContextError(err)
 		return nil, ContextError(err)
 	}
 	}
-	if response.StatusCode != http.StatusOK {
-		return nil, ContextError(fmt.Errorf("HTTP GET request failed with response code: %d", response.StatusCode))
-	}
 	return body, nil
 	return body, nil
 }
 }
 
 
 // doPostRequest makes a tunneled HTTPS POST request.
 // doPostRequest makes a tunneled HTTPS POST request.
-func (session *Session) doPostRequest(requestUrl string, bodyType string, body io.Reader) (err error) {
-	response, err := session.psiphonHttpsClient.Post(requestUrl, bodyType, body)
+func (serverContext *ServerContext) doPostRequest(
+	requestUrl string, bodyType string, body io.Reader) (err error) {
+
+	response, err := serverContext.psiphonHttpsClient.Post(requestUrl, bodyType, body)
 	if err == nil && response.StatusCode != http.StatusOK {
 	if err == nil && response.StatusCode != http.StatusOK {
 		response.Body.Close()
 		response.Body.Close()
-		err = fmt.Errorf("unexpected response status code: %d", response.StatusCode)
+		err = fmt.Errorf("HTTP POST request failed with response code: %d", response.StatusCode)
 	}
 	}
 	if err != nil {
 	if err != nil {
 		// Trim this error since it may include long URLs
 		// Trim this error since it may include long URLs
 		return ContextError(TrimError(err))
 		return ContextError(TrimError(err))
 	}
 	}
 	response.Body.Close()
 	response.Body.Close()
-	if response.StatusCode != http.StatusOK {
-		return ContextError(fmt.Errorf("HTTP POST request failed with response code: %d", response.StatusCode))
-	}
-	return
+	return nil
 }
 }
 
 
 // makeBaseRequestUrl makes a URL containing all the common parameters
 // makeBaseRequestUrl makes a URL containing all the common parameters
 // that are included with Psiphon API requests. These common parameters
 // that are included with Psiphon API requests. These common parameters
 // are used for statistics.
 // are used for statistics.
-func makeBaseRequestUrl(config *Config, tunnel *Tunnel, sessionId string) string {
+func makeBaseRequestUrl(tunnel *Tunnel, port, sessionId string) string {
 	var requestUrl bytes.Buffer
 	var requestUrl bytes.Buffer
+
+	if port == "" {
+		port = tunnel.serverEntry.WebServerPort
+	}
+
 	// Note: don't prefix with HTTPS scheme, see comment in doGetRequest.
 	// Note: don't prefix with HTTPS scheme, see comment in doGetRequest.
 	// e.g., don't do this: requestUrl.WriteString("https://")
 	// e.g., don't do this: requestUrl.WriteString("https://")
 	requestUrl.WriteString("http://")
 	requestUrl.WriteString("http://")
 	requestUrl.WriteString(tunnel.serverEntry.IpAddress)
 	requestUrl.WriteString(tunnel.serverEntry.IpAddress)
 	requestUrl.WriteString(":")
 	requestUrl.WriteString(":")
-	requestUrl.WriteString(tunnel.serverEntry.WebServerPort)
+	requestUrl.WriteString(port)
 	requestUrl.WriteString("/")
 	requestUrl.WriteString("/")
 	// Placeholder for the path component of a request
 	// Placeholder for the path component of a request
 	requestUrl.WriteString("%s")
 	requestUrl.WriteString("%s")
@@ -326,18 +587,18 @@ func makeBaseRequestUrl(config *Config, tunnel *Tunnel, sessionId string) string
 	requestUrl.WriteString("&server_secret=")
 	requestUrl.WriteString("&server_secret=")
 	requestUrl.WriteString(tunnel.serverEntry.WebServerSecret)
 	requestUrl.WriteString(tunnel.serverEntry.WebServerSecret)
 	requestUrl.WriteString("&propagation_channel_id=")
 	requestUrl.WriteString("&propagation_channel_id=")
-	requestUrl.WriteString(config.PropagationChannelId)
+	requestUrl.WriteString(tunnel.config.PropagationChannelId)
 	requestUrl.WriteString("&sponsor_id=")
 	requestUrl.WriteString("&sponsor_id=")
-	requestUrl.WriteString(config.SponsorId)
+	requestUrl.WriteString(tunnel.config.SponsorId)
 	requestUrl.WriteString("&client_version=")
 	requestUrl.WriteString("&client_version=")
-	requestUrl.WriteString(config.ClientVersion)
+	requestUrl.WriteString(tunnel.config.ClientVersion)
 	// TODO: client_tunnel_core_version
 	// TODO: client_tunnel_core_version
 	requestUrl.WriteString("&relay_protocol=")
 	requestUrl.WriteString("&relay_protocol=")
 	requestUrl.WriteString(tunnel.protocol)
 	requestUrl.WriteString(tunnel.protocol)
 	requestUrl.WriteString("&client_platform=")
 	requestUrl.WriteString("&client_platform=")
-	requestUrl.WriteString(config.ClientPlatform)
+	requestUrl.WriteString(tunnel.config.ClientPlatform)
 	requestUrl.WriteString("&tunnel_whole_device=")
 	requestUrl.WriteString("&tunnel_whole_device=")
-	requestUrl.WriteString(strconv.Itoa(config.TunnelWholeDevice))
+	requestUrl.WriteString(strconv.Itoa(tunnel.config.TunnelWholeDevice))
 	return requestUrl.String()
 	return requestUrl.String()
 }
 }
 
 
@@ -345,9 +606,9 @@ type ExtraParam struct{ name, value string }
 
 
 // buildRequestUrl makes a URL for an API request. The URL includes the
 // buildRequestUrl makes a URL for an API request. The URL includes the
 // base request URL and any extra parameters for the specific request.
 // base request URL and any extra parameters for the specific request.
-func (session *Session) buildRequestUrl(path string, extraParams ...*ExtraParam) string {
+func buildRequestUrl(baseRequestUrl, path string, extraParams ...*ExtraParam) string {
 	var requestUrl bytes.Buffer
 	var requestUrl bytes.Buffer
-	requestUrl.WriteString(fmt.Sprintf(session.baseRequestUrl, path))
+	requestUrl.WriteString(fmt.Sprintf(baseRequestUrl, path))
 	for _, extraParam := range extraParams {
 	for _, extraParam := range extraParams {
 		requestUrl.WriteString("&")
 		requestUrl.WriteString("&")
 		requestUrl.WriteString(extraParam.name)
 		requestUrl.WriteString(extraParam.name)

+ 14 - 0
psiphon/serverEntry.go

@@ -109,6 +109,20 @@ func (serverEntry *ServerEntry) DisableImpairedProtocols(impairedProtocols []str
 	serverEntry.Capabilities = capabilities
 	serverEntry.Capabilities = capabilities
 }
 }
 
 
+func (serverEntry *ServerEntry) GetDirectWebRequestPorts() []string {
+	ports := make([]string, 0)
+	if Contains(serverEntry.Capabilities, "handshake") {
+		// Server-side configuration quirk: there's a port forward from
+		// port 443 to the web server, which we can try, except on servers
+		// running FRONTED_MEEK, which listens on port 443.
+		if serverEntry.SupportsProtocol(TUNNEL_PROTOCOL_FRONTED_MEEK) {
+			ports = append(ports, "443")
+		}
+		ports = append(ports, serverEntry.WebServerPort)
+	}
+	return ports
+}
+
 // DecodeServerEntry extracts server entries from the encoding
 // DecodeServerEntry extracts server entries from the encoding
 // used by remote server lists and Psiphon server handshake requests.
 // used by remote server lists and Psiphon server handshake requests.
 func DecodeServerEntry(encodedServerEntry string) (serverEntry *ServerEntry, err error) {
 func DecodeServerEntry(encodedServerEntry string) (serverEntry *ServerEntry, err error) {

+ 6 - 2
psiphon/socksProxy.go

@@ -44,9 +44,13 @@ var _SOCKS_PROXY_TYPE = "SOCKS"
 // NewSocksProxy initializes a new SOCKS server. It begins listening for
 // NewSocksProxy initializes a new SOCKS server. It begins listening for
 // connections, starts a goroutine that runs an accept loop, and returns
 // connections, starts a goroutine that runs an accept loop, and returns
 // leaving the accept loop running.
 // leaving the accept loop running.
-func NewSocksProxy(config *Config, tunneler Tunneler) (proxy *SocksProxy, err error) {
+func NewSocksProxy(
+	config *Config,
+	tunneler Tunneler,
+	listenIP string) (proxy *SocksProxy, err error) {
+
 	listener, err := socks.ListenSocks(
 	listener, err := socks.ListenSocks(
-		"tcp", fmt.Sprintf("127.0.0.1:%d", config.LocalSocksProxyPort))
+		"tcp", fmt.Sprintf("%s:%d", listenIP, config.LocalSocksProxyPort))
 	if err != nil {
 	if err != nil {
 		if IsAddressInUseError(err) {
 		if IsAddressInUseError(err) {
 			NoticeSocksProxyPortInUse(config.LocalSocksProxyPort)
 			NoticeSocksProxyPortInUse(config.LocalSocksProxyPort)

+ 8 - 8
psiphon/splitTunnel.go

@@ -114,12 +114,12 @@ func (classifier *SplitTunnelClassifier) Start(fetchRoutesTunnel *Tunnel) {
 		return
 		return
 	}
 	}
 
 
-	if fetchRoutesTunnel.session == nil {
-		// Tunnel has no session
+	if fetchRoutesTunnel.serverContext == nil {
+		// Tunnel has no serverContext
 		return
 		return
 	}
 	}
 
 
-	if fetchRoutesTunnel.session.clientRegion == "" {
+	if fetchRoutesTunnel.serverContext.clientRegion == "" {
 		// Split tunnel region is unknown
 		// Split tunnel region is unknown
 		return
 		return
 	}
 	}
@@ -207,7 +207,7 @@ func (classifier *SplitTunnelClassifier) setRoutes(tunnel *Tunnel) {
 		return
 		return
 	}
 	}
 
 
-	NoticeSplitTunnelRegion(tunnel.session.clientRegion)
+	NoticeSplitTunnelRegion(tunnel.serverContext.clientRegion)
 }
 }
 
 
 // getRoutes makes a web request to download fresh routes data for the
 // getRoutes makes a web request to download fresh routes data for the
@@ -216,13 +216,13 @@ func (classifier *SplitTunnelClassifier) setRoutes(tunnel *Tunnel) {
 // fails and cached routes data is present, that cached data is returned.
 // fails and cached routes data is present, that cached data is returned.
 func (classifier *SplitTunnelClassifier) getRoutes(tunnel *Tunnel) (routesData []byte, err error) {
 func (classifier *SplitTunnelClassifier) getRoutes(tunnel *Tunnel) (routesData []byte, err error) {
 
 
-	url := fmt.Sprintf(classifier.fetchRoutesUrlFormat, tunnel.session.clientRegion)
+	url := fmt.Sprintf(classifier.fetchRoutesUrlFormat, tunnel.serverContext.clientRegion)
 	request, err := http.NewRequest("GET", url, nil)
 	request, err := http.NewRequest("GET", url, nil)
 	if err != nil {
 	if err != nil {
 		return nil, ContextError(err)
 		return nil, ContextError(err)
 	}
 	}
 
 
-	etag, err := GetSplitTunnelRoutesETag(tunnel.session.clientRegion)
+	etag, err := GetSplitTunnelRoutesETag(tunnel.serverContext.clientRegion)
 	if err != nil {
 	if err != nil {
 		return nil, ContextError(err)
 		return nil, ContextError(err)
 	}
 	}
@@ -310,7 +310,7 @@ func (classifier *SplitTunnelClassifier) getRoutes(tunnel *Tunnel) (routesData [
 	if !useCachedRoutes {
 	if !useCachedRoutes {
 		etag := response.Header.Get("ETag")
 		etag := response.Header.Get("ETag")
 		if etag != "" {
 		if etag != "" {
-			err := SetSplitTunnelRoutes(tunnel.session.clientRegion, etag, routesData)
+			err := SetSplitTunnelRoutes(tunnel.serverContext.clientRegion, etag, routesData)
 			if err != nil {
 			if err != nil {
 				NoticeAlert("failed to cache split tunnel routes: %s", ContextError(err))
 				NoticeAlert("failed to cache split tunnel routes: %s", ContextError(err))
 				// Proceed with fetched data, even when we can't cache it
 				// Proceed with fetched data, even when we can't cache it
@@ -319,7 +319,7 @@ func (classifier *SplitTunnelClassifier) getRoutes(tunnel *Tunnel) (routesData [
 	}
 	}
 
 
 	if useCachedRoutes {
 	if useCachedRoutes {
-		routesData, err = GetSplitTunnelRoutesData(tunnel.session.clientRegion)
+		routesData, err = GetSplitTunnelRoutesData(tunnel.serverContext.clientRegion)
 		if err != nil {
 		if err != nil {
 			return nil, ContextError(err)
 			return nil, ContextError(err)
 		}
 		}

+ 72 - 62
psiphon/transferstats/collector.go

@@ -20,7 +20,6 @@
 package transferstats
 package transferstats
 
 
 import (
 import (
-	"encoding/json"
 	"sync"
 	"sync"
 )
 )
 
 
@@ -40,21 +39,39 @@ type hostStats struct {
 	numBytesReceived int64
 	numBytesReceived int64
 }
 }
 
 
-func newHostStats() *hostStats {
-	return &hostStats{}
+// AccumulatedStats holds the Psiphon Server API status request data for a
+// given server. To accommodate status requests that may fail, and be retried,
+// the TakeOutStatsForServer/PutBackStatsForServer procedure allows the requester
+// to check out stats for reporting and merge back stats for a later retry.
+type AccumulatedStats struct {
+	hostnameToStats map[string]*hostStats
 }
 }
 
 
-// serverStats holds per-server stats.
-type serverStats struct {
-	hostnameToStats    map[string]*hostStats
-	totalBytesSent     int64
-	totalBytesReceived int64
-}
+// GetStatsForStatusRequest summarizes AccumulatedStats data as
+// required for the Psiphon Server API status request.
+func (stats AccumulatedStats) GetStatsForStatusRequest() (map[string]int64, int64) {
 
 
-func newServerStats() *serverStats {
-	return &serverStats{
-		hostnameToStats: make(map[string]*hostStats),
+	hostBytes := make(map[string]int64)
+	bytesTransferred := int64(0)
+
+	for hostname, hostStats := range stats.hostnameToStats {
+		totalBytes := hostStats.numBytesReceived + hostStats.numBytesSent
+		bytesTransferred += totalBytes
+		hostBytes[hostname] = totalBytes
 	}
 	}
+
+	return hostBytes, bytesTransferred
+}
+
+// serverStats holds per-server stats.
+// accumulatedStats data is payload for the Psiphon status request
+// which is accessed via TakeOut/PutBack.
+// recentBytes data is for tunnel monitoring which is accessed via
+// ReportRecentBytesTransferredForServer.
+type serverStats struct {
+	accumulatedStats    *AccumulatedStats
+	recentBytesSent     int64
+	recentBytesReceived int64
 }
 }
 
 
 // allStats is the root object that holds stats for all servers and all hosts,
 // allStats is the root object that holds stats for all servers and all hosts,
@@ -73,10 +90,9 @@ type statsUpdate struct {
 }
 }
 
 
 // recordStats makes sure the given stats update is added to the global
 // recordStats makes sure the given stats update is added to the global
-// collection. Guaranteed to not block.
-// Callers of this function should assume that it "takes control" of the
-// statsUpdate object.
-func recordStat(stat *statsUpdate) {
+// collection. recentBytes are not adjusted when isPutBack is true,
+// as recentBytes aren't subject to TakeOut/PutBack.
+func recordStat(stat *statsUpdate, isPutBack bool) {
 	allStats.statsMutex.Lock()
 	allStats.statsMutex.Lock()
 	defer allStats.statsMutex.Unlock()
 	defer allStats.statsMutex.Unlock()
 
 
@@ -86,51 +102,31 @@ func recordStat(stat *statsUpdate) {
 
 
 	storedServerStats := allStats.serverIDtoStats[stat.serverID]
 	storedServerStats := allStats.serverIDtoStats[stat.serverID]
 	if storedServerStats == nil {
 	if storedServerStats == nil {
-		storedServerStats = newServerStats()
+		storedServerStats = &serverStats{
+			accumulatedStats: &AccumulatedStats{
+				hostnameToStats: make(map[string]*hostStats)}}
 		allStats.serverIDtoStats[stat.serverID] = storedServerStats
 		allStats.serverIDtoStats[stat.serverID] = storedServerStats
 	}
 	}
 
 
-	storedHostStats := storedServerStats.hostnameToStats[stat.hostname]
+	storedHostStats := storedServerStats.accumulatedStats.hostnameToStats[stat.hostname]
 	if storedHostStats == nil {
 	if storedHostStats == nil {
-		storedHostStats = newHostStats()
-		storedServerStats.hostnameToStats[stat.hostname] = storedHostStats
+		storedHostStats = &hostStats{}
+		storedServerStats.accumulatedStats.hostnameToStats[stat.hostname] = storedHostStats
 	}
 	}
 
 
-	storedServerStats.totalBytesSent += stat.numBytesSent
-	storedServerStats.totalBytesReceived += stat.numBytesReceived
-
 	storedHostStats.numBytesSent += stat.numBytesSent
 	storedHostStats.numBytesSent += stat.numBytesSent
 	storedHostStats.numBytesReceived += stat.numBytesReceived
 	storedHostStats.numBytesReceived += stat.numBytesReceived
 
 
-	//fmt.Println("server:", stat.serverID, "host:", stat.hostname, "sent:", storedHostStats.numBytesSent, "received:", storedHostStats.numBytesReceived)
-}
-
-// Implement the json.Marshaler interface
-func (ss serverStats) MarshalJSON() ([]byte, error) {
-	out := make(map[string]interface{})
-
-	hostBytes := make(map[string]int64)
-	bytesTransferred := int64(0)
-
-	for hostname, hostStats := range ss.hostnameToStats {
-		totalBytes := hostStats.numBytesReceived + hostStats.numBytesSent
-		bytesTransferred += totalBytes
-		hostBytes[hostname] = totalBytes
+	if !isPutBack {
+		storedServerStats.recentBytesSent += stat.numBytesSent
+		storedServerStats.recentBytesReceived += stat.numBytesReceived
 	}
 	}
-
-	out["bytes_transferred"] = bytesTransferred
-	out["host_bytes"] = hostBytes
-
-	// We're not using these fields, but the server requires them
-	out["page_views"] = make([]string, 0)
-	out["https_requests"] = make([]string, 0)
-
-	return json.Marshal(out)
 }
 }
 
 
-// GetBytesTransferredForServer returns total bytes sent and received since
-// the last call to GetBytesTransferredForServer.
-func GetBytesTransferredForServer(serverID string) (sent, received int64) {
+// ReportRecentBytesTransferredForServer returns bytes sent and received since
+// the last call to ReportRecentBytesTransferredForServer. The accumulated sent
+// and received are reset to 0 by this call.
+func ReportRecentBytesTransferredForServer(serverID string) (sent, received int64) {
 	allStats.statsMutex.Lock()
 	allStats.statsMutex.Lock()
 	defer allStats.statsMutex.Unlock()
 	defer allStats.statsMutex.Unlock()
 
 
@@ -140,35 +136,49 @@ func GetBytesTransferredForServer(serverID string) (sent, received int64) {
 		return
 		return
 	}
 	}
 
 
-	sent = stats.totalBytesSent
-	received = stats.totalBytesReceived
+	sent = stats.recentBytesSent
+	received = stats.recentBytesReceived
 
 
-	stats.totalBytesSent = 0
-	stats.totalBytesReceived = 0
+	stats.recentBytesSent = 0
+	stats.recentBytesReceived = 0
 
 
 	return
 	return
 }
 }
 
 
-// GetForServer returns the json-able stats package for the given server.
-// If there are no stats, nil will be returned.
-func GetForServer(serverID string) (payload *serverStats) {
+// TakeOutStatsForServer borrows the AccumulatedStats for the specified
+// server. When we fail to report these stats, resubmit them with
+// PutBackStatsForServer. Stats will continue to be accumulated between
+// TakeOut and PutBack calls. The recentBytes values are unaffected by
+// TakeOut/PutBack. Returns empty stats if the serverID is not found.
+func TakeOutStatsForServer(serverID string) (accumulatedStats *AccumulatedStats) {
 	allStats.statsMutex.Lock()
 	allStats.statsMutex.Lock()
 	defer allStats.statsMutex.Unlock()
 	defer allStats.statsMutex.Unlock()
 
 
-	payload = allStats.serverIDtoStats[serverID]
-	delete(allStats.serverIDtoStats, serverID)
+	newAccumulatedStats := &AccumulatedStats{
+		hostnameToStats: make(map[string]*hostStats)}
+
+	// Note: for an existing serverStats, only the accumulatedStats is
+	// affected; the recentBytes fields are not changed.
+	serverStats := allStats.serverIDtoStats[serverID]
+	if serverStats != nil {
+		accumulatedStats = serverStats.accumulatedStats
+		serverStats.accumulatedStats = newAccumulatedStats
+	} else {
+		accumulatedStats = newAccumulatedStats
+	}
 	return
 	return
 }
 }
 
 
-// PutBack re-adds a set of server stats to the collection.
-func PutBack(serverID string, ss *serverStats) {
-	for hostname, hoststats := range ss.hostnameToStats {
+// PutBackStatsForServer re-adds a set of server stats to the collection.
+func PutBackStatsForServer(serverID string, accumulatedStats *AccumulatedStats) {
+	for hostname, hoststats := range accumulatedStats.hostnameToStats {
 		recordStat(
 		recordStat(
 			&statsUpdate{
 			&statsUpdate{
 				serverID:         serverID,
 				serverID:         serverID,
 				hostname:         hostname,
 				hostname:         hostname,
 				numBytesSent:     hoststats.numBytesSent,
 				numBytesSent:     hoststats.numBytesSent,
 				numBytesReceived: hoststats.numBytesReceived,
 				numBytesReceived: hoststats.numBytesReceived,
-			})
+			},
+			true)
 	}
 	}
 }
 }

+ 6 - 4
psiphon/transferstats/conn.go

@@ -47,8 +47,8 @@ type Conn struct {
 }
 }
 
 
 // NewConn creates a Conn. serverID can be anything that uniquely
 // NewConn creates a Conn. serverID can be anything that uniquely
-// identifies the server; it will be passed to GetForServer() when retrieving
-// the accumulated stats.
+// identifies the server; it will be passed to TakeOutStatsForServer() when
+// retrieving the accumulated stats.
 func NewConn(nextConn net.Conn, serverID string, regexps *Regexps) *Conn {
 func NewConn(nextConn net.Conn, serverID string, regexps *Regexps) *Conn {
 	return &Conn{
 	return &Conn{
 		Conn:           nextConn,
 		Conn:           nextConn,
@@ -85,7 +85,8 @@ func (conn *Conn) Write(buffer []byte) (n int, err error) {
 			conn.serverID,
 			conn.serverID,
 			conn.hostname,
 			conn.hostname,
 			int64(n),
 			int64(n),
-			0})
+			0},
+			false)
 	}
 	}
 
 
 	return
 	return
@@ -108,7 +109,8 @@ func (conn *Conn) Read(buffer []byte) (n int, err error) {
 		conn.serverID,
 		conn.serverID,
 		hostname,
 		hostname,
 		0,
 		0,
-		int64(n)})
+		int64(n)},
+		false)
 
 
 	return
 	return
 }
 }

+ 6 - 4
psiphon/transferstats/regexp.go

@@ -71,10 +71,12 @@ func MakeRegexps(pageViewRegexes, httpsRequestRegexes []map[string]string) (rege
 // string that should be used for stats.
 // string that should be used for stats.
 func regexHostname(hostname string, regexps *Regexps) (statsHostname string) {
 func regexHostname(hostname string, regexps *Regexps) (statsHostname string) {
 	statsHostname = "(OTHER)"
 	statsHostname = "(OTHER)"
-	for _, rr := range *regexps {
-		if rr.regexp.MatchString(hostname) {
-			statsHostname = rr.regexp.ReplaceAllString(hostname, rr.replace)
-			break
+	if regexps != nil {
+		for _, rr := range *regexps {
+			if rr.regexp.MatchString(hostname) {
+				statsHostname = rr.regexp.ReplaceAllString(hostname, rr.replace)
+				break
+			}
 		}
 		}
 	}
 	}
 	return
 	return

+ 22 - 18
psiphon/transferstats/transferstats_test.go

@@ -91,21 +91,23 @@ func (suite *StatsTestSuite) Test_StatsConn() {
 	resp, err = suite.httpClient.Get("https://example.org/index.html")
 	resp, err = suite.httpClient.Get("https://example.org/index.html")
 	suite.Nil(err, "basic HTTPS requests should succeed")
 	suite.Nil(err, "basic HTTPS requests should succeed")
 	resp.Body.Close()
 	resp.Body.Close()
+
+	// Clear out stats
+	_ = TakeOutStatsForServer(_SERVER_ID)
 }
 }
 
 
-func (suite *StatsTestSuite) Test_GetForServer() {
-	payload := GetForServer(_SERVER_ID)
-	suite.Nil(payload, "should get nil stats before any traffic (but not crash)")
+func (suite *StatsTestSuite) Test_TakeOutStatsForServer() {
+
+	zeroPayload := &AccumulatedStats{hostnameToStats: make(map[string]*hostStats)}
+
+	payload := TakeOutStatsForServer(_SERVER_ID)
+	suite.Equal(payload, zeroPayload, "should get zero stats before any traffic")
 
 
 	resp, err := suite.httpClient.Get("http://example.com/index.html")
 	resp, err := suite.httpClient.Get("http://example.com/index.html")
 	suite.Nil(err, "need successful http to proceed with tests")
 	suite.Nil(err, "need successful http to proceed with tests")
 	resp.Body.Close()
 	resp.Body.Close()
 
 
-	// Make sure there aren't stats returned for a bad server ID
-	payload = GetForServer("INVALID")
-	suite.Nil(payload, "should get nil stats for invalid server ID")
-
-	payload = GetForServer(_SERVER_ID)
+	payload = TakeOutStatsForServer(_SERVER_ID)
 	suite.NotNil(payload, "should receive valid payload for valid server ID")
 	suite.NotNil(payload, "should receive valid payload for valid server ID")
 
 
 	payloadJSON, err := json.Marshal(payload)
 	payloadJSON, err := json.Marshal(payload)
@@ -114,27 +116,29 @@ func (suite *StatsTestSuite) Test_GetForServer() {
 	suite.Nil(err, "payload JSON should parse successfully")
 	suite.Nil(err, "payload JSON should parse successfully")
 
 
 	// After we retrieve the stats for a server, they should be cleared out of the tracked stats
 	// After we retrieve the stats for a server, they should be cleared out of the tracked stats
-	payload = GetForServer(_SERVER_ID)
-	suite.Nil(payload, "after retrieving stats for a server, there should be no more stats (until more data goes through)")
+	payload = TakeOutStatsForServer(_SERVER_ID)
+	suite.Equal(payload, zeroPayload, "after retrieving stats for a server, there should be zero stats (until more data goes through)")
 }
 }
 
 
-func (suite *StatsTestSuite) Test_PutBack() {
+func (suite *StatsTestSuite) Test_PutBackStatsForServer() {
 	resp, err := suite.httpClient.Get("http://example.com/index.html")
 	resp, err := suite.httpClient.Get("http://example.com/index.html")
 	suite.Nil(err, "need successful http to proceed with tests")
 	suite.Nil(err, "need successful http to proceed with tests")
 	resp.Body.Close()
 	resp.Body.Close()
 
 
-	payloadToPutBack := GetForServer(_SERVER_ID)
+	payloadToPutBack := TakeOutStatsForServer(_SERVER_ID)
 	suite.NotNil(payloadToPutBack, "should receive valid payload for valid server ID")
 	suite.NotNil(payloadToPutBack, "should receive valid payload for valid server ID")
 
 
-	payload := GetForServer(_SERVER_ID)
-	suite.Nil(payload, "should not be any remaining stats after getting them")
+	zeroPayload := &AccumulatedStats{hostnameToStats: make(map[string]*hostStats)}
+
+	payload := TakeOutStatsForServer(_SERVER_ID)
+	suite.Equal(payload, zeroPayload, "should be zero stats after getting them")
 
 
-	PutBack(_SERVER_ID, payloadToPutBack)
+	PutBackStatsForServer(_SERVER_ID, payloadToPutBack)
 	// PutBack is asynchronous, so we'll need to wait a moment for it to do its thing
 	// PutBack is asynchronous, so we'll need to wait a moment for it to do its thing
 	<-time.After(100 * time.Millisecond)
 	<-time.After(100 * time.Millisecond)
 
 
-	payload = GetForServer(_SERVER_ID)
-	suite.NotNil(payload, "stats should be re-added after putting back")
+	payload = TakeOutStatsForServer(_SERVER_ID)
+	suite.NotEqual(payload, zeroPayload, "stats should be re-added after putting back")
 	suite.Equal(payload, payloadToPutBack, "stats should be the same as after the first retrieval")
 	suite.Equal(payload, payloadToPutBack, "stats should be the same as after the first retrieval")
 }
 }
 
 
@@ -216,7 +220,7 @@ func (suite *StatsTestSuite) Test_Regex() {
 		suite.Nil(err)
 		suite.Nil(err)
 		resp.Body.Close()
 		resp.Body.Close()
 
 
-		payload := GetForServer(_SERVER_ID)
+		payload := TakeOutStatsForServer(_SERVER_ID)
 		suite.NotNil(payload, "should get stats because we made HTTP reqs; %s", scheme)
 		suite.NotNil(payload, "should get stats because we made HTTP reqs; %s", scheme)
 
 
 		expectedHostnames := mapset.NewSet()
 		expectedHostnames := mapset.NewSet()

+ 291 - 110
psiphon/tunnel.go

@@ -63,17 +63,20 @@ type TunnelOwner interface {
 // and an SSH session built on top of that transport.
 // and an SSH session built on top of that transport.
 type Tunnel struct {
 type Tunnel struct {
 	mutex                    *sync.Mutex
 	mutex                    *sync.Mutex
+	config                   *Config
+	untunneledDialConfig     *DialConfig
+	isDiscarded              bool
 	isClosed                 bool
 	isClosed                 bool
 	serverEntry              *ServerEntry
 	serverEntry              *ServerEntry
-	session                  *Session
+	serverContext            *ServerContext
 	protocol                 string
 	protocol                 string
 	conn                     net.Conn
 	conn                     net.Conn
 	sshClient                *ssh.Client
 	sshClient                *ssh.Client
 	operateWaitGroup         *sync.WaitGroup
 	operateWaitGroup         *sync.WaitGroup
 	shutdownOperateBroadcast chan struct{}
 	shutdownOperateBroadcast chan struct{}
-	portForwardFailures      chan int
-	portForwardFailureTotal  int
-	sessionStartTime         time.Time
+	signalPortForwardFailure chan struct{}
+	totalPortForwardFailures int
+	startTime                time.Time
 }
 }
 
 
 // EstablishTunnel first makes a network transport connection to the
 // EstablishTunnel first makes a network transport connection to the
@@ -85,8 +88,10 @@ type Tunnel struct {
 // HTTP (meek protocol).
 // HTTP (meek protocol).
 // When requiredProtocol is not blank, that protocol is used. Otherwise,
 // When requiredProtocol is not blank, that protocol is used. Otherwise,
 // the a random supported protocol is used.
 // the a random supported protocol is used.
+// untunneledDialConfig is used for untunneled final status requests.
 func EstablishTunnel(
 func EstablishTunnel(
 	config *Config,
 	config *Config,
+	untunneledDialConfig *DialConfig,
 	sessionId string,
 	sessionId string,
 	pendingConns *Conns,
 	pendingConns *Conns,
 	serverEntry *ServerEntry,
 	serverEntry *ServerEntry,
@@ -115,6 +120,8 @@ func EstablishTunnel(
 	// The tunnel is now connected
 	// The tunnel is now connected
 	tunnel = &Tunnel{
 	tunnel = &Tunnel{
 		mutex:                    new(sync.Mutex),
 		mutex:                    new(sync.Mutex),
+		config:                   config,
+		untunneledDialConfig:     untunneledDialConfig,
 		isClosed:                 false,
 		isClosed:                 false,
 		serverEntry:              serverEntry,
 		serverEntry:              serverEntry,
 		protocol:                 selectedProtocol,
 		protocol:                 selectedProtocol,
@@ -122,45 +129,44 @@ func EstablishTunnel(
 		sshClient:                sshClient,
 		sshClient:                sshClient,
 		operateWaitGroup:         new(sync.WaitGroup),
 		operateWaitGroup:         new(sync.WaitGroup),
 		shutdownOperateBroadcast: make(chan struct{}),
 		shutdownOperateBroadcast: make(chan struct{}),
-		// portForwardFailures buffer size is large enough to receive the thresold number
-		// of failure reports without blocking. Senders can drop failures without blocking.
-		portForwardFailures: make(chan int, config.PortForwardFailureThreshold)}
+		// A buffer allows at least one signal to be sent even when the receiver is
+		// not listening. Senders should not block.
+		signalPortForwardFailure: make(chan struct{}, 1),
+	}
 
 
-	// Create a new Psiphon API session for this tunnel. This includes performing
-	// a handshake request. If the handshake fails, this establishment fails.
-	//
-	// TODO: as long as the servers are not enforcing that a client perform a handshake,
-	// proceed with this tunnel as long as at least one previous handhake succeeded?
-	//
+	// Create a new Psiphon API server context for this tunnel. This includes
+	// performing a handshake request. If the handshake fails, this establishment
+	// fails.
 	if !config.DisableApi {
 	if !config.DisableApi {
-		NoticeInfo("starting session for %s", tunnel.serverEntry.IpAddress)
-		tunnel.session, err = NewSession(config, tunnel, sessionId)
+		NoticeInfo("starting server context for %s", tunnel.serverEntry.IpAddress)
+		tunnel.serverContext, err = NewServerContext(tunnel, sessionId)
 		if err != nil {
 		if err != nil {
-			return nil, ContextError(fmt.Errorf("error starting session for %s: %s", tunnel.serverEntry.IpAddress, err))
+			return nil, ContextError(
+				fmt.Errorf("error starting server context for %s: %s",
+					tunnel.serverEntry.IpAddress, err))
 		}
 		}
 	}
 	}
 
 
-	tunnel.sessionStartTime = time.Now()
+	tunnel.startTime = time.Now()
 
 
 	// Now that network operations are complete, cancel interruptibility
 	// Now that network operations are complete, cancel interruptibility
 	pendingConns.Remove(conn)
 	pendingConns.Remove(conn)
 
 
-	// Promote this successful tunnel to first rank so it's one
-	// of the first candidates next time establish runs.
-	PromoteServerEntry(tunnel.serverEntry.IpAddress)
-
 	// Spawn the operateTunnel goroutine, which monitors the tunnel and handles periodic stats updates.
 	// Spawn the operateTunnel goroutine, which monitors the tunnel and handles periodic stats updates.
 	tunnel.operateWaitGroup.Add(1)
 	tunnel.operateWaitGroup.Add(1)
-	go tunnel.operateTunnel(config, tunnelOwner)
+	go tunnel.operateTunnel(tunnelOwner)
 
 
 	return tunnel, nil
 	return tunnel, nil
 }
 }
 
 
 // Close stops operating the tunnel and closes the underlying connection.
 // Close stops operating the tunnel and closes the underlying connection.
 // Supports multiple and/or concurrent calls to Close().
 // Supports multiple and/or concurrent calls to Close().
-func (tunnel *Tunnel) Close() {
+// When isDiscarded is set, operateTunnel will not attempt to send final
+// status requests.
+func (tunnel *Tunnel) Close(isDiscarded bool) {
 
 
 	tunnel.mutex.Lock()
 	tunnel.mutex.Lock()
+	tunnel.isDiscarded = isDiscarded
 	isClosed := tunnel.isClosed
 	isClosed := tunnel.isClosed
 	tunnel.isClosed = true
 	tunnel.isClosed = true
 	tunnel.mutex.Unlock()
 	tunnel.mutex.Unlock()
@@ -184,6 +190,13 @@ func (tunnel *Tunnel) Close() {
 	}
 	}
 }
 }
 
 
+// IsDiscarded returns the tunnel's discarded flag.
+func (tunnel *Tunnel) IsDiscarded() bool {
+	tunnel.mutex.Lock()
+	defer tunnel.mutex.Unlock()
+	return tunnel.isDiscarded
+}
+
 // Dial establishes a port forward connection through the tunnel
 // Dial establishes a port forward connection through the tunnel
 // This Dial doesn't support split tunnel, so alwaysTunnel is not referenced
 // This Dial doesn't support split tunnel, so alwaysTunnel is not referenced
 func (tunnel *Tunnel) Dial(
 func (tunnel *Tunnel) Dial(
@@ -214,7 +227,7 @@ func (tunnel *Tunnel) Dial(
 	if result.err != nil {
 	if result.err != nil {
 		// TODO: conditional on type of error or error message?
 		// TODO: conditional on type of error or error message?
 		select {
 		select {
-		case tunnel.portForwardFailures <- 1:
+		case tunnel.signalPortForwardFailure <- *new(struct{}):
 		default:
 		default:
 		}
 		}
 		return nil, ContextError(result.err)
 		return nil, ContextError(result.err)
@@ -225,11 +238,14 @@ func (tunnel *Tunnel) Dial(
 		tunnel:         tunnel,
 		tunnel:         tunnel,
 		downstreamConn: downstreamConn}
 		downstreamConn: downstreamConn}
 
 
-	// Tunnel does not have a session when DisableApi is set
-	if tunnel.session != nil {
-		conn = transferstats.NewConn(
-			conn, tunnel.session.StatsServerID(), tunnel.session.StatsRegexps())
+	// Tunnel does not have a serverContext when DisableApi is set. We still use
+	// transferstats.Conn to count bytes transferred for monitoring tunnel
+	// quality.
+	var regexps *transferstats.Regexps
+	if tunnel.serverContext != nil {
+		regexps = tunnel.serverContext.StatsRegexps()
 	}
 	}
+	conn = transferstats.NewConn(conn, tunnel.serverEntry.IpAddress, regexps)
 
 
 	return conn, nil
 	return conn, nil
 }
 }
@@ -238,7 +254,7 @@ func (tunnel *Tunnel) Dial(
 // This will terminate the tunnel.
 // This will terminate the tunnel.
 func (tunnel *Tunnel) SignalComponentFailure() {
 func (tunnel *Tunnel) SignalComponentFailure() {
 	NoticeAlert("tunnel received component failure signal")
 	NoticeAlert("tunnel received component failure signal")
-	tunnel.Close()
+	tunnel.Close(false)
 }
 }
 
 
 // TunneledConn implements net.Conn and wraps a port foward connection.
 // TunneledConn implements net.Conn and wraps a port foward connection.
@@ -255,11 +271,11 @@ type TunneledConn struct {
 func (conn *TunneledConn) Read(buffer []byte) (n int, err error) {
 func (conn *TunneledConn) Read(buffer []byte) (n int, err error) {
 	n, err = conn.Conn.Read(buffer)
 	n, err = conn.Conn.Read(buffer)
 	if err != nil && err != io.EOF {
 	if err != nil && err != io.EOF {
-		// Report 1 new failure. Won't block; assumes the receiver
+		// Report new failure. Won't block; assumes the receiver
 		// has a sufficient buffer for the threshold number of reports.
 		// has a sufficient buffer for the threshold number of reports.
 		// TODO: conditional on type of error or error message?
 		// TODO: conditional on type of error or error message?
 		select {
 		select {
-		case conn.tunnel.portForwardFailures <- 1:
+		case conn.tunnel.signalPortForwardFailure <- *new(struct{}):
 		default:
 		default:
 		}
 		}
 	}
 	}
@@ -271,7 +287,7 @@ func (conn *TunneledConn) Write(buffer []byte) (n int, err error) {
 	if err != nil && err != io.EOF {
 	if err != nil && err != io.EOF {
 		// Same as TunneledConn.Read()
 		// Same as TunneledConn.Read()
 		select {
 		select {
-		case conn.tunnel.portForwardFailures <- 1:
+		case conn.tunnel.signalPortForwardFailure <- *new(struct{}):
 		default:
 		default:
 		}
 		}
 	}
 	}
@@ -485,121 +501,261 @@ func dialSsh(
 	return conn, result.sshClient, nil
 	return conn, result.sshClient, nil
 }
 }
 
 
-// operateTunnel periodically sends status requests (traffic stats updates updates)
-// to the Psiphon API; and monitors the tunnel for failures:
+// operateTunnel monitors the health of the tunnel and performs
+// periodic work.
+//
+// BytesTransferred and TotalBytesTransferred notices are emitted
+// for live reporting and diagnostics reporting, respectively.
+//
+// Status requests are sent to the Psiphon API to report bytes
+// transferred.
+//
+// Periodic SSH keep alive packets are sent to ensure the underlying
+// TCP connection isn't terminated by NAT, or other network
+// interference -- or test if it has been terminated while the device
+// has been asleep. When a keep alive times out, the tunnel is
+// considered failed.
 //
 //
-// 1. Overall tunnel failure: the tunnel sends a signal to the ClosedSignal
-// channel on keep-alive failure and other transport I/O errors. In case
-// of such a failure, the tunnel is marked as failed.
+// An immediate SSH keep alive "probe" is sent to test the tunnel and
+// server responsiveness when a port forward failure is detected: a
+// failed dial or failed read/write. This keep alive has a shorter
+// timeout.
 //
 //
-// 2. Tunnel port forward failures: the tunnel connection may stay up but
-// the client may still fail to establish port forwards due to server load
-// and other conditions. After a threshold number of such failures, the
-// overall tunnel is marked as failed.
+// Note that port forward failures may be due to non-failure conditions.
+// For example, when the user inputs an invalid domain name and
+// resolution is done by the ssh server; or trying to connect to a
+// non-white-listed port; and the error message in these cases is not
+// distinguishable from a true server error (a common error message,
+// "ssh: rejected: administratively prohibited (open failed)", may be
+// returned for these cases but also if the server has run out of
+// ephemeral ports, for example).
 //
 //
-// TODO: currently, any connect (dial), read, or write error associated with
-// a port forward is counted as a failure. It may be important to differentiate
-// between failures due to Psiphon server conditions and failures due to the
-// origin/target server (in the latter case, the tunnel is healthy). Here are
-// some typical error messages to consider matching against (or ignoring):
+// SSH keep alives are not sent when the tunnel has been recently
+// active (not only does tunnel activity obviate the necessity of a keep
+// alive, testing has shown that keep alives may time out for "busy"
+// tunnels, especially over meek protocol and other high latency
+// conditions).
 //
 //
-// - "ssh: rejected: administratively prohibited (open failed)"
-//   (this error message is reported in both actual and false cases: when a server
-//    is overloaded and has no free ephemeral ports; and when the user mistypes
-//    a domain in a browser address bar and name resolution fails)
-// - "ssh: rejected: connect failed (Connection timed out)"
-// - "write tcp ... broken pipe"
-// - "read tcp ... connection reset by peer"
-// - "ssh: unexpected packet in response to channel open: <nil>"
+// "Recently active" is defined has having received payload bytes. Sent
+// bytes are not considered as testing has shown bytes may appear to
+// send when certain NAT devices have interfered with the tunnel, while
+// no bytes are received. In a pathological case, with DNS implemented
+// as tunneled UDP, a browser may wait excessively for a domain name to
+// resolve, while no new port forward is attempted which would otherwise
+// result in a tunnel failure detection.
 //
 //
-// Update: the above is superceded by SSH keep alives with timeouts. When a keep
-// alive times out, the tunnel is marked as failed. Keep alives are triggered
-// periodically, and also immediately in the case of a port forward failure (so
-// as to immediately detect a situation such as a device waking up and trying
-// to use a dead tunnel). By default, port forward theshold counting does not
-// cause a tunnel to be marked as failed, with the conservative assumption that
-// a server which responds to an SSH keep alive is fully functional.
+// TODO: change "recently active" to include having received any
+// SSH protocol messages from the server, not just user payload?
 //
 //
-func (tunnel *Tunnel) operateTunnel(config *Config, tunnelOwner TunnelOwner) {
+func (tunnel *Tunnel) operateTunnel(tunnelOwner TunnelOwner) {
 	defer tunnel.operateWaitGroup.Done()
 	defer tunnel.operateWaitGroup.Done()
 
 
+	lastBytesReceivedTime := time.Now()
+
+	lastTotalBytesTransferedTime := time.Now()
+	totalSent := int64(0)
+	totalReceived := int64(0)
+
+	// Always emit a final NoticeTotalBytesTransferred
+	defer func() {
+		NoticeTotalBytesTransferred(tunnel.serverEntry.IpAddress, totalSent, totalReceived)
+	}()
+
+	noticeBytesTransferredTicker := time.NewTicker(1 * time.Second)
+	defer noticeBytesTransferredTicker.Stop()
+
 	// The next status request and ssh keep alive times are picked at random,
 	// The next status request and ssh keep alive times are picked at random,
 	// from a range, to make the resulting traffic less fingerprintable,
 	// from a range, to make the resulting traffic less fingerprintable,
-	// especially when then tunnel is otherwise idle.
 	// Note: not using Tickers since these are not fixed time periods.
 	// Note: not using Tickers since these are not fixed time periods.
-
 	nextStatusRequestPeriod := func() time.Duration {
 	nextStatusRequestPeriod := func() time.Duration {
 		return MakeRandomPeriod(
 		return MakeRandomPeriod(
 			PSIPHON_API_STATUS_REQUEST_PERIOD_MIN,
 			PSIPHON_API_STATUS_REQUEST_PERIOD_MIN,
 			PSIPHON_API_STATUS_REQUEST_PERIOD_MAX)
 			PSIPHON_API_STATUS_REQUEST_PERIOD_MAX)
 	}
 	}
+
+	statsTimer := time.NewTimer(nextStatusRequestPeriod())
+	defer statsTimer.Stop()
+
+	// Schedule an immediate status request to deliver any unreported
+	// tunnel stats.
+	// Note: this may not be effective when there's an outstanding
+	// asynchronous untunneled final status request is holding the
+	// tunnel stats records. It may also conflict with other
+	// tunnel candidates which attempt to send an immediate request
+	// before being discarded. For now, we mitigate this with a short,
+	// random delay.
+	unreported := CountUnreportedTunnelStats()
+	if unreported > 0 {
+		NoticeInfo("Unreported tunnel stats: %d", unreported)
+		statsTimer.Reset(MakeRandomPeriod(
+			PSIPHON_API_STATUS_REQUEST_SHORT_PERIOD_MIN,
+			PSIPHON_API_STATUS_REQUEST_SHORT_PERIOD_MAX))
+	}
+
 	nextSshKeepAlivePeriod := func() time.Duration {
 	nextSshKeepAlivePeriod := func() time.Duration {
 		return MakeRandomPeriod(
 		return MakeRandomPeriod(
 			TUNNEL_SSH_KEEP_ALIVE_PERIOD_MIN,
 			TUNNEL_SSH_KEEP_ALIVE_PERIOD_MIN,
 			TUNNEL_SSH_KEEP_ALIVE_PERIOD_MAX)
 			TUNNEL_SSH_KEEP_ALIVE_PERIOD_MAX)
 	}
 	}
 
 
-	// TODO: don't initialize if !config.EmitBytesTransferred
-	noticeBytesTransferredTicker := time.NewTicker(1 * time.Second)
-	if !config.EmitBytesTransferred {
-		noticeBytesTransferredTicker.Stop()
+	// TODO: don't initialize timer when config.DisablePeriodicSshKeepAlive is set
+	sshKeepAliveTimer := time.NewTimer(nextSshKeepAlivePeriod())
+	if tunnel.config.DisablePeriodicSshKeepAlive {
+		sshKeepAliveTimer.Stop()
 	} else {
 	} else {
-		defer noticeBytesTransferredTicker.Stop()
+		defer sshKeepAliveTimer.Stop()
 	}
 	}
 
 
-	statsTimer := time.NewTimer(nextStatusRequestPeriod())
-	defer statsTimer.Stop()
+	// Perform network requests in separate goroutines so as not to block
+	// other operations.
+	requestsWaitGroup := new(sync.WaitGroup)
 
 
-	sshKeepAliveTimer := time.NewTimer(nextSshKeepAlivePeriod())
-	defer sshKeepAliveTimer.Stop()
+	requestsWaitGroup.Add(1)
+	signalStatusRequest := make(chan struct{})
+	go func() {
+		defer requestsWaitGroup.Done()
+		for _ = range signalStatusRequest {
+			sendStats(tunnel)
+		}
+	}()
+
+	requestsWaitGroup.Add(1)
+	signalSshKeepAlive := make(chan time.Duration)
+	sshKeepAliveError := make(chan error, 1)
+	go func() {
+		defer requestsWaitGroup.Done()
+		for timeout := range signalSshKeepAlive {
+			err := sendSshKeepAlive(tunnel.sshClient, tunnel.conn, timeout)
+			if err != nil {
+				select {
+				case sshKeepAliveError <- err:
+				default:
+				}
+			}
+		}
+	}()
 
 
+	shutdown := false
 	var err error
 	var err error
-	for err == nil {
+	for !shutdown && err == nil {
 		select {
 		select {
 		case <-noticeBytesTransferredTicker.C:
 		case <-noticeBytesTransferredTicker.C:
-			sent, received := transferstats.GetBytesTransferredForServer(
+			sent, received := transferstats.ReportRecentBytesTransferredForServer(
 				tunnel.serverEntry.IpAddress)
 				tunnel.serverEntry.IpAddress)
-			// Only emit notice when tunnel is not idle.
-			if sent > 0 || received > 0 {
-				NoticeBytesTransferred(sent, received)
+
+			if received > 0 {
+				lastBytesReceivedTime = time.Now()
+			}
+
+			totalSent += sent
+			totalReceived += received
+
+			if lastTotalBytesTransferedTime.Add(TOTAL_BYTES_TRANSFERRED_NOTICE_PERIOD).Before(time.Now()) {
+				NoticeTotalBytesTransferred(tunnel.serverEntry.IpAddress, totalSent, totalReceived)
+				lastTotalBytesTransferedTime = time.Now()
+			}
+
+			// Only emit the frequent BytesTransferred notice when tunnel is not idle.
+			if tunnel.config.EmitBytesTransferred && (sent > 0 || received > 0) {
+				NoticeBytesTransferred(tunnel.serverEntry.IpAddress, sent, received)
 			}
 			}
 
 
 		case <-statsTimer.C:
 		case <-statsTimer.C:
-			sendStats(tunnel)
+			select {
+			case signalStatusRequest <- *new(struct{}):
+			default:
+			}
 			statsTimer.Reset(nextStatusRequestPeriod())
 			statsTimer.Reset(nextStatusRequestPeriod())
 
 
 		case <-sshKeepAliveTimer.C:
 		case <-sshKeepAliveTimer.C:
-			err = sendSshKeepAlive(tunnel.sshClient, tunnel.conn)
+			if lastBytesReceivedTime.Add(TUNNEL_SSH_KEEP_ALIVE_PERIODIC_INACTIVE_PERIOD).Before(time.Now()) {
+				select {
+				case signalSshKeepAlive <- TUNNEL_SSH_KEEP_ALIVE_PERIODIC_TIMEOUT:
+				default:
+				}
+			}
 			sshKeepAliveTimer.Reset(nextSshKeepAlivePeriod())
 			sshKeepAliveTimer.Reset(nextSshKeepAlivePeriod())
 
 
-		case failures := <-tunnel.portForwardFailures:
+		case <-tunnel.signalPortForwardFailure:
 			// Note: no mutex on portForwardFailureTotal; only referenced here
 			// Note: no mutex on portForwardFailureTotal; only referenced here
-			tunnel.portForwardFailureTotal += failures
+			tunnel.totalPortForwardFailures++
 			NoticeInfo("port forward failures for %s: %d",
 			NoticeInfo("port forward failures for %s: %d",
-				tunnel.serverEntry.IpAddress, tunnel.portForwardFailureTotal)
-			if config.PortForwardFailureThreshold > 0 &&
-				tunnel.portForwardFailureTotal > config.PortForwardFailureThreshold {
-				err = errors.New("tunnel exceeded port forward failure threshold")
-			} else {
-				// Try an SSH keep alive to check the state of the SSH connection
-				// Some port forward failures are due to intermittent conditions
-				// on the server, so we don't abort the connection until the threshold
-				// is hit. But if we can't make a simple round trip request to the
-				// server, we'll immediately abort.
-				err = sendSshKeepAlive(tunnel.sshClient, tunnel.conn)
+				tunnel.serverEntry.IpAddress, tunnel.totalPortForwardFailures)
+
+			if lastBytesReceivedTime.Add(TUNNEL_SSH_KEEP_ALIVE_PROBE_INACTIVE_PERIOD).Before(time.Now()) {
+				select {
+				case signalSshKeepAlive <- TUNNEL_SSH_KEEP_ALIVE_PROBE_TIMEOUT:
+				default:
+				}
+			}
+			if !tunnel.config.DisablePeriodicSshKeepAlive {
 				sshKeepAliveTimer.Reset(nextSshKeepAlivePeriod())
 				sshKeepAliveTimer.Reset(nextSshKeepAlivePeriod())
 			}
 			}
 
 
+		case err = <-sshKeepAliveError:
+
 		case <-tunnel.shutdownOperateBroadcast:
 		case <-tunnel.shutdownOperateBroadcast:
-			// Attempt to send any remaining stats
-			sendStats(tunnel)
-			NoticeInfo("shutdown operate tunnel")
-			return
+			shutdown = true
 		}
 		}
 	}
 	}
 
 
-	if err != nil {
+	close(signalSshKeepAlive)
+	close(signalStatusRequest)
+	requestsWaitGroup.Wait()
+
+	// The stats for this tunnel will be reported via the next successful
+	// status request.
+	// Note: Since client clocks are unreliable, we use the server's reported
+	// timestamp in the handshake response as the tunnel start time. This time
+	// will be slightly earlier than the actual tunnel activation time, as the
+	// client has to receive and parse the response and activate the tunnel.
+	if !tunnel.IsDiscarded() {
+		err := RecordTunnelStats(
+			tunnel.serverContext.sessionId,
+			tunnel.serverContext.tunnelNumber,
+			tunnel.serverEntry.IpAddress,
+			tunnel.serverContext.serverHandshakeTimestamp,
+			fmt.Sprintf("%d", time.Now().Sub(tunnel.startTime)),
+			totalSent,
+			totalReceived)
+		if err != nil {
+			NoticeAlert("RecordTunnelStats failed: %s", ContextError(err))
+		}
+	}
+
+	// Final status request notes:
+	//
+	// It's highly desirable to send a final status request in order to report
+	// domain bytes transferred stats as well as to report tunnel stats as
+	// soon as possible. For this reason, we attempt untunneled requests when
+	// the tunneled request isn't possible or has failed.
+	//
+	// In an orderly shutdown (err == nil), the Controller is stopping and
+	// everything must be wrapped up quickly. Also, we still have a working
+	// tunnel. So we first attempt a tunneled status request (with a short
+// timeout) and then attempt, synchronously -- otherwise the Controller's
+	// untunneledPendingConns.CloseAll() will immediately interrupt untunneled
+	// requests -- untunneled requests (also with short timeouts).
+	// Note that this depends on the order of untunneledPendingConns.CloseAll()
+	// coming after tunnel.Close(): see note in Controller.Run().
+	//
+	// If the tunnel has failed, the Controller may continue working. We want
+	// to re-establish as soon as possible (so don't want to block on status
+	// requests, even for a second). We may have a long time to attempt
+	// untunneled requests in the background. And there is no tunnel through
+	// which to attempt tunneled requests. So we spawn a goroutine to run the
+	// untunneled requests, which are allowed a longer timeout. These requests
+	// will be interrupted by the Controller's untunneledPendingConns.CloseAll()
+	// in the case of a shutdown.
+
+	if err == nil {
+		NoticeInfo("shutdown operate tunnel")
+		if !sendStats(tunnel) {
+			sendUntunneledStats(tunnel, true)
+		}
+	} else {
 		NoticeAlert("operate tunnel error for %s: %s", tunnel.serverEntry.IpAddress, err)
 		NoticeAlert("operate tunnel error for %s: %s", tunnel.serverEntry.IpAddress, err)
+		go sendUntunneledStats(tunnel, false)
 		tunnelOwner.SignalTunnelFailure(tunnel)
 		tunnelOwner.SignalTunnelFailure(tunnel)
 	}
 	}
 }
 }
@@ -607,10 +763,11 @@ func (tunnel *Tunnel) operateTunnel(config *Config, tunnelOwner TunnelOwner) {
 // sendSshKeepAlive is a helper which sends a keepalive@openssh.com request
 // sendSshKeepAlive is a helper which sends a keepalive@openssh.com request
 // on the specified SSH connection and returns true if the request succeeds
 // on the specified SSH connection and returns true if the request succeeds
 // within a specified timeout.
 // within a specified timeout.
-func sendSshKeepAlive(sshClient *ssh.Client, conn net.Conn) error {
+func sendSshKeepAlive(
+	sshClient *ssh.Client, conn net.Conn, timeout time.Duration) error {
 
 
 	errChannel := make(chan error, 2)
 	errChannel := make(chan error, 2)
-	time.AfterFunc(TUNNEL_SSH_KEEP_ALIVE_TIMEOUT, func() {
+	time.AfterFunc(timeout, func() {
 		errChannel <- TimeoutError{}
 		errChannel <- TimeoutError{}
 	})
 	})
 
 
@@ -632,19 +789,43 @@ func sendSshKeepAlive(sshClient *ssh.Client, conn net.Conn) error {
 }
 }
 
 
 // sendStats is a helper for sending session stats to the server.
 // sendStats is a helper for sending session stats to the server.
-func sendStats(tunnel *Tunnel) {
+func sendStats(tunnel *Tunnel) bool {
+
+	// Tunnel does not have a serverContext when DisableApi is set
+	if tunnel.serverContext == nil {
+		return true
+	}
+
+	// Skip when tunnel is discarded
+	if tunnel.IsDiscarded() {
+		return true
+	}
+
+	err := tunnel.serverContext.DoStatusRequest(tunnel)
+	if err != nil {
+		NoticeAlert("DoStatusRequest failed for %s: %s", tunnel.serverEntry.IpAddress, err)
+	}
 
 
-	// Tunnel does not have a session when DisableApi is set
-	if tunnel.session == nil {
+	return err == nil
+}
+
+// sendUntunneledStats sends final status requests directly to Psiphon
+// servers after the tunnel has already failed. This is an attempt
+// to retain useful bytes transferred stats.
+func sendUntunneledStats(tunnel *Tunnel, isShutdown bool) {
+
+	// Tunnel does not have a serverContext when DisableApi is set
+	if tunnel.serverContext == nil {
 		return
 		return
 	}
 	}
 
 
-	payload := transferstats.GetForServer(tunnel.serverEntry.IpAddress)
-	if payload != nil {
-		err := tunnel.session.DoStatusRequest(payload)
-		if err != nil {
-			NoticeAlert("DoStatusRequest failed for %s: %s", tunnel.serverEntry.IpAddress, err)
-			transferstats.PutBack(tunnel.serverEntry.IpAddress, payload)
-		}
+	// Skip when tunnel is discarded
+	if tunnel.IsDiscarded() {
+		return
+	}
+
+	err := TryUntunneledStatusRequest(tunnel, isShutdown)
+	if err != nil {
+		NoticeAlert("TryUntunneledStatusRequest failed for %s: %s", tunnel.serverEntry.IpAddress, err)
 	}
 	}
 }
 }