Просмотр исходного кода

Integrate packet tunnel into tunnel-core client

Rod Hynes 8 лет назад
Родитель
Commit
ee12da7454

+ 18 - 0
psiphon/config.go

@@ -212,6 +212,9 @@ type Config struct {
 	// If there are multiple IP addresses on an interface use the first IPv4 address.
 	ListenInterface string
 
+	// DisableLocalSocksProxy disables running the local SOCKS proxy.
+	DisableLocalSocksProxy bool
+
 	// LocalSocksProxyPort specifies a port number for the local SOCKS proxy
 	// running at 127.0.0.1. For the default value, 0, the system selects a free
 	// port (a notice reporting the selected port is emitted).
@@ -222,6 +225,9 @@ type Config struct {
 	// port (a notice reporting the selected port is emitted).
 	LocalHttpProxyPort int
 
+	// DisableLocalHTTPProxy disables running the local HTTP proxy.
+	DisableLocalHTTPProxy bool
+
 	// ConnectionWorkerPoolSize specifies how many connection attempts to attempt
 	// in parallel. The default, 0, uses CONNECTION_WORKER_POOL_SIZE which is
 	// recommended.
@@ -461,6 +467,13 @@ type Config struct {
 	// could reveal user browsing activity, it's intended for debugging and testing
 	// only.
 	EmitSLOKs bool
+
+	// PacketTunnelTunFileDescriptor specifies a tun device file descriptor
+	// to use for running a packet tunnel. When set, a packet tunnel is established
+	// through the server and packets are relayed via the tun device. The file
+	// descriptor is duped in NewController.
+	// When PacketTunnelTunFileDescriptor is set, TunnelPoolSize must be 1.
+	PacketTunnelTunFileDescriptor *int
 }
 
 // DownloadURL specifies a URL for downloading resources along with parameters
@@ -643,6 +656,11 @@ func LoadConfig(configJson []byte) (*Config, error) {
 		}
 	}
 
+	// This constraint is expected by logic in Controller.runTunnels()
+	if config.PacketTunnelTunFileDescriptor != nil && config.TunnelPoolSize != 1 {
+		return nil, common.ContextError(errors.New("PacketTunnelTunFileDescriptor requires TunnelPoolSize to be 1"))
+	}
+
 	if config.TunnelConnectTimeoutSeconds == nil {
 		defaultTunnelConnectTimeoutSeconds := TUNNEL_CONNECT_TIMEOUT_SECONDS
 		config.TunnelConnectTimeoutSeconds = &defaultTunnelConnectTimeoutSeconds

+ 64 - 11
psiphon/controller.go

@@ -34,6 +34,7 @@ import (
 	"github.com/Psiphon-Inc/goarista/monotime"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/tun"
 )
 
 // Controller is a tunnel lifecycle coordinator. It manages lists of servers to
@@ -67,6 +68,8 @@ type Controller struct {
 	signalReportConnected             chan struct{}
 	serverAffinityDoneBroadcast       chan struct{}
 	newClientVerificationPayload      chan string
+	packetTunnelClient                *tun.Client
+	packetTunnelTransport             *PacketTunnelTransport
 }
 
 type candidateServerEntry struct {
@@ -142,6 +145,29 @@ func NewController(config *Config) (controller *Controller, err error) {
 
 	controller.splitTunnelClassifier = NewSplitTunnelClassifier(config, controller)
 
+	if config.PacketTunnelTunFileDescriptor != nil {
+
+		// Run a packet tunnel client. The lifetime of the tun.Client is the
+		// lifetime of the Controller, so it exists across tunnel establishments
+		// and reestablishments. The PacketTunnelTransport provides a layer
+	// that presents a continuously existing transport to the tun.Client;
+	// it's set to use new SSH channels after a new SSH tunnel is established.
+
+		packetTunnelTransport := NewPacketTunnelTransport()
+
+		packetTunnelClient, err := tun.NewClient(&tun.ClientConfig{
+			Logger:    NoticeCommonLogger(),
+			TunFD:     *config.PacketTunnelTunFileDescriptor,
+			Transport: packetTunnelTransport,
+		})
+		if err != nil {
+			return nil, common.ContextError(err)
+		}
+
+		controller.packetTunnelClient = packetTunnelClient
+		controller.packetTunnelTransport = packetTunnelTransport
+	}
+
 	return controller, nil
 }
 
@@ -178,20 +204,24 @@ func (controller *Controller) Run(shutdownBroadcast <-chan struct{}) {
 		listenIP = IPv4Address.String()
 	}
 
-	socksProxy, err := NewSocksProxy(controller.config, controller, listenIP)
-	if err != nil {
-		NoticeAlert("error initializing local SOCKS proxy: %s", err)
-		return
+	if !controller.config.DisableLocalSocksProxy {
+		socksProxy, err := NewSocksProxy(controller.config, controller, listenIP)
+		if err != nil {
+			NoticeAlert("error initializing local SOCKS proxy: %s", err)
+			return
+		}
+		defer socksProxy.Close()
 	}
-	defer socksProxy.Close()
 
-	httpProxy, err := NewHttpProxy(
-		controller.config, controller.untunneledDialConfig, controller, listenIP)
-	if err != nil {
-		NoticeAlert("error initializing local HTTP proxy: %s", err)
-		return
+	if !controller.config.DisableLocalHTTPProxy {
+		httpProxy, err := NewHttpProxy(
+			controller.config, controller.untunneledDialConfig, controller, listenIP)
+		if err != nil {
+			NoticeAlert("error initializing local HTTP proxy: %s", err)
+			return
+		}
+		defer httpProxy.Close()
 	}
-	defer httpProxy.Close()
 
 	if !controller.config.DisableRemoteServerListFetcher {
 
@@ -235,6 +265,10 @@ func (controller *Controller) Run(shutdownBroadcast <-chan struct{}) {
 		go controller.establishTunnelWatcher()
 	}
 
+	if controller.packetTunnelClient != nil {
+		controller.packetTunnelClient.Start()
+	}
+
 	// Wait while running
 
 	select {
@@ -246,6 +280,10 @@ func (controller *Controller) Run(shutdownBroadcast <-chan struct{}) {
 
 	close(controller.shutdownBroadcast)
 
+	if controller.packetTunnelClient != nil {
+		controller.packetTunnelClient.Stop()
+	}
+
 	// Interrupts and stops establish workers blocking on
 	// tunnel establishment network operations.
 	controller.establishPendingConns.CloseAll()
@@ -652,6 +690,21 @@ loop:
 				}
 			}
 
+			// Set the new tunnel as the transport for the packet tunnel. The packet tunnel
+			// client remains up when reestablishing, but no packets are relayed while there
+			// is no connected tunnel. UseNewTunnel will establish a new packet tunnel SSH
+			// channel over the new SSH tunnel and configure the packet tunnel client to use
+			// the new SSH channel as its transport.
+			//
+			// Note: as is, this logic is suboptimal for TunnelPoolSize > 1, as this would
+			// continuously initialize new packet tunnel sessions for each established
+			// server. For now, config validation requires TunnelPoolSize == 1 when
+			// the packet tunnel is used.
+
+			if controller.packetTunnelTransport != nil {
+				controller.packetTunnelTransport.UseNewTunnel(establishedTunnel)
+			}
+
 			// TODO: design issue -- might not be enough server entries with region/caps to ever fill tunnel slots;
 			// possible solution is establish target MIN(CountServerEntries(region, protocol), TunnelPoolSize)
 			if controller.isFullyEstablished() {

+ 59 - 0
psiphon/notice.go

@@ -550,3 +550,62 @@ func (writer *NoticeWriter) Write(p []byte) (n int, err error) {
 	outputNotice(writer.noticeType, noticeIsDiagnostic, "message", string(p))
 	return len(p), nil
 }
+
+// NoticeCommonLogger maps the common.Logger interface to the notice facility.
+// This is used to make the notice facility available to other packages that
+// don't import the "psiphon" package.
+func NoticeCommonLogger() common.Logger {
+	return &commonLogger{}
+}
+
+type commonLogger struct {
+}
+
+func (logger *commonLogger) WithContext() common.LogContext {
+	return &commonLogContext{
+		context: common.GetParentContext(),
+	}
+}
+
+func (logger *commonLogger) WithContextFields(fields common.LogFields) common.LogContext {
+	return &commonLogContext{
+		context: common.GetParentContext(),
+		fields:  fields,
+	}
+}
+
+func (logger *commonLogger) LogMetric(metric string, fields common.LogFields) {
+	outputNotice(metric, noticeIsDiagnostic, "fields", fmt.Sprintf("%#v", fields))
+}
+
+type commonLogContext struct {
+	context string
+	fields  common.LogFields
+}
+
+func (context *commonLogContext) outputNotice(
+	noticeType string, args ...interface{}) {
+
+	outputNotice(
+		noticeType,
+		noticeIsDiagnostic,
+		"message", fmt.Sprint(args...),
+		"context", context.context,
+		"fields", fmt.Sprintf("%#v", context.fields))
+}
+
+func (context *commonLogContext) Debug(args ...interface{}) {
+	// Ignored.
+}
+
+func (context *commonLogContext) Info(args ...interface{}) {
+	context.outputNotice("Info", args...)
+}
+
+func (context *commonLogContext) Warning(args ...interface{}) {
+	context.outputNotice("Alert", args...)
+}
+
+func (context *commonLogContext) Error(args ...interface{}) {
+	context.outputNotice("Error", args...)
+}

+ 327 - 0
psiphon/packetTunnelTransport.go

@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2017, Psiphon Inc.
+ * All rights reserved.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+package psiphon
+
+import (
+	"context"
+	"errors"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/Psiphon-Inc/goarista/monotime"
+	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
+)
+
+const (
+	PACKET_TUNNEL_PROBE_SLOW_READ  = 3 * time.Second
+	PACKET_TUNNEL_PROBE_SLOW_WRITE = 3 * time.Second
+)
+
+// PacketTunnelTransport is an integration layer that presents an io.ReadWriteCloser interface
+// to a tun.Client as the transport for relaying packets. The Psiphon client may periodically
+// disconnect from and reconnect to the same or different Psiphon servers. PacketTunnelTransport
+// allows the Psiphon client to substitute new transport channels on-the-fly.
+// PacketTunnelTransport implements transport monitoring, using heuristics to determine when
+// the channel tunnel should be probed as a failure check.
+type PacketTunnelTransport struct {
+	// Note: 64-bit ints used with atomic operations are placed
+	// at the start of struct to ensure 64-bit alignment.
+	// (https://golang.org/pkg/sync/atomic/#pkg-note-BUG)
+	lastReadComplete  int64
+	lastWriteStart    int64
+	lastWriteComplete int64
+
+	runContext    context.Context
+	stopRunning   context.CancelFunc
+	workers       *sync.WaitGroup
+	readMutex     sync.Mutex
+	writeMutex    sync.Mutex
+	channelReady  *sync.Cond
+	channelMutex  sync.Mutex
+	channelConn   net.Conn
+	channelTunnel *Tunnel
+}
+
+// NewPacketTunnelTransport initializes a PacketTunnelTransport.
+func NewPacketTunnelTransport() *PacketTunnelTransport {
+
+	runContext, stopRunning := context.WithCancel(context.Background())
+
+	p := &PacketTunnelTransport{
+		runContext:   runContext,
+		stopRunning:  stopRunning,
+		workers:      new(sync.WaitGroup),
+		channelReady: sync.NewCond(new(sync.Mutex)),
+	}
+
+	// The monitor worker will signal the tunnel channel when it
+	// suspects that the packet tunnel channel has failed.
+
+	p.workers.Add(1)
+	go p.monitor()
+
+	return p
+}
+
+// Read implements the io.Reader interface. It uses the current transport channel
+// to read packet data, or waits for a new transport channel to be established
+// after a failure.
+func (p *PacketTunnelTransport) Read(data []byte) (int, error) {
+
+	p.readMutex.Lock()
+	defer p.readMutex.Unlock()
+
+	// getChannel will block if there's no channel.
+
+	channelConn, channelTunnel, err := p.getChannel()
+	if err != nil {
+		return 0, common.ContextError(err)
+	}
+
+	n, err := channelConn.Read(data)
+
+	atomic.StoreInt64(&p.lastReadComplete, int64(monotime.Now()))
+
+	if err != nil {
+
+		// This assumes that any error means the channel has failed, which
+		// is the case for ssh.Channel reads. io.EOF is not ignored, since
+		// a single ssh.Channel may EOF and still get substituted with a new
+		// channel.
+
+		p.failedChannel(channelConn, channelTunnel)
+	}
+
+	return n, err
+}
+
+// Write implements the io.Writer interface. It uses the current transport channel
+// to write packet data, or waits for a new transport channel to be established
+// after a failure.
+func (p *PacketTunnelTransport) Write(data []byte) (int, error) {
+
+	p.writeMutex.Lock()
+	defer p.writeMutex.Unlock()
+
+	channelConn, channelTunnel, err := p.getChannel()
+	if err != nil {
+		return 0, common.ContextError(err)
+	}
+
+	// ssh.Channels are pseudo net.Conns and don't support timeouts/deadlines.
+	// Instead of spawning a goroutine per write, record time values that the
+	// monitor worker will use to detect possible failures, such as writes taking
+	// too long.
+
+	atomic.StoreInt64(&p.lastWriteStart, int64(monotime.Now()))
+
+	n, err := channelConn.Write(data)
+
+	atomic.StoreInt64(&p.lastWriteComplete, int64(monotime.Now()))
+
+	if err != nil {
+
+		// This assumes that any error means the channel has failed, which
+		// is the case for ssh.Channel writes.
+
+		p.failedChannel(channelConn, channelTunnel)
+	}
+
+	return n, err
+}
+
+// Close implements the io.Closer interface. Any underlying transport channel is
+// called, the monitor worker is stopped, and any blocking Read/Write calls will
+// be interrupted.
+func (p *PacketTunnelTransport) Close() error {
+
+	p.stopRunning()
+
+	p.workers.Wait()
+
+	// This broadcast is to wake up reads or writes blocking in getChannel; those
+	// getChannel calls should then abort on the p.runContext.Done() check.
+	p.channelReady.Broadcast()
+
+	p.channelMutex.Lock()
+	if p.channelConn != nil {
+		p.channelConn.Close()
+		p.channelConn = nil
+	}
+	p.channelMutex.Unlock()
+
+	return nil
+}
+
+// UseNewTunnel sets the PacketTunnelTransport to use a new transport channel within
+// the specified tunnel. UseNewTunnel does not block on the open channel call; it spawns
+// a worker that calls tunnel.DialPacketTunnelChannel and uses the resulting channel.
+func (p *PacketTunnelTransport) UseNewTunnel(tunnel *Tunnel) {
+
+	p.workers.Add(1)
+	go func(tunnel *Tunnel) {
+		defer p.workers.Done()
+
+		// channelConn is a net.Conn, since some layering has been applied
+		// (e.g., transferstats.Conn). PacketTunnelTransport assumes the
+		// channelConn is ultimately an ssh.Channel, which is not a fully
+		// functional net.Conn.
+
+		channelConn, err := tunnel.DialPacketTunnelChannel()
+		if err != nil {
+			// Note: DialPacketTunnelChannel will signal a probe on failure,
+			// so it's not necessary to do so here.
+
+			NoticeAlert("dial packet tunnel channel failed : %s", err)
+			// TODO: retry?
+			return
+		}
+
+		p.setChannel(channelConn, tunnel)
+
+	}(tunnel)
+}
+
+func (p *PacketTunnelTransport) setChannel(
+	channelConn net.Conn, channelTunnel *Tunnel) {
+
+	p.channelMutex.Lock()
+
+	// Concurrency note: this check is within the mutex to ensure that a
+	// UseNewTunnel call concurrent with a Close call doesn't leave a channel
+	// set.
+	select {
+	case <-p.runContext.Done():
+		p.channelMutex.Unlock()
+		return
+	default:
+	}
+
+	p.channelConn = channelConn
+	p.channelTunnel = channelTunnel
+
+	p.channelMutex.Unlock()
+
+	p.channelReady.Broadcast()
+}
+
+func (p *PacketTunnelTransport) getChannel() (net.Conn, *Tunnel, error) {
+
+	var channelConn net.Conn
+	var channelTunnel *Tunnel
+
+	p.channelReady.L.Lock()
+	defer p.channelReady.L.Unlock()
+	for {
+
+		select {
+		case <-p.runContext.Done():
+			return nil, nil, common.ContextError(errors.New("already closed"))
+		default:
+		}
+
+		p.channelMutex.Lock()
+		channelConn = p.channelConn
+		channelTunnel = p.channelTunnel
+		p.channelMutex.Unlock()
+		if channelConn != nil {
+			break
+		}
+
+		p.channelReady.Wait()
+	}
+
+	return channelConn, channelTunnel, nil
+}
+
+func (p *PacketTunnelTransport) failedChannel(
+	channelConn net.Conn, channelTunnel *Tunnel) {
+
+	// In case the channel read/write failed and the tunnel isn't
+	// yet in the failed state, trigger a probe.
+
+	select {
+	case channelTunnel.signalPortForwardFailure <- *new(struct{}):
+	default:
+	}
+
+	// Clear the current channel. This will cause subsequent Read/Write
+	// calls to block in getChannel until a new channel is provided.
+	// Concurrency note: must check, within the mutex, that the channelConn
+	// is still the one that failed before clearing, since both Read and
+	// Write could call failedChannel concurrently.
+
+	p.channelMutex.Lock()
+	if p.channelConn == channelConn {
+		p.channelConn.Close()
+		p.channelConn = nil
+		p.channelTunnel = nil
+	}
+	p.channelMutex.Unlock()
+}
+
+func (p *PacketTunnelTransport) monitor() {
+
+	defer p.workers.Done()
+
+	monitorTicker := time.NewTicker(1 * time.Second)
+	defer monitorTicker.Stop()
+
+	for {
+		select {
+		case <-p.runContext.Done():
+			return
+		case <-monitorTicker.C:
+			lastReadComplete := monotime.Time(atomic.LoadInt64(&p.lastReadComplete))
+			lastWriteStart := monotime.Time(atomic.LoadInt64(&p.lastWriteStart))
+			lastWriteComplete := monotime.Time(atomic.LoadInt64(&p.lastWriteComplete))
+
+			// Heuristics to determine if the tunnel channel may have failed:
+			// - a Write has blocked for too long
+			// - no Reads after recent Writes
+			//
+			// When a heuristic is hit, a signal is sent to the channel tunnel
+			// which will invoke an SSH keep alive probe of the tunnel. Nothing
+			// is torn down here. If the tunnel determines it has failed, it will
+			// close itself, which closes its channels, which will cause blocking
+			// PacketTunnelTransport Reads/Writes to fail and call failedChannel.
+
+			if (lastWriteStart != 0 &&
+				lastWriteStart.Sub(lastWriteComplete) > PACKET_TUNNEL_PROBE_SLOW_WRITE) ||
+				(lastWriteComplete.Sub(lastReadComplete) > PACKET_TUNNEL_PROBE_SLOW_READ) {
+
+				p.channelMutex.Lock()
+				channelTunnel := p.channelTunnel
+				p.channelMutex.Unlock()
+
+				// TODO: store/check last probe signal time to prevent continuous probe signals?
+
+				if channelTunnel != nil {
+					select {
+					case channelTunnel.signalPortForwardFailure <- *new(struct{}):
+					default:
+					}
+				}
+			}
+		}
+	}
+}

+ 11 - 5
psiphon/remoteServerList_test.go

@@ -57,10 +57,16 @@ func TestObfuscatedRemoteServerLists(t *testing.T) {
 	// create a server
 	//
 
-	serverIPaddress := ""
+	serverIPAddress := ""
 	for _, interfaceName := range []string{"eth0", "en0"} {
-		serverIPaddress, err = common.GetInterfaceIPAddress(interfaceName)
+		var serverIPv4Address, serverIPv6Address net.IP
+		serverIPv4Address, serverIPv6Address, err = common.GetInterfaceIPAddresses(interfaceName)
 		if err == nil {
+			if serverIPv4Address != nil {
+				serverIPAddress = serverIPv4Address.String()
+			} else {
+				serverIPAddress = serverIPv6Address.String()
+			}
 			break
 		}
 	}
@@ -70,7 +76,7 @@ func TestObfuscatedRemoteServerLists(t *testing.T) {
 
 	serverConfigJSON, _, encodedServerEntry, err := server.GenerateConfig(
 		&server.GenerateConfigParams{
-			ServerIPAddress:      serverIPaddress,
+			ServerIPAddress:      serverIPAddress,
 			EnableSSHAPIRequests: true,
 			WebServerPort:        8001,
 			TunnelProtocolPorts:  map[string]int{"OSSH": 4001},
@@ -205,8 +211,8 @@ func TestObfuscatedRemoteServerLists(t *testing.T) {
 
 	// Exercise using multiple download URLs
 	remoteServerListHostAddresses := []string{
-		net.JoinHostPort(serverIPaddress, "8081"),
-		net.JoinHostPort(serverIPaddress, "8082"),
+		net.JoinHostPort(serverIPAddress, "8081"),
+		net.JoinHostPort(serverIPAddress, "8082"),
 	}
 
 	// The common remote server list fetches will 404

+ 7 - 1
psiphon/server/server_test.go

@@ -50,8 +50,14 @@ func TestMain(m *testing.M) {
 
 	var err error
 	for _, interfaceName := range []string{"eth0", "en0"} {
-		serverIPAddress, err = common.GetInterfaceIPAddress(interfaceName)
+		var serverIPv4Address, serverIPv6Address net.IP
+		serverIPv4Address, serverIPv6Address, err = common.GetInterfaceIPAddresses(interfaceName)
 		if err == nil {
+			if serverIPv4Address != nil {
+				serverIPAddress = serverIPv4Address.String()
+			} else {
+				serverIPAddress = serverIPv6Address.String()
+			}
 			break
 		}
 	}

+ 36 - 2
psiphon/tunnel.go

@@ -308,6 +308,41 @@ func (tunnel *Tunnel) Dial(
 		tunnel:         tunnel,
 		downstreamConn: downstreamConn}
 
+	return tunnel.wrapWithTransferStats(conn), nil
+}
+
+func (tunnel *Tunnel) DialPacketTunnelChannel() (net.Conn, error) {
+
+	channel, requests, err := tunnel.sshClient.OpenChannel(
+		protocol.PACKET_TUNNEL_CHANNEL_TYPE, nil)
+	if err != nil {
+
+		// TODO: conditional on type of error or error message?
+		select {
+		case tunnel.signalPortForwardFailure <- *new(struct{}):
+		default:
+		}
+
+		return nil, common.ContextError(err)
+	}
+	go ssh.DiscardRequests(requests)
+
+	conn := newChannelConn(channel)
+
+	// wrapWithTransferStats will track bytes transferred for the
+	// packet tunnel. It will count packet overhead (TCP/UDP/IP headers).
+	//
+	// Since the data in the channel is not HTTP or TLS, no domain bytes
+	// counting is expected.
+	//
+	// transferstats are also used to determine that there's been recent
+	// activity and skip periodic SSH keep alives; see Tunnel.operateTunnel.
+
+	return tunnel.wrapWithTransferStats(conn), nil
+}
+
+func (tunnel *Tunnel) wrapWithTransferStats(conn net.Conn) net.Conn {
+
 	// Tunnel does not have a serverContext when DisableApi is set. We still use
 	// transferstats.Conn to count bytes transferred for monitoring tunnel
 	// quality.
@@ -315,9 +350,8 @@ func (tunnel *Tunnel) Dial(
 	if tunnel.serverContext != nil {
 		regexps = tunnel.serverContext.StatsRegexps()
 	}
-	conn = transferstats.NewConn(conn, tunnel.serverEntry.IpAddress, regexps)
 
-	return conn, nil
+	return transferstats.NewConn(conn, tunnel.serverEntry.IpAddress, regexps)
 }
 
 // SignalComponentFailure notifies the tunnel that an associated component has failed.

+ 8 - 1
psiphon/userAgent_test.go

@@ -21,6 +21,7 @@ package psiphon
 
 import (
 	"fmt"
+	"net"
 	"net/http"
 	"sync"
 	"testing"
@@ -157,8 +158,14 @@ func attemptConnectionsWithUserAgent(
 	var err error
 	serverIPaddress := ""
 	for _, interfaceName := range []string{"eth0", "en0"} {
-		serverIPaddress, err = common.GetInterfaceIPAddress(interfaceName)
+		var serverIPv4Address, serverIPv6Address net.IP
+		serverIPv4Address, serverIPv6Address, err = common.GetInterfaceIPAddresses(interfaceName)
 		if err == nil {
+			if serverIPv4Address != nil {
+				serverIPaddress = serverIPv4Address.String()
+			} else {
+				serverIPaddress = serverIPv6Address.String()
+			}
 			break
 		}
 	}

+ 49 - 0
psiphon/utils.go

@@ -28,7 +28,9 @@ import (
 	"net/url"
 	"os"
 	"syscall"
+	"time"
 
+	"github.com/Psiphon-Inc/crypto/ssh"
 	"github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
 )
 
@@ -136,3 +138,50 @@ func (writer *SyncFileWriter) Write(p []byte) (n int, err error) {
 	}
 	return
 }
+
+// emptyAddr implements the net.Addr interface. emptyAddr is intended to be
+// used as a stub, when a net.Addr is required but not used.
+type emptyAddr struct {
+}
+
+func (e *emptyAddr) String() string {
+	return ""
+}
+
+func (e *emptyAddr) Network() string {
+	return ""
+}
+
+// channelConn implements the net.Conn interface. channelConn allows use of
+// SSH.Channels in contexts where a net.Conn is expected. Only Read/Write/Close
+// are implemented and the remaining functions are stubs and expected to not
+// be used.
+type channelConn struct {
+	ssh.Channel
+}
+
+func newChannelConn(channel ssh.Channel) *channelConn {
+	return &channelConn{
+		Channel: channel,
+	}
+}
+
+func (conn *channelConn) LocalAddr() net.Addr {
+	return new(emptyAddr)
+}
+
+func (conn *channelConn) RemoteAddr() net.Addr {
+	return new(emptyAddr)
+}
+
+func (conn *channelConn) SetDeadline(_ time.Time) error {
+	return common.ContextError(errors.New("unsupported"))
+}
+
+func (conn *channelConn) SetReadDeadline(_ time.Time) error {
+	return common.ContextError(errors.New("unsupported"))
+}
+
+func (conn *channelConn) SetWriteDeadline(_ time.Time) error {
+	return common.ContextError(errors.New("unsupported"))
+}