proxy.go 35 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993
  1. /*
  2. * Copyright (c) 2023, Psiphon Inc.
  3. * All rights reserved.
  4. *
  5. * This program is free software: you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation, either version 3 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  17. *
  18. */
  19. package inproxy
  20. import (
  21. "context"
  22. "io"
  23. "sync"
  24. "sync/atomic"
  25. "time"
  26. "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common"
  27. "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/errors"
  28. "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/prng"
  29. "github.com/Psiphon-Labs/psiphon-tunnel-core/psiphon/common/protocol"
  30. )
const (
	// proxyAnnounceDelay is the base delay applied between proxy
	// announcement requests, used to stagger announce request times.
	proxyAnnounceDelay = 1 * time.Second

	// proxyAnnounceDelayJitter is the random jitter factor applied to
	// the announcement delay.
	proxyAnnounceDelayJitter = 0.5

	// proxyAnnounceMaxBackoffDelay caps the exponential backoff delay
	// applied after repeated proxyOneClient failures (see proxyClients).
	proxyAnnounceMaxBackoffDelay = 1 * time.Hour

	// proxyAnnounceLogSampleSize is the number of announcement timing
	// logs, and of logs per distinct repeating error, emitted per
	// proxyAnnounceLogSamplePeriod, limiting diagnostic log noise.
	proxyAnnounceLogSampleSize = 2

	// proxyAnnounceLogSamplePeriod is the period after which the
	// announcement log sample counters are reset.
	proxyAnnounceLogSamplePeriod = 30 * time.Minute

	// proxyWebRTCAnswerTimeout is the default timeout for preparing a
	// WebRTC answer in response to a client offer (see proxyOneClient).
	proxyWebRTCAnswerTimeout = 20 * time.Second

	// proxyDestinationDialTimeout is presumably the default timeout for
	// dialing the destination Psiphon server; not referenced in this
	// excerpt — confirm against the full file.
	proxyDestinationDialTimeout = 20 * time.Second

	// proxyRelayInactivityTimeout is presumably the inactivity timeout
	// for the client/server relay; not referenced in this excerpt —
	// confirm against the full file.
	proxyRelayInactivityTimeout = 5 * time.Minute
)
// Proxy is the in-proxy proxying component, which relays traffic from a
// client to a Psiphon server.
type Proxy struct {
	// Note: 64-bit ints used with atomic operations are placed
	// at the start of struct to ensure 64-bit alignment.
	// (https://golang.org/pkg/sync/atomic/#pkg-note-BUG)

	// bytesUp/bytesDown accumulate relayed byte counts and are swapped to
	// zero by each activityUpdate; peakBytesUp/peakBytesDown record the
	// maximum per-period counts. All are accessed atomically.
	bytesUp       int64
	bytesDown     int64
	peakBytesUp   int64
	peakBytesDown int64

	// Counts of clients currently connecting and connected, accessed
	// atomically and reported via the ActivityUpdater callback.
	connectingClients int32
	connectedClients  int32

	// config is the ProxyConfig supplied to NewProxy.
	config *ProxyConfig

	// activityUpdateWrapper receives bytes-transferred updates; see the
	// activityUpdateWrapper type.
	activityUpdateWrapper *activityUpdateWrapper

	// Network discovery state, guarded by networkDiscoveryMutex; see
	// doNetworkDiscovery and resetNetworkDiscovery.
	networkDiscoveryMutex     sync.Mutex
	networkDiscoveryRunOnce   bool
	networkDiscoveryNetworkID string

	// Announcement scheduling state, guarded by nextAnnounceMutex, used
	// to stagger announce request times across workers.
	nextAnnounceMutex        sync.Mutex
	nextAnnounceBrokerClient *BrokerClient
	nextAnnounceNotBefore    time.Time
}
// TODO: add PublicNetworkAddress/ListenNetworkAddress to facilitate manually
// configured, permanent port mappings.

// ProxyConfig specifies the configuration for a Proxy run.
type ProxyConfig struct {

	// Logger is used to log events.
	Logger common.Logger

	// EnableWebRTCDebugLogging indicates whether to emit WebRTC debug logs.
	EnableWebRTCDebugLogging bool

	// WaitForNetworkConnectivity is a callback that should block until there
	// is network connectivity or shutdown. The return value is true when
	// there is network connectivity, and false for shutdown.
	WaitForNetworkConnectivity func() bool

	// GetBrokerClient provides a BrokerClient which the proxy will use for
	// making broker requests. If GetBrokerClient returns a shared
	// BrokerClient instance, the BrokerClient must support multiple,
	// concurrent round trips, as the proxy will use it to concurrently
	// announce many proxy instances. The BrokerClient should be implemented
	// using multiplexing over a shared network connection -- for example,
	// HTTP/2 -- and a shared broker session for optimal performance.
	GetBrokerClient func() (*BrokerClient, error)

	// GetBaseAPIParameters returns Psiphon API parameters to be sent to and
	// logged by the broker. Expected parameters include client/proxy
	// application and build version information. GetBaseAPIParameters also
	// returns the network ID, corresponding to the parameters, to be used in
	// tactics logic; the network ID is not sent to the broker.
	GetBaseAPIParameters func() (common.APIParameters, string, error)

	// MakeWebRTCDialCoordinator provides a WebRTCDialCoordinator which
	// specifies WebRTC-related dial parameters, including selected STUN
	// server addresses; network topology information for the current
	// network; NAT logic settings; and other settings.
	//
	// MakeWebRTCDialCoordinator is invoked for each proxy/client connection,
	// and the provider can select new parameters per connection as required.
	MakeWebRTCDialCoordinator func() (WebRTCDialCoordinator, error)

	// HandleTacticsPayload is a callback that receives any tactics payload,
	// provided by the broker in proxy announcement request responses.
	// HandleTacticsPayload must return true when the tacticsPayload includes
	// new tactics, indicating that the proxy should reinitialize components
	// controlled by tactics parameters.
	HandleTacticsPayload func(networkID string, tacticsPayload []byte) bool

	// MustUpgrade is a callback that is invoked when a MustUpgrade flag is
	// received from the broker. When MustUpgrade is received, the proxy
	// should be stopped and the user should be prompted to upgrade before
	// restarting the proxy.
	MustUpgrade func()

	// MaxClients is the maximum number of clients that are allowed to connect
	// to the proxy. Must be > 0.
	MaxClients int

	// LimitUpstreamBytesPerSecond limits the upstream data transfer rate for
	// a single client. When 0, there is no limit.
	LimitUpstreamBytesPerSecond int

	// LimitDownstreamBytesPerSecond limits the downstream data transfer rate
	// for a single client. When 0, there is no limit.
	LimitDownstreamBytesPerSecond int

	// ActivityUpdater specifies an ActivityUpdater for activity associated
	// with this proxy.
	ActivityUpdater ActivityUpdater
}
// ActivityUpdater is a callback that is invoked when clients connect and
// disconnect and periodically with data transfer updates (unless idle). This
// callback may be used to update an activity UI. This callback should post
// this data to another thread or handler and return immediately and not
// block on UI updates.
//
// connectingClients/connectedClients are current client counts; bytesUp and
// bytesDown are the bytes transferred over bytesDuration.
type ActivityUpdater func(
	connectingClients int32,
	connectedClients int32,
	bytesUp int64,
	bytesDown int64,
	bytesDuration time.Duration)
  131. // NewProxy initializes a new Proxy with the specified configuration.
  132. func NewProxy(config *ProxyConfig) (*Proxy, error) {
  133. if config.MaxClients <= 0 {
  134. return nil, errors.TraceNew("invalid MaxClients")
  135. }
  136. p := &Proxy{
  137. config: config,
  138. }
  139. p.activityUpdateWrapper = &activityUpdateWrapper{p: p}
  140. return p, nil
  141. }
// activityUpdateWrapper implements the psiphon/common.ActivityUpdater
// interface and is used to receive bytes transferred updates from the
// ActivityConns wrapping proxied traffic. A wrapper is used so that
// UpdateProgress is not exported from Proxy.
type activityUpdateWrapper struct {
	// p is the Proxy whose byte counters receive the updates.
	p *Proxy
}
  149. func (w *activityUpdateWrapper) UpdateProgress(bytesRead, bytesWritten int64, _ int64) {
  150. atomic.AddInt64(&w.p.bytesUp, bytesWritten)
  151. atomic.AddInt64(&w.p.bytesDown, bytesRead)
  152. }
  153. // Run runs the proxy. The proxy sends requests to the Broker announcing its
  154. // availability; the Broker matches the proxy with clients, and facilitates
  155. // an exchange of WebRTC connection information; the proxy and each client
  156. // attempt to establish a connection; and the client's traffic is relayed to
  157. // Psiphon server.
  158. //
  159. // Run ends when ctx is Done. A proxy run may continue across underlying
  160. // network changes assuming that the ProxyConfig GetBrokerClient and
  161. // MakeWebRTCDialCoordinator callbacks react to network changes and provide
  162. // instances that are reflect network changes.
  163. func (p *Proxy) Run(ctx context.Context) {
  164. // Run MaxClient proxying workers. Each worker handles one client at a time.
  165. proxyWaitGroup := new(sync.WaitGroup)
  166. // Launch the first proxy worker, passing a signal to be triggered once
  167. // the very first announcement round trip is complete. The first round
  168. // trip is awaited so that:
  169. //
  170. // - The first announce response will arrive with any new tactics,
  171. // avoiding a start up case where MaxClients initial, concurrent
  172. // announces all return with no-match and a tactics payload.
  173. //
  174. // - The first worker gets no announcement delay and is also guaranteed to
  175. // be the shared session establisher. Since the announcement delays are
  176. // applied _after_ waitToShareSession, it would otherwise be possible,
  177. // with a race of MaxClient initial, concurrent announces, for the
  178. // session establisher to be a different worker than the no-delay worker.
  179. signalFirstAnnounceCtx, signalFirstAnnounceDone :=
  180. context.WithCancel(context.Background())
  181. proxyWaitGroup.Add(1)
  182. go func() {
  183. defer proxyWaitGroup.Done()
  184. p.proxyClients(ctx, signalFirstAnnounceDone)
  185. }()
  186. select {
  187. case <-signalFirstAnnounceCtx.Done():
  188. case <-ctx.Done():
  189. return
  190. }
  191. // Launch the remaining workers.
  192. for i := 0; i < p.config.MaxClients-1; i++ {
  193. proxyWaitGroup.Add(1)
  194. go func() {
  195. defer proxyWaitGroup.Done()
  196. p.proxyClients(ctx, nil)
  197. }()
  198. }
  199. // Capture activity updates every second, which is the required frequency
  200. // for PeakUp/DownstreamBytesPerSecond. This is also a reasonable
  201. // frequency for invoking the ActivityUpdater and updating UI widgets.
  202. activityUpdatePeriod := 1 * time.Second
  203. ticker := time.NewTicker(activityUpdatePeriod)
  204. defer ticker.Stop()
  205. loop:
  206. for {
  207. select {
  208. case <-ticker.C:
  209. p.activityUpdate(activityUpdatePeriod)
  210. case <-ctx.Done():
  211. break loop
  212. }
  213. }
  214. proxyWaitGroup.Wait()
  215. }
  216. // getAnnounceDelayParameters is a helper that fetches the proxy announcement
  217. // delay parameters from the current broker client.
  218. //
  219. // getAnnounceDelayParameters is used to configure a delay when
  220. // proxyOneClient fails. As having no broker clients is a possible
  221. // proxyOneClient failure case, GetBrokerClient errors are ignored here and
  222. // defaults used in that case.
  223. func (p *Proxy) getAnnounceDelayParameters() (time.Duration, float64) {
  224. brokerClient, err := p.config.GetBrokerClient()
  225. if err != nil {
  226. return proxyAnnounceDelay, proxyAnnounceDelayJitter
  227. }
  228. brokerCoordinator := brokerClient.GetBrokerDialCoordinator()
  229. return common.ValueOrDefault(brokerCoordinator.AnnounceDelay(), proxyAnnounceDelay),
  230. common.ValueOrDefault(brokerCoordinator.AnnounceDelayJitter(), proxyAnnounceDelayJitter)
  231. }
  232. func (p *Proxy) activityUpdate(period time.Duration) {
  233. connectingClients := atomic.LoadInt32(&p.connectingClients)
  234. connectedClients := atomic.LoadInt32(&p.connectedClients)
  235. bytesUp := atomic.SwapInt64(&p.bytesUp, 0)
  236. bytesDown := atomic.SwapInt64(&p.bytesDown, 0)
  237. greaterThanSwapInt64(&p.peakBytesUp, bytesUp)
  238. greaterThanSwapInt64(&p.peakBytesDown, bytesDown)
  239. if connectingClients == 0 &&
  240. connectedClients == 0 &&
  241. bytesUp == 0 &&
  242. bytesDown == 0 {
  243. // Skip the activity callback on idle.
  244. return
  245. }
  246. p.config.ActivityUpdater(
  247. connectingClients,
  248. connectedClients,
  249. bytesUp,
  250. bytesDown,
  251. period)
  252. }
  253. func greaterThanSwapInt64(addr *int64, new int64) bool {
  254. // Limitation: if there are two concurrent calls, the greater value could
  255. // get overwritten.
  256. old := atomic.LoadInt64(addr)
  257. if new > old {
  258. return atomic.CompareAndSwapInt64(addr, old, new)
  259. }
  260. return false
  261. }
// proxyClients is a proxy worker loop: it repeatedly invokes proxyOneClient
// until ctx is done or WaitForNetworkConnectivity reports shutdown, logging
// a sample of failures and applying an exponential backoff between failed
// attempts. signalAnnounceDone, when non-nil, is passed through to
// proxyOneClient to be invoked after the first announcement round trip.
func (p *Proxy) proxyClients(
	ctx context.Context, signalAnnounceDone func()) {

	// Proxy one client, repeating until ctx is done.
	//
	// This worker starts with posting a long-polling announcement request.
	// The broker response with a matched client, and the proxy and client
	// attempt to establish a WebRTC connection for relaying traffic.
	//
	// Limitation: this design may not maximize the utility of the proxy,
	// since some proxy/client connections will fail at the WebRTC stage due
	// to NAT traversal failure, and at most MaxClient concurrent
	// establishments are attempted. Another scenario comes from the Psiphon
	// client horse race, which may start in-proxy dials but then abort them
	// when some other tunnel protocol succeeds.
	//
	// As a future enhancement, consider using M announcement goroutines and N
	// WebRTC dial goroutines. When an announcement gets a response,
	// immediately announce again unless there are already MaxClient active
	// connections established. This approach may require the proxy to
	// backpedal and reject connections when establishment is too successful.
	//
	// Another enhancement could be a signal from the client, to the broker,
	// relayed to the proxy, when a dial is aborted.

	// failureDelayFactor is the current exponential backoff multiplier,
	// doubled (up to a cap) after each back-off failure.
	failureDelayFactor := time.Duration(1)

	// To reduce diagnostic log noise, only log an initial sample of
	// announcement request timings (delays/elapsed time) and a periodic
	// sample of repeating errors such as "no match".
	logAnnounceCount := proxyAnnounceLogSampleSize
	logErrorsCount := proxyAnnounceLogSampleSize
	lastErrMsg := ""
	startLogSampleTime := time.Now()

	// logAnnounce returns true while the current timing-log sample has not
	// been exhausted, consuming one sample slot per call.
	logAnnounce := func() bool {
		if logAnnounceCount > 0 {
			logAnnounceCount -= 1
			return true
		}
		return false
	}

	for ctx.Err() == nil {

		// Block until there is network connectivity; a false return value
		// indicates shutdown.
		if !p.config.WaitForNetworkConnectivity() {
			break
		}

		// Start a fresh log sample after each sample period elapses.
		if time.Since(startLogSampleTime) >= proxyAnnounceLogSamplePeriod {
			logAnnounceCount = proxyAnnounceLogSampleSize
			logErrorsCount = proxyAnnounceLogSampleSize
			lastErrMsg = ""
			startLogSampleTime = time.Now()
		}

		backOff, err := p.proxyOneClient(
			ctx, logAnnounce, signalAnnounceDone)

		if err != nil && ctx.Err() == nil {

			// Limitation: the lastErrMsg string comparison isn't compatible
			// with errors with minor variations, such as "unexpected
			// response status code %d after %v" from
			// InproxyBrokerRoundTripper.RoundTrip, with a time duration in
			// the second parameter.
			errMsg := err.Error()
			if lastErrMsg != errMsg {
				// A new distinct error resets the error log sample.
				logErrorsCount = proxyAnnounceLogSampleSize
				lastErrMsg = errMsg
			}
			if logErrorsCount > 0 {
				p.config.Logger.WithTraceFields(
					common.LogFields{
						"error": errMsg,
					}).Error("proxy client failed")
				logErrorsCount -= 1
			}

			// Apply a simple exponential backoff based on whether
			// proxyOneClient either relayed client traffic or got no match,
			// or encountered a failure.
			//
			// The proxyOneClient failure could range from local
			// configuration (no broker clients) to network issues(failure to
			// completely establish WebRTC connection) and this backoff
			// prevents both excess local logging and churning in the former
			// case and excessive bad service to clients or unintentionally
			// overloading the broker in the latter case.
			//
			// TODO: specific tactics parameters to control this logic.

			delay, jitter := p.getAnnounceDelayParameters()

			if !backOff {
				// Non-back-off failures reset the multiplier to the base
				// delay.
				failureDelayFactor = 1
			}
			delay = delay * failureDelayFactor
			if delay > proxyAnnounceMaxBackoffDelay {
				delay = proxyAnnounceMaxBackoffDelay
			}
			// Cap the multiplier to avoid overflow.
			if failureDelayFactor < 1<<20 {
				failureDelayFactor *= 2
			}

			common.SleepWithJitter(ctx, delay, jitter)
		}
	}
}
  357. // resetNetworkDiscovery resets the network discovery state, which will force
  358. // another network discovery when doNetworkDiscovery is invoked.
  359. // resetNetworkDiscovery is called when new tactics have been received from
  360. // the broker, as new tactics may change parameters that control network
  361. // discovery.
  362. func (p *Proxy) resetNetworkDiscovery() {
  363. p.networkDiscoveryMutex.Lock()
  364. defer p.networkDiscoveryMutex.Unlock()
  365. p.networkDiscoveryRunOnce = false
  366. p.networkDiscoveryNetworkID = ""
  367. }
  368. func (p *Proxy) doNetworkDiscovery(
  369. ctx context.Context,
  370. webRTCCoordinator WebRTCDialCoordinator) {
  371. // Allow only one concurrent network discovery. In practise, this may
  372. // block all other proxyOneClient goroutines while one single goroutine
  373. // runs doNetworkDiscovery. Subsequently, all other goroutines will find
  374. // networkDiscoveryRunOnce is true and use the cached results.
  375. p.networkDiscoveryMutex.Lock()
  376. defer p.networkDiscoveryMutex.Unlock()
  377. networkID := webRTCCoordinator.NetworkID()
  378. if p.networkDiscoveryRunOnce &&
  379. p.networkDiscoveryNetworkID == networkID {
  380. // Already ran discovery for this network.
  381. //
  382. // TODO: periodically re-probe for port mapping services?
  383. return
  384. }
  385. // Reset and configure port mapper component, as required. See
  386. // initPortMapper comment.
  387. initPortMapper(webRTCCoordinator)
  388. // Gather local network NAT/port mapping metrics and configuration before
  389. // sending any announce requests. NAT topology metrics are used by the
  390. // Broker to optimize client and in-proxy matching. Unlike the client, we
  391. // always perform this synchronous step here, since waiting doesn't
  392. // necessarily block a client tunnel dial.
  393. waitGroup := new(sync.WaitGroup)
  394. waitGroup.Add(1)
  395. go func() {
  396. defer waitGroup.Done()
  397. // NATDiscover may use cached NAT type/port mapping values from
  398. // DialParameters, based on the network ID. If discovery is not
  399. // successful, the proxy still proceeds to announce.
  400. NATDiscover(
  401. ctx,
  402. &NATDiscoverConfig{
  403. Logger: p.config.Logger,
  404. WebRTCDialCoordinator: webRTCCoordinator,
  405. })
  406. }()
  407. waitGroup.Wait()
  408. p.networkDiscoveryRunOnce = true
  409. p.networkDiscoveryNetworkID = networkID
  410. }
  411. func (p *Proxy) proxyOneClient(
  412. ctx context.Context,
  413. logAnnounce func() bool,
  414. signalAnnounceDone func()) (bool, error) {
  415. // Do not trigger back-off unless the proxy successfully announces and
  416. // only then performs poorly.
  417. //
  418. // A no-match response should not trigger back-off, nor should broker
  419. // request transport errors which may include non-200 responses due to
  420. // CDN timeout mismatches or TLS errors due to CDN TLS fingerprint
  421. // incompatibility.
  422. backOff := false
  423. // Get a new WebRTCDialCoordinator, which should be configured with the
  424. // latest network tactics.
  425. webRTCCoordinator, err := p.config.MakeWebRTCDialCoordinator()
  426. if err != nil {
  427. return backOff, errors.Trace(err)
  428. }
  429. // Perform network discovery, to determine NAT type and other network
  430. // topology information that is reported to the broker in the proxy
  431. // announcement and used to optimize proxy/client matching. Unlike
  432. // clients, which can't easily delay dials in the tunnel establishment
  433. // horse race, proxies will always perform network discovery.
  434. // doNetworkDiscovery allows only one concurrent discovery and caches
  435. // results for the current network (as determined by
  436. // WebRTCCoordinator.GetNetworkID), so when multiple proxyOneClient
  437. // goroutines call doNetworkDiscovery, at most one discovery is performed
  438. // per network.
  439. p.doNetworkDiscovery(ctx, webRTCCoordinator)
  440. // Send the announce request
  441. // At this point, no NAT traversal operations have been performed by the
  442. // proxy, since its announcement may sit idle for the long-polling period
  443. // and NAT hole punches or port mappings could expire before the
  444. // long-polling period.
  445. //
  446. // As a future enhancement, the proxy could begin gathering WebRTC ICE
  447. // candidates while awaiting a client match, reducing the turn around
  448. // time after a match. This would make sense if there's high demand for
  449. // proxies, and so hole punches unlikely to expire while awaiting a client match.
  450. //
  451. // Another possibility may be to prepare and send a full offer SDP in the
  452. // announcment; and have the broker modify either the proxy or client
  453. // offer SDP to produce an answer SDP. In this case, the entire
  454. // ProxyAnswerRequest could be skipped as the WebRTC dial can begin after
  455. // the ProxyAnnounceRequest response (and ClientOfferRequest response).
  456. //
  457. // Furthermore, if a port mapping can be established, instead of using
  458. // WebRTC the proxy could run a Psiphon tunnel protocol listener at the
  459. // mapped port and send the dial information -- including some secret to
  460. // authenticate the client -- in its announcement. The client would then
  461. // receive this direct dial information from the broker and connect. The
  462. // proxy should be able to send keep alives to extend the port mapping
  463. // lifetime.
  464. brokerClient, err := p.config.GetBrokerClient()
  465. if err != nil {
  466. return backOff, errors.Trace(err)
  467. }
  468. brokerCoordinator := brokerClient.GetBrokerDialCoordinator()
  469. // Get the base Psiphon API parameters and additional proxy metrics,
  470. // including performance information, which is sent to the broker in the
  471. // proxy announcment.
  472. //
  473. // tacticsNetworkID is the exact network ID that corresponds to the
  474. // tactics tag sent in the base parameters; this is passed to
  475. // HandleTacticsPayload in order to double check that any tactics
  476. // returned in the proxy announcment response are associated and stored
  477. // with the original network ID.
  478. metrics, tacticsNetworkID, err := p.getMetrics(webRTCCoordinator)
  479. if err != nil {
  480. return backOff, errors.Trace(err)
  481. }
  482. // Set a delay before announcing, to stagger the announce request times.
  483. // The delay helps to avoid triggering rate limits or similar errors from
  484. // any intermediate CDN between the proxy and the broker; and provides a
  485. // nudge towards better load balancing across multiple large MaxClients
  486. // proxies, as the broker primarily matches enqueued announces in FIFO
  487. // order, since older announces expire earlier.
  488. //
  489. // The delay is intended to be applied after doNetworkDiscovery, which has
  490. // no reason to be delayed; and also after any waitToShareSession delay,
  491. // as delaying before waitToShareSession can result in the announce
  492. // request times collapsing back together. Delaying after
  493. // waitToShareSession is handled by brokerClient.ProxyAnnounce, which
  494. // will also extend the base request timeout, as required, to account for
  495. // any deliberate delay.
  496. requestDelay := time.Duration(0)
  497. announceDelay, announceDelayJitter := p.getAnnounceDelayParameters()
  498. p.nextAnnounceMutex.Lock()
  499. nextDelay := prng.JitterDuration(announceDelay, announceDelayJitter)
  500. if p.nextAnnounceBrokerClient != brokerClient {
  501. // Reset the delay when the broker client changes.
  502. p.nextAnnounceNotBefore = time.Time{}
  503. p.nextAnnounceBrokerClient = brokerClient
  504. }
  505. if p.nextAnnounceNotBefore.IsZero() {
  506. p.nextAnnounceNotBefore = time.Now().Add(nextDelay)
  507. // No delay for the very first announce request, so leave
  508. // announceRequestDelay set to 0.
  509. } else {
  510. requestDelay = time.Until(p.nextAnnounceNotBefore)
  511. if requestDelay < 0 {
  512. // This announce did not arrive until after the next delay already
  513. // passed, so proceed with no delay.
  514. p.nextAnnounceNotBefore = time.Now().Add(nextDelay)
  515. requestDelay = 0
  516. } else {
  517. p.nextAnnounceNotBefore = p.nextAnnounceNotBefore.Add(nextDelay)
  518. }
  519. }
  520. p.nextAnnounceMutex.Unlock()
  521. // A proxy ID is implicitly sent with requests; it's the proxy's session
  522. // public key.
  523. //
  524. // ProxyAnnounce applies an additional request timeout to facilitate
  525. // long-polling.
  526. announceStartTime := time.Now()
  527. personalCompartmentIDs := brokerCoordinator.PersonalCompartmentIDs()
  528. announceResponse, err := brokerClient.ProxyAnnounce(
  529. ctx,
  530. requestDelay,
  531. &ProxyAnnounceRequest{
  532. PersonalCompartmentIDs: personalCompartmentIDs,
  533. Metrics: metrics,
  534. })
  535. if logAnnounce() {
  536. p.config.Logger.WithTraceFields(common.LogFields{
  537. "delay": requestDelay.String(),
  538. "elapsedTime": time.Since(announceStartTime).String(),
  539. }).Info("announcement request")
  540. }
  541. if err != nil {
  542. return backOff, errors.Trace(err)
  543. }
  544. if len(announceResponse.TacticsPayload) > 0 {
  545. // The TacticsPayload may include new tactics, or may simply signal,
  546. // to the Psiphon client, that its tactics tag remains up-to-date and
  547. // to extend cached tactics TTL. HandleTacticsPayload returns true
  548. // when tactics haved changed; in this case we clear cached network
  549. // discovery but proceed with handling the proxy announcement
  550. // response as there may still be a match.
  551. if p.config.HandleTacticsPayload(tacticsNetworkID, announceResponse.TacticsPayload) {
  552. p.resetNetworkDiscovery()
  553. }
  554. }
  555. // Signal that the announce round trip is complete. At this point, the
  556. // broker Noise session should be established and any fresh tactics
  557. // applied.
  558. if signalAnnounceDone != nil {
  559. signalAnnounceDone()
  560. }
  561. // MustUpgrade has precedence over other cases, to ensure the callback is
  562. // invoked. Trigger back-off back off when rate/entry limited or must
  563. // upgrade; no back-off for no-match.
  564. if announceResponse.MustUpgrade {
  565. if p.config.MustUpgrade != nil {
  566. p.config.MustUpgrade()
  567. }
  568. backOff = true
  569. return backOff, errors.TraceNew("must upgrade")
  570. } else if announceResponse.Limited {
  571. backOff = true
  572. return backOff, errors.TraceNew("limited")
  573. } else if announceResponse.NoMatch {
  574. return backOff, errors.TraceNew("no match")
  575. }
  576. if announceResponse.ClientProxyProtocolVersion != ProxyProtocolVersion1 {
  577. // This case is currently unexpected, as all clients and proxies use
  578. // ProxyProtocolVersion1.
  579. backOff = true
  580. return backOff, errors.Tracef(
  581. "Unsupported proxy protocol version: %d",
  582. announceResponse.ClientProxyProtocolVersion)
  583. }
  584. // Trigger back-off if the following WebRTC operations fail to establish a
  585. // connections.
  586. //
  587. // Limitation: the proxy answer request to the broker may fail due to the
  588. // non-back-off reasons documented above for the proxy announcment request;
  589. // however, these should be unlikely assuming that the broker client is
  590. // using a persistent transport connection.
  591. backOff = true
  592. // For activity updates, indicate that a client connection is now underway.
  593. atomic.AddInt32(&p.connectingClients, 1)
  594. connected := false
  595. defer func() {
  596. if !connected {
  597. atomic.AddInt32(&p.connectingClients, -1)
  598. }
  599. }()
  600. // Initialize WebRTC using the client's offer SDP
  601. webRTCAnswerCtx, webRTCAnswerCancelFunc := context.WithTimeout(
  602. ctx, common.ValueOrDefault(webRTCCoordinator.WebRTCAnswerTimeout(), proxyWebRTCAnswerTimeout))
  603. defer webRTCAnswerCancelFunc()
  604. // In personal pairing mode, RFC 1918/4193 private IP addresses are
  605. // included in SDPs.
  606. hasPersonalCompartmentIDs := len(personalCompartmentIDs) > 0
  607. webRTCConn, SDP, sdpMetrics, webRTCErr := newWebRTCConnForAnswer(
  608. webRTCAnswerCtx,
  609. &webRTCConfig{
  610. Logger: p.config.Logger,
  611. EnableDebugLogging: p.config.EnableWebRTCDebugLogging,
  612. WebRTCDialCoordinator: webRTCCoordinator,
  613. ClientRootObfuscationSecret: announceResponse.ClientRootObfuscationSecret,
  614. DoDTLSRandomization: announceResponse.DoDTLSRandomization,
  615. TrafficShapingParameters: announceResponse.TrafficShapingParameters,
  616. },
  617. announceResponse.ClientOfferSDP,
  618. hasPersonalCompartmentIDs)
  619. var webRTCRequestErr string
  620. if webRTCErr != nil {
  621. webRTCErr = errors.Trace(webRTCErr)
  622. webRTCRequestErr = webRTCErr.Error()
  623. SDP = WebRTCSessionDescription{}
  624. sdpMetrics = &webRTCSDPMetrics{}
  625. // Continue to report the error to the broker. The broker will respond
  626. // with failure to the client's offer request.
  627. } else {
  628. defer webRTCConn.Close()
  629. }
  630. // Send answer request with SDP or error.
  631. _, err = brokerClient.ProxyAnswer(
  632. ctx,
  633. &ProxyAnswerRequest{
  634. ConnectionID: announceResponse.ConnectionID,
  635. SelectedProxyProtocolVersion: announceResponse.ClientProxyProtocolVersion,
  636. ProxyAnswerSDP: SDP,
  637. ICECandidateTypes: sdpMetrics.iceCandidateTypes,
  638. AnswerError: webRTCRequestErr,
  639. })
  640. if err != nil {
  641. if webRTCErr != nil {
  642. // Prioritize returning any WebRTC error for logging.
  643. return backOff, webRTCErr
  644. }
  645. return backOff, errors.Trace(err)
  646. }
  647. // Now that an answer is sent, stop if WebRTC initialization failed.
  648. if webRTCErr != nil {
  649. return backOff, webRTCErr
  650. }
  651. // Await the WebRTC connection.
  652. // We could concurrently dial the destination, to have that network
  653. // connection available immediately once the WebRTC channel is
  654. // established. This would work only for TCP, not UDP, network protocols
  655. // and could only include the TCP connection, as client traffic is
  656. // required for all higher layers such as TLS, SSH, etc. This could also
  657. // create wasted load on destination Psiphon servers, particularly when
  658. // WebRTC connections fail.
  659. awaitDataChannelCtx, awaitDataChannelCancelFunc := context.WithTimeout(
  660. ctx,
  661. common.ValueOrDefault(
  662. webRTCCoordinator.WebRTCAwaitDataChannelTimeout(), dataChannelAwaitTimeout))
  663. defer awaitDataChannelCancelFunc()
  664. err = webRTCConn.AwaitInitialDataChannel(awaitDataChannelCtx)
  665. if err != nil {
  666. return backOff, errors.Trace(err)
  667. }
  668. p.config.Logger.WithTraceFields(common.LogFields{
  669. "connectionID": announceResponse.ConnectionID,
  670. }).Info("WebRTC data channel established")
  671. // Dial the destination, a Psiphon server. The broker validates that the
  672. // dial destination is a Psiphon server.
  673. destinationDialContext, destinationDialCancelFunc := context.WithTimeout(
  674. ctx,
  675. common.ValueOrDefault(
  676. webRTCCoordinator.ProxyDestinationDialTimeout(), proxyDestinationDialTimeout))
  677. defer destinationDialCancelFunc()
  678. // Use the custom resolver when resolving destination hostnames, such as
  679. // those used in domain fronted protocols.
  680. //
  681. // - Resolving at the in-proxy should yield a more optimal CDN edge, vs.
  682. // resolving at the client.
  683. //
  684. // - Sending unresolved hostnames to in-proxies can expose some domain
  685. // fronting configuration. This can be mitigated by enabling domain
  686. // fronting on this 2nd hop only when the in-proxy is located in a
  687. // region that may be censored or blocked; this is to be enforced by
  688. // the broker.
  689. //
  690. // - Any DNSResolverPreresolved tactics applied will be relative to the
  691. // in-proxy location.
  692. destinationAddress, err := webRTCCoordinator.ResolveAddress(
  693. ctx, "ip", announceResponse.DestinationAddress)
  694. if err != nil {
  695. return backOff, errors.Trace(err)
  696. }
  697. destinationConn, err := webRTCCoordinator.ProxyUpstreamDial(
  698. destinationDialContext,
  699. announceResponse.NetworkProtocol.String(),
  700. destinationAddress)
  701. if err != nil {
  702. return backOff, errors.Trace(err)
  703. }
  704. defer destinationConn.Close()
  705. // For activity updates, indicate that a client connection is established.
  706. connected = true
  707. atomic.AddInt32(&p.connectingClients, -1)
  708. atomic.AddInt32(&p.connectedClients, 1)
  709. defer func() {
  710. atomic.AddInt32(&p.connectedClients, -1)
  711. }()
  712. // Throttle the relay connection.
  713. //
  714. // Here, each client gets LimitUp/DownstreamBytesPerSecond. Proxy
715. // operators may want to limit their bandwidth usage with a single
  716. // up/down value, an overall limit. The ProxyConfig can simply be
  717. // generated by dividing the limit by MaxClients. This approach favors
  718. // performance stability: each client gets the same throttling limits
  719. // regardless of how many other clients are connected.
  720. destinationConn = common.NewThrottledConn(
  721. destinationConn,
  722. announceResponse.NetworkProtocol.IsStream(),
  723. common.RateLimits{
  724. ReadBytesPerSecond: int64(p.config.LimitUpstreamBytesPerSecond),
  725. WriteBytesPerSecond: int64(p.config.LimitDownstreamBytesPerSecond),
  726. })
  727. // Hook up bytes transferred counting for activity updates.
  728. // The ActivityMonitoredConn inactivity timeout is configured. For
  729. // upstream TCP connections, the destinationConn will close when the TCP
  730. // connection to the Psiphon server closes. But for upstream UDP flows,
  731. // the relay does not know when the upstream "connection" has closed.
  732. // Well-behaved clients will close the WebRTC half of the relay when
  733. // those clients know the UDP-based tunnel protocol connection is closed;
  734. // the inactivity timeout handles the remaining cases.
  735. inactivityTimeout :=
  736. common.ValueOrDefault(
  737. webRTCCoordinator.ProxyRelayInactivityTimeout(),
  738. proxyRelayInactivityTimeout)
  739. destinationConn, err = common.NewActivityMonitoredConn(
  740. destinationConn, inactivityTimeout, false, nil, p.activityUpdateWrapper)
  741. if err != nil {
  742. return backOff, errors.Trace(err)
  743. }
  744. // Relay the client traffic to the destination. The client traffic is a
745. // standard Psiphon tunnel protocol destined to a Psiphon server. Any
  746. // blocking/censorship at the 2nd hop will be mitigated by the use of
  747. // Psiphon circumvention protocols and techniques.
  748. // Limitation: clients may apply fragmentation to traffic relayed over the
  749. // data channel, and there's no guarantee that the fragmentation write
  750. // sizes or delays will carry over to the egress side.
  751. // The proxy operator's ISP may be able to observe that the operator's
  752. // host has nearly matching ingress and egress traffic. The traffic
  753. // content won't be the same: the ingress traffic is wrapped in a WebRTC
  754. // data channel, and the egress traffic is a Psiphon tunnel protocol.
  755. // With padding and decoy packets, the ingress and egress traffic shape
756. // will differ beyond the basic WebRTC overhead. Even with this
  757. // measure, over time the number of bytes in and out of the proxy may
  758. // still indicate proxying.
  759. waitGroup := new(sync.WaitGroup)
  760. relayErrors := make(chan error, 2)
  761. var relayedUp, relayedDown int32
  762. waitGroup.Add(1)
  763. go func() {
  764. defer waitGroup.Done()
  765. // WebRTC data channels are based on SCTP, which is actually
  766. // message-based, not a stream. The (default) max message size for
  767. // pion/sctp is 65536:
  768. // https://github.com/pion/sctp/blob/44ed465396c880e379aae9c1bf81809a9e06b580/association.go#L52.
  769. //
  770. // As io.Copy uses a buffer size of 32K, each relayed message will be
  771. // less than the maximum. Calls to ClientConn.Write are also expected
  772. // to use io.Copy, keeping messages at most 32K in size.
  773. // io.Copy doesn't return an error on EOF, but we still want to signal
  774. // that relaying is done, so in this case a nil error is sent to the
  775. // channel.
  776. //
777. // Limitation: if one io.Copy goroutine sends nil and the other
  778. // io.Copy goroutine sends a non-nil error concurrently, the non-nil
  779. // error isn't prioritized.
  780. n, err := io.Copy(webRTCConn, destinationConn)
  781. if n > 0 {
  782. atomic.StoreInt32(&relayedDown, 1)
  783. }
  784. relayErrors <- errors.Trace(err)
  785. }()
  786. waitGroup.Add(1)
  787. go func() {
  788. defer waitGroup.Done()
  789. n, err := io.Copy(destinationConn, webRTCConn)
  790. if n > 0 {
  791. atomic.StoreInt32(&relayedUp, 1)
  792. }
  793. relayErrors <- errors.Trace(err)
  794. }()
  795. select {
  796. case err = <-relayErrors:
  797. case <-ctx.Done():
  798. }
  799. // Interrupt the relay goroutines by closing the connections.
  800. webRTCConn.Close()
  801. destinationConn.Close()
  802. waitGroup.Wait()
  803. p.config.Logger.WithTraceFields(common.LogFields{
  804. "connectionID": announceResponse.ConnectionID,
  805. }).Info("connection closed")
  806. // Don't apply a back-off delay to the next announcement since this
  807. // iteration successfully relayed bytes.
  808. if atomic.LoadInt32(&relayedUp) == 1 || atomic.LoadInt32(&relayedDown) == 1 {
  809. backOff = false
  810. }
  811. return backOff, err
  812. }
  813. func (p *Proxy) getMetrics(webRTCCoordinator WebRTCDialCoordinator) (*ProxyMetrics, string, error) {
  814. // tacticsNetworkID records the exact network ID that corresponds to the
  815. // tactics tag sent in the base parameters, and is used when applying any
  816. // new tactics returned by the broker.
  817. baseParams, tacticsNetworkID, err := p.config.GetBaseAPIParameters()
  818. if err != nil {
  819. return nil, "", errors.Trace(err)
  820. }
  821. packedBaseParams, err := protocol.EncodePackedAPIParameters(baseParams)
  822. if err != nil {
  823. return nil, "", errors.Trace(err)
  824. }
  825. return &ProxyMetrics{
  826. BaseAPIParameters: packedBaseParams,
  827. ProxyProtocolVersion: proxyProtocolVersion,
  828. NATType: webRTCCoordinator.NATType(),
  829. PortMappingTypes: webRTCCoordinator.PortMappingTypes(),
  830. MaxClients: int32(p.config.MaxClients),
  831. ConnectingClients: atomic.LoadInt32(&p.connectingClients),
  832. ConnectedClients: atomic.LoadInt32(&p.connectedClients),
  833. LimitUpstreamBytesPerSecond: int64(p.config.LimitUpstreamBytesPerSecond),
  834. LimitDownstreamBytesPerSecond: int64(p.config.LimitDownstreamBytesPerSecond),
  835. PeakUpstreamBytesPerSecond: atomic.LoadInt64(&p.peakBytesUp),
  836. PeakDownstreamBytesPerSecond: atomic.LoadInt64(&p.peakBytesDown),
  837. }, tacticsNetworkID, nil
  838. }