controller.go 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849
  1. /*
  2. * Copyright (c) 2015, Psiphon Inc.
  3. * All rights reserved.
  4. *
  5. * This program is free software: you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation, either version 3 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  17. *
  18. */
  19. // Package psiphon implements the core tunnel functionality of a Psiphon client.
  20. // The main function is RunForever, which runs a Controller that obtains lists of
  21. // servers, establishes tunnel connections, and runs local proxies through which
  22. // tunneled traffic may be sent.
  23. package psiphon
  24. import (
  25. "errors"
  26. "net"
  27. "sync"
  28. "time"
  29. )
  30. // Controller is a tunnel lifecycle coordinator. It manages lists of servers to
  31. // connect to; establishes and monitors tunnels; and runs local proxies which
  32. // route traffic through the tunnels.
// Controller is a tunnel lifecycle coordinator. It manages lists of servers to
// connect to; establishes and monitors tunnels; and runs local proxies which
// route traffic through the tunnels.
type Controller struct {
	config    *Config
	sessionId string

	// componentFailureSignal receives a signal when an associated component
	// (e.g., a local proxy) unexpectedly fails; see SignalComponentFailure.
	componentFailureSignal chan struct{}

	// shutdownBroadcast is closed by Run to broadcast shutdown to all
	// controller goroutines.
	shutdownBroadcast chan struct{}
	runWaitGroup      *sync.WaitGroup

	// establishedTunnels delivers newly connected tunnels from the establish
	// workers to runTunnels; failedTunnels delivers failed tunnels for removal.
	// Both are buffered (see NewController) so senders never block.
	establishedTunnels chan *Tunnel
	failedTunnels      chan *Tunnel

	// tunnelMutex guards the active tunnel pool state below
	// (establishedOnce, tunnels, nextTunnel).
	tunnelMutex     sync.Mutex
	establishedOnce bool
	tunnels         []*Tunnel
	// nextTunnel is the round-robin cursor used by getNextActiveTunnel.
	nextTunnel int

	// Concurrency note: startedConnectedReporter, startedUpgradeDownloader,
	// and isEstablishing are accessed only by the runTunnels goroutine.
	startedConnectedReporter bool
	startedUpgradeDownloader bool
	isEstablishing           bool

	// Establishment state, valid while isEstablishing is true; see
	// startEstablishing/stopEstablishing.
	establishWaitGroup        *sync.WaitGroup
	stopEstablishingBroadcast chan struct{}
	candidateServerEntries    chan *ServerEntry
	establishPendingConns     *Conns

	// untunneledPendingConns/untunneledDialConfig support direct (untunneled)
	// connections, e.g. the remote server list fetch and split tunnel dials.
	untunneledPendingConns *Conns
	untunneledDialConfig   *DialConfig
	splitTunnelClassifier  *SplitTunnelClassifier

	// signalFetchRemoteServerList triggers the remoteServerListFetcher;
	// buffered so one signal is retained when no receiver is ready.
	signalFetchRemoteServerList chan struct{}
}
  57. // NewController initializes a new controller.
  58. func NewController(config *Config) (controller *Controller, err error) {
  59. // Generate a session ID for the Psiphon server API. This session ID is
  60. // used across all tunnels established by the controller.
  61. sessionId, err := MakeSessionId()
  62. if err != nil {
  63. return nil, ContextError(err)
  64. }
  65. // untunneledPendingConns may be used to interrupt the fetch remote server list
  66. // request and other untunneled connection establishments. BindToDevice may be
  67. // used to exclude these requests and connection from VPN routing.
  68. untunneledPendingConns := new(Conns)
  69. untunneledDialConfig := &DialConfig{
  70. UpstreamProxyUrl: config.UpstreamProxyUrl,
  71. PendingConns: untunneledPendingConns,
  72. DeviceBinder: config.DeviceBinder,
  73. DnsServerGetter: config.DnsServerGetter,
  74. }
  75. controller = &Controller{
  76. config: config,
  77. sessionId: sessionId,
  78. // componentFailureSignal receives a signal from a component (including socks and
  79. // http local proxies) if they unexpectedly fail. Senders should not block.
  80. // A buffer allows at least one stop signal to be sent before there is a receiver.
  81. componentFailureSignal: make(chan struct{}, 1),
  82. shutdownBroadcast: make(chan struct{}),
  83. runWaitGroup: new(sync.WaitGroup),
  84. // establishedTunnels and failedTunnels buffer sizes are large enough to
  85. // receive full pools of tunnels without blocking. Senders should not block.
  86. establishedTunnels: make(chan *Tunnel, config.TunnelPoolSize),
  87. failedTunnels: make(chan *Tunnel, config.TunnelPoolSize),
  88. tunnels: make([]*Tunnel, 0),
  89. establishedOnce: false,
  90. startedConnectedReporter: false,
  91. startedUpgradeDownloader: false,
  92. isEstablishing: false,
  93. establishPendingConns: new(Conns),
  94. untunneledPendingConns: untunneledPendingConns,
  95. untunneledDialConfig: untunneledDialConfig,
  96. // A buffer allows at least one signal to be sent even when the receiver is
  97. // not listening. Senders should not block.
  98. signalFetchRemoteServerList: make(chan struct{}, 1),
  99. }
  100. controller.splitTunnelClassifier = NewSplitTunnelClassifier(config, controller)
  101. return controller, nil
  102. }
// Run executes the controller. It launches components and then monitors
// for a shutdown signal; after receiving the signal it shuts down the
// controller.
// The components include:
// - the periodic remote server list fetcher
// - the connected reporter
// - the tunnel manager
// - a local SOCKS proxy that port forwards through the pool of tunnels
// - a local HTTP proxy that port forwards through the pool of tunnels
func (controller *Controller) Run(shutdownBroadcast <-chan struct{}) {

	NoticeBuildInfo()
	NoticeCoreVersion(VERSION)
	ReportAvailableRegions()

	// Start components. Both local proxies must initialize before anything
	// else runs; failure to bind either aborts the controller immediately.

	socksProxy, err := NewSocksProxy(controller.config, controller)
	if err != nil {
		NoticeAlert("error initializing local SOCKS proxy: %s", err)
		return
	}
	defer socksProxy.Close()

	httpProxy, err := NewHttpProxy(
		controller.config, controller.untunneledDialConfig, controller)
	if err != nil {
		NoticeAlert("error initializing local HTTP proxy: %s", err)
		return
	}
	defer httpProxy.Close()

	if !controller.config.DisableRemoteServerListFetcher {
		controller.runWaitGroup.Add(1)
		go controller.remoteServerListFetcher()
	}

	// Note: the connected reporter isn't started until a tunnel is
	// established (see startConnectedReporter, called from runTunnels).

	controller.runWaitGroup.Add(1)
	go controller.runTunnels()

	// A timeout of 0 disables the establish-tunnel watchdog entirely.
	if *controller.config.EstablishTunnelTimeoutSeconds != 0 {
		controller.runWaitGroup.Add(1)
		go controller.establishTunnelWatcher()
	}

	// Wait while running: either an external shutdown request or an
	// internal component failure terminates the controller.

	select {
	case <-shutdownBroadcast:
		NoticeInfo("controller shutdown by request")
	case <-controller.componentFailureSignal:
		NoticeAlert("controller shutdown due to component failure")
	}

	// Shutdown sequence: broadcast shutdown to all goroutines, interrupt
	// any in-flight connection attempts so blocked dials abort, then wait
	// for every component goroutine to halt.
	close(controller.shutdownBroadcast)
	controller.establishPendingConns.CloseAll()
	controller.untunneledPendingConns.CloseAll()
	controller.runWaitGroup.Wait()

	controller.splitTunnelClassifier.Shutdown()

	NoticeInfo("exiting controller")
}
  156. // SignalComponentFailure notifies the controller that an associated component has failed.
  157. // This will terminate the controller.
  158. func (controller *Controller) SignalComponentFailure() {
  159. select {
  160. case controller.componentFailureSignal <- *new(struct{}):
  161. default:
  162. }
  163. }
  164. // remoteServerListFetcher fetches an out-of-band list of server entries
  165. // for more tunnel candidates. It fetches when signalled, with retries
  166. // on failure.
  167. func (controller *Controller) remoteServerListFetcher() {
  168. defer controller.runWaitGroup.Done()
  169. var lastFetchTime time.Time
  170. fetcherLoop:
  171. for {
  172. // Wait for a signal before fetching
  173. select {
  174. case <-controller.signalFetchRemoteServerList:
  175. case <-controller.shutdownBroadcast:
  176. break fetcherLoop
  177. }
  178. // Skip fetch entirely (i.e., send no request at all, even when ETag would save
  179. // on response size) when a recent fetch was successful
  180. if time.Now().Before(lastFetchTime.Add(FETCH_REMOTE_SERVER_LIST_STALE_PERIOD)) {
  181. continue
  182. }
  183. retryLoop:
  184. for {
  185. // Don't attempt to fetch while there is no network connectivity,
  186. // to avoid alert notice noise.
  187. if !WaitForNetworkConnectivity(
  188. controller.config.NetworkConnectivityChecker,
  189. controller.shutdownBroadcast) {
  190. break fetcherLoop
  191. }
  192. err := FetchRemoteServerList(
  193. controller.config, controller.untunneledDialConfig)
  194. if err == nil {
  195. lastFetchTime = time.Now()
  196. break retryLoop
  197. }
  198. NoticeAlert("failed to fetch remote server list: %s", err)
  199. timeout := time.After(FETCH_REMOTE_SERVER_LIST_RETRY_PERIOD)
  200. select {
  201. case <-timeout:
  202. case <-controller.shutdownBroadcast:
  203. break fetcherLoop
  204. }
  205. }
  206. }
  207. NoticeInfo("exiting remote server list fetcher")
  208. }
  209. // establishTunnelWatcher terminates the controller if a tunnel
  210. // has not been established in the configured time period. This
  211. // is regardless of how many tunnels are presently active -- meaning
  212. // that if an active tunnel was established and lost the controller
  213. // is left running (to re-establish).
  214. func (controller *Controller) establishTunnelWatcher() {
  215. defer controller.runWaitGroup.Done()
  216. timeout := time.After(
  217. time.Duration(*controller.config.EstablishTunnelTimeoutSeconds) * time.Second)
  218. select {
  219. case <-timeout:
  220. if !controller.hasEstablishedOnce() {
  221. NoticeAlert("failed to establish tunnel before timeout")
  222. controller.SignalComponentFailure()
  223. }
  224. case <-controller.shutdownBroadcast:
  225. }
  226. NoticeInfo("exiting establish tunnel watcher")
  227. }
  228. // connectedReporter sends periodic "connected" requests to the Psiphon API.
  229. // These requests are for server-side unique user stats calculation. See the
  230. // comment in DoConnectedRequest for a description of the request mechanism.
  231. // To ensure we don't over- or under-count unique users, only one connected
  232. // request is made across all simultaneous multi-tunnels; and the connected
  233. // request is repeated periodically.
  234. func (controller *Controller) connectedReporter() {
  235. defer controller.runWaitGroup.Done()
  236. loop:
  237. for {
  238. // Pick any active tunnel and make the next connected request. No error
  239. // is logged if there's no active tunnel, as that's not an unexpected condition.
  240. reported := false
  241. tunnel := controller.getNextActiveTunnel()
  242. if tunnel != nil {
  243. err := tunnel.session.DoConnectedRequest()
  244. if err == nil {
  245. reported = true
  246. } else {
  247. NoticeAlert("failed to make connected request: %s", err)
  248. }
  249. }
  250. // Schedule the next connected request and wait.
  251. var duration time.Duration
  252. if reported {
  253. duration = PSIPHON_API_CONNECTED_REQUEST_PERIOD
  254. } else {
  255. duration = PSIPHON_API_CONNECTED_REQUEST_RETRY_PERIOD
  256. }
  257. timeout := time.After(duration)
  258. select {
  259. case <-timeout:
  260. // Make another connected request
  261. case <-controller.shutdownBroadcast:
  262. break loop
  263. }
  264. }
  265. NoticeInfo("exiting connected reporter")
  266. }
// startConnectedReporter starts the connected reporter the first time it is
// called; subsequent calls are no-ops. It is not started at all when the
// Psiphon API is disabled. Called from runTunnels once a tunnel is established.
// Concurrency note: only the runTunnels goroutine may access
// startedConnectedReporter.
func (controller *Controller) startConnectedReporter() {
	if controller.config.DisableApi {
		return
	}
	if !controller.startedConnectedReporter {
		controller.startedConnectedReporter = true
		controller.runWaitGroup.Add(1)
		go controller.connectedReporter()
	}
}
  279. // upgradeDownloader makes periodic attemps to complete a client upgrade
  280. // download. DownloadUpgrade() is resumable, so each attempt has potential for
  281. // getting closer to completion, even in conditions where the download or
  282. // tunnel is repeatedly interrupted.
  283. // Once the download is complete, the downloader exits and is not run again:
  284. // We're assuming that the upgrade will be applied and the entire system
  285. // restarted before another upgrade is to be downloaded.
  286. func (controller *Controller) upgradeDownloader(clientUpgradeVersion string) {
  287. defer controller.runWaitGroup.Done()
  288. loop:
  289. for {
  290. // Pick any active tunnel and make the next download attempt. No error
  291. // is logged if there's no active tunnel, as that's not an unexpected condition.
  292. tunnel := controller.getNextActiveTunnel()
  293. if tunnel != nil {
  294. err := DownloadUpgrade(controller.config, clientUpgradeVersion, tunnel)
  295. if err == nil {
  296. break loop
  297. }
  298. NoticeAlert("upgrade download failed: ", err)
  299. }
  300. timeout := time.After(DOWNLOAD_UPGRADE_RETRY_PAUSE_PERIOD)
  301. select {
  302. case <-timeout:
  303. // Make another download attempt
  304. case <-controller.shutdownBroadcast:
  305. break loop
  306. }
  307. }
  308. NoticeInfo("exiting upgrade downloader")
  309. }
// startClientUpgradeDownloader starts the client upgrade downloader the first
// time it is called; subsequent calls are no-ops. No downloader is started
// when the Psiphon API is disabled, when no upgrade download URL/filename is
// configured, or when the server offers no upgrade version. Called from
// runTunnels once a tunnel is established.
// Concurrency note: only the runTunnels goroutine may access
// startedUpgradeDownloader.
func (controller *Controller) startClientUpgradeDownloader(clientUpgradeVersion string) {
	if controller.config.DisableApi {
		return
	}
	if controller.config.UpgradeDownloadUrl == "" ||
		controller.config.UpgradeDownloadFilename == "" {
		// No upgrade is desired
		return
	}
	if clientUpgradeVersion == "" {
		// No upgrade is offered
		return
	}
	if !controller.startedUpgradeDownloader {
		controller.startedUpgradeDownloader = true
		controller.runWaitGroup.Add(1)
		go controller.upgradeDownloader(clientUpgradeVersion)
	}
}
// runTunnels is the controller tunnel management main loop. It starts and stops
// establishing tunnels based on the target tunnel pool size and the current size
// of the pool. Tunnels are established asynchronously using worker goroutines.
//
// When there are no server entries for the target region/protocol, the
// establishCandidateGenerator will yield no candidates and wait before
// trying again. In the meantime, a remote server entry fetch may supply
// valid candidates.
//
// When a tunnel is established, it's added to the active pool. The tunnel's
// operateTunnel goroutine monitors the tunnel.
//
// When a tunnel fails, it's removed from the pool and the establish process is
// restarted to fill the pool.
func (controller *Controller) runTunnels() {
	defer controller.runWaitGroup.Done()

	// Start running
	controller.startEstablishing()
loop:
	for {
		select {
		case failedTunnel := <-controller.failedTunnels:
			NoticeAlert("tunnel failed: %s", failedTunnel.serverEntry.IpAddress)
			controller.terminateTunnel(failedTunnel)
			// Note: we make this extra check to ensure the shutdown signal takes priority
			// and that we do not start establishing. Critically, startEstablishing() calls
			// establishPendingConns.Reset() which clears the closed flag in
			// establishPendingConns; this causes the pendingConns.Add() within
			// interruptibleTCPDial to succeed instead of aborting, and the result
			// is that it's possible for establish goroutines to run all the way through
			// NewSession before being discarded... delaying shutdown.
			select {
			case <-controller.shutdownBroadcast:
				break loop
			default:
			}
			// Concurrency note: only this goroutine may call startEstablishing/stopEstablishing
			// and access isEstablishing.
			if !controller.isEstablishing {
				controller.startEstablishing()
			}
		// !TODO! design issue: might not be enough server entries with region/caps to ever fill tunnel slots
		// solution(?) target MIN(CountServerEntries(region, protocol), TunnelPoolSize)
		case establishedTunnel := <-controller.establishedTunnels:
			if controller.registerTunnel(establishedTunnel) {
				NoticeActiveTunnel(establishedTunnel.serverEntry.IpAddress)
			} else {
				// Pool full or duplicate server: close the surplus tunnel.
				controller.discardTunnel(establishedTunnel)
			}
			if controller.isFullyEstablished() {
				controller.stopEstablishing()
			}
			// Both starters are idempotent no-ops after their first
			// effective call.
			controller.startConnectedReporter()
			controller.startClientUpgradeDownloader(establishedTunnel.session.clientUpgradeVersion)
		case <-controller.shutdownBroadcast:
			break loop
		}
	}

	// Stop running
	controller.stopEstablishing()
	controller.terminateAllTunnels()

	// Drain tunnel channels: senders have stopped (establish workers halted,
	// shutdownBroadcast closed), so closing lets the range terminate; any
	// tunnels still queued are discarded.
	close(controller.establishedTunnels)
	for tunnel := range controller.establishedTunnels {
		controller.discardTunnel(tunnel)
	}
	close(controller.failedTunnels)
	for tunnel := range controller.failedTunnels {
		controller.discardTunnel(tunnel)
	}

	NoticeInfo("exiting run tunnels")
}
// SignalTunnelFailure implements the TunnelOwner interface. This function
// is called by Tunnel.operateTunnel when the tunnel has detected that it
// has failed. The Controller will signal runTunnels to create a new
// tunnel and/or remove the tunnel from the list of active tunnels.
func (controller *Controller) SignalTunnelFailure(tunnel *Tunnel) {
	// Don't block. Assumes the receiver has a buffer large enough for
	// the typical number of operated tunnels. In case there's no room,
	// terminate the tunnel (runTunnels won't get a signal in this case,
	// but the tunnel will be removed from the list of active tunnels).
	select {
	case controller.failedTunnels <- tunnel:
	default:
		controller.terminateTunnel(tunnel)
	}
}
// discardTunnel disposes of a successful connection that is no longer
// required, e.g. when the pool is already full or the server is a duplicate.
// The tunnel is simply closed; it never enters the active pool.
func (controller *Controller) discardTunnel(tunnel *Tunnel) {
	NoticeInfo("discard tunnel: %s", tunnel.serverEntry.IpAddress)
	// TODO: not calling PromoteServerEntry, since that would rank the
	// discarded tunnel before fully active tunnels. Can a discarded tunnel
	// be promoted (since it connects), but with lower rank than all active
	// tunnels?
	tunnel.Close()
}
  427. // registerTunnel adds the connected tunnel to the pool of active tunnels
  428. // which are candidates for port forwarding. Returns true if the pool has an
  429. // empty slot and false if the pool is full (caller should discard the tunnel).
  430. func (controller *Controller) registerTunnel(tunnel *Tunnel) bool {
  431. controller.tunnelMutex.Lock()
  432. defer controller.tunnelMutex.Unlock()
  433. if len(controller.tunnels) >= controller.config.TunnelPoolSize {
  434. return false
  435. }
  436. // Perform a final check just in case we've established
  437. // a duplicate connection.
  438. for _, activeTunnel := range controller.tunnels {
  439. if activeTunnel.serverEntry.IpAddress == tunnel.serverEntry.IpAddress {
  440. NoticeAlert("duplicate tunnel: %s", tunnel.serverEntry.IpAddress)
  441. return false
  442. }
  443. }
  444. controller.establishedOnce = true
  445. controller.tunnels = append(controller.tunnels, tunnel)
  446. NoticeTunnels(len(controller.tunnels))
  447. // The split tunnel classifier is started once the first tunnel is
  448. // established. This first tunnel is passed in to be used to make
  449. // the routes data request.
  450. // A long-running controller may run while the host device is present
  451. // in different regions. In this case, we want the split tunnel logic
  452. // to switch to routes for new regions and not classify traffic based
  453. // on routes installed for older regions.
  454. // We assume that when regions change, the host network will also
  455. // change, and so all tunnels will fail and be re-established. Under
  456. // that assumption, the classifier will be re-Start()-ed here when
  457. // the region has changed.
  458. if len(controller.tunnels) == 1 {
  459. controller.splitTunnelClassifier.Start(tunnel)
  460. }
  461. return true
  462. }
// hasEstablishedOnce indicates if at least one active tunnel has
// been established up to this point. This is regardless of how many
// tunnels are presently active. Used by establishTunnelWatcher to decide
// whether the establish timeout has been violated.
func (controller *Controller) hasEstablishedOnce() bool {
	controller.tunnelMutex.Lock()
	defer controller.tunnelMutex.Unlock()
	return controller.establishedOnce
}
// isFullyEstablished indicates if the pool of active tunnels has reached
// the configured target size, at which point establishing stops.
func (controller *Controller) isFullyEstablished() bool {
	controller.tunnelMutex.Lock()
	defer controller.tunnelMutex.Unlock()
	return len(controller.tunnels) >= controller.config.TunnelPoolSize
}
// terminateTunnel removes a tunnel from the pool of active tunnels
// and closes the tunnel. The next-tunnel state used by getNextActiveTunnel
// is adjusted as required. A tunnel not found in the pool is a no-op.
func (controller *Controller) terminateTunnel(tunnel *Tunnel) {
	controller.tunnelMutex.Lock()
	defer controller.tunnelMutex.Unlock()
	for index, activeTunnel := range controller.tunnels {
		if tunnel == activeTunnel {
			controller.tunnels = append(
				controller.tunnels[:index], controller.tunnels[index+1:]...)
			// Keep the round-robin cursor pointing at the same logical
			// position: entries after the removed index shift left by one...
			if controller.nextTunnel > index {
				controller.nextTunnel--
			}
			// ...and wrap the cursor when it now falls past the end.
			if controller.nextTunnel >= len(controller.tunnels) {
				controller.nextTunnel = 0
			}
			activeTunnel.Close()
			NoticeTunnels(len(controller.tunnels))
			break
		}
	}
}
  499. // terminateAllTunnels empties the tunnel pool, closing all active tunnels.
  500. // This is used when shutting down the controller.
  501. func (controller *Controller) terminateAllTunnels() {
  502. controller.tunnelMutex.Lock()
  503. defer controller.tunnelMutex.Unlock()
  504. // Closing all tunnels in parallel. In an orderly shutdown, each tunnel
  505. // may take a few seconds to send a final status request. We only want
  506. // to wait as long as the single slowest tunnel.
  507. closeWaitGroup := new(sync.WaitGroup)
  508. closeWaitGroup.Add(len(controller.tunnels))
  509. for _, activeTunnel := range controller.tunnels {
  510. tunnel := activeTunnel
  511. go func() {
  512. defer closeWaitGroup.Done()
  513. tunnel.Close()
  514. }()
  515. }
  516. closeWaitGroup.Wait()
  517. controller.tunnels = make([]*Tunnel, 0)
  518. controller.nextTunnel = 0
  519. NoticeTunnels(len(controller.tunnels))
  520. }
  521. // getNextActiveTunnel returns the next tunnel from the pool of active
  522. // tunnels. Currently, tunnel selection order is simple round-robin.
  523. func (controller *Controller) getNextActiveTunnel() (tunnel *Tunnel) {
  524. controller.tunnelMutex.Lock()
  525. defer controller.tunnelMutex.Unlock()
  526. for i := len(controller.tunnels); i > 0; i-- {
  527. tunnel = controller.tunnels[controller.nextTunnel]
  528. controller.nextTunnel =
  529. (controller.nextTunnel + 1) % len(controller.tunnels)
  530. return tunnel
  531. }
  532. return nil
  533. }
  534. // isActiveTunnelServerEntry is used to check if there's already
  535. // an existing tunnel to a candidate server.
  536. func (controller *Controller) isActiveTunnelServerEntry(serverEntry *ServerEntry) bool {
  537. controller.tunnelMutex.Lock()
  538. defer controller.tunnelMutex.Unlock()
  539. for _, activeTunnel := range controller.tunnels {
  540. if activeTunnel.serverEntry.IpAddress == serverEntry.IpAddress {
  541. return true
  542. }
  543. }
  544. return false
  545. }
// Dial selects an active tunnel and establishes a port forward
// connection through the selected tunnel. Failure to connect is considered
// a port forward failure, for the purpose of monitoring tunnel health.
// Returns an error when no tunnel is active. When split tunnel mode is
// configured and the destination classifies as untunneled (and alwaysTunnel
// is false), the connection is dialed directly instead.
func (controller *Controller) Dial(
	remoteAddr string, alwaysTunnel bool, downstreamConn net.Conn) (conn net.Conn, err error) {

	tunnel := controller.getNextActiveTunnel()
	if tunnel == nil {
		return nil, ContextError(errors.New("no active tunnels"))
	}

	// Perform split tunnel classification when feature is enabled, and if the remote
	// address is classified as untunneled, dial directly.
	if !alwaysTunnel && controller.config.SplitTunnelDnsServer != "" {

		host, _, err := net.SplitHostPort(remoteAddr)
		if err != nil {
			return nil, ContextError(err)
		}

		// Note: a possible optimization, when split tunnel is active and IsUntunneled performs
		// a DNS resolution in order to make its classification, is to reuse that IP address in
		// the following Dials so they do not need to make their own resolutions. However, the
		// way this is currently implemented ensures that, e.g., DNS geo load balancing occurs
		// relative to the outbound network.
		if controller.splitTunnelClassifier.IsUntunneled(host) {
			// !TODO! track downstreamConn and close it when the DialTCP conn closes, as with tunnel.Dial conns?
			return DialTCP(remoteAddr, controller.untunneledDialConfig)
		}
	}

	tunneledConn, err := tunnel.Dial(remoteAddr, alwaysTunnel, downstreamConn)
	if err != nil {
		return nil, ContextError(err)
	}

	return tunneledConn, nil
}
// startEstablishing creates a pool of worker goroutines which will
// attempt to establish tunnels to candidate servers. The candidates
// are generated by another goroutine (establishCandidateGenerator).
// A no-op when establishing is already in progress.
// Concurrency note: called only from the runTunnels goroutine.
func (controller *Controller) startEstablishing() {
	if controller.isEstablishing {
		return
	}
	NoticeInfo("start establishing")
	controller.isEstablishing = true
	controller.establishWaitGroup = new(sync.WaitGroup)
	controller.stopEstablishingBroadcast = make(chan struct{})
	controller.candidateServerEntries = make(chan *ServerEntry)
	// Reset clears the closed flag set by a prior CloseAll, so that the
	// workers' pending connection registrations succeed (see the
	// shutdown-priority note in runTunnels).
	controller.establishPendingConns.Reset()
	for i := 0; i < controller.config.ConnectionWorkerPoolSize; i++ {
		controller.establishWaitGroup.Add(1)
		go controller.establishTunnelWorker()
	}
	controller.establishWaitGroup.Add(1)
	go controller.establishCandidateGenerator()
}
// stopEstablishing signals the establish goroutines to stop and waits
// for the group to halt. pendingConns is used to interrupt any worker
// blocked on a socket connect. A no-op when not currently establishing.
// Concurrency note: called only from the runTunnels goroutine.
func (controller *Controller) stopEstablishing() {
	if !controller.isEstablishing {
		return
	}
	NoticeInfo("stop establishing")
	close(controller.stopEstablishingBroadcast)
	// Note: on Windows, interruptibleTCPClose doesn't really interrupt socket connects
	// and may leave goroutines running for a time after the Wait call.
	controller.establishPendingConns.CloseAll()
	// Note: establishCandidateGenerator closes controller.candidateServerEntries
	// (as it may be sending to that channel).
	controller.establishWaitGroup.Wait()
	// Clear establishment state only after all goroutines have halted.
	controller.isEstablishing = false
	controller.establishWaitGroup = nil
	controller.stopEstablishingBroadcast = nil
	controller.candidateServerEntries = nil
}
// establishCandidateGenerator populates the candidate queue with server entries
// from the data store. Server entries are iterated in rank order, so that promoted
// servers with higher rank are priority candidates. After each full (or
// time-limited) pass over the candidates it triggers a remote server list
// fetch and pauses before iterating again; it runs until establishing is
// stopped or the controller shuts down.
func (controller *Controller) establishCandidateGenerator() {
	defer controller.establishWaitGroup.Done()
	// Closing candidateServerEntries unblocks the establish workers'
	// range loops; only this sender may close it.
	defer close(controller.candidateServerEntries)

	iterator, err := NewServerEntryIterator(controller.config)
	if err != nil {
		NoticeAlert("failed to iterate over candidates: %s", err)
		controller.SignalComponentFailure()
		return
	}
	defer iterator.Close()

loop:
	// Repeat until stopped
	for {
		// Send each iterator server entry to the establish workers
		startTime := time.Now()
		for {
			serverEntry, err := iterator.Next()
			if err != nil {
				NoticeAlert("failed to get next candidate: %s", err)
				controller.SignalComponentFailure()
				break loop
			}
			if serverEntry == nil {
				// Completed this iteration
				break
			}

			// TODO: here we could generate multiple candidates from the
			// server entry when there are many MeekFrontingAddresses.

			select {
			case controller.candidateServerEntries <- serverEntry:
			case <-controller.stopEstablishingBroadcast:
				break loop
			case <-controller.shutdownBroadcast:
				break loop
			}

			if time.Now().After(startTime.Add(ESTABLISH_TUNNEL_WORK_TIME_SECONDS)) {
				// Start over, after a brief pause, with a new shuffle of the server
				// entries, and potentially some newly fetched server entries.
				break
			}
		}

		// Free up resources now, but don't reset until after the pause.
		iterator.Close()

		// Trigger a fetch remote server list, since we may have failed to
		// connect with all known servers. Don't block sending signal, since
		// this signal may have already been sent.
		// Don't wait for fetch remote to succeed, since it may fail and
		// enter a retry loop and we're better off trying more known servers.
		// TODO: synchronize the fetch response, so it can be incorporated
		// into the server entry iterator as soon as available.
		select {
		case controller.signalFetchRemoteServerList <- *new(struct{}):
		default:
		}

		// After a complete iteration of candidate servers, pause before iterating again.
		// This helps avoid some busy wait loop conditions, and also allows some time for
		// network conditions to change. Also allows for fetch remote to complete,
		// in typical conditions (it isn't strictly necessary to wait for this, there will
		// be more rounds if required).
		timeout := time.After(ESTABLISH_TUNNEL_PAUSE_PERIOD)
		select {
		case <-timeout:
			// Retry iterating
		case <-controller.stopEstablishingBroadcast:
			break loop
		case <-controller.shutdownBroadcast:
			break loop
		}

		iterator.Reset()
	}

	NoticeInfo("stopped candidate generator")
}
// establishTunnelWorker pulls candidates from the candidate queue, establishes
// a connection to the tunnel server, and delivers the established tunnel to a
// channel. The worker exits when the candidate channel is closed or when the
// stop-establishing broadcast fires.
func (controller *Controller) establishTunnelWorker() {
	defer controller.establishWaitGroup.Done()
loop:
	for serverEntry := range controller.candidateServerEntries {

		// Note: don't receive from candidateServerEntries and stopEstablishingBroadcast
		// in the same select, since we want to prioritize receiving the stop signal
		if controller.isStopEstablishingBroadcast() {
			break loop
		}

		// There may already be a tunnel to this candidate. If so, skip it.
		if controller.isActiveTunnelServerEntry(serverEntry) {
			continue
		}

		// Pause while there's no network connectivity; abort when the stop
		// broadcast fires while waiting.
		if !WaitForNetworkConnectivity(
			controller.config.NetworkConnectivityChecker,
			controller.stopEstablishingBroadcast) {
			break loop
		}

		tunnel, err := EstablishTunnel(
			controller.config,
			controller.sessionId,
			controller.establishPendingConns,
			serverEntry,
			controller) // TunnelOwner
		if err != nil {
			// Before emitting error, check if establish interrupted, in which
			// case the error is noise.
			if controller.isStopEstablishingBroadcast() {
				break loop
			}
			NoticeInfo("failed to connect to %s: %s", serverEntry.IpAddress, err)
			continue
		}

		// Deliver established tunnel.
		// Don't block. Assumes the receiver has a buffer large enough for
		// the number of desired tunnels. If there's no room, the tunnel must
		// not be required so it's discarded.
		select {
		case controller.establishedTunnels <- tunnel:
		default:
			controller.discardTunnel(tunnel)
		}
	}
	NoticeInfo("stopped establish worker")
}
  740. func (controller *Controller) isStopEstablishingBroadcast() bool {
  741. select {
  742. case <-controller.stopEstablishingBroadcast:
  743. return true
  744. default:
  745. }
  746. return false
  747. }