controller.go 22 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643
  1. /*
  2. * Copyright (c) 2014, Psiphon Inc.
  3. * All rights reserved.
  4. *
  5. * This program is free software: you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation, either version 3 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  17. *
  18. */
  19. // Package psiphon implements the core tunnel functionality of a Psiphon client.
  20. // The main function is RunForever, which runs a Controller that obtains lists of
  21. // servers, establishes tunnel connections, and runs local proxies through which
  22. // tunneled traffic may be sent.
  23. package psiphon
  24. import (
  25. "errors"
  26. "fmt"
  27. "io"
  28. "net"
  29. "sync"
  30. "time"
  31. )
// Controller is a tunnel lifecycle coordinator. It manages lists of servers to
// connect to; establishes and monitors tunnels; and runs local proxies which
// route traffic through the tunnels.
type Controller struct {
	config *Config
	// failureSignal receives a non-blocking signal from components (e.g. the
	// local proxies) when they fail unexpectedly; see SignalFailure.
	failureSignal chan struct{}
	// shutdownBroadcast is closed by Run to broadcast shutdown to all goroutines.
	shutdownBroadcast chan struct{}
	runWaitGroup      *sync.WaitGroup
	// establishedTunnels and failedTunnels carry tunnel hand-offs from the
	// establish workers and operateTunnel goroutines into runTunnels.
	establishedTunnels chan *Tunnel
	failedTunnels      chan *Tunnel
	// tunnelMutex guards tunnels and nextTunnel.
	tunnels    []*Tunnel // pool of active tunnels
	nextTunnel int       // round-robin index used by getNextActiveTunnel
	tunnelMutex sync.Mutex
	operateWaitGroup *sync.WaitGroup
	// isEstablishing and the fields below it are accessed only by the
	// runTunnels goroutine (see startEstablishing/stopEstablishing).
	isEstablishing            bool
	establishWaitGroup        *sync.WaitGroup
	stopEstablishingBroadcast chan struct{}
	candidateServerEntries    chan *ServerEntry
	// pendingConns tracks in-flight connections so they can be interrupted
	// on shutdown or when establishing stops.
	pendingConns *Conns
}
  52. // NewController initializes a new controller.
  53. func NewController(config *Config) (controller *Controller) {
  54. return &Controller{
  55. config: config,
  56. // failureSignal receives a signal from a component (including socks and
  57. // http local proxies) if they unexpectedly fail. Senders should not block.
  58. // A buffer allows at least one stop signal to be sent before there is a receiver.
  59. failureSignal: make(chan struct{}, 1),
  60. shutdownBroadcast: make(chan struct{}),
  61. runWaitGroup: new(sync.WaitGroup),
  62. // establishedTunnels and failedTunnels buffer sizes are large enough to
  63. // receive full pools of tunnels without blocking. Senders should not block.
  64. establishedTunnels: make(chan *Tunnel, config.TunnelPoolSize),
  65. failedTunnels: make(chan *Tunnel, config.TunnelPoolSize),
  66. tunnels: make([]*Tunnel, 0),
  67. operateWaitGroup: new(sync.WaitGroup),
  68. isEstablishing: false,
  69. pendingConns: new(Conns),
  70. }
  71. }
  72. // Run executes the controller. It launches components and then monitors
  73. // for a shutdown signal; after receiving the signal it shuts down the
  74. // controller.
  75. // The components include:
  76. // - the periodic remote server list fetcher
  77. // - the tunnel manager
  78. // - a local SOCKS proxy that port forwards through the pool of tunnels
  79. // - a local HTTP proxy that port forwards through the pool of tunnels
  80. func (controller *Controller) Run(shutdownBroadcast <-chan struct{}) {
  81. Notice(NOTICE_VERSION, VERSION)
  82. Stats_Start()
  83. defer Stats_Stop()
  84. socksProxy, err := NewSocksProxy(controller.config, controller)
  85. if err != nil {
  86. Notice(NOTICE_ALERT, "error initializing local SOCKS proxy: %s", err)
  87. return
  88. }
  89. defer socksProxy.Close()
  90. httpProxy, err := NewHttpProxy(controller.config, controller)
  91. if err != nil {
  92. Notice(NOTICE_ALERT, "error initializing local SOCKS proxy: %s", err)
  93. return
  94. }
  95. defer httpProxy.Close()
  96. controller.runWaitGroup.Add(2)
  97. go controller.remoteServerListFetcher()
  98. go controller.runTunnels()
  99. select {
  100. case <-shutdownBroadcast:
  101. Notice(NOTICE_INFO, "controller shutdown by request")
  102. case <-controller.failureSignal:
  103. Notice(NOTICE_ALERT, "controller shutdown due to failure")
  104. }
  105. // Note: in addition to establish(), this pendingConns will interrupt
  106. // FetchRemoteServerList
  107. controller.pendingConns.CloseAll()
  108. close(controller.shutdownBroadcast)
  109. controller.runWaitGroup.Wait()
  110. Notice(NOTICE_INFO, "exiting controller")
  111. }
  112. // SignalFailure notifies the controller that an associated component has failed.
  113. // This will terminate the controller.
  114. func (controller *Controller) SignalFailure() {
  115. select {
  116. case controller.failureSignal <- *new(struct{}):
  117. default:
  118. }
  119. }
  120. // remoteServerListFetcher fetches an out-of-band list of server entries
  121. // for more tunnel candidates. It fetches immediately, retries after failure
  122. // with a wait period, and refetches after success with a longer wait period.
  123. func (controller *Controller) remoteServerListFetcher() {
  124. defer controller.runWaitGroup.Done()
  125. // Note: unlike existing Psiphon clients, this code
  126. // always makes the fetch remote server list request
  127. loop:
  128. for {
  129. // TODO: FetchRemoteServerList should have its own pendingConns,
  130. // otherwise it may needlessly abort when establish is stopped.
  131. err := FetchRemoteServerList(controller.config, controller.pendingConns)
  132. var duration time.Duration
  133. if err != nil {
  134. Notice(NOTICE_ALERT, "failed to fetch remote server list: %s", err)
  135. duration = FETCH_REMOTE_SERVER_LIST_RETRY_TIMEOUT
  136. } else {
  137. duration = FETCH_REMOTE_SERVER_LIST_STALE_TIMEOUT
  138. }
  139. timeout := time.After(duration)
  140. select {
  141. case <-timeout:
  142. // Fetch again
  143. case <-controller.shutdownBroadcast:
  144. break loop
  145. }
  146. }
  147. Notice(NOTICE_INFO, "exiting remote server list fetcher")
  148. }
// runTunnels is the controller tunnel management main loop. It starts and stops
// establishing tunnels based on the target tunnel pool size and the current size
// of the pool. Tunnels are established asynchronously using worker goroutines.
// When a tunnel is established, it's added to the active pool and a corresponding
// operateTunnel goroutine is launched which starts a session in the tunnel and
// monitors the tunnel for failures.
// When a tunnel fails, it's removed from the pool and the establish process is
// restarted to fill the pool.
func (controller *Controller) runTunnels() {
	defer controller.runWaitGroup.Done()
	// Don't start establishing until there are some server candidates. The
	// typical case is a client with no server entries which will wait for
	// the first successful FetchRemoteServerList to populate the data store.
	for {
		if HasServerEntries(
			controller.config.EgressRegion, controller.config.TunnelProtocol) {
			break
		}
		// TODO: replace polling with signal
		timeout := time.After(5 * time.Second)
		select {
		case <-timeout:
		case <-controller.shutdownBroadcast:
			return
		}
	}
	controller.startEstablishing()
loop:
	for {
		select {
		case failedTunnel := <-controller.failedTunnels:
			Notice(NOTICE_ALERT, "tunnel failed: %s", failedTunnel.serverEntry.IpAddress)
			controller.terminateTunnel(failedTunnel)
			// Note: only this goroutine may call startEstablishing/stopEstablishing and access
			// isEstablishing.
			if !controller.isEstablishing {
				controller.startEstablishing()
			}
		// !TODO! design issue: might not be enough server entries with region/caps to ever fill tunnel slots
		// solution(?) target MIN(CountServerEntries(region, protocol), TunnelPoolSize)
		case establishedTunnel := <-controller.establishedTunnels:
			Notice(NOTICE_INFO, "established tunnel: %s", establishedTunnel.serverEntry.IpAddress)
			// registerTunnel returns false when the pool is full or the tunnel
			// duplicates an active one; in that case, discard it.
			if controller.registerTunnel(establishedTunnel) {
				Notice(NOTICE_INFO, "active tunnel: %s", establishedTunnel.serverEntry.IpAddress)
				controller.operateWaitGroup.Add(1)
				go controller.operateTunnel(establishedTunnel)
			} else {
				controller.discardTunnel(establishedTunnel)
			}
			if controller.isFullyEstablished() {
				controller.stopEstablishing()
			}
		case <-controller.shutdownBroadcast:
			break loop
		}
	}
	// Shutdown sequence: stop the establish workers, close all active tunnels,
	// then wait for the operateTunnel goroutines to exit before draining.
	controller.stopEstablishing()
	controller.terminateAllTunnels()
	controller.operateWaitGroup.Wait()
	// Drain tunnel channels
	// Safe to close here: all senders (establish workers and operateTunnel
	// goroutines) have stopped by this point.
	close(controller.establishedTunnels)
	for tunnel := range controller.establishedTunnels {
		controller.discardTunnel(tunnel)
	}
	close(controller.failedTunnels)
	for tunnel := range controller.failedTunnels {
		controller.discardTunnel(tunnel)
	}
	Notice(NOTICE_INFO, "exiting run tunnels")
}
  219. // discardTunnel disposes of a successful connection that is no longer required.
  220. func (controller *Controller) discardTunnel(tunnel *Tunnel) {
  221. Notice(NOTICE_INFO, "discard tunnel: %s", tunnel.serverEntry.IpAddress)
  222. // TODO: not calling PromoteServerEntry, since that would rank the
  223. // discarded tunnel before fully active tunnels. Can a discarded tunnel
  224. // be promoted (since it connects), but with lower rank than all active
  225. // tunnels?
  226. tunnel.Close()
  227. }
  228. // registerTunnel adds the connected tunnel to the pool of active tunnels
  229. // which are candidates for port forwarding. Returns true if the pool has an
  230. // empty slot and false if the pool is full (caller should discard the tunnel).
  231. func (controller *Controller) registerTunnel(tunnel *Tunnel) bool {
  232. controller.tunnelMutex.Lock()
  233. defer controller.tunnelMutex.Unlock()
  234. if len(controller.tunnels) >= controller.config.TunnelPoolSize {
  235. return false
  236. }
  237. // Perform a final check just in case we've established
  238. // a duplicate connection.
  239. for _, activeTunnel := range controller.tunnels {
  240. if activeTunnel.serverEntry.IpAddress == tunnel.serverEntry.IpAddress {
  241. Notice(NOTICE_ALERT, "duplicate tunnel: %s", tunnel.serverEntry.IpAddress)
  242. return false
  243. }
  244. }
  245. controller.tunnels = append(controller.tunnels, tunnel)
  246. Notice(NOTICE_TUNNELS, "%d", len(controller.tunnels))
  247. return true
  248. }
  249. // isFullyEstablished indicates if the pool of active tunnels is full.
  250. func (controller *Controller) isFullyEstablished() bool {
  251. controller.tunnelMutex.Lock()
  252. defer controller.tunnelMutex.Unlock()
  253. return len(controller.tunnels) >= controller.config.TunnelPoolSize
  254. }
  255. // terminateTunnel removes a tunnel from the pool of active tunnels
  256. // and closes the tunnel. The next-tunnel state used by getNextActiveTunnel
  257. // is adjusted as required.
  258. func (controller *Controller) terminateTunnel(tunnel *Tunnel) {
  259. controller.tunnelMutex.Lock()
  260. defer controller.tunnelMutex.Unlock()
  261. for index, activeTunnel := range controller.tunnels {
  262. if tunnel == activeTunnel {
  263. controller.tunnels = append(
  264. controller.tunnels[:index], controller.tunnels[index+1:]...)
  265. if controller.nextTunnel > index {
  266. controller.nextTunnel--
  267. }
  268. if controller.nextTunnel >= len(controller.tunnels) {
  269. controller.nextTunnel = 0
  270. }
  271. activeTunnel.Close()
  272. Notice(NOTICE_TUNNELS, "%d", len(controller.tunnels))
  273. break
  274. }
  275. }
  276. }
  277. // terminateAllTunnels empties the tunnel pool, closing all active tunnels.
  278. // This is used when shutting down the controller.
  279. func (controller *Controller) terminateAllTunnels() {
  280. controller.tunnelMutex.Lock()
  281. defer controller.tunnelMutex.Unlock()
  282. for _, activeTunnel := range controller.tunnels {
  283. activeTunnel.Close()
  284. }
  285. controller.tunnels = make([]*Tunnel, 0)
  286. controller.nextTunnel = 0
  287. Notice(NOTICE_TUNNELS, "%d", len(controller.tunnels))
  288. }
  289. // getNextActiveTunnel returns the next tunnel from the pool of active
  290. // tunnels. Currently, tunnel selection order is simple round-robin.
  291. func (controller *Controller) getNextActiveTunnel() (tunnel *Tunnel) {
  292. controller.tunnelMutex.Lock()
  293. defer controller.tunnelMutex.Unlock()
  294. for i := len(controller.tunnels); i > 0; i-- {
  295. tunnel = controller.tunnels[controller.nextTunnel]
  296. controller.nextTunnel =
  297. (controller.nextTunnel + 1) % len(controller.tunnels)
  298. // A tunnel must[*] have started its session (performed the server
  299. // API handshake sequence) before it may be used for tunneling traffic
  300. // [*]currently not enforced by the server, but may be in the future.
  301. if tunnel.IsSessionStarted() {
  302. return tunnel
  303. }
  304. }
  305. return nil
  306. }
  307. // isActiveTunnelServerEntries is used to check if there's already
  308. // an existing tunnel to a candidate server.
  309. func (controller *Controller) isActiveTunnelServerEntry(serverEntry *ServerEntry) bool {
  310. controller.tunnelMutex.Lock()
  311. defer controller.tunnelMutex.Unlock()
  312. for _, activeTunnel := range controller.tunnels {
  313. if activeTunnel.serverEntry.IpAddress == serverEntry.IpAddress {
  314. return true
  315. }
  316. }
  317. return false
  318. }
  319. // operateTunnel starts a Psiphon session (handshake, etc.) on a newly
  320. // connected tunnel, and then monitors the tunnel for failures:
  321. //
  322. // 1. Overall tunnel failure: the tunnel sends a signal to the ClosedSignal
  323. // channel on keep-alive failure and other transport I/O errors. In case
  324. // of such a failure, the tunnel is marked as failed.
  325. //
  326. // 2. Tunnel port forward failures: the tunnel connection may stay up but
  327. // the client may still fail to establish port forwards due to server load
  328. // and other conditions. After a threshold number of such failures, the
  329. // overall tunnel is marked as failed.
  330. //
  331. // TODO: currently, any connect (dial), read, or write error associated with
  332. // a port forward is counted as a failure. It may be important to differentiate
  333. // between failures due to Psiphon server conditions and failures due to the
  334. // origin/target server (in the latter case, the tunnel is healthy). Here are
  335. // some typical error messages to consider matching against (or ignoring):
  336. //
  337. // - "ssh: rejected: administratively prohibited (open failed)"
  338. // - "ssh: rejected: connect failed (Connection timed out)"
  339. // - "write tcp ... broken pipe"
  340. // - "read tcp ... connection reset by peer"
  341. // - "ssh: unexpected packet in response to channel open: <nil>"
  342. //
  343. func (controller *Controller) operateTunnel(tunnel *Tunnel) {
  344. defer controller.operateWaitGroup.Done()
  345. tunnelClosedSignal := make(chan struct{}, 1)
  346. err := tunnel.conn.SetClosedSignal(tunnelClosedSignal)
  347. if err != nil {
  348. err = fmt.Errorf("failed to set closed signal: %s", err)
  349. }
  350. Notice(NOTICE_INFO, "starting session for %s", tunnel.serverEntry.IpAddress)
  351. // TODO: NewSession server API calls may block shutdown
  352. session, err := NewSession(controller.config, tunnel)
  353. if err != nil {
  354. err = fmt.Errorf("error starting session for %s: %s", tunnel.serverEntry.IpAddress, err)
  355. }
  356. // Tunnel may now be used for port forwarding
  357. tunnel.SetSessionStarted()
  358. // Promote this successful tunnel to first rank so it's one
  359. // of the first candidates next time establish runs.
  360. PromoteServerEntry(tunnel.serverEntry.IpAddress)
  361. statsTimer := time.NewTimer(NextSendPeriod())
  362. for err == nil {
  363. select {
  364. case failures := <-tunnel.portForwardFailures:
  365. tunnel.portForwardFailureTotal += failures
  366. Notice(
  367. NOTICE_INFO, "port forward failures for %s: %d",
  368. tunnel.serverEntry.IpAddress, tunnel.portForwardFailureTotal)
  369. if tunnel.portForwardFailureTotal > controller.config.PortForwardFailureThreshold {
  370. err = errors.New("tunnel exceeded port forward failure threshold")
  371. }
  372. case <-tunnelClosedSignal:
  373. // TODO: this signal can be received during a commanded shutdown due to
  374. // how tunnels are closed; should rework this to avoid log noise.
  375. err = errors.New("tunnel closed unexpectedly")
  376. case <-controller.shutdownBroadcast:
  377. // Send final stats
  378. sendStats(tunnel, session, true)
  379. Notice(NOTICE_INFO, "shutdown operate tunnel")
  380. return
  381. case <-statsTimer.C:
  382. sendStats(tunnel, session, false)
  383. statsTimer.Reset(NextSendPeriod())
  384. }
  385. }
  386. if err != nil {
  387. Notice(NOTICE_ALERT, "operate tunnel error for %s: %s", tunnel.serverEntry.IpAddress, err)
  388. // Don't block. Assumes the receiver has a buffer large enough for
  389. // the typical number of operated tunnels. In case there's no room,
  390. // terminate the tunnel (runTunnels won't get a signal in this case).
  391. select {
  392. case controller.failedTunnels <- tunnel:
  393. default:
  394. controller.terminateTunnel(tunnel)
  395. }
  396. }
  397. }
  398. // sendStats is a helper for sending session stats to the server.
  399. func sendStats(tunnel *Tunnel, session *Session, final bool) {
  400. payload := GetForServer(tunnel.serverEntry.IpAddress)
  401. if payload != nil {
  402. err := session.DoStatusRequest(payload, final)
  403. if err != nil {
  404. Notice(NOTICE_ALERT, "DoStatusRequest failed for %s: %s", tunnel.serverEntry.IpAddress, err)
  405. PutBack(tunnel.serverEntry.IpAddress, payload)
  406. }
  407. }
  408. }
// TunneledConn implements net.Conn and wraps a port forward connection.
// It is used to hook into Read and Write to observe I/O errors and
// report these errors back to the tunnel monitor as port forward failures.
type TunneledConn struct {
	net.Conn
	// tunnel is the tunnel carrying this port forward; failures are
	// reported to its portForwardFailures channel.
	tunnel *Tunnel
}
  416. func (conn *TunneledConn) Read(buffer []byte) (n int, err error) {
  417. n, err = conn.Conn.Read(buffer)
  418. if err != nil && err != io.EOF {
  419. // Report 1 new failure. Won't block; assumes the receiver
  420. // has a sufficient buffer for the threshold number of reports.
  421. // TODO: conditional on type of error or error message?
  422. select {
  423. case conn.tunnel.portForwardFailures <- 1:
  424. default:
  425. }
  426. }
  427. return
  428. }
  429. func (conn *TunneledConn) Write(buffer []byte) (n int, err error) {
  430. n, err = conn.Conn.Write(buffer)
  431. if err != nil && err != io.EOF {
  432. // Same as TunneledConn.Read()
  433. select {
  434. case conn.tunnel.portForwardFailures <- 1:
  435. default:
  436. }
  437. }
  438. return
  439. }
  440. // Dial selects an active tunnel and establishes a port forward
  441. // connection through the selected tunnel. Failure to connect is considered
  442. // a port foward failure, for the purpose of monitoring tunnel health.
  443. func (controller *Controller) Dial(remoteAddr string) (conn net.Conn, err error) {
  444. tunnel := controller.getNextActiveTunnel()
  445. if tunnel == nil {
  446. return nil, ContextError(errors.New("no active tunnels"))
  447. }
  448. tunnelConn, err := tunnel.Dial(remoteAddr)
  449. if err != nil {
  450. // TODO: conditional on type of error or error message?
  451. select {
  452. case tunnel.portForwardFailures <- 1:
  453. default:
  454. }
  455. return nil, ContextError(err)
  456. }
  457. statsConn := NewStatsConn(tunnelConn, tunnel.ServerID(), tunnel.StatsRegexps())
  458. conn = &TunneledConn{
  459. Conn: statsConn,
  460. tunnel: tunnel}
  461. return
  462. }
  463. // startEstablishing creates a pool of worker goroutines which will
  464. // attempt to establish tunnels to candidate servers. The candidates
  465. // are generated by another goroutine.
  466. func (controller *Controller) startEstablishing() {
  467. if controller.isEstablishing {
  468. return
  469. }
  470. Notice(NOTICE_INFO, "start establishing")
  471. controller.isEstablishing = true
  472. controller.establishWaitGroup = new(sync.WaitGroup)
  473. controller.stopEstablishingBroadcast = make(chan struct{})
  474. controller.candidateServerEntries = make(chan *ServerEntry)
  475. for i := 0; i < controller.config.ConnectionWorkerPoolSize; i++ {
  476. controller.establishWaitGroup.Add(1)
  477. go controller.establishTunnelWorker()
  478. }
  479. controller.establishWaitGroup.Add(1)
  480. go controller.establishCandidateGenerator()
  481. }
// stopEstablishing signals the establish goroutines to stop and waits
// for the group to halt. pendingConns is used to interrupt any worker
// blocked on a socket connect. No-op if establishing is not in progress.
// The teardown order matters: interrupt in-flight connects first, then
// broadcast stop, then wait for all establish goroutines to exit before
// clearing the establish state.
func (controller *Controller) stopEstablishing() {
	if !controller.isEstablishing {
		return
	}
	Notice(NOTICE_INFO, "stop establishing")
	// Note: on Windows, interruptibleTCPClose doesn't really interrupt socket connects
	// and may leave goroutines running for a time after the Wait call.
	controller.pendingConns.CloseAll()
	close(controller.stopEstablishingBroadcast)
	// Note: establishCandidateGenerator closes controller.candidateServerEntries
	// (as it may be sending to that channel).
	controller.establishWaitGroup.Wait()
	// Clear establish state; startEstablishing recreates these on next run.
	controller.isEstablishing = false
	controller.establishWaitGroup = nil
	controller.stopEstablishingBroadcast = nil
	controller.candidateServerEntries = nil
}
// establishCandidateGenerator populates the candidate queue with server entries
// from the data store. Server entries are iterated in rank order, so that promoted
// servers with higher rank are priority candidates. Iteration restarts after a
// pause once all entries have been yielded; the generator exits on stop or
// shutdown broadcast, closing candidateServerEntries so workers drain and exit.
func (controller *Controller) establishCandidateGenerator() {
	defer controller.establishWaitGroup.Done()
	iterator, err := NewServerEntryIterator(
		controller.config.EgressRegion, controller.config.TunnelProtocol)
	if err != nil {
		Notice(NOTICE_ALERT, "failed to iterate over candidates: %s", err)
		controller.SignalFailure()
		return
	}
	defer iterator.Close()
loop:
	// Outer loop: repeat full iterations over the candidate set.
	for {
		// Inner loop: one full pass over the iterator.
		for {
			serverEntry, err := iterator.Next()
			if err != nil {
				Notice(NOTICE_ALERT, "failed to get next candidate: %s", err)
				controller.SignalFailure()
				break loop
			}
			if serverEntry == nil {
				// Completed this iteration
				break
			}
			// Blocking send: waits until a worker takes the candidate, or
			// until stop/shutdown is broadcast.
			select {
			case controller.candidateServerEntries <- serverEntry:
			case <-controller.stopEstablishingBroadcast:
				break loop
			case <-controller.shutdownBroadcast:
				break loop
			}
		}
		iterator.Reset()
		// After a complete iteration of candidate servers, pause before iterating again.
		// This helps avoid some busy wait loop conditions, and also allows some time for
		// network conditions to change.
		timeout := time.After(ESTABLISH_TUNNEL_PAUSE_PERIOD)
		select {
		case <-timeout:
			// Retry iterating
		case <-controller.stopEstablishingBroadcast:
			break loop
		case <-controller.shutdownBroadcast:
			break loop
		}
	}
	// Closing the channel signals establishTunnelWorker goroutines to exit.
	close(controller.candidateServerEntries)
	Notice(NOTICE_INFO, "stopped candidate generator")
}
  553. // establishTunnelWorker pulls candidates from the candidate queue, establishes
  554. // a connection to the tunnel server, and delivers the established tunnel to a channel.
  555. func (controller *Controller) establishTunnelWorker() {
  556. defer controller.establishWaitGroup.Done()
  557. for serverEntry := range controller.candidateServerEntries {
  558. // Note: don't receive from candidateQueue and broadcastStopWorkers in the same
  559. // select, since we want to prioritize receiving the stop signal
  560. select {
  561. case <-controller.stopEstablishingBroadcast:
  562. return
  563. default:
  564. }
  565. // There may already be a tunnel to this candidate. If so, skip it.
  566. if controller.isActiveTunnelServerEntry(serverEntry) {
  567. continue
  568. }
  569. tunnel, err := EstablishTunnel(
  570. controller.config, controller.pendingConns, serverEntry)
  571. if err != nil {
  572. // TODO: distingush case where conn is interrupted?
  573. Notice(NOTICE_INFO, "failed to connect to %s: %s", serverEntry.IpAddress, err)
  574. } else {
  575. // Don't block. Assumes the receiver has a buffer large enough for
  576. // the number of desired tunnels. If there's no room, the tunnel must
  577. // not be required so it's discarded.
  578. select {
  579. case controller.establishedTunnels <- tunnel:
  580. default:
  581. controller.discardTunnel(tunnel)
  582. }
  583. }
  584. }
  585. Notice(NOTICE_INFO, "stopped establish worker")
  586. }